Case 1 - cassandra.rev_0f1fb_8b0e1.ColumnIndex.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.utils.AlwaysPresentFilter;
import org.apache.cassandra.utils.IFilter;
import org.apache.cassandra.utils.FilterFactory;
/**
 * Per-row column index for an SSTable row: a list of index-block summaries
 * (IndexHelper.IndexInfo) plus a Bloom filter over the names of the columns
 * written for the row.
 */
public class ColumnIndex
{
// One IndexInfo per index block: first/last column name plus the block's offset and width.
public final List<IndexHelper.IndexInfo> columnsIndex;
// Bloom filter of column names added while building the row.
public final IFilter bloomFilter;
// Sentinel returned when every column was GC'd; AlwaysPresentFilter makes filter lookups a no-op.
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList(), new AlwaysPresentFilter());
// Mutable index sized for roughly estimatedColumnCount columns.
// NOTE(review): the meaning of the (4, false) arguments is defined by FilterFactory.getFilter -- confirm there.
private ColumnIndex(int estimatedColumnCount)
{
this(new ArrayList<IndexHelper.IndexInfo>(), FilterFactory.getFilter(estimatedColumnCount, 4, false));
}
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex, IFilter bloomFilter)
{
this.columnsIndex = columnsIndex;
this.bloomFilter = bloomFilter;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private final ColumnIndex result; // the index under construction; returned by build()
private final long indexOffset; // serialized row-header size; block offsets stored in IndexInfo are relative to the row start
private long startPosition = -1; // offset (past the header) at which the currently open block began
private long endPosition = 0; // offset just past the last atom accounted for so far
private long blockSize; // bytes accumulated in the currently open block
private OnDiskAtom firstColumn; // first atom of the open block; null when no block is open
private OnDiskAtom lastColumn; // last atom passed to add()
private OnDiskAtom lastBlockClosing; // atom whose addition closed the most recent block
private final DataOutput output; // destination for serialized atoms; may be null (add() checks before serializing)
private final RangeTombstone.Tracker tombstoneTracker; // tracks range tombstones still open across block boundaries
private final OnDiskAtom.Serializer atomSerializer;
private int atomCount; // atoms passed to add(); excludes markers the tracker re-writes at block starts
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(estimatedColumnCount);
this.output = output;
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
/** Exposes the range-tombstone tracker used while writing this row. */
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
/** Atoms written so far: those passed to add() plus atoms counted by the tracker. */
public int writtenAtomCount()
{
return atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - its Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (IColumn c : cf)
{
// Emit every range tombstone whose start is at or before this column, before the column itself.
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
// Flush any range tombstones that start after the last column.
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
// Only IColumn names feed the Bloom filter; tombstone markers are skipped.
if (column instanceof IColumn)
result.bloomFilter.add(column.name());
if (firstColumn == null)
{
// Open a new index block starting at the current position.
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we
// optimize that on read
// Re-write still-open range tombstone markers, presumably so each block can be read independently.
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
// Close the current block: record its first/last column names and its byte range within the row.
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.utils.AlwaysPresentFilter;
import org.apache.cassandra.utils.IFilter;
import org.apache.cassandra.utils.FilterFactory;
/**
 * Per-row column index for an SSTable row: a list of index-block summaries
 * (IndexHelper.IndexInfo) plus a Bloom filter over the names of the columns
 * written for the row.
 */
public class ColumnIndex
{
// One IndexInfo per index block: first/last column name plus the block's offset and width.
public final List<IndexHelper.IndexInfo> columnsIndex;
// Bloom filter of column names added while building the row.
public final IFilter bloomFilter;
// Sentinel returned when every column was GC'd; AlwaysPresentFilter makes filter lookups a no-op.
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList(), new AlwaysPresentFilter());
// Mutable index sized for roughly estimatedColumnCount columns.
// NOTE(review): the meaning of the (4, false) arguments is defined by FilterFactory.getFilter -- confirm there.
private ColumnIndex(int estimatedColumnCount)
{
this(new ArrayList<IndexHelper.IndexInfo>(), FilterFactory.getFilter(estimatedColumnCount, 4, false));
}
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex, IFilter bloomFilter)
{
this.columnsIndex = columnsIndex;
this.bloomFilter = bloomFilter;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private final ColumnIndex result; // the index under construction; returned by build()
private final long indexOffset; // serialized row-header size; block offsets stored in IndexInfo are relative to the row start
private long startPosition = -1; // offset (past the header) at which the currently open block began
private long endPosition = 0; // offset just past the last atom accounted for so far
private long blockSize; // bytes accumulated in the currently open block
private OnDiskAtom firstColumn; // first atom of the open block; null when no block is open
private OnDiskAtom lastColumn; // last atom passed to add()
private OnDiskAtom lastBlockClosing; // atom whose addition closed the most recent block
private final DataOutput output; // destination for serialized atoms; may be null (add() checks before serializing)
private final RangeTombstone.Tracker tombstoneTracker; // tracks range tombstones still open across block boundaries
private final OnDiskAtom.Serializer atomSerializer;
private int atomCount; // atoms passed to add(); excludes markers the tracker re-writes at block starts
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(estimatedColumnCount);
this.output = output;
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
/** Exposes the range-tombstone tracker used while writing this row. */
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
/** Atoms written so far: those passed to add() plus atoms counted by the tracker. */
public int writtenAtomCount()
{
return atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - its Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (IColumn c : cf)
{
// Emit every range tombstone whose start is at or before this column, before the column itself.
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
// Flush any range tombstones that start after the last column.
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
// Only IColumn names feed the Bloom filter; tombstone markers are skipped.
if (column instanceof IColumn)
result.bloomFilter.add(column.name());
if (firstColumn == null)
{
// Open a new index block starting at the current position.
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we
// optimize that on read
// Re-write still-open range tombstone markers, presumably so each block can be read independently.
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
// Close the current block: record its first/last column names and its byte range within the row.
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.utils.AlwaysPresentFilter;
import org.apache.cassandra.utils.IFilter;
import org.apache.cassandra.utils.FilterFactory;
/**
 * Per-row column index for an SSTable row: a list of index-block summaries
 * (IndexHelper.IndexInfo) plus a Bloom filter over the names of the columns
 * written for the row.
 */
public class ColumnIndex
{
// One IndexInfo per index block: first/last column name plus the block's offset and width.
public final List<IndexHelper.IndexInfo> columnsIndex;
// Bloom filter of column names added while building the row.
public final IFilter bloomFilter;
// Sentinel returned when every column was GC'd; AlwaysPresentFilter makes filter lookups a no-op.
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList(), new AlwaysPresentFilter());
// Mutable index sized for roughly estimatedColumnCount columns.
// NOTE(review): the meaning of the (4, false) arguments is defined by FilterFactory.getFilter -- confirm there.
private ColumnIndex(int estimatedColumnCount)
{
this(new ArrayList<IndexHelper.IndexInfo>(), FilterFactory.getFilter(estimatedColumnCount, 4, false));
}
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex, IFilter bloomFilter)
{
this.columnsIndex = columnsIndex;
this.bloomFilter = bloomFilter;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private final ColumnIndex result; // the index under construction; returned by build()
private final long indexOffset; // serialized row-header size; block offsets stored in IndexInfo are relative to the row start
private long startPosition = -1; // offset (past the header) at which the currently open block began
private long endPosition = 0; // offset just past the last atom accounted for so far
private long blockSize; // bytes accumulated in the currently open block
private OnDiskAtom firstColumn; // first atom of the open block; null when no block is open
private OnDiskAtom lastColumn; // last atom passed to add()
private OnDiskAtom lastBlockClosing; // atom whose addition closed the most recent block
private final DataOutput output; // destination for serialized atoms; may be null (add() checks before serializing)
private final RangeTombstone.Tracker tombstoneTracker; // null when fromStream: no marker re-writing or tracking is done
private final OnDiskAtom.Serializer atomSerializer;
private int atomCount; // atoms passed to add(); excludes markers the tracker re-writes at block starts
/**
* @param fromStream when true, no tombstone tracker is created, so no range tombstone
* markers are re-written at block boundaries -- presumably because streamed data already
* contains any markers it needs. NOTE(review): confirm rationale with callers.
*/
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output,
boolean fromStream)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(estimatedColumnCount);
this.output = output;
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
/** Convenience constructor for the common non-streaming case. */
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this(cf, key, estimatedColumnCount, output, false);
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
/** Exposes the range-tombstone tracker; null when built with fromStream == true. */
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
/** Atoms written so far: those passed to add(), plus atoms counted by the tracker when present. */
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - its Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (IColumn c : cf)
{
// Emit every range tombstone whose start is at or before this column, before the column itself.
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
// Flush any range tombstones that start after the last column.
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
// Only IColumn names feed the Bloom filter; tombstone markers are skipped.
if (column instanceof IColumn)
result.bloomFilter.add(column.name());
if (firstColumn == null)
{
// Open a new index block starting at the current position.
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
// Re-write still-open range tombstone markers (skipped when streaming: tracker is null).
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
// Close the current block: record its first/last column names and its byte range within the row.
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.utils.AlwaysPresentFilter;
import org.apache.cassandra.utils.IFilter;
import org.apache.cassandra.utils.FilterFactory;
/**
 * Per-row column index for an SSTable row: a list of index-block summaries
 * (IndexHelper.IndexInfo) plus a Bloom filter over the names of the columns
 * written for the row.
 */
public class ColumnIndex
{
// One IndexInfo per index block: first/last column name plus the block's offset and width.
public final List<IndexHelper.IndexInfo> columnsIndex;
// Bloom filter of column names added while building the row.
public final IFilter bloomFilter;
// Sentinel returned when every column was GC'd; AlwaysPresentFilter makes filter lookups a no-op.
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList(), new AlwaysPresentFilter());
// Mutable index sized for roughly estimatedColumnCount columns.
// NOTE(review): the meaning of the (4, false) arguments is defined by FilterFactory.getFilter -- confirm there.
private ColumnIndex(int estimatedColumnCount)
{
this(new ArrayList<IndexHelper.IndexInfo>(), FilterFactory.getFilter(estimatedColumnCount, 4, false));
}
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex, IFilter bloomFilter)
{
this.columnsIndex = columnsIndex;
this.bloomFilter = bloomFilter;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private final ColumnIndex result; // the index under construction; returned by build()
private final long indexOffset; // serialized row-header size; block offsets stored in IndexInfo are relative to the row start
private long startPosition = -1; // offset (past the header) at which the currently open block began
private long endPosition = 0; // offset just past the last atom accounted for so far
private long blockSize; // bytes accumulated in the currently open block
private OnDiskAtom firstColumn; // first atom of the open block; null when no block is open
private OnDiskAtom lastColumn; // last atom passed to add()
private OnDiskAtom lastBlockClosing; // atom whose addition closed the most recent block
private final DataOutput output; // destination for serialized atoms; may be null (add() checks before serializing)
private final RangeTombstone.Tracker tombstoneTracker; // null when fromStream: no marker re-writing or tracking is done
private final OnDiskAtom.Serializer atomSerializer;
private int atomCount; // atoms passed to add(); excludes markers the tracker re-writes at block starts
/**
* @param fromStream when true, no tombstone tracker is created, so no range tombstone
* markers are re-written at block boundaries -- presumably because streamed data already
* contains any markers it needs. NOTE(review): confirm rationale with callers.
*/
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output,
boolean fromStream)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(estimatedColumnCount);
this.output = output;
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
/** Convenience constructor for the common non-streaming case. */
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this(cf, key, estimatedColumnCount, output, false);
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
/** Exposes the range-tombstone tracker; null when built with fromStream == true. */
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
/** Atoms written so far: those passed to add(), plus atoms counted by the tracker when present. */
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - its Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (IColumn c : cf)
{
// Emit every range tombstone whose start is at or before this column, before the column itself.
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
// Flush any range tombstones that start after the last column.
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
// Only IColumn names feed the Bloom filter; tombstone markers are skipped.
if (column instanceof IColumn)
result.bloomFilter.add(column.name());
if (firstColumn == null)
{
// Open a new index block starting at the current position.
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
// Re-write still-open range tombstone markers (skipped when streaming: tracker is null).
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
// Close the current block: record its first/last column names and its byte range within the row.
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index for an SSTable row: a list of index-block summaries
 * (IndexHelper.IndexInfo). This revision no longer builds a per-row Bloom
 * filter of column names.
 */
public class ColumnIndex
{
// One IndexInfo per index block: first/last column name plus the block's offset and width.
public final List<IndexHelper.IndexInfo> columnsIndex;
// Sentinel returned when every column was GC'd.
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
{
this.columnsIndex = columnsIndex;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
// Serializer shared by all Builder instances.
private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
private final ColumnIndex result; // the index under construction; returned by build()
private final long indexOffset; // serialized row-header size; block offsets stored in IndexInfo are relative to the row start
private long startPosition = -1; // offset (past the header) at which the currently open block began
private long endPosition = 0; // offset just past the last atom accounted for so far
private long blockSize; // bytes accumulated in the currently open block
private OnDiskAtom firstColumn; // first atom of the open block; null when no block is open
private OnDiskAtom lastColumn; // last atom passed to add()
private OnDiskAtom lastBlockClosing; // atom whose addition closed the most recent block
private final DataOutput output; // destination for serialized atoms; may be null (add() checks before serializing)
private final RangeTombstone.Tracker tombstoneTracker; // tracks range tombstones still open across block boundaries
private int atomCount; // atoms passed to add(); excludes markers the tracker re-writes at block starts
public Builder(ColumnFamily cf,
ByteBuffer key,
DataOutput output)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
this.output = output;
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
/** Exposes the range-tombstone tracker used while writing this row. */
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
/** Atoms written so far: those passed to add() plus atoms counted by the tracker. */
public int writtenAtomCount()
{
return atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as index block boundaries and the IndexInfo list (this revision
* no longer produces a per-row Bloom Filter).
*
* @param cf Column family to create index for
*
* @return information about index - its block boundaries and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (Column c : cf)
{
// Emit every range tombstone whose start is at or before this column, before the column itself.
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
// Flush any range tombstones that start after the last column.
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
if (firstColumn == null)
{
// Open a new index block starting at the current position.
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we
// optimize that on read
// Re-write still-open range tombstone markers, presumably so each block can be read independently.
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
// Close the current block: record its first/last column names and its byte range within the row.
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index: a list of {@link IndexHelper.IndexInfo} entries,
 * one per index block of roughly column_index_size bytes.
 */
public class ColumnIndex
{
    // One entry per completed index block for the row.
    public final List<IndexHelper.IndexInfo> columnsIndex;
    // Shared instance returned for rows that end up with no atoms at all.
    private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
    private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
    {
        this.columnsIndex = columnsIndex;
    }
    /**
     * Help to create an index for a column family based on size of columns,
     * and write said columns to disk.
     */
    public static class Builder
    {
        private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
        private final ColumnIndex result;
        // Size of the row header preceding the first column on disk; index
        // positions are offset by this so they are relative to the row start.
        private final long indexOffset;
        // Offset of the first atom of the block being filled; -1 until the first add().
        private long startPosition = -1;
        // Offset just past the last atom added so far.
        private long endPosition = 0;
        // Bytes accumulated in the current index block (repeated markers excluded).
        private long blockSize;
        // First atom of the currently open block, or null if no block is open.
        private OnDiskAtom firstColumn;
        // Last atom added overall.
        private OnDiskAtom lastColumn;
        // Atom that closed the most recent block; lets build() detect whether
        // the final atom already fell on a block boundary.
        private OnDiskAtom lastBlockClosing;
        // Destination for serialized atoms; may be null to compute the index only.
        private final DataOutput output;
        private final RangeTombstone.Tracker tombstoneTracker;
        // Number of atoms explicitly passed to add().
        private int atomCount;
        public Builder(ColumnFamily cf,
                       ByteBuffer key,
                       DataOutput output)
        {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
            this.output = output;
            this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
        }
        /**
         * Returns the number of bytes between the beginning of the row and the
         * first serialized column.
         */
        private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
        {
            TypeSizes typeSizes = TypeSizes.NATIVE;
            // TODO fix constantSize when changing the nativeconststs.
            int keysize = key.remaining();
            return typeSizes.sizeof((short) keysize) + keysize // Row key
            + typeSizes.sizeof(0L) // Row data size
            + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
            + typeSizes.sizeof(0); // Column count
        }
        public RangeTombstone.Tracker tombstoneTracker()
        {
            return tombstoneTracker;
        }
        // Total atoms written: explicitly added ones plus repeated tombstone markers.
        public int writtenAtomCount()
        {
            return atomCount + tombstoneTracker.writtenAtom();
        }
        /**
         * Serializes the index into an in-memory structure (the IndexInfo list),
         * interleaving the column family's range tombstones with its columns in
         * comparator order as they are added.
         *
         * @param cf Column family to create index for
         *
         * @return information about the index - its IndexInfo list
         */
        public ColumnIndex build(ColumnFamily cf) throws IOException
        {
            Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
            RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            Comparator<ByteBuffer> comparator = cf.getComparator();
            for (Column c : cf)
            {
                // Emit every range tombstone whose start sorts at or before this column.
                while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
                {
                    add(tombstone);
                    tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
                }
                add(c);
            }
            // Emit any range tombstones that sort after the last column.
            while (tombstone != null)
            {
                add(tombstone);
                tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            }
            return build();
        }
        public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
        {
            for (OnDiskAtom c : columns)
                add(c);
            return build();
        }
        public void add(OnDiskAtom column) throws IOException
        {
            atomCount++;
            if (firstColumn == null)
            {
                // Opening a new index block.
                firstColumn = column;
                startPosition = endPosition;
                // TODO: have that use the firstColumn as min + make sure we
                // optimize that on read
                endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
                blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
                               // where we wouldn't make any progress because a block is filled by said marker
            }
            long size = column.serializedSizeForSSTable();
            endPosition += size;
            blockSize += size;
            // if we hit the column index size that we have to index after, go ahead and index it.
            if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
            {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
                firstColumn = null;
                lastBlockClosing = column;
            }
            if (output != null)
                atomSerializer.serializeForSSTable(column, output);
            // TODO: Should deal with removing unneeded tombstones
            tombstoneTracker.update(column);
            lastColumn = column;
        }
        public ColumnIndex build()
        {
            // all columns were GC'd after all
            if (lastColumn == null)
                return ColumnIndex.EMPTY;
            // the last column may have fallen on an index boundary already. if not, index it explicitly.
            if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
            {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
            }
            // we should always have at least one computed index block, but we only write it out if there is more than that.
            assert result.columnsIndex.size() > 0;
            return result;
        }
    }
}
// ===== MergeMethods variant =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index: a list of {@link IndexHelper.IndexInfo} entries,
 * one per index block of roughly column_index_size bytes.
 */
public class ColumnIndex {
    // One entry per completed index block for the row.
    public final List<IndexHelper.IndexInfo> columnsIndex;
    // Shared instance returned for rows that end up with no atoms at all.
    private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
    private ColumnIndex(int estimatedColumnCount) {
        // Pre-size the entry list from the column-count estimate to avoid resizing.
        this(new ArrayList<IndexHelper.IndexInfo>(estimatedColumnCount));
    }
    private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex) {
        this.columnsIndex = columnsIndex;
    }
    /**
     * Help to create an index for a column family based on size of columns,
     * and write said columns to disk.
     */
    public static class Builder {
        // Serializer for on-disk atoms. It depends on the column family being
        // indexed (see the five-argument constructor), so it must be an instance
        // field: the previous 'static final' declaration made the constructor's
        // 'this.atomSerializer = ...' assignment a compile error.
        private final OnDiskAtom.Serializer atomSerializer;
        private final ColumnIndex result;
        // Size of the row header preceding the first column on disk; index
        // positions are offset by this so they are relative to the row start.
        private final long indexOffset;
        // Offset of the first atom of the block being filled; -1 until the first add().
        private long startPosition = -1;
        // Offset just past the last atom added so far.
        private long endPosition = 0;
        // Bytes accumulated in the current index block (repeated markers excluded).
        private long blockSize;
        private OnDiskAtom firstColumn;
        private OnDiskAtom lastColumn;
        // Atom that closed the most recent block; lets build() detect whether
        // the final atom already fell on a block boundary.
        private OnDiskAtom lastBlockClosing;
        // Destination for serialized atoms; may be null to compute the index only.
        private final DataOutput output;
        // Null when building from a stream (no range-tombstone tracking needed).
        private final RangeTombstone.Tracker tombstoneTracker;
        // Number of atoms explicitly passed to add().
        private int atomCount;
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output, boolean fromStream) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(estimatedColumnCount);
            this.output = output;
            this.atomSerializer = cf.getOnDiskSerializer();
            this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
        }
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output) {
            this(cf, key, estimatedColumnCount, output, false);
        }
        public Builder(ColumnFamily cf, ByteBuffer key, DataOutput output) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
            this.output = output;
            // Matches the initializer the field carried when it was static.
            this.atomSerializer = Column.onDiskSerializer();
            this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
        }
        /**
         * Returns the number of bytes between the beginning of the row and the
         * first serialized column.
         */
        private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo) {
            TypeSizes typeSizes = TypeSizes.NATIVE;
            // TODO fix constantSize when changing the nativeconststs.
            int keysize = key.remaining();
            return typeSizes.sizeof((short) keysize) + keysize // Row key
                 + typeSizes.sizeof(0L)                        // Row data size
                 + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
                 + typeSizes.sizeof(0);                        // Column count
        }
        public RangeTombstone.Tracker tombstoneTracker() {
            return tombstoneTracker;
        }
        /** Total atoms written: explicitly added ones plus repeated tombstone markers. */
        public int writtenAtomCount() {
            return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
        }
        /**
         * Serializes the index into an in-memory structure (the IndexInfo list),
         * interleaving the column family's range tombstones with its columns in
         * comparator order as they are added.
         *
         * @param cf Column family to create index for
         *
         * @return information about the index - its IndexInfo list
         */
        public ColumnIndex build(ColumnFamily cf) throws IOException {
            Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
            RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            Comparator<ByteBuffer> comparator = cf.getComparator();
            for (Column c : cf) {
                // Emit every range tombstone whose start sorts at or before this column.
                while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0) {
                    add(tombstone);
                    tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
                }
                add(c);
            }
            // Emit any range tombstones that sort after the last column.
            while (tombstone != null) {
                add(tombstone);
                tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            }
            return build();
        }
        public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException {
            for (OnDiskAtom c : columns)
                add(c);
            return build();
        }
        public void add(OnDiskAtom column) throws IOException {
            atomCount++;
            if (firstColumn == null) {
                // Opening a new index block.
                firstColumn = column;
                startPosition = endPosition;
                // TODO: have that use the firstColumn as min + make sure we optimize that on read
                if (tombstoneTracker != null)
                    endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
                // We don't count repeated tombstone markers in the block size, to avoid a
                // situation where we wouldn't make any progress because a block is filled
                // by said markers.
                blockSize = 0;
            }
            long size = column.serializedSizeForSSTable();
            endPosition += size;
            blockSize += size;
            // if we hit the column index size that we have to index after, go ahead and index it.
            if (blockSize >= DatabaseDescriptor.getColumnIndexSize()) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
                firstColumn = null;
                lastBlockClosing = column;
            }
            if (output != null)
                atomSerializer.serializeForSSTable(column, output);
            // TODO: Should deal with removing unneeded tombstones
            if (tombstoneTracker != null)
                tombstoneTracker.update(column);
            lastColumn = column;
        }
        public ColumnIndex build() {
            // all columns were GC'd after all
            if (lastColumn == null)
                return ColumnIndex.EMPTY;
            // the last column may have fallen on an index boundary already. if not, index it explicitly.
            if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
            }
            // we should always have at least one computed index block, but we only write it out if there is more than that.
            assert result.columnsIndex.size() > 0;
            return result;
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index: a list of {@link IndexHelper.IndexInfo} entries,
 * one per index block of roughly column_index_size bytes.
 */
public class ColumnIndex {
    // One entry per completed index block for the row.
    public final List<IndexHelper.IndexInfo> columnsIndex;
    // Shared instance returned for rows that end up with no atoms at all.
    private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
    private ColumnIndex(int estimatedColumnCount) {
        // Pre-size the entry list from the column-count estimate to avoid resizing.
        this(new ArrayList<IndexHelper.IndexInfo>(estimatedColumnCount));
    }
    private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex) {
        this.columnsIndex = columnsIndex;
    }
    /**
     * Help to create an index for a column family based on size of columns,
     * and write said columns to disk.
     */
    public static class Builder {
        // Serializer for on-disk atoms. It depends on the column family being
        // indexed (see the five-argument constructor), so it must be an instance
        // field: the previous 'static final' declaration made the constructor's
        // 'this.atomSerializer = ...' assignment a compile error.
        private final OnDiskAtom.Serializer atomSerializer;
        private final ColumnIndex result;
        // Size of the row header preceding the first column on disk; index
        // positions are offset by this so they are relative to the row start.
        private final long indexOffset;
        // Offset of the first atom of the block being filled; -1 until the first add().
        private long startPosition = -1;
        // Offset just past the last atom added so far.
        private long endPosition = 0;
        // Bytes accumulated in the current index block (repeated markers excluded).
        private long blockSize;
        private OnDiskAtom firstColumn;
        private OnDiskAtom lastColumn;
        // Atom that closed the most recent block; lets build() detect whether
        // the final atom already fell on a block boundary.
        private OnDiskAtom lastBlockClosing;
        // Destination for serialized atoms; may be null to compute the index only.
        private final DataOutput output;
        // Null when building from a stream (no range-tombstone tracking needed).
        private final RangeTombstone.Tracker tombstoneTracker;
        // Number of atoms explicitly passed to add().
        private int atomCount;
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output, boolean fromStream) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(estimatedColumnCount);
            this.output = output;
            this.atomSerializer = cf.getOnDiskSerializer();
            this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
        }
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output) {
            this(cf, key, estimatedColumnCount, output, false);
        }
        public Builder(ColumnFamily cf, ByteBuffer key, DataOutput output) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
            this.output = output;
            // Matches the initializer the field carried when it was static.
            this.atomSerializer = Column.onDiskSerializer();
            this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
        }
        /**
         * Returns the number of bytes between the beginning of the row and the
         * first serialized column.
         */
        private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo) {
            TypeSizes typeSizes = TypeSizes.NATIVE;
            // TODO fix constantSize when changing the nativeconststs.
            int keysize = key.remaining();
            return typeSizes.sizeof((short) keysize) + keysize // Row key
                 + typeSizes.sizeof(0L)                        // Row data size
                 + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
                 + typeSizes.sizeof(0);                        // Column count
        }
        public RangeTombstone.Tracker tombstoneTracker() {
            return tombstoneTracker;
        }
        /** Total atoms written: explicitly added ones plus repeated tombstone markers. */
        public int writtenAtomCount() {
            return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
        }
        /**
         * Serializes the index into an in-memory structure (the IndexInfo list),
         * interleaving the column family's range tombstones with its columns in
         * comparator order as they are added.
         *
         * @param cf Column family to create index for
         *
         * @return information about the index - its IndexInfo list
         */
        public ColumnIndex build(ColumnFamily cf) throws IOException {
            Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
            RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            Comparator<ByteBuffer> comparator = cf.getComparator();
            for (Column c : cf) {
                // Emit every range tombstone whose start sorts at or before this column.
                while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0) {
                    add(tombstone);
                    tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
                }
                add(c);
            }
            // Emit any range tombstones that sort after the last column.
            while (tombstone != null) {
                add(tombstone);
                tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            }
            return build();
        }
        public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException {
            for (OnDiskAtom c : columns)
                add(c);
            return build();
        }
        public void add(OnDiskAtom column) throws IOException {
            atomCount++;
            if (firstColumn == null) {
                // Opening a new index block.
                firstColumn = column;
                startPosition = endPosition;
                // TODO: have that use the firstColumn as min + make sure we optimize that on read
                if (tombstoneTracker != null)
                    endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
                // We don't count repeated tombstone markers in the block size, to avoid a
                // situation where we wouldn't make any progress because a block is filled
                // by said markers.
                blockSize = 0;
            }
            long size = column.serializedSizeForSSTable();
            endPosition += size;
            blockSize += size;
            // if we hit the column index size that we have to index after, go ahead and index it.
            if (blockSize >= DatabaseDescriptor.getColumnIndexSize()) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
                firstColumn = null;
                lastBlockClosing = column;
            }
            if (output != null)
                atomSerializer.serializeForSSTable(column, output);
            // TODO: Should deal with removing unneeded tombstones
            if (tombstoneTracker != null)
                tombstoneTracker.update(column);
            lastColumn = column;
        }
        public ColumnIndex build() {
            // all columns were GC'd after all
            if (lastColumn == null)
                return ColumnIndex.EMPTY;
            // the last column may have fallen on an index boundary already. if not, index it explicitly.
            if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
            }
            // we should always have at least one computed index block, but we only write it out if there is more than that.
            assert result.columnsIndex.size() > 0;
            return result;
        }
    }
}
// ===== KeepBothMethods variant =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index: a list of {@link IndexHelper.IndexInfo} entries,
 * one per index block of roughly column_index_size bytes.
 */
public class ColumnIndex {
    // One entry per completed index block for the row.
    public final List<IndexHelper.IndexInfo> columnsIndex;
    // Shared instance returned for rows that end up with no atoms at all.
    private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
    private ColumnIndex(int estimatedColumnCount) {
        // Pre-size the entry list from the column-count estimate to avoid resizing.
        this(new ArrayList<IndexHelper.IndexInfo>(estimatedColumnCount));
    }
    private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex) {
        this.columnsIndex = columnsIndex;
    }
    /**
     * Help to create an index for a column family based on size of columns,
     * and write said columns to disk.
     */
    public static class Builder {
        // Serializer for on-disk atoms. It depends on the column family being
        // indexed (see the five-argument constructor), so it must be an instance
        // field: the previous 'static final' declaration made the constructor's
        // 'this.atomSerializer = ...' assignment a compile error.
        private final OnDiskAtom.Serializer atomSerializer;
        private final ColumnIndex result;
        // Size of the row header preceding the first column on disk; index
        // positions are offset by this so they are relative to the row start.
        private final long indexOffset;
        // Offset of the first atom of the block being filled; -1 until the first add().
        private long startPosition = -1;
        // Offset just past the last atom added so far.
        private long endPosition = 0;
        // Bytes accumulated in the current index block (repeated markers excluded).
        private long blockSize;
        private OnDiskAtom firstColumn;
        private OnDiskAtom lastColumn;
        // Atom that closed the most recent block; lets build() detect whether
        // the final atom already fell on a block boundary.
        private OnDiskAtom lastBlockClosing;
        // Destination for serialized atoms; may be null to compute the index only.
        private final DataOutput output;
        // Null when building from a stream (no range-tombstone tracking needed).
        private final RangeTombstone.Tracker tombstoneTracker;
        // Number of atoms explicitly passed to add().
        private int atomCount;
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output, boolean fromStream) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(estimatedColumnCount);
            this.output = output;
            this.atomSerializer = cf.getOnDiskSerializer();
            this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
        }
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output) {
            this(cf, key, estimatedColumnCount, output, false);
        }
        public Builder(ColumnFamily cf, ByteBuffer key, DataOutput output) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
            this.output = output;
            // Matches the initializer the field carried when it was static.
            this.atomSerializer = Column.onDiskSerializer();
            this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
        }
        /**
         * Returns the number of bytes between the beginning of the row and the
         * first serialized column.
         */
        private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo) {
            TypeSizes typeSizes = TypeSizes.NATIVE;
            // TODO fix constantSize when changing the nativeconststs.
            int keysize = key.remaining();
            return typeSizes.sizeof((short) keysize) + keysize // Row key
                 + typeSizes.sizeof(0L)                        // Row data size
                 + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
                 + typeSizes.sizeof(0);                        // Column count
        }
        public RangeTombstone.Tracker tombstoneTracker() {
            return tombstoneTracker;
        }
        /** Total atoms written: explicitly added ones plus repeated tombstone markers. */
        public int writtenAtomCount() {
            return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
        }
        /**
         * Serializes the index into an in-memory structure (the IndexInfo list),
         * interleaving the column family's range tombstones with its columns in
         * comparator order as they are added.
         *
         * @param cf Column family to create index for
         *
         * @return information about the index - its IndexInfo list
         */
        public ColumnIndex build(ColumnFamily cf) throws IOException {
            Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
            RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            Comparator<ByteBuffer> comparator = cf.getComparator();
            for (Column c : cf) {
                // Emit every range tombstone whose start sorts at or before this column.
                while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0) {
                    add(tombstone);
                    tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
                }
                add(c);
            }
            // Emit any range tombstones that sort after the last column.
            while (tombstone != null) {
                add(tombstone);
                tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            }
            return build();
        }
        public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException {
            for (OnDiskAtom c : columns)
                add(c);
            return build();
        }
        public void add(OnDiskAtom column) throws IOException {
            atomCount++;
            if (firstColumn == null) {
                // Opening a new index block.
                firstColumn = column;
                startPosition = endPosition;
                // TODO: have that use the firstColumn as min + make sure we optimize that on read
                if (tombstoneTracker != null)
                    endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
                // We don't count repeated tombstone markers in the block size, to avoid a
                // situation where we wouldn't make any progress because a block is filled
                // by said markers.
                blockSize = 0;
            }
            long size = column.serializedSizeForSSTable();
            endPosition += size;
            blockSize += size;
            // if we hit the column index size that we have to index after, go ahead and index it.
            if (blockSize >= DatabaseDescriptor.getColumnIndexSize()) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
                firstColumn = null;
                lastBlockClosing = column;
            }
            if (output != null)
                atomSerializer.serializeForSSTable(column, output);
            // TODO: Should deal with removing unneeded tombstones
            if (tombstoneTracker != null)
                tombstoneTracker.update(column);
            lastColumn = column;
        }
        public ColumnIndex build() {
            // all columns were GC'd after all
            if (lastColumn == null)
                return ColumnIndex.EMPTY;
            // the last column may have fallen on an index boundary already. if not, index it explicitly.
            if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
            }
            // we should always have at least one computed index block, but we only write it out if there is more than that.
            assert result.columnsIndex.size() > 0;
            return result;
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
/**
 * Per-row column index: a list of {@link IndexHelper.IndexInfo} entries,
 * one per index block of roughly column_index_size bytes.
 */
public class ColumnIndex {
    // One entry per completed index block for the row.
    public final List<IndexHelper.IndexInfo> columnsIndex;
    // Shared instance returned for rows that end up with no atoms at all.
    private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
    private ColumnIndex(int estimatedColumnCount) {
        // Pre-size the entry list from the column-count estimate to avoid resizing.
        this(new ArrayList<IndexHelper.IndexInfo>(estimatedColumnCount));
    }
    private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex) {
        this.columnsIndex = columnsIndex;
    }
    /**
     * Help to create an index for a column family based on size of columns,
     * and write said columns to disk.
     */
    public static class Builder {
        // Serializer for on-disk atoms. It depends on the column family being
        // indexed (see the five-argument constructor), so it must be an instance
        // field: the previous 'static final' declaration made the constructor's
        // 'this.atomSerializer = ...' assignment a compile error.
        private final OnDiskAtom.Serializer atomSerializer;
        private final ColumnIndex result;
        // Size of the row header preceding the first column on disk; index
        // positions are offset by this so they are relative to the row start.
        private final long indexOffset;
        // Offset of the first atom of the block being filled; -1 until the first add().
        private long startPosition = -1;
        // Offset just past the last atom added so far.
        private long endPosition = 0;
        // Bytes accumulated in the current index block (repeated markers excluded).
        private long blockSize;
        private OnDiskAtom firstColumn;
        private OnDiskAtom lastColumn;
        // Atom that closed the most recent block; lets build() detect whether
        // the final atom already fell on a block boundary.
        private OnDiskAtom lastBlockClosing;
        // Destination for serialized atoms; may be null to compute the index only.
        private final DataOutput output;
        // Null when building from a stream (no range-tombstone tracking needed).
        private final RangeTombstone.Tracker tombstoneTracker;
        // Number of atoms explicitly passed to add().
        private int atomCount;
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output, boolean fromStream) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(estimatedColumnCount);
            this.output = output;
            this.atomSerializer = cf.getOnDiskSerializer();
            this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
        }
        public Builder(ColumnFamily cf, ByteBuffer key, int estimatedColumnCount, DataOutput output) {
            this(cf, key, estimatedColumnCount, output, false);
        }
        public Builder(ColumnFamily cf, ByteBuffer key, DataOutput output) {
            this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
            this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
            this.output = output;
            // Matches the initializer the field carried when it was static.
            this.atomSerializer = Column.onDiskSerializer();
            this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
        }
        /**
         * Returns the number of bytes between the beginning of the row and the
         * first serialized column.
         */
        private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo) {
            TypeSizes typeSizes = TypeSizes.NATIVE;
            // TODO fix constantSize when changing the nativeconststs.
            int keysize = key.remaining();
            return typeSizes.sizeof((short) keysize) + keysize // Row key
                 + typeSizes.sizeof(0L)                        // Row data size
                 + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
                 + typeSizes.sizeof(0);                        // Column count
        }
        public RangeTombstone.Tracker tombstoneTracker() {
            return tombstoneTracker;
        }
        /** Total atoms written: explicitly added ones plus repeated tombstone markers. */
        public int writtenAtomCount() {
            return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
        }
        /**
         * Serializes the index into an in-memory structure (the IndexInfo list),
         * interleaving the column family's range tombstones with its columns in
         * comparator order as they are added.
         *
         * @param cf Column family to create index for
         *
         * @return information about the index - its IndexInfo list
         */
        public ColumnIndex build(ColumnFamily cf) throws IOException {
            Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
            RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            Comparator<ByteBuffer> comparator = cf.getComparator();
            for (Column c : cf) {
                // Emit every range tombstone whose start sorts at or before this column.
                while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0) {
                    add(tombstone);
                    tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
                }
                add(c);
            }
            // Emit any range tombstones that sort after the last column.
            while (tombstone != null) {
                add(tombstone);
                tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
            }
            return build();
        }
        public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException {
            for (OnDiskAtom c : columns)
                add(c);
            return build();
        }
        public void add(OnDiskAtom column) throws IOException {
            atomCount++;
            if (firstColumn == null) {
                // Opening a new index block.
                firstColumn = column;
                startPosition = endPosition;
                // TODO: have that use the firstColumn as min + make sure we optimize that on read
                if (tombstoneTracker != null)
                    endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
                // We don't count repeated tombstone markers in the block size, to avoid a
                // situation where we wouldn't make any progress because a block is filled
                // by said markers.
                blockSize = 0;
            }
            long size = column.serializedSizeForSSTable();
            endPosition += size;
            blockSize += size;
            // if we hit the column index size that we have to index after, go ahead and index it.
            if (blockSize >= DatabaseDescriptor.getColumnIndexSize()) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
                firstColumn = null;
                lastBlockClosing = column;
            }
            if (output != null)
                atomSerializer.serializeForSSTable(column, output);
            // TODO: Should deal with removing unneeded tombstones
            if (tombstoneTracker != null)
                tombstoneTracker.update(column);
            lastColumn = column;
        }
        public ColumnIndex build() {
            // all columns were GC'd after all
            if (lastColumn == null)
                return ColumnIndex.EMPTY;
            // the last column may have fallen on an index boundary already. if not, index it explicitly.
            if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn) {
                IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
                result.columnsIndex.add(cIndexInfo);
            }
            // we should always have at least one computed index block, but we only write it out if there is more than that.
            assert result.columnsIndex.size() > 0;
            return result;
        }
    }
}
// ===== Safe variant =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
public class ColumnIndex {
public final List<IndexHelper.IndexInfo> columnsIndex;
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
{
this.columnsIndex = columnsIndex;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder {
// Serializer for on-disk atoms. It depends on the column family being indexed
// (see the five-argument constructor), so it must be an instance field: the
// previous 'static final' declaration made 'this.atomSerializer = ...' in the
// constructor a compile error.
private final OnDiskAtom.Serializer atomSerializer;
private final ColumnIndex result;
// Size of the row header preceding the first column on disk; index positions
// are offset by this so they are relative to the row start.
private final long indexOffset;
// Offset of the first atom of the block being filled; -1 until the first add().
private long startPosition = -1;
// Offset just past the last atom added so far.
private long endPosition = 0;
// Bytes accumulated in the current index block (repeated markers excluded).
private long blockSize;
private OnDiskAtom firstColumn;
private OnDiskAtom lastColumn;
private OnDiskAtom lastBlockClosing;
// Destination for serialized atoms; may be null to compute the index only.
private final DataOutput output;
// Null when building from a stream (no range-tombstone tracking needed).
private final RangeTombstone.Tracker tombstoneTracker;
private int atomCount;
public Builder(ColumnFamily cf,
               ByteBuffer key,
               int estimatedColumnCount,
               DataOutput output,
               boolean fromStream)
{
    this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
    // This ColumnIndex variant only declares a List constructor, so pre-size
    // the entry list here instead of calling a (nonexistent) int constructor.
    this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>(estimatedColumnCount));
    this.output = output;
    this.atomSerializer = cf.getOnDiskSerializer();
    this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
// Merge conflict resolved: the incoming side added this convenience overload
// (delegating with fromStream = false); the other side was empty, so keeping
// the overload is the strictly additive resolution.
public Builder(ColumnFamily cf,
               ByteBuffer key,
               int estimatedColumnCount,
               DataOutput output)
{
    this(cf, key, estimatedColumnCount, output, false);
}
public Builder(ColumnFamily cf,
               ByteBuffer key,
               DataOutput output)
{
    this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
    this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
    this.output = output;
    // Matches the initializer the field carried when it was static.
    this.atomSerializer = Column.onDiskSerializer();
    this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - it's Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (Column c : cf)
{
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
if (firstColumn == null)
{
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
public class ColumnIndex {
public final List<IndexHelper.IndexInfo> columnsIndex;
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
{
this.columnsIndex = columnsIndex;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder {
private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
private final ColumnIndex result;
private final long indexOffset;
private long startPosition = -1;
private long endPosition = 0;
private long blockSize;
private OnDiskAtom firstColumn;
private OnDiskAtom lastColumn;
private OnDiskAtom lastBlockClosing;
private final DataOutput output;
private final RangeTombstone.Tracker tombstoneTracker;
private int atomCount;
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output,
boolean fromStream)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(estimatedColumnCount);
this.output = output;
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
<<<<<<< MINE
=======
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this(cf, key, estimatedColumnCount, output, false);
}
>>>>>>> YOURS
public Builder(ColumnFamily cf,
ByteBuffer key,
DataOutput output)
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
this.output = output;
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - it's Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (Column c : cf)
{
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
if (firstColumn == null)
{
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
public class ColumnIndex
{
public final List<IndexHelper.IndexInfo> columnsIndex;
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
{
this.columnsIndex = columnsIndex;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
private final ColumnIndex result;
private final long indexOffset;
private long startPosition = -1;
private long endPosition = 0;
private long blockSize;
private OnDiskAtom firstColumn;
private OnDiskAtom lastColumn;
private OnDiskAtom lastBlockClosing;
private final DataOutput output;
private final RangeTombstone.Tracker tombstoneTracker;
private int atomCount;
public Builder(ColumnFamily cf,
ByteBuffer key,
<<<<<<< MINE
int estimatedColumnCount,
DataOutput output,
boolean fromStream)
=======
DataOutput output)
>>>>>>> YOURS
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
this.output = output;
<<<<<<< MINE
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this(cf, key, estimatedColumnCount, output, false);
=======
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
>>>>>>> YOURS
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - it's Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (Column c : cf)
{
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
if (firstColumn == null)
{
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.sstable.IndexHelper;
public class ColumnIndex
{
public final List<IndexHelper.IndexInfo> columnsIndex;
private static final ColumnIndex EMPTY = new ColumnIndex(Collections.<IndexHelper.IndexInfo>emptyList());
private ColumnIndex(List<IndexHelper.IndexInfo> columnsIndex)
{
this.columnsIndex = columnsIndex;
}
/**
* Help to create an index for a column family based on size of columns,
* and write said columns to disk.
*/
public static class Builder
{
private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
private final ColumnIndex result;
private final long indexOffset;
private long startPosition = -1;
private long endPosition = 0;
private long blockSize;
private OnDiskAtom firstColumn;
private OnDiskAtom lastColumn;
private OnDiskAtom lastBlockClosing;
private final DataOutput output;
private final RangeTombstone.Tracker tombstoneTracker;
private int atomCount;
public Builder(ColumnFamily cf,
ByteBuffer key,
<<<<<<< MINE
int estimatedColumnCount,
DataOutput output,
boolean fromStream)
=======
DataOutput output)
>>>>>>> YOURS
{
this.indexOffset = rowHeaderSize(key, cf.deletionInfo());
this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
this.output = output;
<<<<<<< MINE
this.atomSerializer = cf.getOnDiskSerializer();
this.tombstoneTracker = fromStream ? null : new RangeTombstone.Tracker(cf.getComparator());
}
public Builder(ColumnFamily cf,
ByteBuffer key,
int estimatedColumnCount,
DataOutput output)
{
this(cf, key, estimatedColumnCount, output, false);
=======
this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
>>>>>>> YOURS
}
/**
* Returns the number of bytes between the beginning of the row and the
* first serialized column.
*/
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
TypeSizes typeSizes = TypeSizes.NATIVE;
// TODO fix constantSize when changing the nativeconststs.
int keysize = key.remaining();
return typeSizes.sizeof((short) keysize) + keysize // Row key
+ typeSizes.sizeof(0L) // Row data size
+ DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes)
+ typeSizes.sizeof(0); // Column count
}
public RangeTombstone.Tracker tombstoneTracker()
{
return tombstoneTracker;
}
public int writtenAtomCount()
{
return tombstoneTracker == null ? atomCount : atomCount + tombstoneTracker.writtenAtom();
}
/**
* Serializes the index into in-memory structure with all required components
* such as Bloom Filter, index block size, IndexInfo list
*
* @param cf Column family to create index for
*
* @return information about index - it's Bloom Filter, block size and IndexInfo list
*/
public ColumnIndex build(ColumnFamily cf) throws IOException
{
Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
Comparator<ByteBuffer> comparator = cf.getComparator();
for (Column c : cf)
{
while (tombstone != null && comparator.compare(c.name(), tombstone.min) >= 0)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
add(c);
}
while (tombstone != null)
{
add(tombstone);
tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
}
return build();
}
public ColumnIndex build(Iterable<OnDiskAtom> columns) throws IOException
{
for (OnDiskAtom c : columns)
add(c);
return build();
}
public void add(OnDiskAtom column) throws IOException
{
atomCount++;
if (firstColumn == null)
{
firstColumn = column;
startPosition = endPosition;
// TODO: have that use the firstColumn as min + make sure we optimize that on read
if (tombstoneTracker != null)
endPosition += tombstoneTracker.writeOpenedMarker(firstColumn, output, atomSerializer);
blockSize = 0; // We don't count repeated tombstone marker in the block size, to avoid a situation
// where we wouldn't make any progress because a block is filled by said marker
}
long size = column.serializedSizeForSSTable();
endPosition += size;
blockSize += size;
// if we hit the column index size that we have to index after, go ahead and index it.
if (blockSize >= DatabaseDescriptor.getColumnIndexSize())
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), column.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
firstColumn = null;
lastBlockClosing = column;
}
if (output != null)
atomSerializer.serializeForSSTable(column, output);
// TODO: Should deal with removing unneeded tombstones
if (tombstoneTracker != null)
tombstoneTracker.update(column);
lastColumn = column;
}
public ColumnIndex build()
{
// all columns were GC'd after all
if (lastColumn == null)
return ColumnIndex.EMPTY;
// the last column may have fallen on an index boundary already. if not, index it explicitly.
if (result.columnsIndex.isEmpty() || lastBlockClosing != lastColumn)
{
IndexHelper.IndexInfo cIndexInfo = new IndexHelper.IndexInfo(firstColumn.name(), lastColumn.name(), indexOffset + startPosition, endPosition - startPosition);
result.columnsIndex.add(cIndexInfo);
}
// we should always have at least one computed index block, but we only write it out if there is more than that.
assert result.columnsIndex.size() > 0;
return result;
}
}
}
Diff Result
No diff
Case 2 - java_atmosphere.rev_c044f_2cf68..AtmosphereFilter.java
Base
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} filed based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
enum Action {
SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH
}
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
private final long timeout;
private final int waitFor;
private final Suspend.SCOPE scope;
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
private final String topic;
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
public ContainerRequestFilter getRequestFilter() {
return null;
}
public ContainerResponseFilter getResponseFilter() {
return this;
}
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (transport != null && transport.equals(LONG_POLLING_TRANSPORT)) {
return true;
}
return resumeOnBroadcast;
}
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
boolean webSocketEnabled = false;
if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
for (String upgrade : e) {
if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
webSocketEnabled = true;
break;
}
}
}
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (webSocketEnabled) {
return false;
} else if (transport != null && transport.equals(LONG_POLLING_TRANSPORT)) {
return false;
}
return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 *
 * @param request  the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
    // A Throwable was already mapped by Jersey; let that response flow through untouched.
    if (response.getMappedThrowable() != null) {
        return response;
    }
    AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
            (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
                    .getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    switch (action) {
        case SUSPEND_RESPONSE:
            // The resource method returned a SuspendResponse: honor its per-response settings.
            SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
            boolean outputJunk = outputJunk(request, s.outputComments());
            boolean resumeOnBroadcast = resumeOnBroadcast(request, s.resumeOnBroadcast());
            for (AtmosphereResourceEventListener el : s.listeners()) {
                if (r instanceof AtmosphereEventLifecycle) {
                    ((AtmosphereEventLifecycle) r).addEventListener(el);
                }
            }
            Broadcaster bc = s.broadcaster();
            if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
                bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            }
            boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            TrackableResource<? extends Trackable> trackableResource = null;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk,
                    translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case SUBSCRIBE_TRACKABLE:
        case SUBSCRIBE:
        case SUSPEND:
        case SUSPEND_TRACKABLE:
        case SUSPEND_RESUME:
            // Annotation-driven suspend/subscribe: settings come from this Filter's fields.
            outputJunk = outputJunk(request, outputComments);
            resumeOnBroadcast = resumeOnBroadcast(request, (action == Action.SUSPEND_RESUME));
            for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                try {
                    AtmosphereResourceEventListener el = listener.newInstance();
                    InjectorProvider.getInjector().inject(el);
                    if (r instanceof AtmosphereEventLifecycle) {
                        ((AtmosphereEventLifecycle) r).addEventListener(el);
                    }
                } catch (Throwable t) {
                    throw new WebApplicationException(
                            new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
                }
            }
            Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            // @Subscribe: look up (creating if needed) the Broadcaster bound to the topic.
            if (action == Action.SUBSCRIBE) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
            }
            // Tracking is enabled by default
            supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            } else {
                trackableResource = null;
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk, timeout, request, response,
                    broadcaster, r, scope);
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case RESUME:
            // Flush any pending entity before resuming the suspended connection.
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            // The suspended resource lives either in the session or in the resume-candidates
            // map, keyed by the last path segment (the UUID minted in suspend()).
            if (sessionSupported) {
                r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
            } else {
                String path = response.getContainerRequest().getPath();
                r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
            }
            if (r != null) {
                resume(r);
            } else {
                // FIX: the original concatenation was missing a space ("...or thepath used...").
                throw new WebApplicationException(
                        new IllegalStateException("Unable to retrieve suspended Response. " +
                                "Either session-support is not enabled in atmosphere.xml or the " +
                                "path used to resume is invalid."));
            }
            break;
        case BROADCAST:
        case PUBLISH:
        case RESUME_ON_BROADCAST:
            AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
            if (ar != null) {
                r = ar;
            }
            // @Publish: rebind the resource to the topic's Broadcaster before broadcasting.
            if (action == Action.PUBLISH) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
            }
            broadcast(response, r, timeout);
            break;
        case SCHEDULE:
        case SCHEDULE_RESUME:
            Object o = response.getEntity();
            Broadcaster b = r.getBroadcaster();
            // A Broadcastable entity carries its own Broadcaster and message payload.
            if (response.getEntity() instanceof Broadcastable) {
                b = ((Broadcastable) response.getEntity()).getBroadcaster();
                o = ((Broadcastable) response.getEntity()).getMessage();
                response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
            }
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (action == Action.SCHEDULE_RESUME) {
                configureResumeOnBroadcast(b);
            }
            b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
            break;
    }
    return response;
}
// Wraps (or creates) the TrackableResource for this response, registers it with
// the TrackableSession, and echoes the tracking id back via header and attribute.
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
if (trackableResource == null) {
trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
} else {
// Unwrap: the tracked resource's inner entity becomes the response entity.
response.setEntity(trackableResource.entity());
}
// Prefer the client-supplied id, then the resource's own id, else mint a new one.
String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
if (trackableUUID == null && trackableResource.trackingID() != null) {
trackableUUID = trackableResource.trackingID();
} else if (trackableUUID == null) {
trackableUUID = UUID.randomUUID().toString();
}
trackableResource.setTrackingID(trackableUUID);
TrackableSession.getDefault().track(trackableResource);
response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
return trackableResource;
}
/**
 * Attaches either the AtmosphereResource itself or its Broadcaster to the
 * tracked resource, depending on the tracked type.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // FIX: dropped the redundant "? true : false" around the boolean expression.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
// Adds transport-related headers: a websocket error flag when the upgrade is not
// supported, no-cache headers, and CORS headers, driven by request attributes.
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
// NOTE(review): only the first "Connection" header value is inspected, and the
// split tokens are not trimmed before comparison — confirm this matches clients.
if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
for (String upgrade : e) {
if (upgrade != null && upgrade.equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
if (!webSocketSupported) {
b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
}
}
}
}
boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
if (injectCacheHeaders) {
// Set to expire far in the past.
b = b.header(EXPIRES, "-1");
// Set standard HTTP/1.1 no-cache headers.
b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
// Set standard HTTP/1.0 no-cache header.
b = b.header(PRAGMA, "no-cache");
}
if (enableAccessControl) {
b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
return b;
}
/**
 * Marks every resource attached to the Broadcaster so that the next
 * broadcast resumes its suspended connection.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the configured ClusterBroadcastFilters and @BroadcastFilter classes
 * on the Broadcaster's config, unless filters are already present.
 *
 * @throws WebApplicationException when {@code bc} is null.
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // FIX: register only after successful instantiation and injection;
                // previously addFilter ran even when newInstance threw, adding a
                // null (or stale) filter to the config.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Listener classes declared on @Suspend/@Subscribe; instantiated lazily in filter().
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcasts the response entity through the resource's (or the Broadcastable's
 * own) Broadcaster. delay == -1 broadcasts immediately and waits for completion,
 * delay == 0 delays indefinitely, any other value delays by that many seconds.
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // FIX: drop the unused local; still block until the broadcast completes.
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // FIX: preserve the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Thin alias kept for readability at call sites.
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resumes a previously suspended resource.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Registers a ClusterBroadcastFilter created from the @Cluster annotation.
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
// Suspends the connection: selects or creates the Broadcaster appropriate for the
// scope, stores the resource for a later resume, writes headers/padding/entity,
// then suspends the AtmosphereResource for up to `timeout` ms (-1 = no timeout).
void suspend(boolean sessionSupported,
boolean resumeOnBroadcast,
boolean comments,
long timeout,
ContainerRequest request,
ContainerResponse response,
Broadcaster bc,
AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
Suspend.SCOPE localScope) {
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
URI location = null;
// Do not add location header if already there.
// Without session support the resume URL carries a freshly minted UUID that is
// also the key in resumeCandidates (consumed by the RESUME branch of filter()).
if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
String uuid = UUID.randomUUID().toString();
location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
resumeCandidates.put(uuid, r);
servletReq.setAttribute(RESUME_UUID, uuid);
servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
}
if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
bc = r.getBroadcaster();
}
// Re-use the Broadcaster of a resource already suspended in this session, after
// detaching the stale resource from it.
if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
(AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
bc = cached.getBroadcaster();
// Just in case something went wrong.
try {
bc.removeAtmosphereResource(cached);
} catch (IllegalStateException ex) {
logger.trace(ex.getMessage(), ex);
}
}
// A Broadcastable entity supplies its own Broadcaster and response message.
if (response.getEntity() instanceof Broadcastable) {
Broadcastable b = (Broadcastable) response.getEntity();
bc = b.getBroadcaster();
response.setEntity(b.getResponseMessage());
}
if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
if (bc == null) {
try {
String id = UUID.randomUUID().toString();
// Re-generate a new one with proper scope.
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
bc = broadcasterFactory.get(c, id);
bc.setScope(Broadcaster.SCOPE.REQUEST);
} catch (Exception ex) {
logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
}
} else {
// NOTE(review): unreachable — the enclosing condition already requires bc == null.
// Possibly the outer check was meant to be only (localScope == REQUEST); verify intent.
bc.setScope(Broadcaster.SCOPE.REQUEST);
}
}
configureFilter(bc);
r.setBroadcaster(bc);
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
if (resumeOnBroadcast) {
// NOTE(review): new Boolean(true) could simply be Boolean.TRUE.
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will return the first
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
response.getEntity().getClass(),
response.getEntityType(),
response.getAnnotations(),
l);
// Fall back to octet-stream when no concrete writable type can be determined.
if (contentType == null ||
contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
b = b.header("Content-Type", contentType != null ?
contentType.toString() : "text/html; charset=ISO-8859-1");
}
// Streaming transports get padding written up-front so proxies/browsers flush.
if (comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
if (entity != null) {
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
// Entity already written above; clear it so Jersey does not write it again.
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
}
/**
 * Create a {@link ResourceFilter} which contains the information about the
 * annotation being processed.
 * <p/>
 * XXX Need to filter invalid mix of annotation.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null when no
 *         Atmosphere annotation applies (normal Jersey method).
 */
public List<ResourceFilter> create(AbstractMethod am) {
LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
Filter f;
if (logger.isDebugEnabled()) {
for (Annotation annotation : am.getAnnotations()) {
logger.debug("AtmosphereFilter processing annotation: {}", annotation);
}
}
if (am.getMethod() == null) {
return null;
}
// A SuspendResponse return type takes precedence over all annotations.
if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
list.addLast(new Filter(Action.SUSPEND_RESPONSE));
return list;
}
if (am.isAnnotationPresent(Broadcast.class)) {
int delay = am.getAnnotation(Broadcast.class).delay();
// NOTE(review): despite its name, this variable holds the @Broadcast (and later
// @Cluster) filter classes, not a timeout.
Class[] suspendTimeout = am.getAnnotation(Broadcast.class).value();
if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
} else {
f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
}
list.addLast(f);
if (am.isAnnotationPresent(Cluster.class)) {
suspendTimeout = am.getAnnotation(Cluster.class).value();
for (Class<ClusterBroadcastFilter> c : suspendTimeout) {
try {
ClusterBroadcastFilter cbf = c.newInstance();
InjectorProvider.getInjector().inject(cbf);
cbf.setUri(am.getAnnotation(Cluster.class).name());
f.addCluster(cbf);
} catch (Throwable t) {
logger.warn("Invalid ClusterBroadcastFilter", t);
}
}
}
}
if (am.isAnnotationPresent(Suspend.class)) {
long suspendTimeout = am.getAnnotation(Suspend.class).period();
TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
suspendTimeout = translateTimeUnit(suspendTimeout, tu);
Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
// NOTE(review): when trackable, resumeOnBroadcast collapses to SUSPEND_TRACKABLE —
// verify resume-on-broadcast is still honored for trackable resources.
if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
} else {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
}
f.setListeners(am.getAnnotation(Suspend.class).listeners());
// Suspend must run before broadcast filters: addFirst.
list.addFirst(f);
}
if (am.isAnnotationPresent(Subscribe.class)) {
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Subscribe.class).value());
f.setListeners(am.getAnnotation(Subscribe.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Publish.class)) {
f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Publish.class).value());
list.addFirst(f);
}
if (am.isAnnotationPresent(Resume.class)) {
int suspendTimeout = am.getAnnotation(Resume.class).value();
list.addFirst(new Filter(Action.RESUME, suspendTimeout));
}
if (am.isAnnotationPresent(Schedule.class)) {
int period = am.getAnnotation(Schedule.class).period();
int waitFor = am.getAnnotation(Schedule.class).waitFor();
if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
} else {
list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
}
}
// Nothing, normal Jersey application.
return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period} expressed in {@code tu} into milliseconds.
 * A period of -1 (meaning "no timeout") is passed through untouched.
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // TimeUnit already knows how to convert any of its units to milliseconds;
    // the previous hand-written switch enumerated every unit to do the same thing.
    return tu.toMillis(period);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The behavior a Filter instance applies to a response, derived from the
// Atmosphere annotation (or SuspendResponse return type) on the resource method.
enum Action {
SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH
}
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
private final long timeout;
private final int waitFor;
private final Suspend.SCOPE scope;
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
private final String topic;
// Delegates with timeout defaulted to -1 (no timeout).
protected Filter(Action action) {
this(action, -1);
}
// Delegates with waitFor defaulted to 0.
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
// Delegates with scope defaulted to APPLICATION.
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
// Delegates with outputComments defaulted to true.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
// Delegates with no BroadcastFilter classes and no topic.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor: every other constructor funnels here.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters; // may be null: no BroadcastFilter to install
this.topic = topic; // Broadcaster lookup key for @Subscribe/@Publish; may be null
}
// This filter does no request-time processing; only responses are filtered.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// The response-side behavior is implemented by this class itself.
public ContainerResponseFilter getResponseFilter() {
return this;
}
/**
 * Long-polling connections are always resumed on the first broadcast,
 * regardless of what the annotation requested; otherwise the requested
 * value is returned unchanged.
 */
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
    final String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    return LONG_POLLING_TRANSPORT.equals(transport) || resumeOnBroadcast;
}
// Decides whether streaming padding ("junk") should be written: never for
// websocket upgrades or long-polling, otherwise whatever the caller requested.
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
boolean webSocketEnabled = false;
// NOTE(review): only the first "Connection" header value is inspected —
// confirm multiple Connection headers never occur here.
if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
for (String upgrade : e) {
if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
webSocketEnabled = true;
break;
}
}
}
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (webSocketEnabled) {
return false;
} else if (transport != null && transport.equals(LONG_POLLING_TRANSPORT)) {
return false;
}
return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 *
 * @param request  the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
    // A Throwable was already mapped by Jersey; let that response flow through untouched.
    if (response.getMappedThrowable() != null) {
        return response;
    }
    AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
            (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
                    .getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    switch (action) {
        case SUSPEND_RESPONSE:
            // The resource method returned a SuspendResponse: honor its per-response settings.
            SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
            boolean outputJunk = outputJunk(request, s.outputComments());
            boolean resumeOnBroadcast = resumeOnBroadcast(request, s.resumeOnBroadcast());
            for (AtmosphereResourceEventListener el : s.listeners()) {
                if (r instanceof AtmosphereEventLifecycle) {
                    ((AtmosphereEventLifecycle) r).addEventListener(el);
                }
            }
            Broadcaster bc = s.broadcaster();
            if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
                bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            }
            boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            TrackableResource<? extends Trackable> trackableResource = null;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk,
                    translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case SUBSCRIBE_TRACKABLE:
        case SUBSCRIBE:
        case SUSPEND:
        case SUSPEND_TRACKABLE:
        case SUSPEND_RESUME:
            // Annotation-driven suspend/subscribe: settings come from this Filter's fields.
            outputJunk = outputJunk(request, outputComments);
            resumeOnBroadcast = resumeOnBroadcast(request, (action == Action.SUSPEND_RESUME));
            for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                try {
                    AtmosphereResourceEventListener el = listener.newInstance();
                    InjectorProvider.getInjector().inject(el);
                    if (r instanceof AtmosphereEventLifecycle) {
                        ((AtmosphereEventLifecycle) r).addEventListener(el);
                    }
                } catch (Throwable t) {
                    throw new WebApplicationException(
                            new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
                }
            }
            Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            // @Subscribe: look up (creating if needed) the Broadcaster bound to the topic.
            if (action == Action.SUBSCRIBE) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
            }
            // Tracking is enabled by default
            supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            } else {
                trackableResource = null;
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk, timeout, request, response,
                    broadcaster, r, scope);
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case RESUME:
            // Flush any pending entity before resuming the suspended connection.
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            // The suspended resource lives either in the session or in the resume-candidates
            // map, keyed by the last path segment (the UUID minted in suspend()).
            if (sessionSupported) {
                r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
            } else {
                String path = response.getContainerRequest().getPath();
                r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
            }
            if (r != null) {
                resume(r);
            } else {
                // FIX: the original concatenation was missing a space ("...or thepath used...").
                throw new WebApplicationException(
                        new IllegalStateException("Unable to retrieve suspended Response. " +
                                "Either session-support is not enabled in atmosphere.xml or the " +
                                "path used to resume is invalid."));
            }
            break;
        case BROADCAST:
        case PUBLISH:
        case RESUME_ON_BROADCAST:
            AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
            if (ar != null) {
                r = ar;
            }
            // @Publish: rebind the resource to the topic's Broadcaster before broadcasting.
            if (action == Action.PUBLISH) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
            }
            broadcast(response, r, timeout);
            break;
        case SCHEDULE:
        case SCHEDULE_RESUME:
            Object o = response.getEntity();
            Broadcaster b = r.getBroadcaster();
            // A Broadcastable entity carries its own Broadcaster and message payload.
            if (response.getEntity() instanceof Broadcastable) {
                b = ((Broadcastable) response.getEntity()).getBroadcaster();
                o = ((Broadcastable) response.getEntity()).getMessage();
                response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
            }
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (action == Action.SCHEDULE_RESUME) {
                configureResumeOnBroadcast(b);
            }
            b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
            break;
    }
    return response;
}
/**
 * Wrap (or create) the TrackableResource for this response and register it with
 * the TrackableSession. The tracking id is propagated back to the client via the
 * X-Atmosphere-tracking-id response header and stored as a request attribute.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    TrackableResource<? extends Trackable> tracked = TrackableResource.class.cast(response.getEntity());
    if (tracked != null) {
        // Unwrap: the real entity is what gets written to the client, not the tracker.
        response.setEntity(tracked.entity());
    } else {
        tracked = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    }
    // Prefer the client-sent id, then the resource's own id, then a fresh UUID.
    String uuid = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (uuid == null) {
        uuid = (tracked.trackingID() != null) ? tracked.trackingID() : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(uuid);
    TrackableSession.getDefault().track(tracked);
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Attach the concrete runtime object to an already-registered TrackableResource.
 *
 * @param trackableResource the tracker created by {@code preTrack}
 * @param r                 the resource handling the current request
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // True when the tracked type is an AtmosphereResource; otherwise the
    // tracker follows the resource's Broadcaster instead.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Decorate the response builder with Atmosphere-specific headers: a websocket
 * error marker when an upgrade was requested but is unsupported, optional
 * no-cache headers, and optional CORS headers.
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    // Container-level attribute flags whether WebSocket suspension is available.
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // Only the first Connection header value is inspected, split on commas.
        String[] tokens = connection.nextElement().split(",");
        for (String token : tokens) {
            if (token != null && token.equalsIgnoreCase(WEBSOCKET_UPGRADE) && !webSocketSupported) {
                b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Expire far in the past plus HTTP/1.1 and HTTP/1.0 no-cache directives.
        b = b.header(EXPIRES, "-1")
             .header(CACHE_CONTROL, "no-store, no-cache, must-revalidate")
             .header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
             .header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Mark every request attached to this broadcaster so that the next broadcast
 * resumes the suspended connection.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (Iterator<AtmosphereResource<?, ?>> it = b.getAtmosphereResources().iterator(); it.hasNext(); ) {
        HttpServletRequest req = (HttpServletRequest) it.next().getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Install the cluster and annotation-declared BroadcastFilters on the
 * Broadcaster's config, unless filters were already installed.
 *
 * @param bc the broadcaster to configure; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Register only successfully instantiated filters. Previously
                // addFilter ran outside the try, so a failed newInstance()
                // registered null (or the previous loop iteration's filter).
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Records the AtmosphereResourceEventListener classes declared on the annotation;
// they are instantiated and attached later, when the request is suspended.
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcast the response entity through the resource's Broadcaster (or the
 * Broadcastable's own Broadcaster when the entity supplies one).
 *
 * delay semantics: -1 broadcasts immediately and blocks until delivery
 * completes; 0 queues a delayed broadcast; any other value delays by that
 * many seconds.
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
Object o = r.getEntity();
Broadcaster b = ar.getBroadcaster();
Object msg = o;
String returnMsg = null;
// Something went wrong if null.
if (o instanceof Broadcastable) {
// A Broadcastable may carry its own Broadcaster plus distinct broadcast/response payloads.
if (((Broadcastable) o).getBroadcaster() != null) {
b = ((Broadcastable) o).getBroadcaster();
}
msg = ((Broadcastable) o).getMessage();
returnMsg = ((Broadcastable) o).getResponseMessage().toString();
}
if (action == Action.RESUME_ON_BROADCAST) {
configureResumeOnBroadcast(b);
}
if (o != null) {
addFilter(b);
try {
r.setEntity(msg);
if (msg == null) return;
if (delay == -1) {
Future<Object> f = b.broadcast(msg);
if (f == null) return;
// Blocks until the broadcast completes; the returned value is intentionally unused.
Object t = f.get();
if (o instanceof Broadcastable) {
// The HTTP caller receives the Broadcastable's response message, not the broadcast payload.
r.setEntity(returnMsg);
}
} else if (delay == 0) {
b.delayBroadcast(msg);
} else {
b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
}
} catch (InterruptedException ex) {
// NOTE(review): the thread's interrupt status is not restored here — confirm intended.
logger.error("broadcast interrupted", ex);
} catch (ExecutionException ex) {
logger.error("execution exception during broadcast", ex);
}
}
}
// Thin alias kept for readability at broadcast call sites; delegates to configureFilter.
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resume a previously suspended resource, letting its response complete.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Register a ClusterBroadcastFilter built from the @Cluster annotation;
// installed first by configureFilter, before any other filtering.
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
/**
 * Suspend the underlying {@link AtmosphereResource}: pick/create the right
 * Broadcaster for the requested scope, expose resume bookkeeping (session
 * attribute or Location-header UUID), write the current entity and optional
 * streaming padding, then park the connection.
 *
 * @param sessionSupported  true when HTTP session support is enabled
 * @param resumeOnBroadcast true when the next broadcast must resume the connection
 * @param comments          true when streaming padding must be written
 * @param timeout           suspend timeout in milliseconds (-1 = no timeout)
 * @param request           the current {@link ContainerRequest}
 * @param response          the current {@link ContainerResponse}
 * @param bc                the {@link Broadcaster} to associate, may be null
 * @param r                 the resource to suspend
 * @param localScope        the {@link Suspend.SCOPE} requested by the annotation
 */
void suspend(boolean sessionSupported,
             boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Without a session, expose a UUID-based resume URL and remember the resource.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // Reuse the Broadcaster of the previously suspended resource in this session.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope without a Broadcaster gets a dedicated, request-scoped one.
    // (The original code double-checked bc == null and carried a dead else branch.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = UUID.randomUUID().toString();
            // Re-generate a new one with proper scope.
            Class<Broadcaster> c;
            try {
                c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
            } catch (Throwable e) {
                // Keep the original message but preserve the failure as the cause.
                throw new IllegalStateException(e.getMessage(), e);
            }
            bc = broadcasterFactory.get(c, id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated new Boolean(true).
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
        }
        if (comments && !resumeOnBroadcast) {
            // Streaming transports need padding so intermediaries flush the response.
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter invalid mix of annotation.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
public List<ResourceFilter> create(AbstractMethod am) {
LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
Filter f;
if (logger.isDebugEnabled()) {
for (Annotation annotation : am.getAnnotations()) {
logger.debug("AtmosphereFilter processing annotation: {}", annotation);
}
}
// Fields/setters also reach this factory; only methods carry the annotations we handle.
if (am.getMethod() == null) {
return null;
}
// A SuspendResponse return type implies suspension regardless of annotations.
if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
list.addLast(new Filter(Action.SUSPEND_RESPONSE));
return list;
}
if (am.isAnnotationPresent(Broadcast.class)) {
int delay = am.getAnnotation(Broadcast.class).delay();
// NOTE(review): despite its name, this variable holds the BroadcastFilter
// classes from @Broadcast#value(), and is later reused for @Cluster#value().
Class[] suspendTimeout = am.getAnnotation(Broadcast.class).value();
if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
} else {
f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
}
list.addLast(f);
if (am.isAnnotationPresent(Cluster.class)) {
suspendTimeout = am.getAnnotation(Cluster.class).value();
for (Class<ClusterBroadcastFilter> c : suspendTimeout) {
try {
ClusterBroadcastFilter cbf = c.newInstance();
InjectorProvider.getInjector().inject(cbf);
cbf.setUri(am.getAnnotation(Cluster.class).name());
f.addCluster(cbf);
} catch (Throwable t) {
logger.warn("Invalid ClusterBroadcastFilter", t);
}
}
}
}
if (am.isAnnotationPresent(Suspend.class)) {
// Period is normalized to milliseconds; -1 stays -1 (no timeout).
long suspendTimeout = am.getAnnotation(Suspend.class).period();
TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
suspendTimeout = translateTimeUnit(suspendTimeout, tu);
Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
// NOTE(review): when the return type is trackable, both branches yield
// SUSPEND_TRACKABLE, so resumeOnBroadcast() is effectively ignored for
// trackable resources — confirm this is intended.
if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
} else {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
}
f.setListeners(am.getAnnotation(Suspend.class).listeners());
// Suspension must run before any broadcast filter added above.
list.addFirst(f);
}
if (am.isAnnotationPresent(Subscribe.class)) {
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
// 30s timeout, waitFor == -1; topic comes from @Subscribe#value().
f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Subscribe.class).value());
f.setListeners(am.getAnnotation(Subscribe.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Publish.class)) {
f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Publish.class).value());
list.addFirst(f);
}
if (am.isAnnotationPresent(Resume.class)) {
int suspendTimeout = am.getAnnotation(Resume.class).value();
list.addFirst(new Filter(Action.RESUME, suspendTimeout));
}
if (am.isAnnotationPresent(Schedule.class)) {
// Schedule period/waitFor are passed through in seconds (see scheduleFixedBroadcast).
int period = am.getAnnotation(Schedule.class).period();
int waitFor = am.getAnnotation(Schedule.class).waitFor();
if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
} else {
list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
}
}
// Nothing, normal Jersey application.
return list.size() > 0 ? list : null;
}
/**
 * Translate a period expressed in {@code tu} into milliseconds.
 * A period of -1 is a sentinel meaning "no timeout" and is returned untouched
 * (converting it would scale the sentinel, e.g. -1 second would become -1000).
 *
 * @param period the period value, or -1 for no timeout
 * @param tu     the unit of {@code period}
 * @return the period in milliseconds
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // TimeUnit.convert covers every unit (including sub-millisecond truncation),
    // replacing the previous hand-written switch over each enum constant.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
Left
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute keys used to hand state between filter invocations.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): unlike the keys above, these two omit the '.' separator — confirm intentional.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The asynchronous behavior a resource method was annotated with.
enum Action {
SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH, ASYNCHRONOUS
}
// Injected per request by Jersey.
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
private boolean useResumeAnnotation = false;
// Resources suspended without session support, keyed by the UUID appended to the Location header.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
// Behavior derived from the resource method's annotation (suspend, broadcast, ...).
private final Action action;
// Suspend timeout: milliseconds for @Suspend (see translateTimeUnit),
// seconds when used as the @Schedule period (see scheduleFixedBroadcast).
private final long timeout;
// @Schedule waitFor value; -1 also serves as a "wait for the broadcast" flag for @Asynchronous.
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes declared on the annotation (may be null).
private final Class<BroadcastFilter>[] filters;
// Listener classes to instantiate and attach when suspending (set via setListeners).
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
// Cluster filters registered via addCluster; installed first by configureFilter.
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
// Broadcaster topic/name; for ASYNCHRONOUS it is the request-header name carrying the topic.
private final String topic;
// Telescoping constructors: each shorter form delegates to the full one below
// with defaults (timeout -1, waitFor 0, APPLICATION scope, comments enabled,
// no filters, no topic).
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor; all other constructors funnel here.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// No request-side filtering is performed; all work happens on the response.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// This Filter instance itself acts as the response filter.
public ContainerResponseFilter getResponseFilter() {
return this;
}
/**
 * Decide whether the connection must resume on the next broadcast.
 * JSONP and long-polling transports can deliver only a single message,
 * so they always resume regardless of the annotation's setting.
 */
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    boolean singleShotTransport = transport != null
            && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT));
    return singleShotTransport || resumeOnBroadcast;
}
/**
 * Decide whether streaming padding ("junk") must be written. WebSocket,
 * JSONP and long-polling connections never receive padding; otherwise the
 * annotation's setting is honored.
 */
boolean outputJunk(boolean outputJunk) {
    // Detect a WebSocket upgrade from the first Connection header value.
    boolean webSocketEnabled = false;
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    if (webSocketEnabled) {
        return false;
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Prepares a {@link TrackableResource} for the current exchange: reuses the one
 * returned by the resource method when present, resolves its tracking id, and
 * advertises that id in the response header and request attributes.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // Reuse the entity when the resource method returned a TrackableResource,
    // otherwise synthesize one around the raw AtmosphereResource.
    TrackableResource<? extends Trackable> tracked = TrackableResource.class.cast(response.getEntity());
    if (tracked != null) {
        // Unwrap so the wire sees the real entity, not the tracking envelope.
        response.setEntity(tracked.entity());
    } else {
        tracked = new TrackableResource<AtmosphereResource>(AtmosphereResource.class,
                servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    }
    // Resolve the tracking id: the request header wins, then the resource's own
    // id, and finally a freshly generated UUID.
    String uuid = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (uuid == null) {
        uuid = tracked.trackingID() != null ? tracked.trackingID() : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(uuid);
    TrackableSession.getDefault().track(tracked);
    // Echo the id back to the client and stash it for downstream components.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Attaches the live tracked object to the {@link TrackableResource}: the
 * {@link AtmosphereResource} itself when the declared type is a resource,
 * otherwise its {@link Broadcaster}.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // The condition is already a boolean; the original "? true : false" was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds the standard Atmosphere response headers: a websocket error marker when
 * an Upgrade was requested but the container does not support it, no-cache
 * headers, and CORS headers, each driven by request attributes.
 *
 * @param b the builder to augment
 * @return the augmented builder
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            // Trim each token: "keep-alive, Upgrade" splits into " Upgrade", which
            // previously never matched (outputJunk() already trims — keep consistent).
            if (upgrade != null && upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flags every request currently suspended on this broadcaster so that the next
 * broadcast resumes it.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest suspendedRequest = (HttpServletRequest) resource.getRequest();
        suspendedRequest.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the cluster filters and the annotation-declared {@link BroadcastFilter}s
 * on the broadcaster's config, unless filters are already present.
 *
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict whether the same set of filters is shared across all
     * Broadcasters, as each Broadcaster can have its own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Register inside the try: previously a failed instantiation still
                // reached addFilter, registering null (or re-registering the
                // previous iteration's filter).
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
/**
 * Stores the {@link AtmosphereResourceEventListener} classes declared by the
 * annotation; they are instantiated per request when the filter runs.
 */
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcasts the response entity on the resource's broadcaster (or on the one
 * carried by a {@link Broadcastable} entity), optionally delaying the broadcast.
 *
 * @param r     the container response whose entity is broadcast
 * @param ar    the resource supplying the default broadcaster
 * @param delay -1: broadcast now and wait for completion; 0: delay until the
 *              next broadcast; &gt;0: delay by that many seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        // A Broadcastable may carry its own broadcaster and keeps the broadcast
        // message separate from the response message returned to this client.
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // Block until the broadcast completes; the result itself is unused.
                Object t = f.get();
                // Only a Broadcastable distinguishes broadcast vs response message.
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
/** Ensures the given broadcaster carries the filters declared for this method. */
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}

/** Resumes a previously suspended resource. */
void resume(AtmosphereResource resource) {
    resource.resume();
}

/**
 * Registers a {@link ClusterBroadcastFilter}; cluster filters are installed
 * first, before any other filter, by {@link #configureFilter}.
 */
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Suspends the current resource: resolves the broadcaster to use (annotation
 * scope, session cache, Broadcastable entity, or a per-request one), publishes
 * a resume URI when @Resume is in play, then delegates to
 * {@link #executeSuspend}.
 */
void suspend(boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add a Location header if one is already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Publish a per-suspend resume URI so @Resume can locate this resource later.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // A resource from a previous request is still cached in the session:
        // reuse its broadcaster but detach the stale resource first.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST-scoped suspend with no broadcaster yet: create a per-request one
    // keyed by the tracking id. (The former "else bc.setScope(...)" branch was
    // unreachable because the enclosing condition already required bc == null.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated new Boolean(true).
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Performs the actual suspend: caches the resource for later resume (session
 * and request attributes), negotiates the response Content-Type, optionally
 * writes streaming padding and the entity, then suspends the connection.
 *
 * @param flushEntity when true, the entity (possibly a Callable producing it)
 *                    is written before suspending
 */
void executeSuspend(AtmosphereResource r,
                    long timeout,
                    boolean comments,
                    boolean resumeOnBroadcast,
                    URI location,
                    ContainerRequest request,
                    ContainerResponse response,
                    boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            // Fall back to octet-stream rather than emitting a wildcard type.
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        // Jetty 8.0.x (x > 1) and 7.5.4 cannot take padding before suspending.
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            // Write transport-specific padding so proxies/browsers start
            // delivering the streamed response immediately.
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Null out so the Location header is not added a second time below.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                // A Callable entity is evaluated lazily, right before the flush.
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                logger.error("Error executing callable {}", entity);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        // The entity has been flushed (or withheld); clear it so Jersey does not
        // write it again, then suspend the connection.
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
 * Create a {@link ResourceFilter} which contains the information about the
 * annotation being processed.
 * <p/>
 * XXX Need to filter invalid mix of annotation.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null when the method
 *         carries no Atmosphere annotation.
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type short-circuits annotation processing entirely.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        }
        list.addLast(f);
        // @Cluster piggybacks on the @Broadcast filter created just above.
        if (am.isAnnotationPresent(Cluster.class)) {
            broadcastFilter = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Asynchronous.class)) {
        int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
        Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
        boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
        // waitFor == -1 signals waitForResource semantics downstream.
        f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
        f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
        // addFirst places this filter ahead of any @Broadcast filter added above.
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        // Normalize the annotation's period to milliseconds.
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        // A TrackableResource return type selects the *_TRACKABLE action variants.
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        // 30s timeout, waitFor -1; the topic is the @Subscribe value.
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Resume.class)) {
        // Enables the resume-URI bookkeeping in suspend().
        useResumeAnnotation = true;
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period} expressed in {@code tu} into milliseconds.
 * A period of -1 is a sentinel (no explicit timeout) and is returned unchanged.
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // TimeUnit handles every unit, including the truncating MICROSECONDS and
    // NANOSECONDS conversions the previous hand-written switch spelled out.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
/**
 * The processing mode selected for a resource method, derived from which
 * Atmosphere annotation (@Suspend, @Resume, @Broadcast, @Subscribe, ...) is
 * present — see {@link #create}.
 */
enum Action {
    SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
    SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
    SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH, ASYNCHRONOUS
}
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
private boolean useResumeAnnotation = false;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
// Processing mode derived from the resource method's annotations.
private final Action action;
// Timeout handed to suspend(); -1 by default (no explicit timeout). @Suspend
// values are pre-converted to milliseconds; other annotations pass raw values.
private final long timeout;
// For @Asynchronous, -1 selects waitForResource/awaitAndBroadcast behavior;
// for @Schedule it is the broadcast delay.
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes to instantiate and register on the broadcaster.
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
// Cluster filters always installed first by configureFilter().
private final ArrayList<ClusterBroadcastFilter> clusters
        = new ArrayList<ClusterBroadcastFilter>();
// Header name (@Asynchronous) or broadcaster topic (@Subscribe/@Publish).
private final String topic;
// Telescoping constructors: each shorter form fills in defaults
// (timeout -1, waitFor 0, APPLICATION scope, comments on, no filters/topic).
protected Filter(Action action) {
    this(action, -1);
}

protected Filter(Action action, long timeout) {
    this(action, timeout, 0);
}

protected Filter(Action action, long timeout, int waitFor) {
    this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}

protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
    this(action, timeout, waitFor, scope, true);
}

protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
    this(action, timeout, waitFor, scope, outputComments, null, null);
}

protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
    this.action = action;
    this.timeout = timeout;
    this.scope = scope;
    this.outputComments = outputComments;
    this.waitFor = waitFor;
    this.filters = filters;
    this.topic = topic;
}
// No request-side filtering is performed; all work happens on the response path.
public ContainerRequestFilter getRequestFilter() {
    return null;
}

public ContainerResponseFilter getResponseFilter() {
    return this;
}
/**
 * Decides whether the connection must resume after the next broadcast:
 * JSONP and long-polling transports always do, regardless of the annotation.
 */
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    boolean forcedByTransport = transport != null
            && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT));
    return forcedByTransport || resumeOnBroadcast;
}
/**
 * Decides whether streaming padding ("junk") should be written: never for
 * websocket upgrades or for JSONP/long-polling transports, otherwise the
 * annotation's preference is honored.
 */
boolean outputJunk(boolean outputJunk) {
    // A websocket upgrade request never needs padding.
    boolean webSocketEnabled = false;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] connectionTokens = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String token : connectionTokens) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    if (webSocketEnabled) {
        return false;
    }
    // JSONP and long-polling deliver a single buffered response; padding is useless.
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Prepare a {@link TrackableResource} for this request before suspending.
 * Re-uses the TrackableResource returned by the resource method when present
 * (unwrapping its entity into the response), otherwise creates a fresh one.
 * The tracking id is taken from the request header, then from the resource
 * itself, then generated; it is echoed back in the response header and
 * stored as a request attribute.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    TrackableResource<? extends Trackable> tracked = TrackableResource.class.cast(response.getEntity());
    if (tracked != null) {
        // Unwrap: the wrapped entity becomes the real response entity.
        response.setEntity(tracked.entity());
    } else {
        tracked = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    }
    // Tracking-id precedence: request header, then the resource's own id, then a random UUID.
    String id = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (id == null) {
        id = tracked.trackingID() != null ? tracked.trackingID() : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(id);
    TrackableSession.getDefault().track(tracked);
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Associate the suspended resource (or its broadcaster) with the
 * {@link TrackableResource} once the suspend has completed.
 *
 * @param trackableResource the tracked wrapper created by preTrack
 * @param r                 the resource that was suspended
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // isAssignableFrom already yields a boolean; the former "? true : false" was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Add Atmosphere-specific response headers: a websocket error marker when the
 * client requested an upgrade we cannot honor, no-cache headers, and CORS
 * headers, depending on the configured request attributes.
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // The Connection header may carry a comma-separated token list.
        for (String token : connection.nextElement().split(",")) {
            if (token != null && token.equalsIgnoreCase(WEBSOCKET_UPGRADE) && !webSocketSupported) {
                b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Expire far in the past plus HTTP/1.1 and HTTP/1.0 no-cache headers.
        b = b.header(EXPIRES, "-1")
                .header(CACHE_CONTROL, "no-store, no-cache, must-revalidate")
                .header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
                .header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flag every resource attached to the broadcaster so the next broadcast
 * resumes its suspended connection.
 *
 * @param b the broadcaster whose resources should resume on broadcast
 */
void configureResumeOnBroadcast(Broadcaster b) {
    // Enhanced for-loop instead of the manual Iterator dance.
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Install the cluster filters and the @BroadcastFilter-declared filters on the
 * broadcaster's configuration, unless filters were already installed.
 *
 * @param bc the broadcaster to configure; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Bug fix: register the filter only after successful creation and
                // injection. Previously addFilter ran outside the try, so a failed
                // newInstance()/inject() still added a null or stale filter instance.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Record the AtmosphereResourceEventListener classes declared on the annotation;
// they are instantiated and injected per-request inside filter().
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcast the response entity to the resource's broadcaster.
 * A {@link Broadcastable} entity may override the broadcaster and split the
 * broadcast message from the message returned to the caller. A negative delay
 * broadcasts synchronously (waiting for completion); delay 0 queues an
 * unbounded delayed broadcast; a positive delay schedules it in seconds.
 *
 * @param r     the container response carrying the entity
 * @param ar    the resource whose broadcaster is used by default
 * @param delay -1 for immediate/synchronous, 0 for delayed, &gt;0 for delayed by seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // Block until the broadcast completes; the result itself is unused
                // (the former "Object t = f.get()" local was never read).
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // Restore the interrupt status so callers up the stack can observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Delegate: installs cluster filters and @BroadcastFilter filters on the broadcaster.
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resume the suspended connection associated with this resource.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Register a ClusterBroadcastFilter created from a @Cluster annotation value.
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
/**
 * Resolve the broadcaster and bookkeeping needed to suspend this response,
 * then delegate the actual suspend to executeSuspend.
 *
 * @param resumeOnBroadcast whether the connection resumes after the first broadcast
 * @param comments          whether streaming padding/comments may be written
 * @param timeout           suspend timeout in milliseconds (-1 for none)
 * @param request           the current Jersey request
 * @param response          the current Jersey response
 * @param bc                broadcaster selected by the annotation, may be null
 * @param r                 the Atmosphere resource backing the connection
 * @param localScope        the @Suspend scope (REQUEST/SESSION/APPLICATION)
 */
void suspend(boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add location header if already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Without sessions, @Resume finds the resource via a UUID appended to the path.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    // A resource already suspended in this session is detached before re-suspending.
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope with no broadcaster yet: create a per-request broadcaster.
    // (Cleanup: the former inner "if (bc == null)" re-check was always true here,
    // which made its else branch — bc.setScope on a non-null bc — unreachable.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated "new Boolean(true)".
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Perform the actual suspend: link the resource and response to the request
 * (and session, when supported), negotiate the content type, write streaming
 * padding and/or the entity, then suspend the underlying connection.
 *
 * @param r                 the resource to suspend
 * @param timeout           suspend timeout in milliseconds (-1 for none)
 * @param comments          whether streaming padding should be written
 * @param resumeOnBroadcast whether the connection resumes on the first broadcast
 * @param location          optional Location header value used by @Resume, may be null
 * @param request           the current Jersey request
 * @param response          the current Jersey response
 * @param flushEntity       whether a non-null entity is written before suspending
 */
void executeSuspend(AtmosphereResource r,
long timeout,
boolean comments,
boolean resumeOnBroadcast,
URI location,
ContainerRequest request,
ContainerResponse response,
boolean flushEntity) {
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
configureFilter(r.getBroadcaster());
// Store the resource/response where @Resume and broadcast handlers can find them.
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will return the first acceptable media type.
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
response.getEntity().getClass(),
response.getEntityType(),
response.getAnnotations(),
l);
// Fall back to octet-stream when negotiation yields nothing concrete.
if (contentType == null ||
contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
b = b.header("Content-Type", contentType != null ?
contentType.toString() : "text/html; charset=ISO-8859-1");
servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
}
// Detect Jetty versions affected by bug 362468 so padding can be skipped there.
// NOTE(review): assumes getServerInfo() looks like "jetty/x.y.z"; the
// substring(6)/split below could throw on another format — confirm against
// the containers actually in use.
boolean eclipse362468 = false;
String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
if (serverInfo.indexOf("jetty") != -1) {
String[] jettyVersion = serverInfo.substring(6).split("\\.");
// https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
|| ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
if (comments && eclipse362468) {
logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
}
}
// Write streaming padding (plus the Location header, once) before suspending.
if (!eclipse362468 && comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
// Null out so the entity write below does not duplicate the header.
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
// Flush the entity itself, resolving Callable entities first.
if (entity != null && flushEntity) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
} catch (Throwable t) {
logger.error("Error executing callable {}", entity);
entity = null;
}
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
// Clear the entity so Jersey does not write it again, then suspend the connection.
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter invalid mixes of annotations.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
/**
 * Build the ordered list of {@link ResourceFilter} for a resource method from
 * the Atmosphere annotations it carries. Returns null when the method carries
 * none, so plain Jersey dispatching applies. The addFirst/addLast ordering of
 * the original implementation is preserved exactly.
 *
 * @param am an {@link AbstractMethod}
 * @return the filters to invoke, or null when the method is not Atmosphere-annotated
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation a : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", a);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type short-circuits all annotation handling.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        Broadcast broadcastAnnotation = am.getAnnotation(Broadcast.class);
        Class[] broadcastFilter = broadcastAnnotation.value();
        Action broadcastAction = broadcastAnnotation.resumeOnBroadcast()
                ? Action.RESUME_ON_BROADCAST : Action.BROADCAST;
        f = new Filter(broadcastAction, broadcastAnnotation.delay(), 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        list.addLast(f);
        if (am.isAnnotationPresent(Cluster.class)) {
            Cluster clusterAnnotation = am.getAnnotation(Cluster.class);
            for (Class<ClusterBroadcastFilter> c : clusterAnnotation.value()) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(clusterAnnotation.name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Asynchronous.class)) {
        Asynchronous async = am.getAnnotation(Asynchronous.class);
        // waitForResource() maps to waitFor == -1, otherwise 0.
        f = new Filter(Action.ASYNCHRONOUS, async.period(), async.waitForResource() ? -1 : 0,
                null, false, async.broadcastFilter(), async.header());
        f.setListeners(async.eventListeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        Suspend suspendAnnotation = am.getAnnotation(Suspend.class);
        long suspendTimeout = translateTimeUnit(suspendAnnotation.period(), suspendAnnotation.timeUnit());
        boolean trackable = TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType());
        Action suspendAction;
        if (suspendAnnotation.resumeOnBroadcast()) {
            suspendAction = trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME;
        } else {
            suspendAction = trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND;
        }
        f = new Filter(suspendAction, suspendTimeout, 0, suspendAnnotation.scope(), suspendAnnotation.outputComments());
        f.setListeners(suspendAnnotation.listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        Subscribe subscribeAnnotation = am.getAnnotation(Subscribe.class);
        boolean trackable = TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType());
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, subscribeAnnotation.value());
        f.setListeners(subscribeAnnotation.listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        list.addFirst(new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value()));
    }
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        list.addFirst(new Filter(Action.RESUME, am.getAnnotation(Resume.class).value()));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        Schedule scheduleAnnotation = am.getAnnotation(Schedule.class);
        Action scheduleAction = scheduleAnnotation.resumeOnBroadcast()
                ? Action.SCHEDULE_RESUME : Action.SCHEDULE;
        list.addFirst(new Filter(scheduleAction, scheduleAnnotation.period(), scheduleAnnotation.waitFor()));
    }
    // Nothing, normal Jersey application.
    return list.isEmpty() ? null : list;
}
/**
 * Convert {@code period} from {@code tu} to milliseconds.
 * The sentinel value -1 ("suspend forever") is passed through unscaled.
 *
 * @param period the duration value, or -1 for "no timeout"
 * @param tu     the unit of {@code period}
 * @return the duration in milliseconds, or -1 unchanged
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // TimeUnit.toMillis covers every unit (including sub-millisecond units, which
    // truncate) — it replaces the former hand-rolled switch over all enum values.
    return tu.toMillis(period);
}
}
Right
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotations the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute keys used to hand state between filter passes.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): unlike the keys above, these two lack the "." separator before
// the suffix — confirm that is intentional before normalizing.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The suspend/resume/broadcast behavior selected for a resource method by the
// Atmosphere annotations it carries; dispatched on in Filter.filter().
enum Action {
SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH
}
// Injected by Jersey: the current servlet request.
private
@Context
HttpServletRequest servletReq;
// Injected by Jersey: URI information for building resume Location headers.
private
@Context
UriInfo uriInfo;
// Maps the UUID appended to a suspended response's Location header to its
// resource, so @Resume endpoints can find it when session support is disabled.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
// Per-method response filter carrying the configuration derived from the
// Atmosphere annotations found on the resource method by create().
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
// Suspend timeout in milliseconds; -1 means no timeout.
private final long timeout;
// Seconds to wait before the first scheduled broadcast (see @Schedule.waitFor).
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes to install on the broadcaster; may be null.
private final Class<BroadcastFilter>[] filters;
// Listener classes instantiated per request; set via setListeners().
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
// ClusterBroadcastFilters registered from a @Cluster annotation.
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
// Broadcaster topic used by @Subscribe/@Publish; may be null.
private final String topic;
// Telescoping convenience constructors; all funnel into the full constructor below.
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
/**
 * @param action         the action derived from the annotation
 * @param timeout        suspend timeout in milliseconds; -1 means none
 * @param waitFor        seconds to wait before a scheduled broadcast
 * @param scope          the Suspend scope (REQUEST/SESSION/APPLICATION)
 * @param outputComments whether streaming padding comments may be written
 * @param filters        BroadcastFilter classes to install, may be null
 * @param topic          broadcaster topic for @Subscribe/@Publish, may be null
 */
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// This filter only post-processes responses; there is no request-side filtering.
public ContainerRequestFilter getRequestFilter() {
return null;
}
public ContainerResponseFilter getResponseFilter() {
return this;
}
/**
 * Decide whether this connection must resume after the first broadcast.
 * JSONP and long-polling transports always do, regardless of the annotation
 * value; otherwise the annotation's own setting is returned.
 */
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    // Constant-first equals tolerates a missing transport header.
    boolean oneShotTransport = JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport);
    return oneShotTransport || resumeOnBroadcast;
}
/**
 * Decide whether streaming padding ("junk") should be written before suspend.
 * Websocket, JSONP and long-polling connections never receive padding;
 * otherwise the annotation's own setting is returned.
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean webSocketEnabled = false;
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // The Connection header may carry a comma-separated token list.
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    }
    // Constant-first equals tolerates a missing transport header.
    if (JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport)) {
        return false;
    }
    return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 *
 * @param request the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
// A mapped exception means Jersey already produced an error response; skip Atmosphere.
if (response.getMappedThrowable() != null) {
return response;
}
// The AtmosphereResource the Atmosphere servlet associated with this request.
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
switch (action) {
// The resource method returned a SuspendResponse: all suspend parameters come from it.
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(request, s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(request, s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(sessionSupported, resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
// Annotation-driven suspend/subscribe: parameters come from this Filter's fields.
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(request, outputComments);
resumeOnBroadcast = resumeOnBroadcast(request, (action == Action.SUSPEND_RESUME));
// Instantiate and inject each listener class declared on the annotation.
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(sessionSupported, resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
// @Resume: flush any entity, locate the suspended resource and resume it.
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
// With sessions the resource lives in the session; otherwise it was registered
// in resumeCandidates under the UUID that terminates the resume path.
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
// NOTE(review): the concatenated message lacks a space between "the" and "path".
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
// Broadcast the response entity; PUBLISH first looks up a topic broadcaster,
// RESUME_ON_BROADCAST additionally resumes the connection afterwards.
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
// @Schedule: broadcast the entity periodically.
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
// Here "timeout" is the schedule period (seconds); waitFor delays the first broadcast.
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Register the response entity with the {@link TrackableSession} so the client can
 * address it later via the {@code X-Atmosphere-Tracking-Id} header.
 * <p>
 * Callers only invoke this when the entity is a {@link TrackableResource} or null
 * (see the isTracked computation in filter()); {@code Class.cast} would throw for
 * any other non-null entity — assumed unreachable, TODO confirm.
 *
 * @param request  the current Jersey request (source of an existing tracking id)
 * @param response the current Jersey response whose entity may be trackable
 * @return the registered (possibly newly created) TrackableResource
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // cast(null) returns null, so a missing entity falls into the first branch.
    TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
    if (trackableResource == null) {
        trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap: the client response carries the inner entity, not the tracker.
        response.setEntity(trackableResource.entity());
    }
    // Tracking id precedence: request header, then the resource's own id, then a fresh UUID.
    String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackableUUID == null && trackableResource.trackingID() != null) {
        trackableUUID = trackableResource.trackingID();
    } else if (trackableUUID == null) {
        trackableUUID = UUID.randomUUID().toString();
    }
    trackableResource.setTrackingID(trackableUUID);
    TrackableSession.getDefault().track(trackableResource);
    // Echo the id to the client and stash it on the request for later filter passes.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    return trackableResource;
}
/**
 * Attach the live resource to a previously registered {@link TrackableResource}:
 * the resource itself when the tracker's declared type is an AtmosphereResource,
 * otherwise its {@link Broadcaster}.
 *
 * @param trackableResource the tracker produced by preTrack()
 * @param r                 the resource handling the current request
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // The comparison is already a boolean; the original "? true : false" was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Decorate the response with Atmosphere protocol headers: an
 * {@code X-Atmosphere-error} when the client requested a WebSocket upgrade the
 * container does not support, no-cache headers, and CORS headers, the latter two
 * driven by framework init attributes.
 *
 * @param b the builder to decorate
 * @return the (reassigned) builder
 * @throws IOException declared for signature compatibility; nothing here throws it
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            // FIX: trim each token — "keep-alive, Upgrade" splits into " Upgrade",
            // which the previous exact comparison missed. outputJunk() already
            // trims the same header the same way.
            if (upgrade != null && upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    // NOTE(review): assumes the framework always sets these attributes; a missing
    // one would NPE on unboxing — unchanged from the original contract.
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flag every resource attached to the given broadcaster so that its next
 * broadcast resumes the suspended connection.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest suspendedRequest = (HttpServletRequest) resource.getRequest();
        suspendedRequest.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Install the annotation-declared {@link ClusterBroadcastFilter}s and
 * {@link BroadcastFilter}s on the given broadcaster, exactly once.
 *
 * @param bc the broadcaster to configure; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /*
     * We can't predict whether the same set of filters is shared across all
     * Broadcasters, as each Broadcaster can have its own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Cluster filters always come first, before any transformation/filtering.
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // FIX: register inside the try. The original called addFilter(f)
                // after the catch, so a failed newInstance() still added null
                // (or re-added the previous filter).
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
/**
 * Remember the {@link AtmosphereResourceEventListener} classes declared on the
 * annotation; they are instantiated and attached when the request is suspended.
 */
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcast the response entity through the resource's (or the Broadcastable's
 * own) broadcaster, optionally delaying it.
 *
 * @param r     the container response whose entity is broadcast; its entity is
 *              replaced by the Broadcastable's response message on completion
 * @param ar    the resource whose broadcaster is used by default
 * @param delay -1 = broadcast now and wait for completion, 0 = delay until the
 *              next broadcast, &gt;0 = delay in seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // Block until the broadcast completes; the result itself is unused
                // (the original stored it in a dead local).
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // FIX: restore the interrupt status so callers up the stack can observe it;
            // the original swallowed it after logging.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
/**
 * Ensure annotation-declared filters are installed on the broadcaster.
 * Thin alias for {@link #configureFilter(Broadcaster)}.
 */
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
/**
 * Resume a previously suspended connection, completing its response.
 */
void resume(AtmosphereResource resource) {
    resource.resume();
}
/**
 * Register a {@link ClusterBroadcastFilter} built from an {@code @Cluster}
 * annotation; installed first by {@link #configureFilter(Broadcaster)}.
 */
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Suspend the current connection: pick/configure a {@link Broadcaster}, link the
 * resource to the request (and session, when supported), write any padding and
 * entity, then suspend the underlying AtmosphereResource.
 *
 * @param sessionSupported  whether the framework tracks suspended resources in the HTTP session
 * @param resumeOnBroadcast whether the next broadcast resumes the connection
 * @param comments          whether to emit the streaming padding ("junk") prelude
 * @param timeout           suspend timeout in milliseconds (-1 = forever)
 * @param request           the Jersey request
 * @param response          the Jersey response (entity may be consumed/cleared here)
 * @param bc                broadcaster chosen by the caller, or null to derive one
 * @param r                 the resource to suspend
 * @param localScope        the @Suspend scope (REQUEST scope gets a private broadcaster)
 */
void suspend(boolean sessionSupported,
             boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Hand out a UUID-addressed resume URL; @Resume looks the resource up
        // in resumeCandidates by the last path segment.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    // Reuse the broadcaster of a resource already suspended in this session,
    // detaching the stale resource from it first.
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    // A Broadcastable entity overrides the broadcaster choice entirely.
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
        // NOTE(review): the inner "bc == null" re-check is always true (the outer
        // condition already requires it), so the else branch below is dead code —
        // possibly the outer condition was meant to be just scope == REQUEST.
        if (bc == null) {
            try {
                // Request-private broadcaster keyed by the tracking id (or a fresh UUID).
                String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
                if (id == null){
                    id = UUID.randomUUID().toString();
                }
                bc = broadcasterFactory.get(id);
                bc.setScope(Broadcaster.SCOPE.REQUEST);
            } catch (Exception ex) {
                logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
            }
        } else {
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        // NOTE(review): new Boolean(true) is deprecated; Boolean.TRUE would do.
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            // When entity != null, contentType was normalized to non-null above,
            // so the "text/html" fallback in the ternary appears unreachable.
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        // Assumes serverInfo looks like "jetty/x.y.z" — TODO confirm for all containers.
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        // Emit the streaming padding prelude unless suppressed.
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Location is written once; null it so the entity write below won't repeat it.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        // Entity fully written (or absent); clear it before suspending.
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
 * Create a {@link ResourceFilter} which contains the information about the
 * annotation being processed.
 * <p/>
 * XXX Need to filter invalid mix of annotation.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null when no
 *         Atmosphere annotation applies (plain Jersey resource method).
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type short-circuits all annotation handling.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        // Despite the name, this holds the @Broadcast (later @Cluster) filter classes.
        Class[] suspendTimeout = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, suspendTimeout, null);
        }
        list.addLast(f);
        if (am.isAnnotationPresent(Cluster.class)) {
            suspendTimeout = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : suspendTimeout) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        // NOTE(review): when trackable && resumeOnBroadcast, the action collapses
        // to SUSPEND_TRACKABLE and the resume-on-broadcast intent is lost — confirm intended.
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        // Suspend must run before any Broadcast filter appended above.
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        // Fixed 30000 ms suspend timeout for @Subscribe; waitFor is unused (-1).
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Resume.class)) {
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Convert {@code period} expressed in {@code tu} into milliseconds.
 * The sentinel -1 ("suspend forever") is passed through unchanged.
 *
 * @param period the duration value, or -1
 * @param tu     the unit {@code period} is expressed in
 * @return the duration in milliseconds (truncating for sub-millisecond units)
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // TimeUnit covers every unit the original switch enumerated, with
    // identical truncation semantics for MICROSECONDS/NANOSECONDS.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Servlet request/session attribute keys used to pass state between filter passes.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): the two keys below lack the "." separator the others use — harmless
// while they stay unique, but presumably unintended; changing them would alter runtime keys.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
/**
 * The suspend/resume/broadcast behavior derived from the resource-method
 * annotations; drives the switch in {@code Filter#filter}.
 */
enum Action {
    SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
    SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
    SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH
}
// Injected by Jersey for the current request.
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
// Suspended resources addressable by the UUID path segment handed out in the
// Location header; consumed by @Resume when session support is disabled.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
        new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
/**
 * Response filter that applies one annotation-derived {@link Action} to the
 * current request/response pair.
 */
private class Filter implements ResourceFilter, ContainerResponseFilter {
    private final Action action;
    // Suspend timeout in milliseconds (-1 = forever); also reused as the
    // broadcast delay/period for BROADCAST/SCHEDULE actions.
    private final long timeout;
    private final int waitFor;
    private final Suspend.SCOPE scope;
    private final Class<BroadcastFilter>[] filters;
    private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
    private final boolean outputComments;
    private final ArrayList<ClusterBroadcastFilter> clusters
            = new ArrayList<ClusterBroadcastFilter>();
    // Broadcaster lookup topic for @Subscribe/@Publish; null otherwise.
    private final String topic;
protected Filter(Action action) {
    this(action, -1);
}
protected Filter(Action action, long timeout) {
    this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
    this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
    this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
    this(action, timeout, waitFor, scope, outputComments, null, null);
}
/**
 * Canonical constructor all the telescoping overloads delegate to.
 *
 * @param action         the behavior this filter applies
 * @param timeout        suspend timeout in ms (-1 = forever) / broadcast delay
 * @param waitFor        scheduled-broadcast initial wait (seconds)
 * @param scope          the @Suspend scope
 * @param outputComments whether to emit streaming padding
 * @param filters        BroadcastFilter classes to install, or null
 * @param topic          broadcaster lookup topic, or null
 */
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
    this.action = action;
    this.timeout = timeout;
    this.scope = scope;
    this.outputComments = outputComments;
    this.waitFor = waitFor;
    this.filters = filters;
    this.topic = topic;
}
/** No request-side filtering; this filter only post-processes the response. */
public ContainerRequestFilter getRequestFilter() {
    return null;
}
/** This instance is its own response filter. */
public ContainerResponseFilter getResponseFilter() {
    return this;
}
/**
 * Decide whether the connection must resume on the first broadcast: forced on
 * for JSONP and long-polling transports, otherwise the annotation's own value
 * is passed through.
 */
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    boolean pollingTransport = transport != null
            && (JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport));
    return pollingTransport || resumeOnBroadcast;
}
/**
 * Decide whether to emit the streaming padding ("junk") prelude: never for
 * WebSocket upgrades or JSONP/long-polling transports, otherwise the
 * annotation's own value is passed through.
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean webSocketEnabled = false;
    // NOTE(review): only the first "Connection" header value is inspected;
    // multiple Connection headers would be missed — confirm acceptable.
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 *
 * @param request  the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
    // Never touch a response that already carries a mapped exception.
    if (response.getMappedThrowable() != null) {
        return response;
    }
    AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
            (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
                    .getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    // NOTE: locals declared in the first case are deliberately reused by later
    // cases — Java switch cases share a single scope.
    switch (action) {
        case SUSPEND_RESPONSE:
            SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
            boolean outputJunk = outputJunk(request, s.outputComments());
            boolean resumeOnBroadcast = resumeOnBroadcast(request, s.resumeOnBroadcast());
            for (AtmosphereResourceEventListener el : s.listeners()) {
                if (r instanceof AtmosphereEventLifecycle) {
                    ((AtmosphereEventLifecycle) r).addEventListener(el);
                }
            }
            Broadcaster bc = s.broadcaster();
            if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
                bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            }
            boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            TrackableResource<? extends Trackable> trackableResource = null;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk,
                    translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case SUBSCRIBE_TRACKABLE:
        case SUBSCRIBE:
        case SUSPEND:
        case SUSPEND_TRACKABLE:
        case SUSPEND_RESUME:
            outputJunk = outputJunk(request, outputComments);
            resumeOnBroadcast = resumeOnBroadcast(request, (action == Action.SUSPEND_RESUME));
            // Instantiate and attach the annotation-declared event listeners.
            for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                try {
                    AtmosphereResourceEventListener el = listener.newInstance();
                    InjectorProvider.getInjector().inject(el);
                    if (r instanceof AtmosphereEventLifecycle) {
                        ((AtmosphereEventLifecycle) r).addEventListener(el);
                    }
                } catch (Throwable t) {
                    throw new WebApplicationException(
                            new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
                }
            }
            Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            // @Subscribe
            if (action == Action.SUBSCRIBE) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    // NOTE(review): drops the cause; IllegalStateException(e) would preserve the stack.
                    throw new IllegalStateException(e.getMessage());
                }
                broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
            }
            // Tracking is enabled by default
            supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            } else {
                trackableResource = null;
            }
            suspend(sessionSupported, resumeOnBroadcast, outputJunk, timeout, request, response,
                    broadcaster, r, scope);
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case RESUME:
            // Flush any pending entity before resuming the suspended peer.
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (sessionSupported) {
                r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
            } else {
                // Without a session, locate the resource by the UUID path segment
                // handed out in the Location header at suspend time.
                String path = response.getContainerRequest().getPath();
                r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
            }
            if (r != null) {
                resume(r);
            } else {
                // NOTE(review): the concatenated message is missing a space between "the" and "path".
                throw new WebApplicationException(
                        new IllegalStateException("Unable to retrieve suspended Response. " +
                                "Either session-support is not enabled in atmosphere.xml or the" +
                                "path used to resume is invalid."));
            }
            break;
        case BROADCAST:
        case PUBLISH:
        case RESUME_ON_BROADCAST:
            AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
            if (ar != null) {
                r = ar;
            }
            if (action == Action.PUBLISH) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    // NOTE(review): drops the cause, same as the SUBSCRIBE branch above.
                    throw new IllegalStateException(e.getMessage());
                }
                r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
            }
            broadcast(response, r, timeout);
            break;
        case SCHEDULE:
        case SCHEDULE_RESUME:
            Object o = response.getEntity();
            Broadcaster b = r.getBroadcaster();
            if (response.getEntity() instanceof Broadcastable) {
                b = ((Broadcastable) response.getEntity()).getBroadcaster();
                o = ((Broadcastable) response.getEntity()).getMessage();
                response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
            }
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (action == Action.SCHEDULE_RESUME) {
                configureResumeOnBroadcast(b);
            }
            // timeout doubles as the fixed-broadcast period here (seconds).
            b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
            break;
    }
    return response;
}
/**
 * Register the response entity with the {@link TrackableSession} so the client can
 * address it later via the {@code X-Atmosphere-Tracking-Id} header.
 * <p>
 * Callers only invoke this when the entity is a {@link TrackableResource} or null
 * (see the isTracked computation in filter()); {@code Class.cast} would throw for
 * any other non-null entity — assumed unreachable, TODO confirm.
 *
 * @param request  the current Jersey request (source of an existing tracking id)
 * @param response the current Jersey response whose entity may be trackable
 * @return the registered (possibly newly created) TrackableResource
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // cast(null) returns null, so a missing entity falls into the first branch.
    TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
    if (trackableResource == null) {
        trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap: the client response carries the inner entity, not the tracker.
        response.setEntity(trackableResource.entity());
    }
    // Tracking id precedence: request header, then the resource's own id, then a fresh UUID.
    String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackableUUID == null && trackableResource.trackingID() != null) {
        trackableUUID = trackableResource.trackingID();
    } else if (trackableUUID == null) {
        trackableUUID = UUID.randomUUID().toString();
    }
    trackableResource.setTrackingID(trackableUUID);
    TrackableSession.getDefault().track(trackableResource);
    // Echo the id to the client and stash it on the request for later filter passes.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    return trackableResource;
}
/**
 * Associates the live object with the tracked resource after suspension:
 * the AtmosphereResource itself when the tracked type is (a subtype of)
 * AtmosphereResource, otherwise its Broadcaster.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds transport, caching and CORS headers to the response being built.
 *
 * @param b the builder to append headers to
 * @return the same builder (ResponseBuilder is fluent, so reassignment is kept)
 * @throws IOException declared for interface compatibility
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    // True when the container has already set up a WebSocket suspend for this request.
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] connectionTokens = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String token : connectionTokens) {
            // trim(): "keep-alive, Upgrade" splits into tokens with leading blanks which
            // previously never matched; outputJunk() already trims the same way.
            if (token != null && token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                // Client asked for a WebSocket upgrade the server cannot honour.
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    // NOTE(review): both attributes are unboxed without a null check — assumes the
    // framework always sets them before this filter runs; confirm.
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flags every request currently suspended on this broadcaster so that the
 * next broadcast resumes it.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest suspended = (HttpServletRequest) resource.getRequest();
        suspended.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the cluster and broadcast filters declared via annotations on the
 * given Broadcaster. No-op when the BroadcasterConfig already has filters.
 *
 * @param bc the target broadcaster; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Only add a filter that was fully constructed AND injected.
                // (Previously addFilter ran outside the try, adding null on the first
                // failure or re-adding the previous filter on later failures.)
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Stores the AtmosphereResourceEventListener classes declared on the annotation;
// they are instantiated later in filter() when the request is suspended.
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcasts the response entity, immediately or with a delay.
 *
 * @param r     the container response whose entity is broadcast; its entity is
 *              replaced with the Broadcastable's response message when applicable
 * @param ar    the resource whose Broadcaster is used by default
 * @param delay -1 broadcasts synchronously (waits for completion), 0 delays
 *              until the next broadcast, otherwise delays by {@code delay} seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // Wait for the broadcast to complete before answering the caller.
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // Restore the interrupt status so callers up the stack can observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Applies the configured filters to the Broadcaster just before broadcasting;
// delegates to configureFilter, which is a no-op once filters are installed.
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resumes a previously suspended connection, completing its response.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Registers a ClusterBroadcastFilter (built from the @Cluster annotation in
// create()) to be installed on the Broadcaster by configureFilter().
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
/**
 * Suspends the current connection: resolves the Broadcaster to use, links the
 * AtmosphereResource with the request/session, writes padding and the entity
 * (with an optional Location header for later resume), then suspends.
 *
 * @param sessionSupported  true when the resource is cached in the HTTP session
 * @param resumeOnBroadcast true to resume the connection on the first broadcast
 * @param comments          true to emit streaming padding ("junk") before suspending
 * @param timeout           suspend timeout in milliseconds, -1 for no timeout
 * @param bc                the Broadcaster to use, or null to resolve one here
 * @param localScope        the @Suspend scope (REQUEST scope gets a private broadcaster)
 * @throws WebApplicationException when writing the response fails
 */
void suspend(boolean sessionSupported,
             boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 independently of the value of the entity (null or not).
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add a Location header if one is already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if (localScope == Suspend.SCOPE.REQUEST) {
        if (bc == null) {
            try {
                String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
                if (id == null) {
                    id = UUID.randomUUID().toString();
                }
                bc = broadcasterFactory.get(id);
                bc.setScope(Broadcaster.SCOPE.REQUEST);
            } catch (Exception ex) {
                logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
            }
        } else {
            // BUGFIX: the outer condition used to be "(localScope == REQUEST) && bc == null",
            // which made this branch unreachable, so a caller-supplied broadcaster was never
            // narrowed to REQUEST scope.
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            // contentType is non-null here whenever entity != null (computed above),
            // but keep the defensive fallback for the header value.
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter out invalid mixes of annotations.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type overrides every annotation.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        // Renamed from the misleading "suspendTimeout": this holds the
        // BroadcastFilter classes declared on @Broadcast.
        Class[] broadcastFilters = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilters, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilters, null);
        }
        list.addLast(f);
        if (am.isAnnotationPresent(Cluster.class)) {
            // Separate variable (was reusing the filters array variable above).
            Class[] clusterFilters = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : clusterFilters) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        // Filters work in milliseconds internally.
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        // Suspend must run before any broadcast filter added above.
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Resume.class)) {
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period} expressed in {@code tu} to milliseconds.
 * A period of -1 is the "no timeout" sentinel and is passed through unchanged.
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) {
        return period;
    }
    // TimeUnit.convert handles every unit uniformly, including the
    // sub-millisecond ones (truncating toward zero), which is exactly
    // what the previous per-unit switch did.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
MergeMethods
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public static final String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public static final String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public static final String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public static final String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public static final String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
/**
 * The asynchronous action a {@code Filter} instance performs when the
 * response passes through it; derived from the resource-method annotations.
 */
enum Action {
    SUSPEND,
    RESUME,
    BROADCAST,
    SUSPEND_RESUME,
    SCHEDULE_RESUME,
    RESUME_ON_BROADCAST,
    NONE,
    SCHEDULE,
    SUSPEND_RESPONSE,
    SUSPEND_TRACKABLE,
    SUBSCRIBE,
    SUBSCRIBE_TRACKABLE,
    PUBLISH,
    ASYNCHRONOUS
}
@Context
private HttpServletRequest servletReq;
@Context
private UriInfo uriInfo;
private boolean useResumeAnnotation = false;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates = new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
private final long timeout;
private final int waitFor;
private final Suspend.SCOPE scope;
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters = new ArrayList<ClusterBroadcastFilter>();
private final String topic;
// Convenience chain: each shorter constructor delegates downward, defaulting
// timeout=-1 (never), waitFor=0, scope=APPLICATION, outputComments=true,
// filters=null and topic=null.
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor: captures everything the response filter needs.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// This ResourceFilter only processes responses: no request-side filtering.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// The Filter itself implements ContainerResponseFilter.
public ContainerResponseFilter getResponseFilter() {
return this;
}
// Forces resume-on-broadcast for jsonp and long-polling transports (one
// response per connection); otherwise returns the caller's preference.
// NOTE(review): 'request' is not declared in the visible portion of this class —
// presumably a field set earlier in filter(); confirm against the full source.
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
/**
 * Decides whether streaming padding ("junk") must be written before suspending.
 * WebSocket upgrades, jsonp and long-polling never get padding; otherwise the
 * caller's preference wins.
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    if (webSocketEnabled) {
        return false;
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
// An exception was already mapped: pass the response through untouched.
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
// Dispatch on the Action this Filter instance was created for (see create()).
switch(action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
// The broadcaster name is read from the header whose NAME is 'topic'.
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
// NOTE(review): the message mentions X_ATMOSPHERE_TRACKING_ID but the check above
// reads the header named by 'topic' — confirm which header is intended.
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value " + X_ATMOSPHERE_TRANSPORT + " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
// waitFor == -1 -> use awaitAndBroadcast (wait for a resource) instead of broadcast.
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
// Instantiate and attach the declared event listeners before suspending.
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
// Broadcast the entity once the connection is actually suspended, then detach.
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
// One-shot listener: remove so later suspends don't re-broadcast.
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
// Polling or websocket-subprotocol path: broadcast immediately, no suspend.
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
// The resource method returned a SuspendResponse carrying its own configuration.
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk, translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response, broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
// Flush any entity first, then locate the suspended resource to resume.
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
// Without sessions, the resume target is addressed by the trailing path segment (uuid).
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
// NOTE(review): runtime string left untouched — the concatenation is missing
// spaces between "the" and "path".
throw new WebApplicationException(new IllegalStateException("Unable to retrieve suspended Response. " + "Either session-support is not enabled in atmosphere.xml or the" + "path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
// Periodic broadcast: timeout is the period, waitFor the initial delay (seconds).
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
// Registers the response entity with the tracking session so the client can
// address this resource later via the X-Atmosphere-tracking-id header.
// NOTE(review): TrackableResource.class.cast(...) throws ClassCastException if the
// entity is non-null but not a TrackableResource — callers gate on isTracked first.
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
if (trackableResource == null) {
// No trackable entity returned: synthesize one around the AtmosphereResource.
trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
} else {
// Unwrap so the client receives the real entity, not the tracking wrapper.
response.setEntity(trackableResource.entity());
}
// Tracking id preference: request header, then the resource's own id, then a fresh UUID.
String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
if (trackableUUID == null && trackableResource.trackingID() != null) {
trackableUUID = trackableResource.trackingID();
} else if (trackableUUID == null) {
trackableUUID = UUID.randomUUID().toString();
}
trackableResource.setTrackingID(trackableUUID);
TrackableSession.getDefault().track(trackableResource);
// Echo the id back in the response headers and stash it for later filters.
response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
return trackableResource;
}
/**
 * Associate the concrete resource with the {@link TrackableResource} produced
 * by {@code preTrack}: the {@link AtmosphereResource} itself when the trackable
 * was declared for a resource type, otherwise its {@link Broadcaster}.
 *
 * @param trackableResource the trackable created or looked up for this request
 * @param r                 the current atmosphere resource
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // isAssignableFrom already yields a boolean; the original "? true : false" was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Add transport- and caching-related headers to the response being built.
 * <ul>
 *   <li>flags an error when the client asked for a websocket upgrade but the
 *       container does not support it;</li>
 *   <li>optionally injects the standard HTTP/1.0 and HTTP/1.1 no-cache headers;</li>
 *   <li>optionally injects permissive CORS headers.</li>
 * </ul>
 *
 * @param b the response builder to augment
 * @return the augmented builder
 * @throws IOException declared for caller symmetry
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            // Bug fix: "Connection: keep-alive, Upgrade" splits into tokens with
            // leading whitespace, so trim before comparing (outputJunk() already does).
            if (upgrade != null && upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    // NOTE(review): assumes the framework always sets both attributes; a missing
    // attribute would NPE on unboxing — confirm the init-time guarantee.
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flag every resource currently registered on the broadcaster so its request
 * is resumed on the next broadcast.
 *
 * @param b the broadcaster whose resources are flagged
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Install the annotation-declared {@link BroadcastFilter}s (and cluster filters)
 * on the broadcaster's {@link BroadcasterConfig}, unless filters are already present.
 *
 * @param bc the target broadcaster; must not be null
 * @throws WebApplicationException if {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null)
        throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Bug fix: register inside the try so a failed instantiation no
                // longer adds a null (or the previous iteration's) filter.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
/**
 * Remember the {@link AtmosphereResourceEventListener} classes declared on the
 * annotation; instances are created and registered per request during filtering.
 *
 * @param listeners listener classes to instantiate for each suspended request
 */
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcast the response entity to the resources attached to the target
 * {@link Broadcaster}, then substitute the per-client response message.
 *
 * @param r     the container response whose entity is the broadcast payload
 * @param ar    the atmosphere resource bound to the current request
 * @param delay -1 broadcasts immediately and waits for completion, 0 delays
 *              until the next broadcast, &gt;0 delays by that many seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // A Broadcastable entity may carry its own broadcaster, the message to
    // broadcast, and a distinct message to hand back to the invoking client.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        // NOTE(review): assumes getResponseMessage() is non-null here — confirm.
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null)
                return;
            if (delay == -1) {
                // Immediate broadcast: block until done so the per-client
                // response message can be substituted afterwards.
                Future<Object> f = b.broadcast(msg);
                if (f == null)
                    return;
                Object t = f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
/**
 * Ensure the broadcaster carries the annotation-declared filters
 * (thin delegate to {@link #configureFilter(Broadcaster)}).
 */
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
/**
 * Resume a previously suspended connection.
 */
void resume(AtmosphereResource resource) {
    resource.resume();
}
/**
 * Register a cluster filter declared via @Cluster; applied first by
 * {@link #configureFilter(Broadcaster)}.
 */
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Resolve the broadcaster to use for a suspended connection (session cache,
 * Broadcastable entity, or a fresh REQUEST-scoped one), record resume metadata,
 * then delegate the actual suspension to
 * {@link #executeSuspend(AtmosphereResource, long, boolean, boolean, URI, ContainerRequest, ContainerResponse, boolean)}.
 *
 * @param resumeOnBroadcast whether the connection resumes after the first broadcast
 * @param comments          whether junk/padding comments should be written
 * @param timeout           suspend timeout in milliseconds (-1 = no timeout)
 * @param request           the container request
 * @param response          the container response
 * @param bc                an explicit broadcaster, or null to resolve one here
 * @param r                 the atmosphere resource to suspend
 * @param localScope        the @Suspend scope (REQUEST forces a per-request broadcaster)
 */
void suspend(boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add location header if already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST-scoped suspend without a broadcaster: create a per-request one.
    // (The former nested "if (bc == null) ... else ..." was redundant — the
    // outer condition already guaranteed bc == null, so the else was unreachable.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated new Boolean(true); readers unbox/compare by value.
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Perform the actual suspension: link request/response state for the runtime,
 * negotiate the response content type, write optional streaming padding and the
 * entity, then suspend the underlying connection.
 *
 * @param r                 the resource to suspend
 * @param timeout           suspend timeout in milliseconds (-1 = none)
 * @param comments          whether streaming padding/comments should be written
 * @param resumeOnBroadcast whether the connection resumes on first broadcast
 * @param location          optional Location header value for resume URLs
 * @param request           the container request
 * @param response          the container response
 * @param flushEntity       whether to evaluate (Callable) and write the entity now
 * @throws WebApplicationException wrapping any IOException raised while writing
 */
void executeSuspend(AtmosphereResource r, long timeout, boolean comments, boolean resumeOnBroadcast, URI location, ContainerRequest request, ContainerResponse response, boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will retrun the first
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // NOTE(review): substring(6) assumes serverInfo starts with "jetty/";
            // a non-numeric patch component would throw NumberFormatException — confirm.
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        // Streaming transports need padding so proxies/browsers flush early.
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                // A Callable entity is evaluated lazily, at suspend time.
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                logger.error("Error executing callable {}", entity);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        // The entity has been written manually; prevent Jersey from writing it again.
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
/**
 * Legacy suspend path that resolves the broadcaster and performs the whole
 * suspension inline (header negotiation, padding, entity write, suspend).
 * NOTE(review): this largely duplicates suspend(...)/executeSuspend(...) above —
 * candidate for consolidation once behavior parity is confirmed.
 *
 * @param sessionSupported  whether HTTP-session support is enabled
 * @param resumeOnBroadcast whether the connection resumes after the first broadcast
 * @param comments          whether streaming padding/comments should be written
 * @param timeout           suspend timeout in milliseconds (-1 = none)
 * @param request           the container request
 * @param response          the container response
 * @param bc                an explicit broadcaster, or null to resolve one here
 * @param r                 the atmosphere resource to suspend
 * @param localScope        the @Suspend scope (REQUEST forces a per-request broadcaster)
 * @throws WebApplicationException wrapping any IOException raised while writing
 */
void suspend(boolean sessionSupported, boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    // Reuse the broadcaster of a resource cached in the session, detaching the stale resource.
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
        // NOTE(review): the else branch below is unreachable — the outer
        // condition already requires bc == null.
        if (bc == null) {
            try {
                String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
                if (id == null) {
                    id = UUID.randomUUID().toString();
                }
                bc = broadcasterFactory.get(id);
                bc.setScope(Broadcaster.SCOPE.REQUEST);
            } catch (Exception ex) {
                logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
            }
        } else {
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will retrun the first
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // NOTE(review): substring(6) assumes serverInfo starts with "jetty/" — confirm.
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        // Streaming transports need padding so proxies/browsers flush early.
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        // The entity has been written manually; prevent Jersey from writing it again.
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
 * Create the {@link ResourceFilter}s which carry the information about the
 * annotations being processed.
 * <p/>
 * XXX Need to filter out invalid mixes of annotations.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null if none apply.
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type implies suspension regardless of annotations.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    // @Broadcast (+ optional @Cluster): broadcast the entity, optionally resuming on broadcast.
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        }
        list.addLast(f);
        if (am.isAnnotationPresent(Cluster.class)) {
            broadcastFilter = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    // @Asynchronous: suspend with a waitFor of -1 when waitForResource is requested.
    if (am.isAnnotationPresent(Asynchronous.class)) {
        int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
        Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
        boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
        f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
        f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
        list.addFirst(f);
    }
    // @Suspend: suspend for the annotated period (translated to milliseconds).
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        // A TrackableResource return type switches to the trackable variants.
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        list.addFirst(f);
    }
    // @Subscribe: suspend on the named topic (30s timeout, wait for resource).
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    // @Publish: broadcast to the named topic without suspending.
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    // @Resume: enables location-based resume handling factory-wide.
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    // @Schedule: periodic broadcast, optionally resuming on broadcast.
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Convert {@code period} expressed in {@code tu} to milliseconds.
 * A period of -1 means "no timeout" and is passed through untouched.
 *
 * @param period the annotated suspend period, or -1
 * @param tu     the unit the period is expressed in
 * @return the period in milliseconds
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) {
        return period;
    }
    // TimeUnit.convert covers every unit the old explicit switch handled,
    // including sub-millisecond units (truncating toward zero identically).
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
 * {@link ResourceFilterFactory} which intercepts the response and appropriately
 * sets the {@link AtmosphereResourceEvent} field based on the annotations the
 * application has defined.
 *
 * @author Jeanfrancois Arcand
 */
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Servlet-request/session attribute keys used to hand state to the Atmosphere runtime.
public static final String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public static final String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public static final String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public static final String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public static final String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The lifecycle action a Filter instance applies, derived from the method's annotations.
enum Action {
    SUSPEND(), RESUME(), BROADCAST(), SUSPEND_RESUME(), SCHEDULE_RESUME(), RESUME_ON_BROADCAST(), NONE(), SCHEDULE(), SUSPEND_RESPONSE(), SUSPEND_TRACKABLE(), SUBSCRIBE(), SUBSCRIBE_TRACKABLE(), PUBLISH(), ASYNCHRONOUS()
}
@Context
private HttpServletRequest servletReq;
@Context
private UriInfo uriInfo;
// Set once a @Resume-annotated method is seen; enables location-based resume URLs.
private boolean useResumeAnnotation = false;
// Maps resume UUIDs (appended to the Location header) to their suspended resources.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates = new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
// Lifecycle action this filter applies (see create(AbstractMethod)).
private final Action action;
// Suspend timeout in milliseconds; -1 means no timeout.
private final long timeout;
// -1 waits for a resource before broadcasting (see @Asynchronous.waitForResource).
private final int waitFor;
// @Suspend scope; REQUEST forces a per-request broadcaster.
private final Suspend.SCOPE scope;
// BroadcastFilter classes declared on the annotation, or null.
private final Class<BroadcastFilter>[] filters;
// Listener classes instantiated and registered per request.
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
// Whether streaming padding/comments are written on suspend.
private final boolean outputComments;
// @Cluster-declared filters; always installed first by configureFilter().
private final ArrayList<ClusterBroadcastFilter> clusters = new ArrayList<ClusterBroadcastFilter>();
// Topic/header name used by @Subscribe, @Publish and @Asynchronous.
private final String topic;
// Telescoping constructors: each shorter form fills in defaults
// (timeout -1, waitFor 0, APPLICATION scope, comments on, no filters/topic)
// and delegates to the canonical constructor below.
protected Filter(Action action) {
    this(action, -1);
}
protected Filter(Action action, long timeout) {
    this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
    this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
    this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
    this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor: stores all configuration verbatim.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
    this.action = action;
    this.timeout = timeout;
    this.scope = scope;
    this.outputComments = outputComments;
    this.waitFor = waitFor;
    this.filters = filters;
    this.topic = topic;
}
/**
 * This filter only post-processes responses; no request-side filtering.
 */
public ContainerRequestFilter getRequestFilter() {
    return null;
}
/**
 * The Filter itself implements the response-side filtering.
 */
public ContainerResponseFilter getResponseFilter() {
    return this;
}
/**
 * Decide whether the connection must resume on the first broadcast:
 * jsonp and long-polling transports always do; otherwise the
 * annotation-supplied value is used as-is.
 *
 * @param resumeOnBroadcast the annotation-declared default
 * @return true when the connection resumes on the next broadcast
 */
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    boolean oneShotTransport = transport != null
            && (JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport));
    return oneShotTransport || resumeOnBroadcast;
}
/**
 * Decide whether junk/padding should be written: never for an upgraded
 * websocket connection or for jsonp/long-polling transports; otherwise the
 * annotation-supplied value is used as-is.
 *
 * @param outputJunk the annotation-declared default
 * @return true when padding comments should be written
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        // Connection may carry a comma-separated token list, e.g. "keep-alive, Upgrade".
        String[] tokens = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String token : tokens) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    }
    if (transport != null && (JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch(action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value " + X_ATMOSPHERE_TRANSPORT + " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk, translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response, broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(new IllegalStateException("Unable to retrieve suspended Response. " + "Either session-support is not enabled in atmosphere.xml or the" + "path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Prepares a {@link TrackableResource} for the current request/response pair:
 * resolves (or generates) the tracking ID, registers the resource with the
 * TrackableSession, and mirrors the ID onto the response header and request
 * attribute so the client and later filters can correlate the suspended resource.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
// Entity may be null (tracking enabled via SUPPORT_TRACKABLE with no entity);
// Class.cast(null) yields null. Callers only invoke this after checking
// isAssignableFrom when the entity is non-null, so the cast is safe here.
TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
if (trackableResource == null) {
// No trackable entity: synthesize one keyed by the client-supplied tracking header.
trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
} else {
// Unwrap: the response should carry the underlying entity, not the tracker.
response.setEntity(trackableResource.entity());
}
// ID precedence: request header, then the trackable's own ID, then a fresh UUID.
String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
if (trackableUUID == null && trackableResource.trackingID() != null) {
trackableUUID = trackableResource.trackingID();
} else if (trackableUUID == null) {
trackableUUID = UUID.randomUUID().toString();
}
trackableResource.setTrackingID(trackableUUID);
TrackableSession.getDefault().track(trackableResource);
// Echo the ID back to the client and stash it for downstream components.
response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
return trackableResource;
}
/**
 * Associates the concrete runtime object with a previously registered
 * {@link TrackableResource}: the {@link AtmosphereResource} itself when the
 * trackable was declared for that type, otherwise the resource's Broadcaster.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // isAssignableFrom already yields a boolean; the former '? true : false' was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds Atmosphere-specific response headers.
 *
 * <ul>
 *   <li>flags a websocket upgrade request with {@code X_ATMOSPHERE_ERROR} when
 *       the container does not support websockets;</li>
 *   <li>injects standard no-cache headers when {@code NO_CACHE_HEADERS} is set;</li>
 *   <li>injects permissive CORS headers when access control is enabled.</li>
 * </ul>
 *
 * @param b the builder to augment
 * @return the (possibly replaced) builder
 * @throws IOException never thrown here; kept for caller compatibility
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        // Only the first Connection header is inspected; it may carry a
        // comma-separated token list, e.g. "keep-alive, Upgrade".
        String[] tokens = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : tokens) {
            // trim() added so " Upgrade" (after a comma) still matches,
            // consistent with outputJunk's handling of the same header.
            if (upgrade != null && upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Marks every resource attached to the given Broadcaster so that its
 * underlying request is resumed on the next broadcast.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest request = (HttpServletRequest) resource.getRequest();
        request.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the configured cluster and broadcast filters on the Broadcaster's
 * {@link BroadcasterConfig}, unless filters were already installed.
 *
 * @param bc the target Broadcaster; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null)
        throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /*
     * Here we can't predict if it's the same set of filters shared across all
     * Broadcasters, as each Broadcaster can have its own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Cluster filters always come first, before any transformation/filtering.
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Register inside the try: previously a failed instantiation
                // still reached addFilter with a null (or the previous
                // iteration's stale) instance.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Stores the AtmosphereResourceEventListener classes declared on the processed
// annotation; they are instantiated later, when the request is suspended.
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcasts the response entity using the resource's Broadcaster (or the
 * Broadcastable's own, when one is supplied).
 *
 * Delay semantics: {@code -1} broadcasts immediately and blocks until
 * delivery completes; {@code 0} queues a delayed broadcast; any positive
 * value delays the broadcast by that many seconds.
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null)
                return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null)
                    return;
                // Block until the broadcast completes; the result itself is
                // unused (previously stored in a dead local).
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Thin alias for configureFilter; kept as a separate entry point for readability
// at the call sites that conceptually "add" rather than "configure".
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resumes a previously suspended resource; extracted so subclasses/tests can
// intercept the resume call.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Registers a cluster-wide broadcast filter; applied first by configureFilter.
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
/**
 * Resolves the Broadcaster to use for this suspend, optionally publishes a
 * resume URI (Location header) for @Resume-based wake-up, then delegates the
 * actual suspension to {@link #executeSuspend}.
 *
 * @param resumeOnBroadcast resume the connection after the first broadcast
 * @param comments          write streaming padding ("junk") before suspending
 * @param timeout           suspend timeout in milliseconds (-1 = no timeout)
 * @param bc                Broadcaster chosen by the annotation, may be null
 * @param localScope        Suspend scope (REQUEST gets a per-request Broadcaster)
 */
void suspend(boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 independently of the entity value (null or not).
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add a Location header if one is already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    // Reuse (and detach) the Broadcaster of a resource cached in the session.
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope with no Broadcaster yet: create a per-request one keyed by
    // the tracking id. (The former nested "if (bc == null) ... else" re-check
    // was removed - bc is already known to be null inside this branch.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated 'new Boolean(true)' allocation.
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Performs the actual suspend: links the resource and response to the request
 * (and session when supported), resolves the response content-type, optionally
 * writes streaming padding and/or flushes the entity, then suspends the
 * underlying AtmosphereResource. Statement order here is significant.
 */
void executeSuspend(AtmosphereResource r, long timeout, boolean comments, boolean resumeOnBroadcast, URI location, ContainerRequest request, ContainerResponse response, boolean flushEntity) {
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
configureFilter(r.getBroadcaster());
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will return the first acceptable media type.
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
// Fall back to octet-stream when the writer cannot pick a concrete type.
if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
}
boolean eclipse362468 = false;
String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
// NOTE(review): assumes serverInfo is "jetty/x.y.z..." so substring(6)
// isolates the version digits - confirm across Jetty distributions.
if (serverInfo.indexOf("jetty") != -1) {
String[] jettyVersion = serverInfo.substring(6).split("\\.");
// https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
if (comments && eclipse362468) {
logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
}
}
// Streaming transports need padding so proxies/browsers flush the response.
if (!eclipse362468 && comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
// Location is consumed here so it is not written a second time below.
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
if (entity != null && flushEntity) {
try {
// A Callable entity is resolved lazily, right before being written.
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
} catch (Throwable t) {
logger.error("Error executing callable {}", entity);
entity = null;
}
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
// Entity cleared so Jersey does not attempt a second write after suspension.
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
/**
 * Legacy suspend variant taking sessionSupported explicitly and inlining the
 * executeSuspend logic. NOTE(review): near-duplicate of the 8-argument
 * suspend(...) + executeSuspend(...) pair above - appears to have been kept
 * during a merge; confirm whether callers still need it before consolidating.
 */
void suspend(boolean sessionSupported, boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
URI location = null;
// Do not add location header if already there.
if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
String uuid = UUID.randomUUID().toString();
location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
resumeCandidates.put(uuid, r);
servletReq.setAttribute(RESUME_UUID, uuid);
servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
}
if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
bc = r.getBroadcaster();
}
// Reuse (and detach) the Broadcaster of a session-cached suspended resource.
if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
bc = cached.getBroadcaster();
// Just in case something went wrong.
try {
bc.removeAtmosphereResource(cached);
} catch (IllegalStateException ex) {
logger.trace(ex.getMessage(), ex);
}
}
if (response.getEntity() instanceof Broadcastable) {
Broadcastable b = (Broadcastable) response.getEntity();
bc = b.getBroadcaster();
response.setEntity(b.getResponseMessage());
}
// REQUEST scope: build a per-request Broadcaster keyed by the tracking id.
// (The inner null re-check and its else-branch are dead - bc is null here.)
if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
if (bc == null) {
try {
String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
if (id == null) {
id = UUID.randomUUID().toString();
}
bc = broadcasterFactory.get(id);
bc.setScope(Broadcaster.SCOPE.REQUEST);
} catch (Exception ex) {
logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
}
} else {
bc.setScope(Broadcaster.SCOPE.REQUEST);
}
}
configureFilter(bc);
r.setBroadcaster(bc);
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will return the first acceptable media type.
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
// Fall back to octet-stream when no concrete type can be determined.
if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
}
boolean eclipse362468 = false;
String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
// NOTE(review): assumes serverInfo is "jetty/x.y.z..." - confirm.
if (serverInfo.indexOf("jetty") != -1) {
String[] jettyVersion = serverInfo.substring(6).split("\\.");
// https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
if (comments && eclipse362468) {
logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
}
}
// Streaming transports need padding so proxies/browsers flush the response.
if (!eclipse362468 && comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
if (entity != null) {
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
// Entity cleared so Jersey does not attempt a second write after suspension.
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter out invalid mixes of annotations.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
public List<ResourceFilter> create(AbstractMethod am) {
LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
Filter f;
if (logger.isDebugEnabled()) {
for (Annotation annotation : am.getAnnotations()) {
logger.debug("AtmosphereFilter processing annotation: {}", annotation);
}
}
// Fields/constructors also flow through a ResourceFilterFactory; only methods matter here.
if (am.getMethod() == null) {
return null;
}
// A SuspendResponse return type short-circuits all annotation processing.
if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
list.addLast(new Filter(Action.SUSPEND_RESPONSE));
return list;
}
// NOTE: addFirst vs addLast below is significant - suspend-type filters are
// prepended so they run before the broadcast filter appended here.
if (am.isAnnotationPresent(Broadcast.class)) {
int delay = am.getAnnotation(Broadcast.class).delay();
Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
} else {
f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
}
list.addLast(f);
// @Cluster only applies in combination with @Broadcast.
if (am.isAnnotationPresent(Cluster.class)) {
broadcastFilter = am.getAnnotation(Cluster.class).value();
for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
try {
ClusterBroadcastFilter cbf = c.newInstance();
InjectorProvider.getInjector().inject(cbf);
cbf.setUri(am.getAnnotation(Cluster.class).name());
f.addCluster(cbf);
} catch (Throwable t) {
logger.warn("Invalid ClusterBroadcastFilter", t);
}
}
}
}
if (am.isAnnotationPresent(Asynchronous.class)) {
int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
// waitFor == -1 signals "await resources before broadcasting".
f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Suspend.class)) {
long suspendTimeout = am.getAnnotation(Suspend.class).period();
TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
suspendTimeout = translateTimeUnit(suspendTimeout, tu);
Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
// A TrackableResource return type upgrades the action to its tracked variant.
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
} else {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
}
f.setListeners(am.getAnnotation(Suspend.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Subscribe.class)) {
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Subscribe.class).value());
f.setListeners(am.getAnnotation(Subscribe.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Publish.class)) {
f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Publish.class).value());
list.addFirst(f);
}
if (am.isAnnotationPresent(Resume.class)) {
// Enables the Location-header/resume-candidate machinery in suspend().
useResumeAnnotation = true;
int suspendTimeout = am.getAnnotation(Resume.class).value();
list.addFirst(new Filter(Action.RESUME, suspendTimeout));
}
if (am.isAnnotationPresent(Schedule.class)) {
int period = am.getAnnotation(Schedule.class).period();
int waitFor = am.getAnnotation(Schedule.class).waitFor();
if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
} else {
list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
}
}
// Nothing, normal Jersey application.
return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period}, expressed in {@code tu}, to milliseconds.
 * A period of -1 (infinite/unset sentinel) is returned unchanged.
 *
 * @param period the duration value, or -1
 * @param tu     the unit of {@code period}
 * @return the duration in milliseconds, or -1
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1)
        return period;
    // TimeUnit.convert handles every unit uniformly (including MILLISECONDS
    // as identity); replaces the former seven-case switch with identical results.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
KeepBothMethods
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public static final String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public static final String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public static final String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public static final String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public static final String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// Dispatch codes mapped from the Atmosphere annotations; each Filter instance
// carries exactly one Action and the response-side filter switches on it.
enum Action {
SUSPEND(), RESUME(), BROADCAST(), SUSPEND_RESUME(), SCHEDULE_RESUME(), RESUME_ON_BROADCAST(), NONE(), SCHEDULE(), SUSPEND_RESPONSE(), SUSPEND_TRACKABLE(), SUBSCRIBE(), SUBSCRIBE_TRACKABLE(), PUBLISH(), ASYNCHRONOUS()
}
@Context
private HttpServletRequest servletReq;
@Context
private UriInfo uriInfo;
private boolean useResumeAnnotation = false;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates = new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
private final long timeout;
private final int waitFor;
private final Suspend.SCOPE scope;
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters = new ArrayList<ClusterBroadcastFilter>();
private final String topic;
// Telescoping constructor chain: each shorter form fills in defaults
// (timeout -1, waitFor 0, APPLICATION scope, outputComments true, no filters,
// no topic) and delegates to the canonical constructor at the bottom.
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor: assigns every configuration field of the Filter.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
/** No request-side filtering is performed; all work happens on the response path. */
public ContainerRequestFilter getRequestFilter() {
    return null;
}
/** This Filter itself implements {@link ContainerResponseFilter}. */
public ContainerResponseFilter getResponseFilter() {
    return this;
}
/**
 * Decides whether the connection must be resumed after the first broadcast.
 * JSONP and long-polling transports cannot keep the connection open across
 * broadcasts, so they always force a resume; otherwise the annotation's
 * default is honored.
 *
 * @param resumeOnBroadcast the default taken from the annotation
 * @return true for JSONP/long-polling, otherwise {@code resumeOnBroadcast}
 */
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
    final String negotiated = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    final boolean forcesResume = negotiated != null
            && (JSONP_TRANSPORT.equals(negotiated) || LONG_POLLING_TRANSPORT.equals(negotiated));
    return forcesResume || resumeOnBroadcast;
}
/**
 * Decides whether streaming "junk" padding should be written to the response.
 * Padding is only useful for the streaming transport; it is suppressed for
 * WebSocket upgrades, JSONP and long-polling.
 *
 * @param outputJunk the default decision taken from the annotation
 * @return false for WebSocket/JSONP/long-polling, otherwise {@code outputJunk}
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    // NOTE(review): only the first "Connection" header value is inspected; an Upgrade
    // token carried in a subsequent Connection header would be missed — confirm.
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
 * Variant of {@link #resumeOnBroadcast(boolean)} that reads the transport
 * header from the Jersey {@link ContainerRequest} instead of the raw
 * servlet request.
 *
 * @param request           the current Jersey request
 * @param resumeOnBroadcast the default taken from the annotation
 * @return true for JSONP/long-polling, otherwise {@code resumeOnBroadcast}
 */
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
    final String negotiated = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (negotiated == null) {
        return resumeOnBroadcast;
    }
    return JSONP_TRANSPORT.equals(negotiated) || LONG_POLLING_TRANSPORT.equals(negotiated)
            ? true
            : resumeOnBroadcast;
}
/**
 * Variant of {@link #outputJunk(boolean)} that reads the transport header from
 * the Jersey {@link ContainerRequest}.
 *
 * @param request    the current Jersey request
 * @param outputJunk the default decision taken from the annotation
 * @return false for WebSocket/JSONP/long-polling, otherwise {@code outputJunk}
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean webSocketEnabled = false;
    // NOTE(review): the Connection header is still read from servletReq while the
    // transport below comes from the ContainerRequest — confirm this mix is intended.
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 * <p>
 * The Action chosen in {@link AtmosphereFilter#create} selects the case executed
 * here. Locals declared in the SUSPEND_RESPONSE case (outputJunk, resumeOnBroadcast,
 * supportTrackable, isTracked, trackableResource) are deliberately reused by the
 * later cases — they share the switch-level scope.
 *
 * @param request the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
    // Let Jersey's exception mapping handle failed invocations untouched.
    if (response.getMappedThrowable() != null) {
        return response;
    }
    AtmosphereResource<HttpServletRequest, HttpServletResponse> r = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
    // NOTE(review): mutates shared filter state from a per-request path;
    // useResumeAnnotation is not volatile — confirm this is intended.
    if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
        useResumeAnnotation = true;
    }
    switch(action) {
        // @Asynchronous: suspend (or poll) and broadcast the returned entity.
        case ASYNCHRONOUS:
            // Force the status code to 200 events independently of the value of the entity (null or not)
            if (response.getStatus() == 204) {
                response.setStatus(200);
            }
            String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
            // Here `topic` holds the header NAME declared by @Asynchronous(header=...),
            // whose value names the Broadcaster to use.
            String broadcasterName = servletReq.getHeader(topic);
            if (transport == null || broadcasterName == null) {
                throw new WebApplicationException(new IllegalStateException("Must specify transport using header value " + X_ATMOSPHERE_TRANSPORT + " and uuid " + X_ATMOSPHERE_TRACKING_ID));
            }
            String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
            // waitFor == -1 means "await the resource before broadcasting" (see create()).
            final boolean waitForResource = waitFor == -1 ? true : false;
            final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
            if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
                boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
                final boolean resumeOnBroadcast = resumeOnBroadcast(false);
                // NOTE(review): `listeners` is only set via setListeners(); @Asynchronous
                // always supplies eventListeners, otherwise this would NPE — confirm.
                for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                    try {
                        AtmosphereResourceEventListener el = listener.newInstance();
                        InjectorProvider.getInjector().inject(el);
                        if (r instanceof AtmosphereEventLifecycle) {
                            r.addEventListener(el);
                        }
                    } catch (Throwable t) {
                        throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
                    }
                }
                final Object entity = response.getEntity();
                // Broadcast the entity once the connection is actually suspended,
                // then detach this one-shot listener.
                r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
                    @Override
                    public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
                        try {
                            if (entity != null) {
                                if (waitForResource) {
                                    bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
                                } else {
                                    bcaster.broadcast(entity);
                                }
                            }
                        } finally {
                            event.getResource().removeEventListener(this);
                        }
                    }
                });
                if (resumeOnBroadcast) {
                    servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
                }
                r.setBroadcaster(bcaster);
                executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
            } else {
                // Polling or websocket sub-protocol: broadcast immediately, no suspend.
                Object entity = response.getEntity();
                if (waitForResource) {
                    bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
                } else {
                    bcaster.broadcast(entity);
                }
                if (subProtocol == null) {
                    try {
                        // A Callable entity is evaluated lazily before writing.
                        if (Callable.class.isAssignableFrom(entity.getClass())) {
                            entity = Callable.class.cast(entity).call();
                        }
                        response.setEntity(entity);
                        response.write();
                    } catch (Throwable t) {
                        logger.debug("Error running Callable", t);
                        response.setEntity(null);
                    }
                } else {
                    response.setEntity(null);
                }
            }
            break;
        // Resource method returned a SuspendResponse<?>: read all settings from it.
        case SUSPEND_RESPONSE:
            SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
            boolean outputJunk = outputJunk(s.outputComments());
            boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
            for (AtmosphereResourceEventListener el : s.listeners()) {
                if (r instanceof AtmosphereEventLifecycle) {
                    r.addEventListener(el);
                }
            }
            Broadcaster bc = s.broadcaster();
            if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
                bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            }
            boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            TrackableResource<? extends Trackable> trackableResource = null;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            }
            suspend(resumeOnBroadcast, outputJunk, translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        // @Suspend / @Subscribe family: suspend the connection, optionally tracking it.
        case SUBSCRIBE_TRACKABLE:
        case SUBSCRIBE:
        case SUSPEND:
        case SUSPEND_TRACKABLE:
        case SUSPEND_RESUME:
            outputJunk = outputJunk(outputComments);
            resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
            for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                try {
                    AtmosphereResourceEventListener el = listener.newInstance();
                    InjectorProvider.getInjector().inject(el);
                    if (r instanceof AtmosphereEventLifecycle) {
                        ((AtmosphereEventLifecycle) r).addEventListener(el);
                    }
                } catch (Throwable t) {
                    throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
                }
            }
            Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            // @Subscribe
            if (action == Action.SUBSCRIBE) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
            }
            // Tracking is enabled by default
            supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            } else {
                trackableResource = null;
            }
            suspend(resumeOnBroadcast, outputJunk, timeout, request, response, broadcaster, r, scope);
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        // @Resume: locate the previously suspended resource and resume it.
        case RESUME:
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
            if (sessionSupported) {
                r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
            } else {
                // Without sessions, the last path segment is the resume UUID
                // handed out when the connection was suspended.
                String path = response.getContainerRequest().getPath();
                r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
            }
            if (r != null) {
                resume(r);
            } else {
                throw new WebApplicationException(new IllegalStateException("Unable to retrieve suspended Response. " + "Either session-support is not enabled in atmosphere.xml or the" + "path used to resume is invalid."));
            }
            break;
        // @Broadcast / @Publish: push the entity through the Broadcaster.
        case BROADCAST:
        case PUBLISH:
        case RESUME_ON_BROADCAST:
            AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
            if (ar != null) {
                r = ar;
            }
            if (action == Action.PUBLISH) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
            }
            broadcast(response, r, timeout);
            break;
        // @Schedule: periodic broadcast of the entity; units are seconds here.
        case SCHEDULE:
        case SCHEDULE_RESUME:
            Object o = response.getEntity();
            Broadcaster b = r.getBroadcaster();
            if (response.getEntity() instanceof Broadcastable) {
                b = ((Broadcastable) response.getEntity()).getBroadcaster();
                o = ((Broadcastable) response.getEntity()).getMessage();
                response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
            }
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (action == Action.SCHEDULE_RESUME) {
                configureResumeOnBroadcast(b);
            }
            b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
            break;
    }
    return response;
}
/**
 * Registers the response with the {@link TrackableSession} before suspending,
 * assigning (or propagating) the tracking id and echoing it back to the client
 * via the {@code X-Atmosphere-tracking-id} header.
 *
 * @param request  the current Jersey request
 * @param response the current Jersey response; its entity may be unwrapped
 * @return the registered TrackableResource
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // NOTE(review): this cast throws ClassCastException if the entity is non-null
    // and not a TrackableResource; callers gate on isTracked before invoking — confirm.
    TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
    if (trackableResource == null) {
        // No entity: synthesize a TrackableResource around the AtmosphereResource itself.
        trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap so the client receives the real entity, not the tracking wrapper.
        response.setEntity(trackableResource.entity());
    }
    // Tracking id precedence: request header, then the resource's own id, then a new UUID.
    String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackableUUID == null && trackableResource.trackingID() != null) {
        trackableUUID = trackableResource.trackingID();
    } else if (trackableUUID == null) {
        trackableUUID = UUID.randomUUID().toString();
    }
    trackableResource.setTrackingID(trackableUUID);
    TrackableSession.getDefault().track(trackableResource);
    // Echo the id to the client and expose it to downstream request handling.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    return trackableResource;
}
/**
 * Associates the live resource with a {@link TrackableResource} registered by
 * {@link #preTrack}. A tracked type of AtmosphereResource receives the resource
 * itself; any other tracked type receives the resource's Broadcaster.
 *
 * @param trackableResource the tracking wrapper to complete
 * @param r                 the suspended resource
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // Redundant "? true : false" removed from the original.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds transport-related response headers: a websocket error marker when an
 * Upgrade was requested but is unsupported, no-cache headers, and CORS
 * access-control headers, all driven by request attributes.
 *
 * @param b the builder to augment
 * @return the augmented builder
 * @throws IOException declared, but not thrown by the current implementation
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    // NOTE(review): only the first "Connection" header is examined, and unlike
    // outputJunk() the tokens are not trim()ed before comparison — confirm intended.
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade != null && upgrade.equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Marks every resource attached to the Broadcaster so that its connection
 * resumes after the next broadcast, by flagging the underlying request.
 *
 * @param b the Broadcaster whose resources are flagged
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs this filter's {@link ClusterBroadcastFilter}s and
 * {@link BroadcastFilter}s on the given Broadcaster, once.
 *
 * @param bc the Broadcaster to configure; must not be null
 * @throws WebApplicationException if {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null)
        throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // BUGFIX: addFilter moved inside the try. Previously it ran even when
                // newInstance()/inject() threw, registering null (first iteration) or
                // the previous iteration's filter a second time.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Installs the AtmosphereResourceEventListener classes declared on the annotation;
// they are instantiated per request inside filter().
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcasts the response entity using the resource's Broadcaster (or the one
 * supplied by a {@link Broadcastable} entity), optionally delaying the broadcast.
 *
 * @param r     the response whose entity is broadcast
 * @param ar    the resource whose Broadcaster is used by default
 * @param delay -1 = broadcast now and wait for completion; 0 = delay
 *              indefinitely; otherwise delay in seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        // NOTE(review): NPEs if getResponseMessage() returns null — confirm it cannot.
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null)
                return;
            if (delay == -1) {
                // Immediate broadcast: block until delivery completes so the
                // Broadcastable's response message can replace the entity.
                Future<Object> f = b.broadcast(msg);
                if (f == null)
                    return;
                Object t = f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            // NOTE(review): the interrupt status is not restored here.
            logger.error("broadcast interrupted", ex);
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
/** Ensures the Broadcaster's filter chain is configured before a broadcast. */
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
/** Resumes a previously suspended resource. */
void resume(AtmosphereResource resource) {
    resource.resume();
}
/** Registers a ClusterBroadcastFilter to be installed by configureFilter(). */
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Resolves the Broadcaster and resume Location for the current request, then
 * delegates the actual suspend to {@link #executeSuspend}.
 *
 * @param resumeOnBroadcast whether the connection resumes after the next broadcast
 * @param comments          whether streaming padding/comments may be written
 * @param timeout           suspend timeout (-1 = no timeout)
 * @param bc                explicit Broadcaster, or null to resolve one here
 * @param r                 the resource to suspend
 * @param localScope        the Suspend scope governing Broadcaster resolution
 */
void suspend(boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add location header if already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Hand out a UUID-based Location so the client can resume without a session.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // Reuse the Broadcaster of the resource already suspended in this session,
        // detaching the stale resource from it first.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope without an explicit Broadcaster: create a private, request-scoped one.
    // (The original re-tested bc == null here and carried an unreachable else branch;
    // the dead code has been removed — behavior is unchanged.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Common suspend path: links the resource and response to the request/session,
 * resolves the response content type, optionally writes streaming padding and
 * the entity, then suspends the underlying connection.
 *
 * @param r                 the resource to suspend
 * @param timeout           suspend timeout (-1 = no timeout)
 * @param comments          whether streaming padding/comments should be written
 * @param resumeOnBroadcast whether the connection resumes after the next broadcast
 * @param location          optional Location header for UUID-based resuming
 * @param flushEntity       whether the entity is written before suspending
 */
void executeSuspend(AtmosphereResource r, long timeout, boolean comments, boolean resumeOnBroadcast, URI location, ContainerRequest request, ContainerResponse response, boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // NOTE(review): assumes serverInfo is "jetty/x.y.z" with numeric parts;
            // a non-numeric component would throw NumberFormatException here — confirm.
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Only send the Location header once.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                // A Callable entity is evaluated lazily, right before writing.
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                logger.error("Error executing callable {}", entity);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
/**
 * Suspend variant taking an explicit {@code sessionSupported} flag.
 * <p>
 * NOTE(review): this is a near-duplicate of the 8-argument suspend() followed by
 * executeSuspend() (without the Callable handling and the useResumeAnnotation
 * guard); no caller is visible in this file — confirm whether it is still needed
 * before consolidating.
 */
void suspend(boolean sessionSupported, boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Hand out a UUID-based Location so the client can resume without a session.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // Reuse the Broadcaster of the resource already suspended in this session.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // NOTE(review): the inner bc == null re-test is always true under the outer
    // guard, so the else branch below is unreachable (same dead code as the
    // 8-argument suspend()).
    if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
        if (bc == null) {
            try {
                String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
                if (id == null) {
                    id = UUID.randomUUID().toString();
                }
                bc = broadcasterFactory.get(id);
                bc.setScope(Broadcaster.SCOPE.REQUEST);
            } catch (Exception ex) {
                logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
            }
        } else {
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // NOTE(review): assumes serverInfo is "jetty/x.y.z" with numeric parts.
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Only send the Location header once.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
 * Create a {@link ResourceFilter} which contains the information about the
 * annotation being processed.
 * <p/>
 * A Filter is created per recognized annotation on the resource method; a
 * {@code SuspendResponse} return type short-circuits annotation processing.
 * Returning null tells Jersey no filtering applies to this method.
 * <p/>
 * XXX Need to filter invalid mix of annotation.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null if none apply.
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type carries its own configuration; ignore annotations.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    // @Broadcast (optionally with @Cluster): broadcast the returned entity.
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        }
        list.addLast(f);
        if (am.isAnnotationPresent(Cluster.class)) {
            broadcastFilter = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    // @Asynchronous: the topic slot carries the header NAME naming the Broadcaster.
    if (am.isAnnotationPresent(Asynchronous.class)) {
        int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
        Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
        boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
        f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
        f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
        list.addFirst(f);
    }
    // @Suspend: period is normalized to milliseconds here.
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        list.addFirst(f);
    }
    // @Subscribe: suspend against a named topic's Broadcaster.
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    // @Publish: broadcast to a named topic without suspending.
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    // @Resume: resume a previously suspended connection.
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    // @Schedule: periodic broadcasts (period/waitFor in seconds; see filter()).
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period} expressed in {@code tu} into milliseconds.
 * A period of -1 (meaning "no timeout") is passed through untouched.
 *
 * @param period the duration value, or -1 for "no timeout"
 * @param tu     the unit {@code period} is expressed in
 * @return the duration in milliseconds, or -1 if {@code period} was -1
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) {
        return period;
    }
    // TimeUnit already converts from any unit to milliseconds; the previous
    // seven-case switch was equivalent to this single call.
    return tu.toMillis(period);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
 * {@link ResourceFilterFactory} which intercepts the response and appropriately
 * sets the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
// Shared logger for the factory and every Filter instance it creates.
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute key under which the currently suspended AtmosphereResource is stored.
public static final String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
// Request attribute key holding the UUID generated for location-header based resumption.
public static final String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
// Request attribute key exposing the resume-candidate map (uuid -> suspended resource).
public static final String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): unlike the keys above, the two keys below are missing the "."
// separator ("...FilterinjectedBroadcaster"). Changing the literal would change the
// attribute name at runtime, so it is only flagged here — confirm before normalizing.
public static final String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public static final String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
/**
 * The suspend/resume/broadcast behavior a generated {@link Filter} applies,
 * derived from the Atmosphere annotation found on the resource method.
 */
enum Action {
    // Redundant empty constant argument lists "()" removed; the constants
    // themselves (names and order) are unchanged.
    SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME, SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE, SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH, ASYNCHRONOUS
}
// Injected by Jersey: the raw servlet request backing the current invocation.
@Context
private HttpServletRequest servletReq;
// Injected by Jersey: URI information used to build resume Location headers.
@Context
private UriInfo uriInfo;
// Flipped to true per request when ApplicationConfig.SUPPORT_LOCATION_HEADER is set.
// NOTE(review): this factory is shared, yet the flag is mutated from filter(...)
// without synchronization and never reset — looks racy under concurrency; confirm.
private boolean useResumeAnnotation = false;
// Maps resume UUIDs to their suspended resources so a later resume request can find them.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates = new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
// Which suspend/resume/broadcast behavior this filter applies.
private final Action action;
// Suspend/broadcast timeout; -1 means no timeout (or broadcast immediately).
private final long timeout;
// For SCHEDULE: the broadcast period; for ASYNCHRONOUS: -1 means wait for the resource.
private final int waitFor;
// Suspend scope (APPLICATION, SESSION or REQUEST) as declared on the annotation.
private final Suspend.SCOPE scope;
// BroadcastFilter classes to install on the Broadcaster (see configureFilter(...)).
private final Class<BroadcastFilter>[] filters;
// Event listener classes to attach to the resource when suspending; set via setListeners(...).
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
// Whether streaming padding/comments may be written before suspending.
private final boolean outputComments;
// Cluster filters registered via addCluster(...); installed first by configureFilter(...).
private final ArrayList<ClusterBroadcastFilter> clusters = new ArrayList<ClusterBroadcastFilter>();
// Broadcaster topic name (for ASYNCHRONOUS: the name of the header carrying the topic).
private final String topic;
// Telescoping constructors: each shorter form delegates to the full
// seven-argument constructor with these defaults:
//   timeout = -1 (none), waitFor = 0, scope = APPLICATION,
//   outputComments = true, filters = null, topic = null.
protected Filter(Action action) {
    this(action, -1);
}
protected Filter(Action action, long timeout) {
    this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
    this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
    this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
    this(action, timeout, waitFor, scope, outputComments, null, null);
}
// The canonical constructor: every other constructor funnels here.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
    this.action = action;
    this.timeout = timeout;
    this.scope = scope;
    this.outputComments = outputComments;
    this.waitFor = waitFor;
    this.filters = filters;
    this.topic = topic;
}
// No request-side filtering is performed; only responses are intercepted.
public ContainerRequestFilter getRequestFilter() {
    return null;
}
// This Filter instance itself acts as the response filter.
public ContainerResponseFilter getResponseFilter() {
    return this;
}
/**
 * Decides whether the connection must be resumed after the first broadcast.
 * JSONP and long-polling transports always resume; for any other transport
 * the annotated preference is returned unchanged.
 */
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
    final String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport == null) {
        return resumeOnBroadcast;
    }
    return JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport) || resumeOnBroadcast;
}
/**
 * Decides whether streaming padding ("junk") should be written before suspending.
 * WebSocket upgrades, JSONP and long-polling transports never receive padding;
 * otherwise the annotated preference is honoured.
 */
boolean outputJunk(boolean outputJunk) {
    boolean websocketUpgrade = false;
    final Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // Only the first Connection header value is inspected, split on commas.
        for (String token : connection.nextElement().split(",")) {
            if (WEBSOCKET_UPGRADE.equalsIgnoreCase(token.trim())) {
                websocketUpgrade = true;
                break;
            }
        }
    }
    if (websocketUpgrade) {
        return false;
    }
    final String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
 * Same as {@link #resumeOnBroadcast(boolean)}, but reads the transport header
 * from the Jersey {@link ContainerRequest} instead of the raw servlet request.
 */
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
    final String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (transport == null) {
        return resumeOnBroadcast;
    }
    return JSONP_TRANSPORT.equals(transport) || LONG_POLLING_TRANSPORT.equals(transport) || resumeOnBroadcast;
}
/**
 * Same as {@link #outputJunk(boolean)}, but reads the transport header from the
 * Jersey {@link ContainerRequest}. The Connection header is still read from the
 * underlying servlet request.
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean websocketUpgrade = false;
    final Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // Only the first Connection header value is inspected, split on commas.
        for (String token : connection.nextElement().split(",")) {
            if (WEBSOCKET_UPGRADE.equalsIgnoreCase(token.trim())) {
                websocketUpgrade = true;
                break;
            }
        }
    }
    if (websocketUpgrade) {
        return false;
    }
    final String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
    // A Throwable already mapped to a response by Jersey is passed through untouched.
    if (response.getMappedThrowable() != null) {
        return response;
    }
    AtmosphereResource<HttpServletRequest, HttpServletResponse> r = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
    if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
        useResumeAnnotation = true;
    }
    switch(action) {
        case ASYNCHRONOUS:
            // Force the status code to 200 regardless of whether the entity is null or not.
            if (response.getStatus() == 204) {
                response.setStatus(200);
            }
            String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
            // For @Asynchronous, 'topic' is the NAME of the header that carries the broadcaster name.
            String broadcasterName = servletReq.getHeader(topic);
            if (transport == null || broadcasterName == null) {
                throw new WebApplicationException(new IllegalStateException("Must specify transport using header value " + X_ATMOSPHERE_TRANSPORT + " and uuid " + X_ATMOSPHERE_TRACKING_ID));
            }
            String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
            // waitFor == -1 means "wait for a resource to attach before broadcasting".
            final boolean waitForResource = waitFor == -1 ? true : false;
            final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
            if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
                boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
                final boolean resumeOnBroadcast = resumeOnBroadcast(false);
                // NOTE(review): 'listeners' is only non-null when setListeners(...) was called;
                // create(...) does so for @Asynchronous — confirm before relying on it here.
                for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                    try {
                        AtmosphereResourceEventListener el = listener.newInstance();
                        InjectorProvider.getInjector().inject(el);
                        if (r instanceof AtmosphereEventLifecycle) {
                            r.addEventListener(el);
                        }
                    } catch (Throwable t) {
                        throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
                    }
                }
                final Object entity = response.getEntity();
                // Defer the broadcast until the connection is actually suspended, then detach
                // this one-shot listener in the finally block.
                r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
                    @Override
                    public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
                        try {
                            if (entity != null) {
                                if (waitForResource) {
                                    bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
                                } else {
                                    bcaster.broadcast(entity);
                                }
                            }
                        } finally {
                            event.getResource().removeEventListener(this);
                        }
                    }
                });
                if (resumeOnBroadcast) {
                    servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
                }
                r.setBroadcaster(bcaster);
                executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
            } else {
                // Polling transport or websocket sub-protocol: broadcast now, no suspend.
                Object entity = response.getEntity();
                if (waitForResource) {
                    bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
                } else {
                    bcaster.broadcast(entity);
                }
                if (subProtocol == null) {
                    try {
                        // A Callable entity is resolved to its computed value before writing.
                        if (Callable.class.isAssignableFrom(entity.getClass())) {
                            entity = Callable.class.cast(entity).call();
                        }
                        response.setEntity(entity);
                        response.write();
                    } catch (Throwable t) {
                        logger.debug("Error running Callable", t);
                        response.setEntity(null);
                    }
                } else {
                    response.setEntity(null);
                }
            }
            break;
        case SUSPEND_RESPONSE:
            // The resource method returned a SuspendResponse; unwrap it for its settings.
            SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
            boolean outputJunk = outputJunk(s.outputComments());
            boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
            for (AtmosphereResourceEventListener el : s.listeners()) {
                if (r instanceof AtmosphereEventLifecycle) {
                    r.addEventListener(el);
                }
            }
            Broadcaster bc = s.broadcaster();
            if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
                bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            }
            // Tracking is enabled when the servlet layer set SUPPORT_TRACKABLE.
            boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            TrackableResource<? extends Trackable> trackableResource = null;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            }
            suspend(resumeOnBroadcast, outputJunk, translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case SUBSCRIBE_TRACKABLE:
        case SUBSCRIBE:
        case SUSPEND:
        case SUSPEND_TRACKABLE:
        case SUSPEND_RESUME:
            outputJunk = outputJunk(outputComments);
            // Only SUSPEND_RESUME resumes on the first broadcast by default.
            resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
            for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
                try {
                    AtmosphereResourceEventListener el = listener.newInstance();
                    InjectorProvider.getInjector().inject(el);
                    if (r instanceof AtmosphereEventLifecycle) {
                        ((AtmosphereEventLifecycle) r).addEventListener(el);
                    }
                } catch (Throwable t) {
                    throw new WebApplicationException(new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
                }
            }
            Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
            // @Subscribe
            if (action == Action.SUBSCRIBE) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
            }
            // Tracking is enabled by default
            supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
            // Register our TrackableResource
            isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
            if (isTracked) {
                trackableResource = preTrack(request, response);
            } else {
                trackableResource = null;
            }
            suspend(resumeOnBroadcast, outputJunk, timeout, request, response, broadcaster, r, scope);
            // Associate the tracked resource.
            if (isTracked && trackableResource != null) {
                postTrack(trackableResource, r);
            }
            break;
        case RESUME:
            // Flush this response's own entity before resuming the suspended one.
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
            if (sessionSupported) {
                r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
            } else {
                // Without sessions, the suspended resource is looked up by the trailing
                // UUID path segment registered in resumeCandidates at suspend time.
                String path = response.getContainerRequest().getPath();
                r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
            }
            if (r != null) {
                resume(r);
            } else {
                throw new WebApplicationException(new IllegalStateException("Unable to retrieve suspended Response. " + "Either session-support is not enabled in atmosphere.xml or the" + "path used to resume is invalid."));
            }
            break;
        case BROADCAST:
        case PUBLISH:
        case RESUME_ON_BROADCAST:
            // Prefer the suspended resource stashed on the request, if any.
            AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
            if (ar != null) {
                r = ar;
            }
            if (action == Action.PUBLISH) {
                Class<Broadcaster> c = null;
                try {
                    c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
                } catch (Throwable e) {
                    throw new IllegalStateException(e.getMessage());
                }
                r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
            }
            broadcast(response, r, timeout);
            break;
        case SCHEDULE:
        case SCHEDULE_RESUME:
            Object o = response.getEntity();
            Broadcaster b = r.getBroadcaster();
            // A Broadcastable entity carries its own broadcaster, message and response message.
            if (response.getEntity() instanceof Broadcastable) {
                b = ((Broadcastable) response.getEntity()).getBroadcaster();
                o = ((Broadcastable) response.getEntity()).getMessage();
                response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
            }
            if (response.getEntity() != null) {
                try {
                    response.write();
                } catch (IOException ex) {
                    throw new WebApplicationException(ex);
                }
            }
            if (action == Action.SCHEDULE_RESUME) {
                configureResumeOnBroadcast(b);
            }
            // Here 'waitFor' is the period and 'timeout' the initial delay, in seconds.
            b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
            break;
    }
    return response;
}
/**
 * Prepares a TrackableResource for this exchange: reuses the one returned as the
 * response entity (unwrapping its real entity), or creates a fresh placeholder.
 * A tracking id is taken from the request header, then from the resource itself,
 * then freshly generated, and is echoed back as a response header and request attribute.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    TrackableResource<? extends Trackable> tracked = TrackableResource.class.cast(response.getEntity());
    if (tracked == null) {
        tracked = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // The response should carry the wrapped entity, not the tracker itself.
        response.setEntity(tracked.entity());
    }
    String trackingId = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackingId == null) {
        trackingId = tracked.trackingID() != null ? tracked.trackingID() : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(trackingId);
    TrackableSession.getDefault().track(tracked);
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Binds the concrete object to the tracker once suspension completed: the
 * AtmosphereResource itself when that is the declared type, otherwise its Broadcaster.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    final boolean wantsResource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(wantsResource ? r : r.getBroadcaster());
}
/**
 * Adds protocol headers to the response builder: a websocket error header when an
 * Upgrade was requested but websockets are unsupported, HTTP/1.0+1.1 no-cache
 * headers when NO_CACHE_HEADERS is enabled, and CORS headers when
 * DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER is enabled.
 *
 * @param b the builder to augment
 * @return the augmented builder
 * @throws IOException declared for callers; no I/O is performed here
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            // BUGFIX: tokens split from "Connection: keep-alive, Upgrade" carry leading
            // whitespace; trim them (as the equivalent checks in outputJunk(...) do) so
            // the websocket-unsupported error header is reliably added.
            if (upgrade != null && upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Marks every resource attached to the Broadcaster so that its connection is
 * resumed after the next broadcast.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        ((HttpServletRequest) resource.getRequest()).setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs this Filter's cluster and broadcast filters on the Broadcaster's
 * config. Idempotent: does nothing when filters are already present.
 *
 * @param bc the target Broadcaster; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null)
        throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Cluster filters always come first, before any transformation/filtering.
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // BUGFIX: register inside the try (and with a per-iteration variable) so a
                // failed instantiation no longer adds a null — or the previous iteration's —
                // filter to the BroadcasterConfig.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Records the AtmosphereResourceEventListener classes that filter(...) will
// instantiate and attach to the resource when it is suspended.
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcasts the response entity on the resource's Broadcaster.
 * A Broadcastable entity may override the broadcaster and supplies both the
 * broadcast message and the message returned to the caller.
 *
 * @param r     the container response whose entity is broadcast
 * @param ar    the resource whose broadcaster is used by default
 * @param delay -1 = broadcast now and block until completion;
 *              0 = delay until resume; &gt;0 = delay by that many seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        // Make sure broadcast filters are installed before the first broadcast.
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null)
                return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null)
                    return;
                // Block until the broadcast has completed before answering the caller.
                Object t = f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            // NOTE(review): the interrupt status is not restored here — consider
            // Thread.currentThread().interrupt(); confirm before changing behavior.
            logger.error("broadcast interrupted", ex);
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Installs the configured broadcast filters on the Broadcaster (idempotent).
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
// Resumes a previously suspended resource.
void resume(AtmosphereResource resource) {
    resource.resume();
}
// Registers a ClusterBroadcastFilter; installed first by configureFilter(...).
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Resolves the Broadcaster to use and the optional resume Location header, then
 * delegates the actual suspension to executeSuspend(...).
 *
 * @param resumeOnBroadcast whether the connection resumes after the first broadcast
 * @param comments          whether streaming padding may be written
 * @param timeout           suspend timeout (-1 = none)
 * @param bc                explicit Broadcaster, or null to resolve one here
 * @param localScope        suspend scope; REQUEST gets a dedicated broadcaster
 */
void suspend(boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 regardless of the entity value (null or not).
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add a Location header if one is already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // A resource is already suspended in this session: reuse its broadcaster
        // and detach the stale resource.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope with no broadcaster yet: create a per-request broadcaster keyed
    // by the tracking id. (The original nested a second "bc == null" test whose
    // else-branch was unreachable; the dead branch has been removed.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated new Boolean(true); readers compare
        // by value, so behavior is unchanged.
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Performs the actual suspension: links the request/response pair, resolves the
 * content type, optionally writes streaming padding and/or flushes the entity,
 * then suspends the underlying connection.
 *
 * @param location    optional resume Location header value (consumed at most once)
 * @param flushEntity when true, the (possibly Callable) entity is written before suspending
 */
void executeSuspend(AtmosphereResource r, long timeout, boolean comments, boolean resumeOnBroadcast, URI location, ContainerRequest request, ContainerResponse response, boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        // NOTE(review): the parsing below assumes getServerInfo() looks like
        // "jetty/x.y.z"; substring(6) and Integer.valueOf throw on other formats — confirm.
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Location was emitted with the padding; do not repeat it below.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                // A Callable entity is resolved to its computed value before writing.
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                logger.error("Error executing callable {}", entity);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
/**
 * Legacy nine-argument suspend overload: resolves the Broadcaster and resume
 * Location header, then performs the suspension inline (the newer eight-argument
 * overload delegates that part to executeSuspend(...)).
 *
 * @param sessionSupported  whether servlet sessions may hold the suspended resource
 * @param resumeOnBroadcast whether the connection resumes after the first broadcast
 * @param comments          whether streaming padding may be written
 * @param timeout           suspend timeout (-1 = none)
 * @param bc                explicit Broadcaster, or null to resolve one here
 * @param localScope        suspend scope; REQUEST gets a dedicated broadcaster
 */
void suspend(boolean sessionSupported, boolean resumeOnBroadcast, boolean comments, long timeout, ContainerRequest request, ContainerResponse response, Broadcaster bc, AtmosphereResource<HttpServletRequest, HttpServletResponse> r, Suspend.SCOPE localScope) {
    // Force the status code to 200 regardless of the entity value (null or not).
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add a Location header if one is already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // A resource is already suspended in this session: reuse its broadcaster
        // and detach the stale resource.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    // REQUEST scope with no broadcaster yet: create a per-request broadcaster keyed
    // by the tracking id. (The original nested a second "bc == null" test whose
    // else-branch was unreachable; the dead branch has been removed.)
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated new Boolean(true); readers compare
        // by value, so behavior is unchanged.
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(response.getEntity().getClass(), response.getEntityType(), response.getAnnotations(), l);
            if (contentType == null || contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1)) || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Location was emitted with the padding; do not repeat it below.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
 * Create the list of {@link ResourceFilter} matching the Atmosphere
 * annotations declared on the resource method being processed.
 * <p/>
 * XXX Need to filter invalid mix of annotation.
 *
 * @param am an {@link AbstractMethod}
 * @return a List of {@link ResourceFilter} to invoke, or null when the
 *         method carries no Atmosphere annotation.
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> chain = new LinkedList<ResourceFilter>();
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type drives everything; no other annotation is consulted.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        chain.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return chain;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        Broadcast broadcast = am.getAnnotation(Broadcast.class);
        Class[] broadcastFilters = broadcast.value();
        Filter broadcastAction;
        if (broadcast.resumeOnBroadcast()) {
            broadcastAction = new Filter(Action.RESUME_ON_BROADCAST, broadcast.delay(), 0, Suspend.SCOPE.APPLICATION, true, broadcastFilters, null);
        } else {
            broadcastAction = new Filter(Action.BROADCAST, broadcast.delay(), 0, Suspend.SCOPE.APPLICATION, true, broadcastFilters, null);
        }
        chain.addLast(broadcastAction);
        if (am.isAnnotationPresent(Cluster.class)) {
            Cluster cluster = am.getAnnotation(Cluster.class);
            Class[] clusterFilters = cluster.value();
            for (Class<ClusterBroadcastFilter> clusterFilter : clusterFilters) {
                try {
                    ClusterBroadcastFilter cbf = clusterFilter.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(cluster.name());
                    broadcastAction.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Asynchronous.class)) {
        Asynchronous async = am.getAnnotation(Asynchronous.class);
        Class[] broadcastFilters = async.broadcastFilter();
        // waitFor == -1 signals "wait for the resource" downstream.
        Filter asyncAction = new Filter(Action.ASYNCHRONOUS, async.period(), async.waitForResource() ? -1 : 0, null, false, broadcastFilters, async.header());
        asyncAction.setListeners(async.eventListeners());
        chain.addFirst(asyncAction);
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        Suspend suspend = am.getAnnotation(Suspend.class);
        long timeoutMs = translateTimeUnit(suspend.period(), suspend.timeUnit());
        boolean trackable = TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType());
        // A trackable return type always wins over the resume-on-broadcast variant.
        Action suspendAction;
        if (trackable) {
            suspendAction = Action.SUSPEND_TRACKABLE;
        } else if (suspend.resumeOnBroadcast()) {
            suspendAction = Action.SUSPEND_RESUME;
        } else {
            suspendAction = Action.SUSPEND;
        }
        Filter suspendFilter = new Filter(suspendAction, timeoutMs, 0, suspend.scope(), suspend.outputComments());
        suspendFilter.setListeners(suspend.listeners());
        chain.addFirst(suspendFilter);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        Subscribe subscribe = am.getAnnotation(Subscribe.class);
        boolean trackable = TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType());
        Filter subscribeAction = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION, false, null, subscribe.value());
        subscribeAction.setListeners(subscribe.listeners());
        chain.addFirst(subscribeAction);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        chain.addFirst(new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION, false, null, am.getAnnotation(Publish.class).value()));
    }
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        chain.addFirst(new Filter(Action.RESUME, am.getAnnotation(Resume.class).value()));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        Schedule schedule = am.getAnnotation(Schedule.class);
        Action scheduleAction = schedule.resumeOnBroadcast() ? Action.SCHEDULE_RESUME : Action.SCHEDULE;
        chain.addFirst(new Filter(scheduleAction, schedule.period(), schedule.waitFor()));
    }
    // Nothing, normal Jersey application.
    return chain.isEmpty() ? null : chain;
}
/**
 * Convert {@code period} from the given {@link TimeUnit} into milliseconds.
 *
 * @param period the duration, or -1 meaning "no timeout"
 * @param tu     the unit {@code period} is expressed in
 * @return the duration in milliseconds, or -1 unchanged
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    // -1 is the "no timeout" sentinel and must not be unit-converted
    // (e.g. -1 SECONDS would otherwise become -1000 ms).
    if (period == -1) {
        return period;
    }
    // TimeUnit already performs every per-unit conversion the previous
    // hand-rolled switch enumerated, including the MILLISECONDS identity case.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
Safe
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} filed based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
// Shared logger for the filter factory and its inner Filter instances.
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute under which the suspended AtmosphereResource is stored.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
// Request attribute holding the UUID generated for @Resume lookups.
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
// Request attribute exposing the uuid -> resource map of resume candidates.
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): unlike the keys above, these two lack a "." separator before
// the suffix — presumably intentional (public constants), verify before changing.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The annotation-driven behaviors a Filter instance can apply in filter().
enum Action {
SUSPEND , RESUME , BROADCAST , SUSPEND_RESUME ,
SCHEDULE_RESUME , RESUME_ON_BROADCAST , NONE , SCHEDULE , SUSPEND_RESPONSE ,
SUSPEND_TRACKABLE , SUBSCRIBE , SUBSCRIBE_TRACKABLE , PUBLISH , ASYNCHRONOUS}
// Injected by Jersey: the current servlet request and URI information.
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
// Enabled by @Resume or the SUPPORT_LOCATION_HEADER attribute; allows a
// Location header pointing at the resume URI to be emitted on suspend.
private boolean useResumeAnnotation = false;
// uuid -> suspended resource, consulted by @Resume when sessions are disabled.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
// Response filter created per detected annotation in create(); applies the
// configured Action when Jersey invokes filter().
private class Filter implements ResourceFilter, ContainerResponseFilter {
// Behavior this filter applies (see enum Action).
private final Action action;
// Suspend/broadcast timeout; -1 means none. Units depend on the action:
// milliseconds for suspends (see translateTimeUnit), seconds for scheduleFixedBroadcast.
private final long timeout;
// For ASYNCHRONOUS: -1 means "wait for the resource"; also the initial delay for SCHEDULE.
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes installed on the Broadcaster by configureFilter().
private final Class<BroadcastFilter>[] filters;
// AtmosphereResourceEventListener types instantiated when suspending.
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
// Whether streaming padding ("junk") may be written when suspending.
private final boolean outputComments;
// ClusterBroadcastFilters queued by addCluster(), installed first by configureFilter().
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
// Broadcaster topic, or (for ASYNCHRONOUS) the header name carrying the broadcaster name.
private final String topic;
// Constructor chain: each shorter form delegates downward, supplying defaults of
// timeout -1 (none), waitFor 0, APPLICATION scope, outputComments true,
// and no broadcast filters or topic.
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Full form: all configuration captured as final fields.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// Request-side filtering is not used; all work happens on the response side.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// This Filter instance handles the response itself.
public ContainerResponseFilter getResponseFilter() {
return this;
}
/**
 * Decide whether streaming padding ("junk") should be written: never for a
 * websocket upgrade or for single-message transports (JSONP, long-polling),
 * otherwise the configured value is returned unchanged.
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketRequested = false;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String connection = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement();
        for (String token : connection.split(",")) {
            if (WEBSOCKET_UPGRADE.equalsIgnoreCase(token.trim())) {
                webSocketRequested = true;
                break;
            }
        }
    }
    if (webSocketRequested) {
        return false;
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
<<<<<<< MINE
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
=======
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
>>>>>>> YOURS
/**
 * Variant of {@code outputJunk(boolean)} that reads the transport header from
 * the supplied {@link ContainerRequest}: padding is suppressed for websocket
 * upgrades and for JSONP/long-polling transports.
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean webSocketRequested = false;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String connection = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement();
        for (String token : connection.split(",")) {
            if (WEBSOCKET_UPGRADE.equalsIgnoreCase(token.trim())) {
                webSocketRequested = true;
                break;
            }
        }
    }
    if (webSocketRequested) {
        return false;
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
 * Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
 * based on the annotation the web application has used.
 *
 * @param request the {@link ContainerRequest}
 * @param response the {@link ContainerResponse}
 * @return the {@link ContainerResponse}
 */
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
// An exception already mapped by Jersey bypasses all Atmosphere processing.
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
// For @Asynchronous, 'topic' is the name of the header carrying the broadcaster name.
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
// Non-polling transport without a websocket sub-protocol: suspend and
// broadcast the entity once the suspend actually happened.
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
// One-shot listener: broadcast on suspend, then detach itself.
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
// Polling transport or websocket sub-protocol: broadcast immediately
// instead of suspending.
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
// The resource method returned a SuspendResponse carrying its own configuration.
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
// NOTE: locals declared in the SUSPEND_RESPONSE case above are reused here;
// Java switch cases share one scope.
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
// Flush any entity first, then locate and resume the suspended connection.
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
// Without sessions, the resource is found via the uuid terminating the request path.
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
// Prefer the suspended resource stashed on the request, if any.
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
// A Broadcastable entity may override the broadcaster, the broadcast message
// and the message written back on this response.
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
// waitFor and timeout are interpreted in seconds for scheduled broadcasts.
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Register the response entity with the {@link TrackableSession} so it can be
 * addressed later through its tracking id.
 * <p>
 * The tracking id is resolved in order from: the X-Atmosphere-tracking-id
 * request header, the entity's own tracking id, or a freshly generated UUID.
 * The final id is echoed back as a response header and stored as a request
 * attribute.
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    TrackableResource<? extends Trackable> tracked = TrackableResource.class.cast(response.getEntity());
    if (tracked == null) {
        // No trackable entity returned: track an AtmosphereResource placeholder instead.
        tracked = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap: the client receives the wrapped entity, not the TrackableResource.
        response.setEntity(tracked.entity());
    }
    String trackingId = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackingId == null) {
        trackingId = tracked.trackingID() != null ? tracked.trackingID() : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(trackingId);
    TrackableSession.getDefault().track(tracked);
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Attach the concrete object to a {@link TrackableResource} after the suspend
 * completed: the AtmosphereResource itself when the declared type is an
 * AtmosphereResource, otherwise its Broadcaster.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // isAssignableFrom already yields a boolean; the former "? true : false" was redundant.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Apply Atmosphere's standard response headers to the builder: a websocket
 * error marker when an upgrade was requested but is unsupported, the no-cache
 * header trio, and the CORS allow-origin/credentials pair.
 *
 * @param b the builder to enrich
 * @return the enriched builder
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        String firstConnectionHeader = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement();
        for (String token : firstConnectionHeader.split(",")) {
            // Flag a websocket upgrade request that this container cannot honour.
            if (token != null && token.equalsIgnoreCase(WEBSOCKET_UPGRADE) && !webSocketSupported) {
                b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
            }
        }
    }
    // NOTE(review): both attributes are unboxed; presumably the framework always
    // sets them — a missing attribute would NPE here (same as the original).
    boolean noCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean dropAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (noCacheHeaders) {
        // Expire far in the past, plus standard HTTP/1.1 and HTTP/1.0 no-cache headers.
        b = b.header(EXPIRES, "-1")
             .header(CACHE_CONTROL, "no-store, no-cache, must-revalidate")
             .header(PRAGMA, "no-cache");
    }
    if (dropAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
             .header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Mark every resource attached to the Broadcaster so that the next broadcast
 * resumes its connection (sets RESUME_ON_BROADCAST on each underlying request).
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (Iterator<AtmosphereResource<?, ?>> it = b.getAtmosphereResources().iterator(); it.hasNext(); ) {
        HttpServletRequest suspendedRequest = (HttpServletRequest) it.next().getRequest();
        suspendedRequest.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Install the configured filters on the Broadcaster: first the queued
 * {@link ClusterBroadcastFilter}s, then the @Broadcast filter classes.
 * A no-op when the BroadcasterConfig already has filters.
 *
 * @param bc the broadcaster to configure; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /*
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // BUGFIX: register only successfully created and injected filters.
                // Previously addFilter(f) ran outside the try block, so a failed
                // newInstance() still registered a null (or the previous) filter.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Record the AtmosphereResourceEventListener types to instantiate when this
// filter suspends a resource (see the listener loops in filter()).
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
// Broadcast the response entity via the resource's Broadcaster, optionally
// delayed (delay in seconds; 0 = undelayed delayBroadcast, -1 = broadcast now
// and wait for completion). A Broadcastable entity may override the
// Broadcaster, the message broadcast, and the message written back.
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
Object o = r.getEntity();
Broadcaster b = ar.getBroadcaster();
Object msg = o;
String returnMsg = null;
// Something went wrong if null.
if (o instanceof Broadcastable) {
if (((Broadcastable) o).getBroadcaster() != null) {
b = ((Broadcastable) o).getBroadcaster();
}
msg = ((Broadcastable) o).getMessage();
returnMsg = ((Broadcastable) o).getResponseMessage().toString();
}
if (action == Action.RESUME_ON_BROADCAST) {
configureResumeOnBroadcast(b);
}
if (o != null) {
addFilter(b);
try {
r.setEntity(msg);
if (msg == null) return;
if (delay == -1) {
Future<Object> f = b.broadcast(msg);
if (f == null) return;
// Block until the broadcast completes before writing the response message.
Object t = f.get();
if (o instanceof Broadcastable) {
r.setEntity(returnMsg);
}
} else if (delay == 0) {
b.delayBroadcast(msg);
} else {
b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
}
} catch (InterruptedException ex) {
// NOTE(review): the interrupt status is not restored here.
logger.error("broadcast interrupted", ex);
} catch (ExecutionException ex) {
logger.error("execution exception during broadcast", ex);
}
}
}
// Install the cluster and @Broadcast filters on the Broadcaster (delegates to
// configureFilter, which is a no-op when filters are already present).
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resume the previously suspended resource.
void resume(AtmosphereResource resource) {
resource.resume();
}
// Queue a ClusterBroadcastFilter to be installed later by configureFilter().
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
// Suspend the connection: resolve/normalize the Broadcaster for the requested
// scope, optionally publish a resume Location header, then delegate the actual
// suspend to executeSuspend().
void suspend(boolean resumeOnBroadcast,
boolean comments,
long timeout,
ContainerRequest request,
ContainerResponse response,
Broadcaster bc,
AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
Suspend.SCOPE localScope) {
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
URI location = null;
// Do not add location header if already there.
if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
// Register a resume candidate under a fresh uuid; @Resume later removes it by path suffix.
String uuid = UUID.randomUUID().toString();
location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
resumeCandidates.put(uuid, r);
servletReq.setAttribute(RESUME_UUID, uuid);
servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
}
if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
bc = r.getBroadcaster();
}
// Reuse the Broadcaster of a resource already suspended in this session,
// detaching the stale resource from it.
if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
(AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
bc = cached.getBroadcaster();
// Just in case something went wrong.
try {
bc.removeAtmosphereResource(cached);
} catch (IllegalStateException ex) {
logger.trace(ex.getMessage(), ex);
}
}
if (response.getEntity() instanceof Broadcastable) {
Broadcastable b = (Broadcastable) response.getEntity();
bc = b.getBroadcaster();
response.setEntity(b.getResponseMessage());
}
// NOTE(review): the outer condition already requires bc == null, so the inner
// null test is always true and the else branch below is unreachable dead code.
if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
if (bc == null) {
try {
String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
if (id == null) {
id = UUID.randomUUID().toString();
}
bc = broadcasterFactory.get(id);
bc.setScope(Broadcaster.SCOPE.REQUEST);
} catch (Exception ex) {
logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
}
} else {
bc.setScope(Broadcaster.SCOPE.REQUEST);
}
}
r.setBroadcaster(bc);
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
// Perform the actual suspend: link the resource to the request/session, work
// out the response Content-Type, optionally emit streaming padding, flush the
// entity (resolving Callable entities first), then suspend the connection.
void executeSuspend(AtmosphereResource r,
long timeout,
boolean comments,
boolean resumeOnBroadcast,
URI location,
ContainerRequest request,
ContainerResponse response,
boolean flushEntity) {
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
configureFilter(r.getBroadcaster());
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will retrun the first
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
response.getEntity().getClass(),
response.getEntityType(),
response.getAnnotations(),
l);
if (contentType == null ||
contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
// NOTE(review): the ternary guards a null contentType, but the unconditional
// toString() on the next line would NPE in that case — in practice the
// resolution above yields a non-null type whenever entity != null.
b = b.header("Content-Type", contentType != null ?
contentType.toString() : "text/html; charset=ISO-8859-1");
servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
}
boolean eclipse362468 = false;
// Detect Jetty versions affected by eclipse bug 362468, where padding breaks streaming.
String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
if (serverInfo.indexOf("jetty") != -1) {
// assumes getServerInfo() looks like "jetty/x.y.z" — substring(6) strips the prefix; TODO confirm.
String[] jettyVersion = serverInfo.substring(6).split("\\.");
// https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
|| ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
if (comments && eclipse362468) {
logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
}
}
if (!eclipse362468 && comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
// The Location header must only be written once; clear it after use.
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
if (entity != null && flushEntity) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
} catch (Throwable t) {
logger.error("Error executing callable {}", entity);
entity = null;
}
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
/**
 * Resolves which {@link Broadcaster} to use for this request, links the
 * suspended resource to the servlet request/session, writes the resume
 * Location header, optional streaming padding and the entity, then suspends
 * the connection.
 *
 * @param sessionSupported  whether servlet-session tracking is enabled
 * @param resumeOnBroadcast whether the first broadcast resumes the connection
 * @param comments          whether streaming padding should be written
 * @param timeout           suspend timeout in milliseconds (-1 = no timeout)
 * @param request           the Jersey container request
 * @param response          the Jersey container response
 * @param bc                explicit broadcaster, or null to resolve one here
 * @param r                 the resource to suspend
 * @param localScope        the @Suspend scope (REQUEST gets a private broadcaster)
 * @throws WebApplicationException wrapping any IOException raised while writing
 */
void suspend(boolean sessionSupported,
             boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 even independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        // Without a session, the client resumes via a unique URL instead.
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        // Reuse the broadcaster of a previously suspended resource in this session.
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        // A Broadcastable entity carries its own broadcaster and response message.
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        // FIX: the previous inner "if (bc == null) ... else bc.setScope(...)" had an
        // unreachable else branch (the enclosing condition already requires bc == null);
        // the dead branch has been removed without changing behavior.
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // assumes serverInfo looks like "jetty/x.y.z" — TODO confirm for all Jetty builds
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null; // only emit the Location header once
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
* Create the list of {@link ResourceFilter}s that carry the information about the
* Atmosphere annotations present on the resource method being processed.
* <p/>
* Filter ordering matters: suspend-style filters are added with
* {@code addFirst} so they run before broadcast-style filters added with
* {@code addLast}.
* <p/>
* XXX Need to filter invalid mix of annotations.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke, or {@code null} when the
*         method carries no Atmosphere annotation (plain Jersey dispatch).
*/
public List<ResourceFilter> create(AbstractMethod am) {
LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
Filter f;
if (logger.isDebugEnabled()) {
for (Annotation annotation : am.getAnnotations()) {
logger.debug("AtmosphereFilter processing annotation: {}", annotation);
}
}
// Sub-resource locators and similar have no concrete method: nothing to filter.
if (am.getMethod() == null) {
return null;
}
// A SuspendResponse return type short-circuits all annotation handling.
if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
list.addLast(new Filter(Action.SUSPEND_RESPONSE));
return list;
}
if (am.isAnnotationPresent(Broadcast.class)) {
int delay = am.getAnnotation(Broadcast.class).delay();
Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
} else {
f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
}
list.addLast(f);
// @Cluster piggybacks on the @Broadcast filter: its ClusterBroadcastFilters
// are instantiated here and attached to the same Filter instance.
if (am.isAnnotationPresent(Cluster.class)) {
broadcastFilter = am.getAnnotation(Cluster.class).value();
for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
try {
ClusterBroadcastFilter cbf = c.newInstance();
InjectorProvider.getInjector().inject(cbf);
cbf.setUri(am.getAnnotation(Cluster.class).name());
f.addCluster(cbf);
} catch (Throwable t) {
logger.warn("Invalid ClusterBroadcastFilter", t);
}
}
}
}
if (am.isAnnotationPresent(Asynchronous.class)) {
int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
// waitFor == -1 signals "await the resource before broadcasting".
f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Suspend.class)) {
long suspendTimeout = am.getAnnotation(Suspend.class).period();
TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
// Filters always work with milliseconds internally.
suspendTimeout = translateTimeUnit(suspendTimeout, tu);
Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
// A TrackableResource return type switches to the trackable action variants.
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
} else {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
}
f.setListeners(am.getAnnotation(Suspend.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Subscribe.class)) {
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
// @Subscribe implies a 30s suspend and waitFor == -1 (await the resource).
f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Subscribe.class).value());
f.setListeners(am.getAnnotation(Subscribe.class).listeners());
list.addFirst(f);
}
if (am.isAnnotationPresent(Publish.class)) {
f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Publish.class).value());
list.addFirst(f);
}
if (am.isAnnotationPresent(Resume.class)) {
useResumeAnnotation = true;
int suspendTimeout = am.getAnnotation(Resume.class).value();
list.addFirst(new Filter(Action.RESUME, suspendTimeout));
}
if (am.isAnnotationPresent(Schedule.class)) {
int period = am.getAnnotation(Schedule.class).period();
int waitFor = am.getAnnotation(Schedule.class).waitFor();
if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
} else {
list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
}
}
// No Atmosphere annotation found: plain Jersey dispatch (null per the
// ResourceFilterFactory contract).
return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period}, expressed in {@code tu}, to milliseconds.
 *
 * @param period the duration value; -1 is a sentinel meaning "no timeout"
 *               and is returned unchanged
 * @param tu     the unit {@code period} is expressed in
 * @return the duration in milliseconds, or -1 when {@code period} is -1
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    // -1 means "suspend forever"; never scale the sentinel.
    if (period == -1) return period;
    // The previous hand-written switch enumerated every TimeUnit constant;
    // TimeUnit.convert performs exactly the same conversion for all of them.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute keys used to link a suspended resource to later requests.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): these two keys lack the "." separator the others have — presumably
// intentional for backward compatibility; confirm before normalizing.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The Atmosphere action a Filter instance executes in its response filter.
enum Action {
SUSPEND , RESUME , BROADCAST , SUSPEND_RESUME ,
SCHEDULE_RESUME , RESUME_ON_BROADCAST , NONE , SCHEDULE , SUSPEND_RESPONSE ,
SUSPEND_TRACKABLE , SUBSCRIBE , SUBSCRIBE_TRACKABLE , PUBLISH , ASYNCHRONOUS}
// Injected by Jersey (@Context): the current servlet request.
private
@Context
HttpServletRequest servletReq;
// Injected by Jersey (@Context): URI information used to build resume URLs.
private
@Context
UriInfo uriInfo;
// Flipped on when @Resume is seen or location-header support is enabled.
private boolean useResumeAnnotation = false;
// Maps resume UUIDs (appended to the resume URL) to their suspended resources.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
// Response-side filter that executes one Atmosphere Action (suspend, resume,
// broadcast, ...) with the configuration captured from the method's annotations.
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
// Suspend timeout in milliseconds; -1 means no timeout.
private final long timeout;
// Seconds to wait before a scheduled broadcast; -1 means "await the resource".
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes to instantiate and attach to the broadcaster config.
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
// Whether streaming padding ("junk") may be written before suspending.
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
// Broadcaster topic / header name, depending on the action (e.g. @Subscribe value).
private final String topic;
// Telescoping constructors: each shorter form delegates to the full one below
// with defaults (timeout -1, waitFor 0, APPLICATION scope, comments on, no
// broadcast filters, no topic).
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
// Canonical constructor: all configuration captured from the annotations.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// This filter only acts on the response side, so no request filter is provided.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// The Filter instance itself performs the response-side processing.
public ContainerResponseFilter getResponseFilter() {
return this;
}
/**
 * Decides whether streaming padding ("junk") should be written for this
 * request. Padding is suppressed for WebSocket upgrades and for JSONP /
 * long-polling transports; otherwise the caller's preference is returned.
 *
 * @param outputJunk the annotation-configured preference
 * @return false for WebSocket/JSONP/long-polling, otherwise {@code outputJunk}
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    // FIX: the Connection header was previously looked up three separate times;
    // one enumeration is fetched and reused. The header value is a
    // comma-separated token list (e.g. "keep-alive, Upgrade").
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
<<<<<<< MINE
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
=======
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
>>>>>>> YOURS
/**
 * Same decision as {@link #outputJunk(boolean)}, but the transport header is
 * read from the Jersey {@link ContainerRequest} instead of the servlet request.
 *
 * @param request    the Jersey container request carrying the headers
 * @param outputJunk the annotation-configured preference
 * @return false for WebSocket/JSONP/long-polling, otherwise {@code outputJunk}
 */
boolean outputJunk(ContainerRequest request, boolean outputJunk) {
    boolean webSocketEnabled = false;
    // FIX: the Connection header was previously looked up three separate times;
    // one enumeration is fetched and reused.
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
* <p/>
* Note: locals declared in one switch case (outputJunk, resumeOnBroadcast,
* trackableResource, ...) are deliberately reused by later cases; the write
* ordering inside each case (padding, entity, suspend) is significant.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
// Skip Atmosphere processing when Jersey already mapped an exception.
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 even independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
// @Asynchronous requires both a transport header and a broadcaster name header.
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
// waitFor == -1 means "await the resource before broadcasting".
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
// Non-polling, non-websocket-subprotocol transports suspend the connection.
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
// Defer the broadcast until the connection is actually suspended; the
// listener removes itself after the first onSuspend.
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
// Polling / websocket-subprotocol: broadcast immediately, no suspend.
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
// The resource method returned a SuspendResponse: all the suspend
// configuration comes from that object rather than from annotations.
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
// Reuses outputJunk / resumeOnBroadcast / trackableResource declared in
// the SUSPEND_RESPONSE case above (switch-wide scope).
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
// Flush any entity first, then locate the previously suspended resource
// via the session or the resume-UUID path segment.
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
// Prefer the resource suspended on this very request, when present.
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
// Write the entity now, then schedule the broadcast at a fixed rate.
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
// Prepares the TrackableResource for this exchange: reuses the entity when it
// already is one (unwrapping its payload into the response), otherwise creates
// a placeholder; then assigns a tracking ID, registers it with the
// TrackableSession, and echoes the ID back in the response header and request
// attribute.
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
// Class.cast(null) yields null, so a null entity falls into the branch below.
// NOTE(review): a non-null entity that is not a TrackableResource would throw
// ClassCastException here — callers are expected to have checked isTracked first.
TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
if (trackableResource == null) {
trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
} else {
// The client should see the wrapped entity, not the TrackableResource itself.
response.setEntity(trackableResource.entity());
}
// Tracking ID precedence: request header, then the resource's own ID, then a fresh UUID.
String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
if (trackableUUID == null && trackableResource.trackingID() != null) {
trackableUUID = trackableResource.trackingID();
} else if (trackableUUID == null) {
trackableUUID = UUID.randomUUID().toString();
}
trackableResource.setTrackingID(trackableUUID);
TrackableSession.getDefault().track(trackableResource);
// Echo the ID so the client can correlate subsequent requests.
response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
return trackableResource;
}
/**
 * Associates the tracked object with either the {@link AtmosphereResource}
 * itself or its {@link Broadcaster}, depending on the declared trackable type.
 *
 * @param trackableResource the tracking wrapper created by preTrack
 * @param r                 the resource handling this request
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // The redundant "? true : false" has been dropped; the boolean is used directly.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds the standard Atmosphere response headers: an error header when a
 * WebSocket upgrade was requested but is unsupported, no-cache headers when
 * configured, and CORS headers when access control is enabled.
 *
 * @param b the response builder to extend
 * @return the builder with the applicable headers appended
 * @throws IOException declared for interface stability (no I/O is performed here)
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    // FIX: the Connection header was previously looked up three separate times;
    // one enumeration is fetched and reused.
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token != null && token.equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Marks every resource currently attached to the Broadcaster so that the next
 * broadcast resumes its suspended connection.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the annotation-declared filters on the Broadcaster's config:
 * cluster filters first, then the {@code @Broadcast}-supplied BroadcastFilters.
 * A no-op when the config already has filters.
 *
 * @param bc the target Broadcaster; must not be null
 * @throws WebApplicationException when {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Register only on success: the old code called addFilter outside the
                // try, adding null (first iteration) or re-adding the previous filter
                // whenever instantiation/injection failed.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Records the AtmosphereResourceEventListener classes that will be instantiated
// and attached to the resource when this filter suspends it.
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcasts the response entity, honoring {@link Broadcastable} wrappers (which
 * may carry their own Broadcaster and a distinct response message) and the delay
 * configured by the {@code @Broadcast} annotation.
 *
 * @param r     the response whose entity is broadcast
 * @param ar    the current resource (supplies the default Broadcaster)
 * @param delay -1 broadcasts immediately and waits for completion, 0 queues an
 *              unbounded delayed broadcast, otherwise delay in seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                // Immediate broadcast: block until delivery completes so the
                // Broadcastable response message can replace the entity.
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                Object t = f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // Restore the interrupt flag so callers can still observe the interruption;
            // the old code swallowed it entirely.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Ensures the Broadcaster's config has the annotation-declared filters installed
// before any message is broadcast; delegates to configureFilter.
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
// Resumes the suspended connection, completing the pending response.
void resume(AtmosphereResource resource) {
    resource.resume();
}
// Registers a ClusterBroadcastFilter built from an @Cluster annotation;
// it is applied to the Broadcaster later, in configureFilter(...).
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
/**
 * Suspends the resource for {@code @Suspend}/{@code @Subscribe}-annotated methods:
 * normalizes the status code, registers a resume candidate (Location header) when
 * applicable, resolves the Broadcaster for the requested scope, then delegates the
 * actual write/suspend to executeSuspend.
 *
 * @param resumeOnBroadcast whether the connection resumes after the next broadcast
 * @param comments          whether to emit streaming padding/comments
 * @param timeout           suspend timeout in milliseconds (-1 = forever)
 * @param bc                explicit Broadcaster, or null to resolve one here
 * @param localScope        the @Suspend scope controlling Broadcaster resolution
 */
void suspend(boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    URI location = null;
    // Do not add location header if already there.
    if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        // REQUEST scope without an explicit Broadcaster: create a per-request one,
        // keyed by the client's tracking id when present. (The previous nested
        // "if (bc == null)" re-check and its unreachable else branch were removed.)
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    r.setBroadcaster(bc);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated "new Boolean(true)".
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Performs the actual suspend: links the resource/response to the request (and
 * session when supported), resolves the response content type, writes optional
 * streaming padding and the entity, then suspends the connection.
 *
 * @param timeout     suspend timeout in milliseconds (-1 = forever)
 * @param comments    whether to emit streaming padding/comments
 * @param flushEntity when true, evaluate (possibly via Callable) and write the entity
 * @throws WebApplicationException wrapping any IOException raised while writing
 */
void executeSuspend(AtmosphereResource r,
                    long timeout,
                    boolean comments,
                    boolean resumeOnBroadcast,
                    URI location,
                    ContainerRequest request,
                    ContainerResponse response,
                    boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            // Compute the fallback once so the attribute below cannot NPE: the old
            // code called contentType.toString() unconditionally even though the
            // header line already guarded against a null contentType.
            String ct = contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1";
            b = b.header("Content-Type", ct);
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, ct);
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                // Pass the throwable to the logger: the old call dropped it,
                // losing the failure's stack trace.
                logger.error("Error executing callable {}", entity, t);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
/**
 * Suspends the resource for SuspendResponse-driven suspensions, with session
 * support decided by the caller. Mirrors the 8-argument suspend(...) but inlines
 * the header/entity writing instead of delegating to executeSuspend.
 *
 * @param sessionSupported  whether HTTP-session tracking is enabled
 * @param resumeOnBroadcast whether the connection resumes after the next broadcast
 * @param comments          whether to emit streaming padding/comments
 * @param timeout           suspend timeout in milliseconds (-1 = forever)
 * @param bc                explicit Broadcaster, or null to resolve one here
 * @param localScope        the scope controlling Broadcaster resolution
 */
void suspend(boolean sessionSupported,
             boolean resumeOnBroadcast,
             boolean comments,
             long timeout,
             ContainerRequest request,
             ContainerResponse response,
             Broadcaster bc,
             AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
             Suspend.SCOPE localScope) {
    // Force the status code to 200 events independently of the value of the entity (null or not)
    if (response.getStatus() == 204) {
        response.setStatus(200);
    }
    BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
            .getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
    URI location = null;
    // Do not add location header if already there.
    if (!sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
        String uuid = UUID.randomUUID().toString();
        location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
        resumeCandidates.put(uuid, r);
        servletReq.setAttribute(RESUME_UUID, uuid);
        servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
    }
    if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
        bc = r.getBroadcaster();
    }
    if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
        AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
                (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
        bc = cached.getBroadcaster();
        // Just in case something went wrong.
        try {
            bc.removeAtmosphereResource(cached);
        } catch (IllegalStateException ex) {
            logger.trace(ex.getMessage(), ex);
        }
    }
    if (response.getEntity() instanceof Broadcastable) {
        Broadcastable b = (Broadcastable) response.getEntity();
        bc = b.getBroadcaster();
        response.setEntity(b.getResponseMessage());
    }
    if (localScope == Suspend.SCOPE.REQUEST && bc == null) {
        // REQUEST scope without an explicit Broadcaster: create a per-request one.
        // (The former nested "if (bc == null)" re-check and its unreachable else
        // branch were removed.)
        try {
            String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
            if (id == null) {
                id = UUID.randomUUID().toString();
            }
            bc = broadcasterFactory.get(id);
            bc.setScope(Broadcaster.SCOPE.REQUEST);
        } catch (Exception ex) {
            logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
        }
    }
    configureFilter(bc);
    r.setBroadcaster(bc);
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    if (resumeOnBroadcast) {
        // Boolean.TRUE instead of the deprecated "new Boolean(true)".
        servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, Boolean.TRUE);
    }
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            // Single fallback value so the attribute below cannot NPE when the
            // media type could not be resolved (the old code called
            // contentType.toString() unconditionally).
            String ct = contentType != null ? contentType.toString() : "text/html; charset=ISO-8859-1";
            b = b.header("Content-Type", ct);
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, ct);
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null) {
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter invalid mix of annotation.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type short-circuits every other annotation.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        }
        list.addLast(f);
        // @Cluster only has an effect in combination with @Broadcast.
        if (am.isAnnotationPresent(Cluster.class)) {
            broadcastFilter = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Asynchronous.class)) {
        int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
        Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
        boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
        f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
        f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        // TrackableResource return types switch to the trackable suspend action.
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing matched: normal Jersey application ("size() > 0" replaced with isEmpty()).
    return list.isEmpty() ? null : list;
}
/**
 * Converts an annotation-supplied period into milliseconds.
 *
 * @param period the period in {@code tu} units; -1 is the "suspend forever"
 *               sentinel and passes through untouched
 * @param tu     the unit the period is expressed in
 * @return the period in milliseconds
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) return period;
    // Equivalent to the former per-unit switch: MILLISECONDS.convert handles
    // every TimeUnit constant, including the identity MILLISECONDS case.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
Unstructured
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotation the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// Action derived from the resource method's annotations; each constant selects
// a dispatch branch in Filter#filter(ContainerRequest, ContainerResponse).
enum Action {
    SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
    SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
    SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH, ASYNCHRONOUS
}
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
private boolean useResumeAnnotation = false;
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
private final Action action;
private final long timeout;
private final int waitFor;
private final Suspend.SCOPE scope;
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
private final String topic;
// Suspend forever: -1 is the sentinel timeout.
protected Filter(Action action) {
    this(action, -1);
}
// No waitFor delay.
protected Filter(Action action, long timeout) {
    this(action, timeout, 0);
}
// Defaults to APPLICATION scope.
protected Filter(Action action, long timeout, int waitFor) {
    this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
// Streaming comments/padding enabled by default.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
    this(action, timeout, waitFor, scope, true);
}
// No broadcast filters and no topic.
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
    this(action, timeout, waitFor, scope, outputComments, null, null);
}
/**
 * Canonical constructor capturing the full annotation-derived configuration.
 *
 * @param action         dispatch action selected from the method's annotations
 * @param timeout        suspend timeout in milliseconds (-1 = forever)
 * @param waitFor        wait/delay semantics (-1 used by @Asynchronous waitForResource)
 * @param scope          Broadcaster resolution scope (may be null for @Asynchronous)
 * @param outputComments whether to emit streaming padding/comments
 * @param filters        BroadcastFilter classes to install, or null
 * @param topic          header name/topic used by @Publish/@Subscribe/@Asynchronous, or null
 */
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
    this.action = action;
    this.timeout = timeout;
    this.scope = scope;
    this.outputComments = outputComments;
    this.waitFor = waitFor;
    this.filters = filters;
    this.topic = topic;
}
// This filter only post-processes responses; no request-side filtering.
public ContainerRequestFilter getRequestFilter() {
    return null;
}
// The Filter itself implements ContainerResponseFilter.
public ContainerResponseFilter getResponseFilter() {
    return this;
}
<<<<<<< MINE
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
=======
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
>>>>>>> YOURS
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
/**
 * Decides whether streaming padding ("junk") should be written: never for
 * websocket upgrades or jsonp/long-polling transports, otherwise the
 * annotation-configured default.
 *
 * @param outputJunk the annotation-configured default
 * @return whether padding should be emitted
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    // Look the Connection header up once instead of three separate container calls.
    Enumeration<String> connection = (Enumeration<String>) servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        String[] e = connection.nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (webSocketEnabled) {
        return false;
    } else if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // Reuse the entity as the trackable when the resource returned one;
    // otherwise build a fresh TrackableResource bound to the client's tracking id.
    TrackableResource<? extends Trackable> tracked =
            TrackableResource.class.cast(response.getEntity());
    if (tracked == null) {
        tracked = new TrackableResource<AtmosphereResource>(
                AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap: the response must carry the underlying entity, not the tracker.
        response.setEntity(tracked.entity());
    }
    // Tracking-id resolution order: request header, then the resource's own id,
    // then a freshly generated UUID.
    String uuid = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (uuid == null) {
        String resourceId = tracked.trackingID();
        uuid = (resourceId != null) ? resourceId : UUID.randomUUID().toString();
    }
    tracked.setTrackingID(uuid);
    TrackableSession.getDefault().track(tracked);
    // Echo the id back to the client and stash it on the servlet request.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, tracked.trackingID());
    return tracked;
}
/**
 * Associates the concrete tracked object with the tracker: the
 * {@link AtmosphereResource} itself when that is the tracked type,
 * otherwise the resource's {@link Broadcaster}.
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // `X ? true : false` replaced by the boolean expression itself.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds transport-, cache- and CORS-related headers to the response builder.
 * Behavior is driven by request attributes set by the Atmosphere framework.
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    Enumeration<String> connection = servletReq.getHeaders("Connection");
    // Only the first Connection header value is inspected, as before.
    if (connection != null && connection.hasMoreElements()) {
        for (String token : connection.nextElement().split(",")) {
            if (token != null && token.equalsIgnoreCase(WEBSOCKET_UPGRADE) && !webSocketSupported) {
                // Client asked for a websocket upgrade the container cannot honor.
                b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Expire far in the past plus HTTP/1.1 and HTTP/1.0 no-cache headers.
        b = b.header(EXPIRES, "-1")
             .header(CACHE_CONTROL, "no-store, no-cache, must-revalidate")
             .header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
             .header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flags every request currently suspended on this broadcaster so the
 * next broadcast resumes it.
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest req = (HttpServletRequest) resource.getRequest();
        req.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the cluster filters and the application-declared
 * {@link BroadcastFilter}s on the given broadcaster's configuration,
 * unless it already has filters installed.
 *
 * @throws WebApplicationException if {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) {
        throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    }
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured.
    if (c.hasFilters()) {
        return;
    }
    // Cluster filters always run first, before any transformation/filtering.
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // BUGFIX: register only after successful instantiation/injection.
                // Previously addFilter(f) ran outside the try, so a failing
                // newInstance() registered null (first iteration) or
                // re-registered the previous filter.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
// Records the AtmosphereResourceEventListener classes to instantiate when this
// filter suspends a resource (see the SUSPEND/SUBSCRIBE cases in filter()).
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
this.listeners = listeners;
}
/**
 * Broadcasts the response entity on the resource's (or the Broadcastable's
 * own) broadcaster, immediately or with the requested delay, and rewrites
 * the response entity with the Broadcastable's response message when one
 * is supplied.
 *
 * @param r     the container response whose entity is broadcast
 * @param ar    the suspended resource providing the default broadcaster
 * @param delay -1 = broadcast now (blocking), 0 = delay indefinitely,
 *              &gt;0 = delay that many seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // A Broadcastable may carry its own broadcaster, the message to broadcast
    // and a distinct message to return to the caller.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    // Something went wrong if null.
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                // Immediate broadcast: block until it completes so the
                // response message can be swapped in afterwards.
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            logger.error("broadcast interrupted", ex);
            // BUGFIX: restore the interrupt status so callers can observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
// Convenience delegate: lazily installs the configured BroadcastFilters on bc.
void addFilter(Broadcaster bc) {
configureFilter(bc);
}
// Resumes a previously suspended resource (used by the @Resume annotation path).
void resume(AtmosphereResource resource) {
resource.resume();
}
// Registers a ClusterBroadcastFilter; these run first in configureFilter().
void addCluster(ClusterBroadcastFilter f) {
clusters.add(f);
}
void suspend(boolean resumeOnBroadcast,
boolean comments,
long timeout,
ContainerRequest request,
ContainerResponse response,
Broadcaster bc,
AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
Suspend.SCOPE localScope) {
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
URI location = null;
// Do not add location header if already there.
if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
String uuid = UUID.randomUUID().toString();
location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
resumeCandidates.put(uuid, r);
servletReq.setAttribute(RESUME_UUID, uuid);
servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
}
if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
bc = r.getBroadcaster();
}
if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
(AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
bc = cached.getBroadcaster();
// Just in case something went wrong.
try {
bc.removeAtmosphereResource(cached);
} catch (IllegalStateException ex) {
logger.trace(ex.getMessage(), ex);
}
}
if (response.getEntity() instanceof Broadcastable) {
Broadcastable b = (Broadcastable) response.getEntity();
bc = b.getBroadcaster();
response.setEntity(b.getResponseMessage());
}
if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
if (bc == null) {
try {
String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
<<<<<<< MINE
if (id == null) {
=======
if (id == null){
>>>>>>> YOURS
id = UUID.randomUUID().toString();
}
bc = broadcasterFactory.get(id);
bc.setScope(Broadcaster.SCOPE.REQUEST);
} catch (Exception ex) {
logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
}
} else {
bc.setScope(Broadcaster.SCOPE.REQUEST);
}
}
r.setBroadcaster(bc);
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
// Performs the actual suspension: links the resource and response to the
// servlet request (and session when supported), resolves the content type,
// optionally writes streaming padding and/or the flushed entity, then
// suspends the resource. Statement order matters here (padding must be
// written before the entity, the entity before suspend), so the flow is
// documented rather than restructured.
void executeSuspend(AtmosphereResource r,
long timeout,
boolean comments,
boolean resumeOnBroadcast,
URI location,
ContainerRequest request,
ContainerResponse response,
boolean flushEntity) {
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
configureFilter(r.getBroadcaster());
if (sessionSupported) {
servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
}
servletReq.setAttribute(SUSPENDED_RESOURCE, r);
servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
// Set the content-type based on the returned entity.
try {
MediaType contentType = response.getMediaType();
if (contentType == null && response.getEntity() != null) {
LinkedList<MediaType> l = new LinkedList<MediaType>();
// Will return the first acceptable media type of the request.
l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
response.getEntity().getClass(),
response.getEntityType(),
response.getAnnotations(),
l);
// Fall back to octet-stream when no writer pins down a concrete type.
if (contentType == null ||
contentType.isWildcardType() || contentType.isWildcardSubtype())
contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
}
Object entity = response.getEntity();
Response.ResponseBuilder b = Response.ok();
b = configureHeaders(b);
if (entity != null) {
b = b.header("Content-Type", contentType != null ?
contentType.toString() : "text/html; charset=ISO-8859-1");
servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
}
// Detect Jetty versions affected by a flush bug; padding is skipped there.
boolean eclipse362468 = false;
String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
if (serverInfo.indexOf("jetty") != -1) {
String[] jettyVersion = serverInfo.substring(6).split("\\.");
// https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
|| ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
if (comments && eclipse362468) {
logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
}
}
// Write streaming padding (junk) so proxies/browsers start flushing early.
if (!eclipse362468 && comments && !resumeOnBroadcast) {
String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
location = null;
}
response.setResponse(b.entity(paddingData).build());
response.write();
}
// Flush the entity (unwrapping a Callable first) before suspending.
if (entity != null && flushEntity) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
} catch (Throwable t) {
logger.error("Error executing callable {}", entity);
entity = null;
}
if (location != null) {
b = b.header(HttpHeaders.LOCATION, location);
}
response.setResponse(b.entity(entity).build());
response.write();
}
// Entity has been written (or suppressed); suspend without re-flushing it.
response.setEntity(null);
r.suspend(timeout, false);
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter invalid mix of annotation.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
public List<ResourceFilter> create(AbstractMethod am) {
LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
Filter f;
if (logger.isDebugEnabled()) {
for (Annotation annotation : am.getAnnotations()) {
logger.debug("AtmosphereFilter processing annotation: {}", annotation);
}
}
// Only resource methods are filtered; fields/setters have no method.
if (am.getMethod() == null) {
return null;
}
// A SuspendResponse return type short-circuits all annotation handling.
if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
list.addLast(new Filter(Action.SUSPEND_RESPONSE));
return list;
}
// @Broadcast: broadcast the returned entity, optionally resuming listeners.
if (am.isAnnotationPresent(Broadcast.class)) {
int delay = am.getAnnotation(Broadcast.class).delay();
Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
} else {
f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
}
list.addLast(f);
// @Cluster piggybacks cluster-wide filters onto the broadcast filter.
if (am.isAnnotationPresent(Cluster.class)) {
broadcastFilter = am.getAnnotation(Cluster.class).value();
for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
try {
ClusterBroadcastFilter cbf = c.newInstance();
InjectorProvider.getInjector().inject(cbf);
cbf.setUri(am.getAnnotation(Cluster.class).name());
f.addCluster(cbf);
} catch (Throwable t) {
logger.warn("Invalid ClusterBroadcastFilter", t);
}
}
}
}
// @Asynchronous: suspend and broadcast asynchronously; waitFor == -1 means
// the broadcast waits for the resource to be suspended first.
if (am.isAnnotationPresent(Asynchronous.class)) {
int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
list.addFirst(f);
}
// @Suspend: suspend the response; a TrackableResource return type switches
// to the trackable action variant.
if (am.isAnnotationPresent(Suspend.class)) {
long suspendTimeout = am.getAnnotation(Suspend.class).period();
TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
suspendTimeout = translateTimeUnit(suspendTimeout, tu);
Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
// NOTE(review): when trackable AND resumeOnBroadcast are both set, the
// action becomes SUSPEND_TRACKABLE and the SUSPEND_RESUME semantics are
// dropped — confirm this is intentional.
if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
} else {
f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
}
f.setListeners(am.getAnnotation(Suspend.class).listeners());
list.addFirst(f);
}
// @Subscribe: suspend on a named topic with a 30s timeout.
if (am.isAnnotationPresent(Subscribe.class)) {
boolean trackable = false;
if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
trackable = true;
}
f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Subscribe.class).value());
f.setListeners(am.getAnnotation(Subscribe.class).listeners());
list.addFirst(f);
}
// @Publish: broadcast to a named topic without suspending.
if (am.isAnnotationPresent(Publish.class)) {
f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
false, null, am.getAnnotation(Publish.class).value());
list.addFirst(f);
}
// @Resume: resume a previously suspended response.
if (am.isAnnotationPresent(Resume.class)) {
useResumeAnnotation = true;
int suspendTimeout = am.getAnnotation(Resume.class).value();
list.addFirst(new Filter(Action.RESUME, suspendTimeout));
}
// @Schedule: periodic broadcast, optionally resuming on each broadcast.
if (am.isAnnotationPresent(Schedule.class)) {
int period = am.getAnnotation(Schedule.class).period();
int waitFor = am.getAnnotation(Schedule.class).waitFor();
if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
} else {
list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
}
}
// Nothing, normal Jersey application.
return list.size() > 0 ? list : null;
}
/**
 * Converts a period in the given unit to milliseconds.
 *
 * @param period the period, or -1 for "no timeout" (passed through as-is)
 * @param tu     the unit {@code period} is expressed in
 * @return the period in milliseconds
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    if (period == -1) {
        return period;
    }
    // Equivalent to the former per-unit switch: every case was
    // MILLISECONDS.convert(period, <unit>), and convert() from
    // MICROSECONDS/NANOSECONDS truncates toward zero exactly as before.
    return TimeUnit.MILLISECONDS.convert(period, tu);
}
}
/*
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2007-2008 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can obtain
* a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
* or glassfish/bootstrap/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
* Sun designates this particular file as subject to the "Classpath" exception
* as provided by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the License
* Header, with the fields enclosed by brackets [] replaced by your own
* identifying information: "Portions Copyrighted [year]
* [name of copyright owner]"
*
* Contributor(s):
*
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package org.atmosphere.jersey;
import com.sun.jersey.api.JResponseAsResponse;
import com.sun.jersey.api.model.AbstractMethod;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import com.sun.jersey.spi.container.ResourceFilterFactory;
import org.atmosphere.annotation.Asynchronous;
import org.atmosphere.annotation.Broadcast;
import org.atmosphere.annotation.Cluster;
import org.atmosphere.annotation.Publish;
import org.atmosphere.annotation.Resume;
import org.atmosphere.annotation.Schedule;
import org.atmosphere.annotation.Subscribe;
import org.atmosphere.annotation.Suspend;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereEventLifecycle;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceEvent;
import org.atmosphere.cpr.AtmosphereResourceEventListener;
import org.atmosphere.cpr.AtmosphereResourceEventListenerAdapter;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.BroadcastFilter;
import org.atmosphere.cpr.Broadcaster;
import org.atmosphere.cpr.BroadcasterConfig;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.ClusterBroadcastFilter;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.Trackable;
import org.atmosphere.di.InjectorProvider;
import org.atmosphere.websocket.WebSocket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.atmosphere.cpr.HeaderConfig.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.atmosphere.cpr.HeaderConfig.CACHE_CONTROL;
import static org.atmosphere.cpr.HeaderConfig.EXPIRES;
import static org.atmosphere.cpr.HeaderConfig.JSONP_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.LONG_POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.POLLING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.PRAGMA;
import static org.atmosphere.cpr.HeaderConfig.STREAMING_TRANSPORT;
import static org.atmosphere.cpr.HeaderConfig.WEBSOCKET_UPGRADE;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_ERROR;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRACKING_ID;
import static org.atmosphere.cpr.HeaderConfig.X_ATMOSPHERE_TRANSPORT;
/**
* {@link ResourceFilterFactory} which intercept the response and appropriately
* set the {@link AtmosphereResourceEvent} field based on the annotations the application
* has defined.
*
* @author Jeanfrancois Arcand
*/
public class AtmosphereFilter implements ResourceFilterFactory {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereFilter.class);
// Request/session attribute keys used to link suspended resources to requests.
public final static String SUSPENDED_RESOURCE = AtmosphereFilter.class.getName() + ".suspendedResource";
public final static String RESUME_UUID = AtmosphereFilter.class.getName() + ".uuid";
public final static String RESUME_CANDIDATES = AtmosphereFilter.class.getName() + ".resumeCandidates";
// NOTE(review): these two keys lack the "." separator the keys above use —
// confirm the concatenated form is intentional before normalizing.
public final static String INJECTED_BROADCASTER = AtmosphereFilter.class.getName() + "injectedBroadcaster";
public final static String INJECTED_TRACKABLE = AtmosphereFilter.class.getName() + "injectedTrackable";
// The lifecycle action a Filter instance applies in filter() — one per
// supported annotation/return-type combination.
enum Action {
SUSPEND, RESUME, BROADCAST, SUSPEND_RESUME,
SCHEDULE_RESUME, RESUME_ON_BROADCAST, NONE, SCHEDULE, SUSPEND_RESPONSE,
SUSPEND_TRACKABLE, SUBSCRIBE, SUBSCRIBE_TRACKABLE, PUBLISH, ASYNCHRONOUS
}
private
@Context
HttpServletRequest servletReq;
private
@Context
UriInfo uriInfo;
// Set when @Resume is seen (or forced via SUPPORT_LOCATION_HEADER) so that
// suspend() emits a resumable Location header.
private boolean useResumeAnnotation = false;
// Maps the UUID from the Location header back to the suspended resource.
private final ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>> resumeCandidates =
new ConcurrentHashMap<String, AtmosphereResource<HttpServletRequest, HttpServletResponse>>();
private class Filter implements ResourceFilter, ContainerResponseFilter {
// The lifecycle action this filter instance performs.
private final Action action;
// Suspend timeout in milliseconds; -1 means no timeout.
private final long timeout;
// -1 makes an asynchronous broadcast wait for the resource to suspend first.
private final int waitFor;
private final Suspend.SCOPE scope;
// BroadcastFilter classes to install on the broadcaster (may be null).
private final Class<BroadcastFilter>[] filters;
private Class<? extends AtmosphereResourceEventListener>[] listeners = null;
private final boolean outputComments;
// Cluster filters registered via addCluster(); installed first in configureFilter().
private final ArrayList<ClusterBroadcastFilter> clusters
= new ArrayList<ClusterBroadcastFilter>();
// Header name (or topic) used by @Subscribe/@Publish/@Asynchronous lookups.
private final String topic;
// Telescoping constructor chain; each overload fills in defaults
// (timeout -1, waitFor 0, APPLICATION scope, comments on, no filters/topic)
// and delegates to the full constructor below.
protected Filter(Action action) {
this(action, -1);
}
protected Filter(Action action, long timeout) {
this(action, timeout, 0);
}
protected Filter(Action action, long timeout, int waitFor) {
this(action, timeout, waitFor, Suspend.SCOPE.APPLICATION);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope) {
this(action, timeout, waitFor, scope, true);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments) {
this(action, timeout, waitFor, scope, outputComments, null, null);
}
protected Filter(Action action, long timeout, int waitFor, Suspend.SCOPE scope, boolean outputComments, Class<BroadcastFilter>[] filters, String topic) {
this.action = action;
this.timeout = timeout;
this.scope = scope;
this.outputComments = outputComments;
this.waitFor = waitFor;
this.filters = filters;
this.topic = topic;
}
// This filter only processes responses; no request-side filtering.
public ContainerRequestFilter getRequestFilter() {
return null;
}
// The Filter itself implements ContainerResponseFilter; see filter() below.
public ContainerResponseFilter getResponseFilter() {
return this;
}
<<<<<<< MINE
boolean resumeOnBroadcast(boolean resumeOnBroadcast) {
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
=======
boolean resumeOnBroadcast(ContainerRequest request, boolean resumeOnBroadcast) {
String transport = request.getHeaderValue(X_ATMOSPHERE_TRANSPORT);
>>>>>>> YOURS
if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
return true;
}
return resumeOnBroadcast;
}
/**
 * Decides whether padding junk must be written before suspending.
 * WebSocket upgrades and one-shot transports (JSONP, long-polling)
 * never receive junk; otherwise the annotation value wins.
 */
boolean outputJunk(boolean outputJunk) {
    boolean webSocketEnabled = false;
    Enumeration<String> connection = servletReq.getHeaders("Connection");
    if (connection != null && connection.hasMoreElements()) {
        // Only the first Connection header value is inspected, as before.
        for (String token : connection.nextElement().split(",")) {
            if (token.trim().equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                webSocketEnabled = true;
                break;
            }
        }
    }
    if (webSocketEnabled) {
        return false;
    }
    String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
    if (transport != null && (transport.equals(JSONP_TRANSPORT) || transport.equals(LONG_POLLING_TRANSPORT))) {
        return false;
    }
    return outputJunk;
}
/**
* Configure the {@link AtmosphereResourceEvent} state (suspend, resume, broadcast)
* based on the annotation the web application has used.
*
* @param request the {@link ContainerRequest}
* @param response the {@link ContainerResponse}
* @return the {@link ContainerResponse}
*/
public ContainerResponse filter(final ContainerRequest request, final ContainerResponse response) {
if (response.getMappedThrowable() != null) {
return response;
}
AtmosphereResource<HttpServletRequest, HttpServletResponse> r =
(AtmosphereResource<HttpServletRequest, HttpServletResponse>) servletReq
.getAttribute(FrameworkConfig.ATMOSPHERE_RESOURCE);
if (Boolean.parseBoolean((String) servletReq.getAttribute(ApplicationConfig.SUPPORT_LOCATION_HEADER))) {
useResumeAnnotation = true;
}
switch (action) {
case ASYNCHRONOUS:
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
String transport = servletReq.getHeader(X_ATMOSPHERE_TRANSPORT);
String broadcasterName = servletReq.getHeader(topic);
if (transport == null || broadcasterName == null) {
throw new WebApplicationException(new IllegalStateException("Must specify transport using header value "
+ X_ATMOSPHERE_TRANSPORT
+ " and uuid " + X_ATMOSPHERE_TRACKING_ID));
}
String subProtocol = (String) servletReq.getAttribute(FrameworkConfig.WEBSOCKET_SUBPROTOCOL);
final boolean waitForResource = waitFor == -1 ? true : false;
final Broadcaster bcaster = BroadcasterFactory.getDefault().lookup(broadcasterName, true);
if (!transport.startsWith(POLLING_TRANSPORT) && subProtocol == null) {
boolean outputJunk = transport.equalsIgnoreCase(STREAMING_TRANSPORT);
final boolean resumeOnBroadcast = resumeOnBroadcast(false);
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener));
}
}
final Object entity = response.getEntity();
r.addEventListener(new AtmosphereResourceEventListenerAdapter() {
@Override
public void onSuspend(AtmosphereResourceEvent<HttpServletRequest, HttpServletResponse> event) {
try {
if (entity != null) {
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
}
} finally {
event.getResource().removeEventListener(this);
}
}
});
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
r.setBroadcaster(bcaster);
executeSuspend(r, timeout, outputJunk, resumeOnBroadcast, null, request, response, false);
} else {
Object entity = response.getEntity();
if (waitForResource) {
bcaster.awaitAndBroadcast(entity, 30, TimeUnit.SECONDS);
} else {
bcaster.broadcast(entity);
}
if (subProtocol == null) {
try {
if (Callable.class.isAssignableFrom(entity.getClass())) {
entity = Callable.class.cast(entity).call();
}
response.setEntity(entity);
response.write();
} catch (Throwable t) {
logger.debug("Error running Callable", t);
response.setEntity(null);
}
} else {
response.setEntity(null);
}
}
break;
case SUSPEND_RESPONSE:
SuspendResponse<?> s = SuspendResponse.class.cast(JResponseAsResponse.class.cast(response.getResponse()).getJResponse());
boolean outputJunk = outputJunk(s.outputComments());
boolean resumeOnBroadcast = resumeOnBroadcast(s.resumeOnBroadcast());
for (AtmosphereResourceEventListener el : s.listeners()) {
if (r instanceof AtmosphereEventLifecycle) {
r.addEventListener(el);
}
}
Broadcaster bc = s.broadcaster();
if (bc == null && s.scope() != Suspend.SCOPE.REQUEST) {
bc = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
}
boolean supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
boolean isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
TrackableResource<? extends Trackable> trackableResource = null;
if (isTracked) {
trackableResource = preTrack(request, response);
}
suspend(resumeOnBroadcast, outputJunk,
translateTimeUnit(s.period().value(), s.period().timeUnit()), request, response, bc, r, s.scope());
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case SUBSCRIBE_TRACKABLE:
case SUBSCRIBE:
case SUSPEND:
case SUSPEND_TRACKABLE:
case SUSPEND_RESUME:
outputJunk = outputJunk(outputComments);
resumeOnBroadcast = resumeOnBroadcast((action == Action.SUSPEND_RESUME));
for (Class<? extends AtmosphereResourceEventListener> listener : listeners) {
try {
AtmosphereResourceEventListener el = listener.newInstance();
InjectorProvider.getInjector().inject(el);
if (r instanceof AtmosphereEventLifecycle) {
((AtmosphereEventLifecycle) r).addEventListener(el);
}
} catch (Throwable t) {
throw new WebApplicationException(
new IllegalStateException("Invalid AtmosphereResourceEventListener " + listener, t));
}
}
Broadcaster broadcaster = (Broadcaster) servletReq.getAttribute(INJECTED_BROADCASTER);
// @Subscribe
if (action == Action.SUBSCRIBE) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
broadcaster = BroadcasterFactory.getDefault().lookup(c, topic, true);
}
// Tracking is enabled by default
supportTrackable = servletReq.getAttribute(ApplicationConfig.SUPPORT_TRACKABLE) != null;
// Register our TrackableResource
isTracked = response.getEntity() != null ? TrackableResource.class.isAssignableFrom(response.getEntity().getClass()) : supportTrackable;
if (isTracked) {
trackableResource = preTrack(request, response);
} else {
trackableResource = null;
}
suspend(resumeOnBroadcast, outputJunk, timeout, request, response,
broadcaster, r, scope);
// Associate the tracked resource.
if (isTracked && trackableResource != null) {
postTrack(trackableResource, r);
}
break;
case RESUME:
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
if (sessionSupported) {
r = (AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
} else {
String path = response.getContainerRequest().getPath();
r = resumeCandidates.remove(path.substring(path.lastIndexOf("/") + 1));
}
if (r != null) {
resume(r);
} else {
throw new WebApplicationException(
new IllegalStateException("Unable to retrieve suspended Response. " +
"Either session-support is not enabled in atmosphere.xml or the" +
"path used to resume is invalid."));
}
break;
case BROADCAST:
case PUBLISH:
case RESUME_ON_BROADCAST:
AtmosphereResource ar = (AtmosphereResource) servletReq.getAttribute(SUSPENDED_RESOURCE);
if (ar != null) {
r = ar;
}
if (action == Action.PUBLISH) {
Class<Broadcaster> c = null;
try {
c = (Class<Broadcaster>) Class.forName((String) servletReq.getAttribute(ApplicationConfig.BROADCASTER_CLASS));
} catch (Throwable e) {
throw new IllegalStateException(e.getMessage());
}
r.setBroadcaster(BroadcasterFactory.getDefault().lookup(c, topic, true));
}
broadcast(response, r, timeout);
break;
case SCHEDULE:
case SCHEDULE_RESUME:
Object o = response.getEntity();
Broadcaster b = r.getBroadcaster();
if (response.getEntity() instanceof Broadcastable) {
b = ((Broadcastable) response.getEntity()).getBroadcaster();
o = ((Broadcastable) response.getEntity()).getMessage();
response.setEntity(((Broadcastable) response.getEntity()).getResponseMessage());
}
if (response.getEntity() != null) {
try {
response.write();
} catch (IOException ex) {
throw new WebApplicationException(ex);
}
}
if (action == Action.SCHEDULE_RESUME) {
configureResumeOnBroadcast(b);
}
b.scheduleFixedBroadcast(o, waitFor, timeout, TimeUnit.SECONDS);
break;
}
return response;
}
/**
 * Prepares a {@link TrackableResource} for this request/response pair before suspension.
 * If the response entity is already a TrackableResource, its wrapped entity replaces the
 * response entity; otherwise a fresh TrackableResource is created. The tracking ID is
 * resolved in priority order: request header, the resource's own ID, then a random UUID.
 * The resolved ID is propagated to the response headers and the servlet request.
 *
 * @param request  the current container request (read for the tracking-ID header)
 * @param response the current container response (entity and headers are mutated)
 * @return the tracked resource, registered with the default {@link TrackableSession}
 */
TrackableResource preTrack(ContainerRequest request, ContainerResponse response) {
    // cast() returns null when the entity is null; non-TrackableResource entities
    // would throw ClassCastException — callers only invoke this when tracking applies.
    TrackableResource<? extends Trackable> trackableResource = TrackableResource.class.cast(response.getEntity());
    if (trackableResource == null) {
        trackableResource = new TrackableResource<AtmosphereResource>(AtmosphereResource.class, servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID), "");
    } else {
        // Unwrap: the client sees the inner entity, not the tracking wrapper.
        response.setEntity(trackableResource.entity());
    }
    String trackableUUID = request.getHeaderValue(X_ATMOSPHERE_TRACKING_ID);
    if (trackableUUID == null && trackableResource.trackingID() != null) {
        trackableUUID = trackableResource.trackingID();
    } else if (trackableUUID == null) {
        trackableUUID = UUID.randomUUID().toString();
    }
    trackableResource.setTrackingID(trackableUUID);
    TrackableSession.getDefault().track(trackableResource);
    // Echo the final tracking ID back to the client and stash it for later filters.
    response.getHttpHeaders().putSingle(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    servletReq.setAttribute(X_ATMOSPHERE_TRACKING_ID, trackableResource.trackingID());
    return trackableResource;
}
/**
 * Associates the concrete tracked object with the {@link TrackableResource}:
 * the resource itself when the trackable type is an AtmosphereResource,
 * otherwise its {@link Broadcaster}.
 *
 * @param trackableResource the tracked wrapper created by {@code preTrack}
 * @param r                 the suspended resource for this request
 */
void postTrack(TrackableResource trackableResource, AtmosphereResource r) {
    // Dropped the redundant "? true : false" — isAssignableFrom already yields a boolean.
    boolean isAresource = AtmosphereResource.class.isAssignableFrom(trackableResource.type());
    trackableResource.setResource(isAresource ? r : r.getBroadcaster());
}
/**
 * Adds transport-related headers to the response being built: a WebSocket error
 * header when the client asked for an upgrade the container cannot honor, the
 * standard anti-caching headers when configured, and CORS headers when enabled.
 *
 * @param b the response builder to augment
 * @return the (possibly replaced) builder with the extra headers applied
 * @throws IOException declared for callers; no I/O is performed here directly
 */
Response.ResponseBuilder configureHeaders(Response.ResponseBuilder b) throws IOException {
    boolean webSocketSupported = servletReq.getAttribute(WebSocket.WEBSOCKET_SUSPEND) != null;
    if (servletReq.getHeaders("Connection") != null && servletReq.getHeaders("Connection").hasMoreElements()) {
        // Only the first Connection header value is inspected; it may carry a
        // comma-separated token list (e.g. "keep-alive, Upgrade").
        String[] e = ((Enumeration<String>) servletReq.getHeaders("Connection")).nextElement().split(",");
        for (String upgrade : e) {
            if (upgrade != null && upgrade.equalsIgnoreCase(WEBSOCKET_UPGRADE)) {
                if (!webSocketSupported) {
                    // Signal to the client that the websocket upgrade cannot be honored.
                    b = b.header(X_ATMOSPHERE_ERROR, "Websocket protocol not supported");
                }
            }
        }
    }
    boolean injectCacheHeaders = (Boolean) servletReq.getAttribute(ApplicationConfig.NO_CACHE_HEADERS);
    boolean enableAccessControl = (Boolean) servletReq.getAttribute(ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    if (injectCacheHeaders) {
        // Set to expire far in the past.
        b = b.header(EXPIRES, "-1");
        // Set standard HTTP/1.1 no-cache headers.
        b = b.header(CACHE_CONTROL, "no-store, no-cache, must-revalidate");
        // Set standard HTTP/1.0 no-cache header.
        b = b.header(PRAGMA, "no-cache");
    }
    if (enableAccessControl) {
        // Permissive CORS: any origin, credentials allowed.
        b = b.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        b = b.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
    }
    return b;
}
/**
 * Flags every resource currently attached to the broadcaster so that it
 * resumes as soon as the next broadcast is delivered.
 *
 * @param b the broadcaster whose suspended resources should resume on broadcast
 */
void configureResumeOnBroadcast(Broadcaster b) {
    for (AtmosphereResource<?, ?> resource : b.getAtmosphereResources()) {
        HttpServletRequest request = (HttpServletRequest) resource.getRequest();
        request.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, true);
    }
}
/**
 * Installs the configured {@link ClusterBroadcastFilter}s and
 * {@link BroadcastFilter}s on the broadcaster's config, once.
 *
 * @param bc the broadcaster to configure; must not be null
 * @throws WebApplicationException if {@code bc} is null
 */
void configureFilter(Broadcaster bc) {
    if (bc == null) throw new WebApplicationException(new IllegalStateException("Broadcaster cannot be null"));
    /**
     * Here we can't predict if it's the same set of filter shared across all Broadcaster as
     * Broadcaster can have their own BroadcasterConfig instance.
     */
    BroadcasterConfig c = bc.getBroadcasterConfig();
    // Already configured
    if (c.hasFilters()) {
        return;
    }
    // Always the first one, before any transformation/filtering
    for (ClusterBroadcastFilter cbf : clusters) {
        cbf.setBroadcaster(bc);
        c.addFilter(cbf);
    }
    if (filters != null) {
        for (Class<BroadcastFilter> filter : filters) {
            try {
                BroadcastFilter f = filter.newInstance();
                InjectorProvider.getInjector().inject(f);
                // Register only on success. The previous code called addFilter
                // outside the try, so a failed newInstance() still registered a
                // null (first iteration) or stale (later iterations) instance.
                c.addFilter(f);
            } catch (Throwable t) {
                logger.warn("Invalid @BroadcastFilter: " + filter, t);
            }
        }
    }
}
/**
 * Records the {@link AtmosphereResourceEventListener} classes to instantiate
 * and attach when this filter suspends a resource.
 *
 * @param listeners listener classes taken from the processed annotation
 */
private void setListeners(Class<? extends AtmosphereResourceEventListener>[] listeners) {
    this.listeners = listeners;
}
/**
 * Broadcasts the response entity through the resource's broadcaster.
 * A {@link Broadcastable} entity may override the broadcaster, supply a
 * distinct broadcast message, and a separate message to return to the caller.
 *
 * @param r     the container response whose entity is broadcast (entity is mutated)
 * @param ar    the resource providing the default broadcaster
 * @param delay -1 broadcasts immediately and waits for delivery; 0 delays
 *              indefinitely; any other value delays that many seconds
 */
void broadcast(ContainerResponse r, AtmosphereResource ar, long delay) {
    Object o = r.getEntity();
    Broadcaster b = ar.getBroadcaster();
    Object msg = o;
    String returnMsg = null;
    // Something went wrong if null.
    if (o instanceof Broadcastable) {
        if (((Broadcastable) o).getBroadcaster() != null) {
            b = ((Broadcastable) o).getBroadcaster();
        }
        msg = ((Broadcastable) o).getMessage();
        returnMsg = ((Broadcastable) o).getResponseMessage().toString();
    }
    if (action == Action.RESUME_ON_BROADCAST) {
        configureResumeOnBroadcast(b);
    }
    if (o != null) {
        addFilter(b);
        try {
            r.setEntity(msg);
            if (msg == null) return;
            if (delay == -1) {
                Future<Object> f = b.broadcast(msg);
                if (f == null) return;
                // Block until the broadcast completes (result itself is unused;
                // the previous unused local was dropped).
                f.get();
                if (o instanceof Broadcastable) {
                    r.setEntity(returnMsg);
                }
            } else if (delay == 0) {
                b.delayBroadcast(msg);
            } else {
                b.delayBroadcast(msg, delay, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ex) {
            // Restore the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
            logger.error("broadcast interrupted", ex);
        } catch (ExecutionException ex) {
            logger.error("execution exception during broadcast", ex);
        }
    }
}
/**
 * Ensures the broadcaster has its filters installed before broadcasting.
 * Thin alias for {@link #configureFilter}.
 *
 * @param bc the broadcaster to configure
 */
void addFilter(Broadcaster bc) {
    configureFilter(bc);
}
/**
 * Resumes a previously suspended resource, completing its response.
 *
 * @param resource the suspended resource to resume
 */
void resume(AtmosphereResource resource) {
    resource.resume();
}
/**
 * Registers a cluster filter; it is applied first when the broadcaster's
 * filters are configured.
 *
 * @param f the cluster broadcast filter to add
 */
void addCluster(ClusterBroadcastFilter f) {
    clusters.add(f);
}
void suspend(boolean resumeOnBroadcast,
boolean comments,
long timeout,
ContainerRequest request,
ContainerResponse response,
Broadcaster bc,
AtmosphereResource<HttpServletRequest, HttpServletResponse> r,
Suspend.SCOPE localScope) {
// Force the status code to 200 events independently of the value of the entity (null or not)
if (response.getStatus() == 204) {
response.setStatus(200);
}
BroadcasterFactory broadcasterFactory = (BroadcasterFactory) servletReq
.getAttribute(ApplicationConfig.BROADCASTER_FACTORY);
boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
URI location = null;
// Do not add location header if already there.
if (useResumeAnnotation && !sessionSupported && !resumeOnBroadcast && response.getHttpHeaders().getFirst("Location") == null) {
String uuid = UUID.randomUUID().toString();
location = uriInfo.getAbsolutePathBuilder().path(uuid).build("");
resumeCandidates.put(uuid, r);
servletReq.setAttribute(RESUME_UUID, uuid);
servletReq.setAttribute(RESUME_CANDIDATES, resumeCandidates);
}
if (bc == null && localScope != Suspend.SCOPE.REQUEST) {
bc = r.getBroadcaster();
}
if (sessionSupported && localScope != Suspend.SCOPE.REQUEST && servletReq.getSession().getAttribute(SUSPENDED_RESOURCE) != null) {
AtmosphereResource<HttpServletRequest, HttpServletResponse> cached =
(AtmosphereResource) servletReq.getSession().getAttribute(SUSPENDED_RESOURCE);
bc = cached.getBroadcaster();
// Just in case something went wrong.
try {
bc.removeAtmosphereResource(cached);
} catch (IllegalStateException ex) {
logger.trace(ex.getMessage(), ex);
}
}
if (response.getEntity() instanceof Broadcastable) {
Broadcastable b = (Broadcastable) response.getEntity();
bc = b.getBroadcaster();
response.setEntity(b.getResponseMessage());
}
if ((localScope == Suspend.SCOPE.REQUEST) && bc == null) {
if (bc == null) {
try {
String id = servletReq.getHeader(X_ATMOSPHERE_TRACKING_ID);
<<<<<<< MINE
if (id == null) {
=======
if (id == null){
>>>>>>> YOURS
id = UUID.randomUUID().toString();
}
bc = broadcasterFactory.get(id);
bc.setScope(Broadcaster.SCOPE.REQUEST);
} catch (Exception ex) {
logger.error("failed to instantiate broadcaster with factory: " + broadcasterFactory, ex);
}
} else {
bc.setScope(Broadcaster.SCOPE.REQUEST);
}
}
r.setBroadcaster(bc);
if (resumeOnBroadcast) {
servletReq.setAttribute(ApplicationConfig.RESUME_ON_BROADCAST, new Boolean(true));
}
executeSuspend(r, timeout, comments, resumeOnBroadcast, location, request, response, true);
}
/**
 * Performs the actual suspension: links the resource and response to the
 * servlet request (and session when supported), resolves the content type,
 * optionally writes streaming padding and the entity, then suspends.
 *
 * @param r                 the resource to suspend
 * @param timeout           suspend timeout in milliseconds
 * @param comments          whether to write streaming padding before suspending
 * @param resumeOnBroadcast whether the resource resumes on the next broadcast
 * @param location          optional resume URI to expose as a Location header
 * @param request           the current container request
 * @param response          the current container response
 * @param flushEntity       whether a non-null entity is written before suspending
 * @throws WebApplicationException wrapping any IOException raised while writing
 */
void executeSuspend(AtmosphereResource r,
                    long timeout,
                    boolean comments,
                    boolean resumeOnBroadcast,
                    URI location,
                    ContainerRequest request,
                    ContainerResponse response,
                    boolean flushEntity) {
    boolean sessionSupported = (Boolean) servletReq.getAttribute(FrameworkConfig.SUPPORT_SESSION);
    configureFilter(r.getBroadcaster());
    if (sessionSupported) {
        servletReq.getSession().setAttribute(SUSPENDED_RESOURCE, r);
        servletReq.getSession().setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    }
    servletReq.setAttribute(SUSPENDED_RESOURCE, r);
    servletReq.setAttribute(FrameworkConfig.CONTAINER_RESPONSE, response);
    logger.debug("Linked HttpServletRequest {} with ContainerResponse {}", servletReq, response);
    // Set the content-type based on the returned entity.
    try {
        MediaType contentType = response.getMediaType();
        if (contentType == null && response.getEntity() != null) {
            LinkedList<MediaType> l = new LinkedList<MediaType>();
            // Will return the first acceptable media type.
            l.add(request.getAcceptableMediaType(new LinkedList<MediaType>()));
            contentType = response.getMessageBodyWorkers().getMessageBodyWriterMediaType(
                    response.getEntity().getClass(),
                    response.getEntityType(),
                    response.getAnnotations(),
                    l);
            // Wildcards are not a writable content type; fall back to octet-stream.
            if (contentType == null ||
                    contentType.isWildcardType() || contentType.isWildcardSubtype())
                contentType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
        Object entity = response.getEntity();
        Response.ResponseBuilder b = Response.ok();
        b = configureHeaders(b);
        if (entity != null) {
            b = b.header("Content-Type", contentType != null ?
                    contentType.toString() : "text/html; charset=ISO-8859-1");
            servletReq.setAttribute(FrameworkConfig.EXPECTED_CONTENT_TYPE, contentType.toString());
        }
        boolean eclipse362468 = false;
        String serverInfo = r.getAtmosphereConfig().getServletContext().getServerInfo();
        if (serverInfo.indexOf("jetty") != -1) {
            // Parse the Jetty version out of e.g. "jetty/8.0.2...".
            String[] jettyVersion = serverInfo.substring(6).split("\\.");
            // https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468
            eclipse362468 = ((Integer.valueOf(jettyVersion[0]) == 8 && Integer.valueOf(jettyVersion[1]) == 0 && Integer.valueOf(jettyVersion[2]) > 1))
                    || ((Integer.valueOf(jettyVersion[0]) == 7 && Integer.valueOf(jettyVersion[1]) == 5 && Integer.valueOf(jettyVersion[2]) == 4));
            if (comments && eclipse362468) {
                logger.debug("Padding response is disabled to workaround https://bugs.eclipse.org/bugs/show_bug.cgi?id=362468");
            }
        }
        if (!eclipse362468 && comments && !resumeOnBroadcast) {
            String padding = (String) servletReq.getAttribute(ApplicationConfig.STREAMING_PADDING_MODE);
            String paddingData = AtmosphereResourceImpl.createStreamingPadding(padding);
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
                // Null out so the entity branch below does not add Location twice.
                location = null;
            }
            response.setResponse(b.entity(paddingData).build());
            response.write();
        }
        if (entity != null && flushEntity) {
            try {
                // A Callable entity is evaluated lazily, right before writing.
                if (Callable.class.isAssignableFrom(entity.getClass())) {
                    entity = Callable.class.cast(entity).call();
                }
            } catch (Throwable t) {
                logger.error("Error executing callable {}", entity);
                entity = null;
            }
            if (location != null) {
                b = b.header(HttpHeaders.LOCATION, location);
            }
            response.setResponse(b.entity(entity).build());
            response.write();
        }
        response.setEntity(null);
        r.suspend(timeout, false);
    } catch (IOException ex) {
        throw new WebApplicationException(ex);
    }
}
}
/**
* Create a {@link ResourceFilter} which contains the information about the
* annotation being processed.
* <p/>
* XXX Need to filter invalid mix of annotation.
*
* @param am an {@link AbstractMethod}
* @return a List of {@link ResourceFilter} to invoke.
*/
/**
 * Builds the list of {@link ResourceFilter}s for a resource method based on
 * the Atmosphere annotations present on it. Returns null when the method is
 * not an Atmosphere-annotated method (a plain Jersey resource).
 *
 * @param am the abstract method being inspected
 * @return the filters to invoke, or null when none apply
 */
public List<ResourceFilter> create(AbstractMethod am) {
    LinkedList<ResourceFilter> list = new LinkedList<ResourceFilter>();
    Filter f;
    if (logger.isDebugEnabled()) {
        for (Annotation annotation : am.getAnnotations()) {
            logger.debug("AtmosphereFilter processing annotation: {}", annotation);
        }
    }
    if (am.getMethod() == null) {
        return null;
    }
    // A SuspendResponse return type short-circuits all annotation handling.
    if (SuspendResponse.class.isAssignableFrom(am.getMethod().getReturnType())) {
        list.addLast(new Filter(Action.SUSPEND_RESPONSE));
        return list;
    }
    if (am.isAnnotationPresent(Broadcast.class)) {
        int delay = am.getAnnotation(Broadcast.class).delay();
        Class[] broadcastFilter = am.getAnnotation(Broadcast.class).value();
        if (am.getAnnotation(Broadcast.class).resumeOnBroadcast()) {
            f = new Filter(Action.RESUME_ON_BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        } else {
            f = new Filter(Action.BROADCAST, delay, 0, Suspend.SCOPE.APPLICATION, true, broadcastFilter, null);
        }
        list.addLast(f);
        // @Cluster adds cluster filters on top of the broadcast filter.
        if (am.isAnnotationPresent(Cluster.class)) {
            broadcastFilter = am.getAnnotation(Cluster.class).value();
            for (Class<ClusterBroadcastFilter> c : broadcastFilter) {
                try {
                    ClusterBroadcastFilter cbf = c.newInstance();
                    InjectorProvider.getInjector().inject(cbf);
                    cbf.setUri(am.getAnnotation(Cluster.class).name());
                    f.addCluster(cbf);
                } catch (Throwable t) {
                    logger.warn("Invalid ClusterBroadcastFilter", t);
                }
            }
        }
    }
    if (am.isAnnotationPresent(Asynchronous.class)) {
        int suspendTimeout = am.getAnnotation(Asynchronous.class).period();
        Class[] broadcastFilter = am.getAnnotation(Asynchronous.class).broadcastFilter();
        boolean wait = am.getAnnotation(Asynchronous.class).waitForResource();
        // waitFor of -1 means "await the resource" in the filter's broadcast path.
        f = new Filter(Action.ASYNCHRONOUS, suspendTimeout, wait ? -1 : 0, null, false, broadcastFilter, am.getAnnotation(Asynchronous.class).header());
        f.setListeners(am.getAnnotation(Asynchronous.class).eventListeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Suspend.class)) {
        long suspendTimeout = am.getAnnotation(Suspend.class).period();
        TimeUnit tu = am.getAnnotation(Suspend.class).timeUnit();
        suspendTimeout = translateTimeUnit(suspendTimeout, tu);
        Suspend.SCOPE scope = am.getAnnotation(Suspend.class).scope();
        boolean outputComments = am.getAnnotation(Suspend.class).outputComments();
        // A TrackableResource return type switches to the trackable variant.
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        if (am.getAnnotation(Suspend.class).resumeOnBroadcast()) {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND_RESUME, suspendTimeout, 0, scope, outputComments);
        } else {
            f = new Filter(trackable ? Action.SUSPEND_TRACKABLE : Action.SUSPEND, suspendTimeout, 0, scope, outputComments);
        }
        f.setListeners(am.getAnnotation(Suspend.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Subscribe.class)) {
        boolean trackable = false;
        if (TrackableResource.class.isAssignableFrom(am.getMethod().getReturnType())) {
            trackable = true;
        }
        f = new Filter(trackable ? Action.SUBSCRIBE_TRACKABLE : Action.SUBSCRIBE, 30000, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Subscribe.class).value());
        f.setListeners(am.getAnnotation(Subscribe.class).listeners());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Publish.class)) {
        f = new Filter(Action.PUBLISH, -1, -1, Suspend.SCOPE.APPLICATION,
                false, null, am.getAnnotation(Publish.class).value());
        list.addFirst(f);
    }
    if (am.isAnnotationPresent(Resume.class)) {
        useResumeAnnotation = true;
        int suspendTimeout = am.getAnnotation(Resume.class).value();
        list.addFirst(new Filter(Action.RESUME, suspendTimeout));
    }
    if (am.isAnnotationPresent(Schedule.class)) {
        int period = am.getAnnotation(Schedule.class).period();
        int waitFor = am.getAnnotation(Schedule.class).waitFor();
        if (am.getAnnotation(Schedule.class).resumeOnBroadcast()) {
            list.addFirst(new Filter(Action.SCHEDULE_RESUME, period, waitFor));
        } else {
            list.addFirst(new Filter(Action.SCHEDULE, period, waitFor));
        }
    }
    // Nothing, normal Jersey application.
    return list.size() > 0 ? list : null;
}
/**
 * Converts {@code period} from the given {@link TimeUnit} to milliseconds.
 * The previous exhaustive switch over every TimeUnit constant is exactly
 * {@link TimeUnit#toMillis(long)}, so delegate to it.
 *
 * @param period the duration, or -1 as the "suspend forever" sentinel
 * @param tu     the unit {@code period} is expressed in
 * @return the duration in milliseconds; -1 is passed through unchanged
 */
private long translateTimeUnit(long period, TimeUnit tu) {
    // -1 is a sentinel (no timeout) and must not be converted.
    if (period == -1) return period;
    return tu.toMillis(period);
}
}
Diff Result
No diff
Case 3 - java_atmosphere.rev_c044f_2cf68..EchoProtocol.java
Base
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
*
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {

    // Fix: log under EchoProtocol, not AtmosphereServlet — the original logger
    // was created with AtmosphereServlet.class, mis-attributing all log output.
    private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);

    /**
     * Broadcasts the received text frame verbatim through the resource's broadcaster.
     *
     * @param resource the resource the frame arrived on
     * @param data     the decoded text frame
     * @return always null — no request is dispatched for echo
     */
    @Override
    public HttpServletRequest parseMessage(AtmosphereResource resource, String data) {
        logger.trace("broadcast String");
        resource.getBroadcaster().broadcast(data);
        return null;
    }

    /**
     * Broadcasts the received binary frame. The relevant slice is copied so the
     * caller's buffer can be reused safely.
     *
     * @param resource the resource the frame arrived on
     * @param data     the frame buffer
     * @param offset   start of the payload within {@code data}
     * @param length   payload length
     * @return always null — no request is dispatched for echo
     */
    @Override
    public HttpServletRequest parseMessage(AtmosphereResource resource, byte[] data, int offset, int length) {
        logger.trace("broadcast byte");
        byte[] b = new byte[length];
        System.arraycopy(data, offset, b, 0, length);
        resource.getBroadcaster().broadcast(b);
        return null;
    }

    /** The echo protocol needs no configuration. */
    @Override
    public void configure(AtmosphereServlet.AtmosphereConfig config) {
    }
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
*
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {

    // Fix: log under EchoProtocol, not AtmosphereServlet — the original logger
    // was created with AtmosphereServlet.class, mis-attributing all log output.
    private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);

    /**
     * Broadcasts the received text frame verbatim through the resource's broadcaster.
     *
     * @param resource the resource the frame arrived on
     * @param data     the decoded text frame
     * @return always null — no request is dispatched for echo
     */
    @Override
    public HttpServletRequest parseMessage(AtmosphereResource resource, String data) {
        logger.trace("broadcast String");
        resource.getBroadcaster().broadcast(data);
        return null;
    }

    /**
     * Broadcasts the received binary frame. The relevant slice is copied so the
     * caller's buffer can be reused safely.
     *
     * @param resource the resource the frame arrived on
     * @param data     the frame buffer
     * @param offset   start of the payload within {@code data}
     * @param length   payload length
     * @return always null — no request is dispatched for echo
     */
    @Override
    public HttpServletRequest parseMessage(AtmosphereResource resource, byte[] data, int offset, int length) {
        logger.trace("broadcast byte");
        byte[] b = new byte[length];
        System.arraycopy(data, offset, b, 0, length);
        resource.getBroadcaster().broadcast(b);
        return null;
    }

    /** The echo protocol needs no configuration. */
    @Override
    public void configure(AtmosphereServlet.AtmosphereConfig config) {
    }
}
Left
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
/**
 * Stateless echo protocol: every frame received on a WebSocket is rebroadcast
 * through the broadcaster of the resource attached to that socket.
 */
public class EchoProtocol implements WebSocketProtocol {

    private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);

    /**
     * Echoes a text frame by broadcasting it unchanged.
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
        logger.trace("broadcast String");
        webSocket.resource().getBroadcaster().broadcast(data);
        return null;
    }

    /**
     * Echoes a binary frame; the payload slice is copied before broadcasting so
     * the caller's buffer may be reused.
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
        logger.trace("broadcast byte");
        byte[] slice = new byte[length];
        System.arraycopy(data, offset, slice, 0, length);
        webSocket.resource().getBroadcaster().broadcast(slice);
        return null;
    }

    /** No configuration is required. {@inheritDoc} */
    @Override
    public void configure(AtmosphereServlet.AtmosphereConfig config) {
    }

    /** Nothing to do when a socket opens. {@inheritDoc} */
    @Override
    public void onOpen(WebSocket webSocket) {
    }

    /** Nothing to do when a socket closes. {@inheritDoc} */
    @Override
    public void onClose(WebSocket webSocket) {
    }

    /** Logs the failure along with its HTTP status and message. {@inheritDoc} */
    @Override
    public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
        logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
    }

    /** This protocol never rewrites outgoing responses. {@inheritDoc} */
    @Override
    public boolean inspectResponse() {
        return false;
    }

    /** Never invoked because {@link #inspectResponse()} returns false. */
    @Override
    public String handleResponse(AtmosphereResponse<?> res, String message) {
        return message;
    }

    /** Never invoked because {@link #inspectResponse()} returns false. */
    @Override
    public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
        return message;
    }
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
/**
 * Stateless echo protocol: every frame received on a WebSocket is rebroadcast
 * through the broadcaster of the resource attached to that socket.
 */
public class EchoProtocol implements WebSocketProtocol {

    private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);

    /**
     * Echoes a text frame by broadcasting it unchanged.
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
        logger.trace("broadcast String");
        webSocket.resource().getBroadcaster().broadcast(data);
        return null;
    }

    /**
     * Echoes a binary frame; the payload slice is copied before broadcasting so
     * the caller's buffer may be reused.
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
        logger.trace("broadcast byte");
        byte[] slice = new byte[length];
        System.arraycopy(data, offset, slice, 0, length);
        webSocket.resource().getBroadcaster().broadcast(slice);
        return null;
    }

    /** No configuration is required. {@inheritDoc} */
    @Override
    public void configure(AtmosphereServlet.AtmosphereConfig config) {
    }

    /** Nothing to do when a socket opens. {@inheritDoc} */
    @Override
    public void onOpen(WebSocket webSocket) {
    }

    /** Nothing to do when a socket closes. {@inheritDoc} */
    @Override
    public void onClose(WebSocket webSocket) {
    }

    /** Logs the failure along with its HTTP status and message. {@inheritDoc} */
    @Override
    public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
        logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
    }

    /** This protocol never rewrites outgoing responses. {@inheritDoc} */
    @Override
    public boolean inspectResponse() {
        return false;
    }

    /** Never invoked because {@link #inspectResponse()} returns false. */
    @Override
    public String handleResponse(AtmosphereResponse<?> res, String message) {
        return message;
    }

    /** Never invoked because {@link #inspectResponse()} returns false. */
    @Override
    public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
        return message;
    }
}
Right
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
/**
 * Echo protocol variant that caches the {@link AtmosphereResource} captured in
 * {@link #onOpen} and broadcasts every received frame through that resource's
 * broadcaster.
 *
 * NOTE(review): the cached {@code resource} field makes this instance stateful.
 * If one EchoProtocol instance is shared across connections, a later onOpen
 * overwrites the field and earlier sockets echo through the wrong resource —
 * confirm the framework creates one protocol instance per socket.
 */
public class EchoProtocol implements WebSocketProtocol {

    private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);

    // Resource captured on open; read by both onMessage overloads.
    private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;

    /**
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
        logger.trace("broadcast String");
        // Uses the cached resource, not webSocket.resource().
        resource.getBroadcaster().broadcast(data);
        return null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
        logger.trace("broadcast byte");
        // Copy the payload slice so the caller's buffer can be reused.
        byte[] b = new byte[length];
        System.arraycopy(data, offset, b, 0, length);
        resource.getBroadcaster().broadcast(b);
        return null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void configure(AtmosphereServlet.AtmosphereConfig config) {
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onOpen(WebSocket webSocket) {
        // eurk!!
        this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onClose(WebSocket webSocket) {
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
        logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean inspectResponse() {
        return false;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String handleResponse(AtmosphereResponse<?> res, String message) {
        // Should never be called
        return message;
    }

    @Override
    public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
        // Should never be called
        return message;
    }
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
resource.getBroadcaster().broadcast(data);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
resource.getBroadcaster().broadcast(b);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
MergeMethods
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
KeepBothMethods
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Safe
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(data);
=======
resource.getBroadcaster().broadcast(data);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
<<<<<<< MINE
webSocket.resource().getBroadcaster().broadcast(b);
=======
resource.getBroadcaster().broadcast(b);
>>>>>>> YOURS
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Unstructured
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
<<<<<<< MINE
=======
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
>>>>>>> YOURS
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
webSocket.resource().getBroadcaster().broadcast(data);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
webSocket.resource().getBroadcaster().broadcast(b);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Simple {@link org.atmosphere.websocket.WebSocketProcessor} that invoke the {@link org.atmosphere.cpr.Broadcaster#broadcast} API when a WebSocket message
* is received.
* <p/>
* NOTE: If WebSocket frame are used the bytes will be decoded into a String, which reduce performance.
*
* @author Jeanfrancois Arcand
*/
public class EchoProtocol implements WebSocketProtocol {
private static final Logger logger = LoggerFactory.getLogger(EchoProtocol.class);
<<<<<<< MINE
=======
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
>>>>>>> YOURS
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String data) {
logger.trace("broadcast String");
webSocket.resource().getBroadcaster().broadcast(data);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] data, int offset, int length) {
logger.trace("broadcast byte");
byte[] b = new byte[length];
System.arraycopy(data, offset, b, 0, length);
webSocket.resource().getBroadcaster().broadcast(b);
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Diff Result
No diff
Case 4 - java_atmosphere.rev_c044f_2cf68..SimpleHttpProtocol.java
Base
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.HeaderConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketHttpServletResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereServlet.class);
private String contentType;
private String methodType;
private String delimiter;
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
@Override
public HttpServletRequest parseMessage(AtmosphereResource<HttpServletRequest, HttpServletResponse> resource, String d) {
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
@Override
public HttpServletRequest parseMessage(AtmosphereResource<HttpServletRequest, HttpServletResponse> resource, byte[] d, final int offset, final int length) {
return parseMessage(resource, new String(d,offset,length));
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.HeaderConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketHttpServletResponse;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(AtmosphereServlet.class);
private String contentType;
private String methodType;
private String delimiter;
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
@Override
public HttpServletRequest parseMessage(AtmosphereResource<HttpServletRequest, HttpServletResponse> resource, String d) {
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
@Override
public HttpServletRequest parseMessage(AtmosphereResource<HttpServletRequest, HttpServletResponse> resource, byte[] d, final int offset, final int length) {
return parseMessage(resource, new String(d,offset,length));
}
}
Left
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.HeaderConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.HeaderConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Right
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
MergeMethods
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
KeepBothMethods
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Safe
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
import org.atmosphere.cpr.AtmosphereResourceImpl;
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
=======
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
>>>>>>> YOURS
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
<<<<<<< MINE
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
Unstructured
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
<<<<<<< MINE
import org.atmosphere.cpr.AtmosphereResourceImpl;
=======
>>>>>>> YOURS
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
<<<<<<< MINE
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.HeaderConfig;
=======
>>>>>>> YOURS
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
<<<<<<< MINE
import java.util.HashMap;
import java.util.Map;
=======
>>>>>>> YOURS
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
=======
>>>>>>> YOURS
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Should never be called
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Should never be called
return message;
}
}
/*
* Copyright 2011 Jeanfrancois Arcand
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.websocket.protocol;
import org.atmosphere.cpr.ApplicationConfig;
import org.atmosphere.cpr.AtmosphereRequest;
import org.atmosphere.cpr.AtmosphereResource;
<<<<<<< MINE
import org.atmosphere.cpr.AtmosphereResourceImpl;
=======
>>>>>>> YOURS
import org.atmosphere.cpr.AtmosphereResponse;
import org.atmosphere.cpr.AtmosphereServlet;
<<<<<<< MINE
import org.atmosphere.cpr.FrameworkConfig;
import org.atmosphere.cpr.HeaderConfig;
=======
>>>>>>> YOURS
import org.atmosphere.websocket.WebSocket;
import org.atmosphere.websocket.WebSocketProcessor;
import org.atmosphere.websocket.WebSocketProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Serializable;
<<<<<<< MINE
import java.util.HashMap;
import java.util.Map;
=======
>>>>>>> YOURS
/**
* Like the {@link org.atmosphere.cpr.AsynchronousProcessor} class, this class is responsible for dispatching WebSocket messages to the
* proper {@link org.atmosphere.websocket.WebSocket} implementation by wrapping the Websocket message's bytes within
* an {@link javax.servlet.http.HttpServletRequest}.
* <p/>
* The content-type is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_CONTENT_TYPE} property
* The method is defined using {@link org.atmosphere.cpr.ApplicationConfig#WEBSOCKET_METHOD} property
* <p/>
*
* @author Jeanfrancois Arcand
*/
public class SimpleHttpProtocol implements WebSocketProtocol, Serializable {
private static final Logger logger = LoggerFactory.getLogger(SimpleHttpProtocol.class);
private String contentType;
private String methodType;
private String delimiter;
private AtmosphereResource<HttpServletRequest, HttpServletResponse> resource;
/**
* {@inheritDoc}
*/
@Override
public void configure(AtmosphereServlet.AtmosphereConfig config) {
String contentType = config.getInitParameter(ApplicationConfig.WEBSOCKET_CONTENT_TYPE);
if (contentType == null) {
contentType = "text/html";
}
this.contentType = contentType;
String methodType = config.getInitParameter(ApplicationConfig.WEBSOCKET_METHOD);
if (methodType == null) {
methodType = "POST";
}
this.methodType = methodType;
String delimiter = config.getInitParameter(ApplicationConfig.WEBSOCKET_PATH_DELIMITER);
if (delimiter == null) {
delimiter = "@@";
}
this.delimiter = delimiter;
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, String d) {
<<<<<<< MINE
AtmosphereResourceImpl resource = (AtmosphereResourceImpl) webSocket.resource();
=======
>>>>>>> YOURS
if (resource == null) {
logger.error("Invalid state. No AtmosphereResource has been suspended");
return null;
}
String pathInfo = resource.getRequest().getPathInfo();
if (d.startsWith(delimiter)) {
String[] token = d.split(delimiter);
pathInfo = token[1];
d = token[2];
}
Map<String,Object> m = new HashMap<String, Object>();
m.put(FrameworkConfig.WEBSOCKET_SUBPROTOCOL, FrameworkConfig.SIMPLE_HTTP_OVER_WEBSOCKET);
return new AtmosphereRequest.Builder()
.request(resource.getRequest())
.method(methodType)
.contentType(contentType)
.body(d)
.attributes(m)
.pathInfo(pathInfo)
.headers(WebSocketProcessor.configureHeader(resource.getRequest()))
.build();
}
/**
* {@inheritDoc}
*/
@Override
public AtmosphereRequest onMessage(WebSocket webSocket, byte[] d, final int offset, final int length) {
return onMessage(webSocket, new String(d, offset, length));
}
/**
* {@inheritDoc}
*/
@Override
public void onOpen(WebSocket webSocket) {
<<<<<<< MINE
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.warn(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
=======
// eurk!!
this.resource = (AtmosphereResource<HttpServletRequest, HttpServletResponse>) webSocket.resource();
}
/**
* {@inheritDoc}
*/
@Override
public void onClose(WebSocket webSocket) {
}
/**
* {@inheritDoc}
*/
@Override
public void onError(WebSocket webSocket, WebSocketProcessor.WebSocketException t) {
logger.error(t.getMessage() + " Status {} Message {}", t.response().getStatus(), t.response().getStatusMessage());
>>>>>>> YOURS
}
/**
* {@inheritDoc}
*/
@Override
public boolean inspectResponse() {
// This protocol never rewrites responses, so opt out of inspection entirely;
// as a result handleResponse() is never routed to.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public String handleResponse(AtmosphereResponse<?> res, String message) {
// Unreachable in practice: inspectResponse() returns false, so the framework
// never routes responses here. Pass the message through unchanged.
return message;
}
@Override
public byte[] handleResponse(AtmosphereResponse<?> res, byte[] message, int offset, int length) {
// Unreachable in practice: inspectResponse() returns false.
// NOTE(review): offset/length are ignored and the whole array is returned;
// acceptable only because this method is never invoked — confirm if that changes.
return message;
}
}
Diff Result
No diff
Case 5 - java_cassandra.rev_09e54_b8578..SSTableReader.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoc and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, Long> keyCache;
private BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables by scaling each
 * sstable's sampled-index size by the index interval.
 *
 * @param sstables sstables to sum over
 * @return the approximate key count (an overestimate by up to one interval per sstable)
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // Each sampled entry stands for ~indexInterval real keys. Use a long
        // literal so the multiplication is done in long arithmetic and cannot
        // overflow int for very large sstables.
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable, resolving its CFMetaData from the schema. A '.' in the
 * cfname marks a secondary-index sstable ("parent.indexName"), whose metadata
 * is derived from the parent column family's index definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    int dot = descriptor.cfname.indexOf('.');
    if (dot >= 0)
    {
        String parentName = descriptor.cfname.substring(0, dot);
        String indexName = descriptor.cfname.substring(dot + 1);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(indexName);
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    else
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with explicit metadata. Secondary-index sstables (cfname
 * contains '.') are ordered by their own key validator via a LocalPartitioner
 * rather than the cluster-wide partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains("."))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/**
 * Convenience overload: opens with no pre-warmed keys and no DataTracker
 * (so no key-cache population or deletion tracking). See the six-argument
 * overload for the full loading sequence.
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, partitioner);
}
/**
 * Fully opens an sstable: validates required components, loads (or defaults)
 * the statistics metadata, checks the partitioner matches, then builds the
 * reader and loads its index summary and bloom filter.
 *
 * @param savedKeys keys to pre-populate into the key cache while scanning the index
 * @param tracker   may be null; enables key-cache and deletion tracking when present
 * @throws IOException on any failure reading the sstable components
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS is optional; older sstables fall back to a default metadata instance.
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
// ifile/dfile/indexSummary/bf are null here; load() below fills them in.
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.hasStringsInBloomFilter)
{
sstable.load(true, savedKeys);
}
else
{
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failure to open an sstable. A missing component usually means the
 * sstable disappeared out from under us, so only genuine corruption gets the
 * full stack trace.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
    }
    else
    {
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
    }
}
/**
 * Opens many sstables in parallel on a fixed-size pool (one thread per core).
 * Unreadable sstables are logged and skipped rather than failing the batch.
 *
 * @return the successfully opened readers, in no particular order
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final Set<DecoratedKey> savedKeys,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    // LinkedBlockingQueue gives us a thread-safe collection for the workers.
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), savedKeys, tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // Best-effort: skip this sstable, keep opening the rest.
                    logger.error("Corrupt sstable " + entry + "; skipped", ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt status before escalating, so callers and the
        // pool can still observe that the thread was interrupted.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    return sstables;
}
/**
* Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
*/
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
this.deletingTask = new SSTableDeletingTask(this);
}
public void setTrackedBy(DataTracker tracker)
{
if (tracker != null)
{
keyCache = CacheService.instance.keyCache;
deletingTask.setTracker(tracker);
}
}
/**
 * Deserializes the bloom filter from the FILTER component. When the component
 * is absent, installs an empty filter so every lookup falls through to the
 * index. Older sstable versions use the legacy filter serialization.
 */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        bf = BloomFilter.emptyFilter();
        return;
    }
    DataInputStream stream = null;
    try
    {
        FileInputStream fis = new FileInputStream(descriptor.filenameFor(Component.FILTER));
        stream = new DataInputStream(new BufferedInputStream(fis));
        bf = descriptor.usesOldBloomFilter
           ? LegacyBloomFilter.serializer().deserialize(stream)
           : BloomFilter.serializer().deserialize(stream);
    }
    finally
    {
        FileUtils.closeQuietly(stream);
    }
}
/**
* Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
*/
/**
 * Builds the reader's in-memory state by scanning the primary index once:
 * the index summary, the first/last keys, the segment boundaries for the
 * index and data files, optionally a rebuilt bloom filter, and optionally
 * pre-warmed key-cache entries for {@code keysToLoadInCache}.
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
DecoratedKey left = null, right = null;
try
{
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
// Prefer the stats histogram count; an overflowed histogram is unreliable.
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// lastKey: this entry (short length + key bytes + long position) ends the file.
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// Only pay to decode the key when something below actually needs it;
// otherwise skip straight over the key bytes.
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
{
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
}
else
{
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null)
{
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
assert this.first.compareTo(this.last) <= 0: String.format("SSTable first key %s > last key %s", this.first, this.last);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
private long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // Key is not itself a sample: binarySearch returned (-(insertionPoint) - 1).
    // Recover the insertion point and start scanning from the sample before it;
    // -1 means the key sorts before every sample.
    int insertionPoint = -index - 1;
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
* Returns the compression metadata for this sstable.
* @throws IllegalStateException if the sstable is not compressed
*/
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
* For testing purposes only.
*/
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
public Filter getBloomFilter()
{
return bf;
}
public long getBloomFilterSerializedSize()
{
if (descriptor.usesOldBloomFilter)
return LegacyBloomFilter.serializer().serializedSize((LegacyBloomFilter) bf);
else
return BloomFilter.serializer().serializedSize((BloomFilter) bf);
}
/**
* @return An estimate of the number of keys in this SSTable.
*/
/**
 * @return An estimate of the number of keys in this SSTable.
 */
public long estimatedKeys()
{
    // Cast before multiplying: size() * interval is int arithmetic and could
    // overflow for very large sstables even though the result is a long.
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
* @param ranges
* @return An estimate of the number of keys for given ranges in this SSTable.
*/
/**
 * @param ranges ranges to intersect with this sstable's sampled keys
 * @return An estimate of the number of keys for given ranges in this SSTable
 *         (never less than 1).
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    long sampled = 0;
    // Each sampled index slot [left, right] contributes (right - left + 1) samples.
    for (Pair<Integer, Integer> indexRange : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        sampled += indexRange.right - indexRange.left + 1;
    // Every sample stands in for ~indexInterval real keys.
    return Math.max(1, sampled * DatabaseDescriptor.getIndexInterval());
}
/**
* @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
*/
public Collection<DecoratedKey<?>> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each normalized token range onto the [left, right] slice of the sampled
 * keys it covers (ranges are start-exclusive, end-inclusive). Ranges that
 * cover no samples are dropped.
 *
 * @return one inclusive index pair per covered range; empty if there are no samples
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey<?>> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        // A wrapping range extends to the last sample by definition.
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // Parameterized Pair (was a raw-type construction); autoboxing replaces
        // the explicit Integer.valueOf calls with identical behavior.
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
public Iterable<DecoratedKey<?>> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey<?>> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey<?>>()
{
public Iterator<DecoratedKey<?>> iterator()
{
return new Iterator<DecoratedKey<?>>()
{
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
if (current == null || idx > current.right)
{
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey<?>)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
* Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
* @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
*/
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        long left = getPosition(keyRange.left, Operator.GT);
        if (left == -1)
            // left is past the end of the file
            continue;
        long right = getPosition(keyRange.right, Operator.GT);
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // Parameterized Pair (was a raw-type construction); autoboxing replaces
        // the explicit Long.valueOf calls with identical behavior.
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
public void cacheKey(DecoratedKey key, Long info)
{
CFMetaData.Caching caching = metadata.getCaching();
if (keyCache == null
|| caching == CFMetaData.Caching.NONE
|| caching == CFMetaData.Caching.ROWS_ONLY
|| keyCache.getCapacity() == 0)
return;
// avoid keeping a permanent reference to the original key buffer
keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
public Long getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
private Long getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
* Get position updating key cache and stats.
* @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
*/
public long getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
* @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
* allow key selection by token bounds but only if op != * EQ
* @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
* @param updateCacheAndStats true if updating stats and cache
* @return The position in the data file to find the key, or -1 if the key is not present
*/
public long getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return -1;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
Long cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
// The bloom filter said "maybe" but the key sorts before every sample,
// so for EQ this was a filter false positive.
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, return the 0th position
return op.apply(1) >= 0 ? 0 : -1;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
long dataPosition = input.readLong();
int comparison = indexDecoratedKey.compareTo(key);
// Operator contract: 0 = match here, < 0 = can no longer match, > 0 = keep scanning.
int v = op.apply(comparison);
if (v == 0)
{
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, dataPosition);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return dataPosition;
}
if (v < 0)
{
// Scanned past where the key would be: definitively absent.
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
}
}
catch (IOException e)
{
// Flag this sstable for blacklisting before surfacing the failure.
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// Exhausted the index without a decision: the key is not present.
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
/**
* @return The length in bytes of the data for this SSTable. For
* compressed files, this is not the same thing as the on disk size (see
* onDiskLength())
*/
public long uncompressedLength()
{
return dfile.length;
}
/**
* @return The length in bytes of the on disk size for this SSTable. For
* compressed files, this is not the same thing as the data length (see
* length())
*/
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Attempts to take a reference on this reader via a CAS retry loop.
 *
 * @return true on success; false if the count has already reached zero,
 *         meaning the underlying files may be scheduled for deletion.
 */
public boolean acquireReference()
{
    for (int n = references.get(); n > 0; n = references.get())
    {
        if (references.compareAndSet(n, n + 1))
            return true;
    }
    return false;
}
public void releaseReference()
{
// Only when the last reference is dropped AND the sstable has been compacted
// away is it safe to unmap the files and delete the on-disk components.
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
* Mark the sstable as compacted.
* When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
* except for threads holding a reference.
*
* @return true if the this is the first time the file was marked compacted. With rare exceptions
* (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
*/
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet guarantees exactly one caller observes the false->true transition.
if (isCompacted.getAndSet(true))
return false;
try
{
// The marker file tells a restarting node that this sstable is garbage.
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/**
 * Flags this sstable as suspect (e.g. after an I/O error) so it can be
 * blacklisted. Idempotent; never unset for the lifetime of this reader.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    // Plain set(): the previous value returned by getAndSet was being ignored.
    isSuspect.set(true);
}
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
*
* @param filter filter to use when reading the columns
* @return A Scanner for seeking over the rows of the SSTable.
*/
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
* Direct I/O SSTableScanner
* @return A Scanner for seeking over the rows of the SSTable.
*/
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
* Direct I/O SSTableScanner over a defined range of tokens.
*
* @param range the range of keys to cover
* @return A Scanner for seeking over the rows of the SSTable.
*/
public SSTableScanner getDirectScanner(Range<Token> range)
{
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
public FileDataInput getFileDataInput(DecoratedKey decoratedKey)
{
long position = getPosition(decoratedKey, Operator.EQ);
if (position < 0)
return null;
return dfile.getSegment(position);
}
/**
* Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
* This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
* @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
* @return True iff this sstable contains data that's newer than the given age parameter.
*/
public boolean newSince(long age)
{
return maxDataAge > age;
}
/**
 * Reads a row-size field from the input, honoring the sstable version:
 * older versions stored it as a 4-byte int, newer ones as an 8-byte long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    return d.hasIntRowSize ? in.readInt() : in.readLong();
}
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File sourceFile = new File(descriptor.filenameFor(component));
File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
CLibrary.createHardLink(sourceFile, targetLink);
}
}
/**
* Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
*/
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method:
 * legacy sstables stored keys in an on-disk encoding that must be converted
 * before decoration.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
/**
* TODO: Move someplace reusable
*/
/**
 * TODO: Move someplace reusable
 *
 * A match predicate applied to the result of compareTo(candidate, target):
 * apply() returns 0 on a match, a negative value once no later candidate can
 * match, and a positive value while scanning forward may still succeed.
 */
public abstract static class Operator
{
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    // Exact match only: a candidate past the target (comparison > 0) reads as
    // "cannot match forward" via the sign flip.
    final static class Equals extends Operator
    {
        public int apply(int comparison)
        {
            return -comparison;
        }
    }

    // Matches the first candidate at or after the target.
    final static class GreaterThanOrEqualTo extends Operator
    {
        public int apply(int comparison)
        {
            return comparison < 0 ? -comparison : 0;
        }
    }

    // Matches the first candidate strictly after the target.
    final static class GreaterThan extends Operator
    {
        public int apply(int comparison)
        {
            return comparison <= 0 ? 1 : 0;
        }
    }
}
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
public InstrumentingCache<KeyCacheKey, Long> getKeyCache()
{
return keyCache;
}
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/**
* @param sstables
* @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
*/
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    // First pass: take references until one refuses, remembering where we stopped.
    SSTableReader failedAt = null;
    for (SSTableReader reader : sstables)
    {
        if (!reader.acquireReference())
        {
            failedAt = reader;
            break;
        }
    }
    if (failedAt == null)
        return true;
    // Second pass: roll back every reference taken before the failure point.
    for (SSTableReader reader : sstables)
    {
        if (reader == failedAt)
            break;
        reader.releaseReference();
    }
    return false;
}
/**
 * Releases one reference on each of the given readers. A failure on one
 * reader is logged and does not prevent the remaining releases.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
    for (SSTableReader reader : sstables)
    {
        try
        {
            reader.releaseReference();
        }
        catch (Exception ex)
        {
            logger.error("Failed releasing reference on " + reader, ex);
        }
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoc and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, Long> keyCache;
private BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys in the given sstables from the size of
 * each sstable's index sample: each sampled key stands for roughly one
 * index-interval's worth of keys on disk.
 *
 * @param sstables sstables to estimate over
 * @return the estimated key count
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // Widen to long before multiplying: (indexKeyCount + 1) * interval is an
        // int multiply in the original and can overflow for very large sstables.
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable given only its descriptor, resolving the CFMetaData from
 * the live schema. A cfname containing '.' denotes a secondary-index sstable
 * ("parent.indexName"), whose metadata is derived from the parent CF.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
CFMetaData metadata;
if (descriptor.cfname.contains("."))
{
int i = descriptor.cfname.indexOf(".");
String parentName = descriptor.cfname.substring(0, i);
// NOTE(review): assumes the parent CF is still defined in the schema; a null
// 'parent' here would NPE on the next line -- confirm callers guarantee this.
CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
}
else
{
metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
}
return open(descriptor, metadata);
}
/**
 * Opens an sstable with an explicitly supplied CFMetaData, choosing the
 * appropriate partitioner for it.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    // Secondary-index sstables (cfname contains '.') are ordered by the indexed
    // value, so they use a LocalPartitioner over the key validator rather than
    // the cluster-wide partitioner.
    IPartitioner partitioner;
    if (desc.cfname.contains("."))
        partitioner = new LocalPartitioner(metadata.getKeyValidator());
    else
        partitioner = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, partitioner);
}
/**
 * Convenience overload: opens with no pre-saved keys to warm into the key
 * cache and no DataTracker.
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, partitioner);
}
/**
 * Fully opens an sstable: loads its persisted statistics, validates that it
 * was written with the same partitioner, builds the in-memory index summary
 * (rebuilding the bloom filter for very old formats), and optionally warms
 * the key cache with the given saved keys.
 *
 * @param savedKeys keys to pre-load into the key cache while scanning the index
 * @param tracker notified of this sstable's lifecycle; may be null
 * @throws IOException on any failure reading the sstable's components
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// Statistics are optional (older sstables may lack the STATS component).
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
// Segment files / summary / filter start null; load() below fills them in.
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter,
// so the on-disk filter is unusable and must be rebuilt while loading
if (descriptor.hasStringsInBloomFilter)
{
sstable.load(true, savedKeys);
}
else
{
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failure to open an sstable. A missing component file usually means
 * the sstable was only partially written or streamed; anything else is
 * treated as corruption. Either way the sstable is skipped.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    // Parameterized SLF4J calls instead of string concatenation; the corrupt
    // case keeps the full stack trace by passing the throwable last.
    if (e instanceof FileNotFoundException)
        logger.error("Missing sstable component in {}; skipped because of {}", descriptor, e.getMessage());
    else
        logger.error("Corrupt sstable {}; skipped", descriptor, e);
}
/**
 * Opens a set of sstables in parallel (one task per sstable, pool sized to
 * the number of available processors) and returns the readers that opened
 * successfully; sstables that fail to open are logged and skipped.
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final Set<DecoratedKey> savedKeys,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    // LinkedBlockingQueue: safe for concurrent add() from the worker tasks.
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), savedKeys, tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // Consistent with single-sstable opens: logOpenException
                    // distinguishes a missing component from real corruption.
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        // Effectively "wait forever": opening finishes long before this bound.
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * The segment files, index summary and bloom filter are supplied by the
 * caller, so no additional disk I/O is performed here.
 */
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
// Private: instances are created via the open()/internalOpen() factories so
// that loading and validation always happen in a controlled order.
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
// ifile/dfile/indexSummary/bf may be null here; open() fills them via load().
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker and wires up the shared key
 * cache. A null tracker (offline/standalone use) leaves the key cache off.
 */
public void setTrackedBy(DataTracker tracker)
{
if (tracker != null)
{
keyCache = CacheService.instance.keyCache;
deletingTask.setTracker(tracker);
}
}
/**
 * Deserializes the bloom filter from the FILTER component, falling back to an
 * empty filter when the component is absent.
 */
void loadBloomFilter() throws IOException
{
    // Without a FILTER component there is nothing to deserialize; an empty
    // filter simply disables the bloom-filter fast path on reads.
    if (!components.contains(Component.FILTER))
    {
        bf = BloomFilter.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try
    {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        // Older descriptors use the legacy on-disk bloom filter format.
        bf = descriptor.usesOldBloomFilter
           ? LegacyBloomFilter.serializer().deserialize(in)
           : BloomFilter.serializer().deserialize(in);
    }
    finally
    {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Performs a single sequential scan of the primary index, during which it
 * samples keys into the index summary, records segment boundaries for the
 * index and data files, determines the first/last keys, optionally rebuilds
 * the bloom filter, and pre-loads requested keys into the key cache.
 *
 * @param recreatebloom true to rebuild the bloom filter from the index keys
 * @param keysToLoadInCache keys whose data positions should be warmed into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
DecoratedKey left = null, right = null;
try
{
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// this entry is the last one iff its length field + key + position reach exactly the end of the index
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// Only materialize the key when something actually needs it; otherwise skip the bytes.
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
{
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
}
else
{
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null)
{
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
assert this.first.compareTo(this.last) <= 0: String.format("SSTable first key %s > last key %s", this.first, this.last);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
private long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // Not an exact match: binarySearch returns (-(insertion point) - 1), so the
    // insertion point is the index of the first sampled key greater than 'key'.
    int insertionPoint = -index - 1;
    if (insertionPoint == 0)
        return -1; // key sorts before every sampled key
    // Start scanning from the sample immediately before the insertion point.
    return indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
if (!compression)
throw new IllegalStateException(this + " is not compressed");
return ((CompressedSegmentedFile)dfile).metadata;
}
/**
 * For testing purposes only.
 * Swaps in a filter that answers "present" for every key, forcing reads past
 * the bloom-filter fast path.
 */
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// Current bloom filter (never null once the sstable has been loaded).
public Filter getBloomFilter()
{
return bf;
}
// Serialized size of the filter, using whichever serializer matches the
// descriptor's filter format.
public long getBloomFilterSerializedSize()
{
if (descriptor.usesOldBloomFilter)
return LegacyBloomFilter.serializer().serializedSize((LegacyBloomFilter) bf);
else
return BloomFilter.serializer().serializedSize((BloomFilter) bf);
}
/**
 * @return An estimate of the number of keys in this SSTable.
 */
public long estimatedKeys()
{
    // The summary holds roughly one key per INDEX_INTERVAL keys on disk.
    // Widen to long before multiplying: size() * interval is an int multiply
    // in the original and can overflow for very large sstables.
    return indexSummary.getKeys().size() * (long) DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
long sampleKeyCount = 0;
List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
// Each covered sample stands for ~one index interval of keys on disk.
for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
// Never report zero: callers divide by / reason about this estimate.
return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey<?>> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [left, right] span of
 * index-summary samples it covers.
 *
 * @param samples sorted list of sampled keys from the index summary
 * @param ranges token ranges of interest
 * @return one (left, right) sample-index pair per range that actually
 *         intersects the sample list
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey<?>> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // Parameterized Pair constructor (the raw "new Pair(...)" produced an
        // unchecked warning); autoboxing replaces explicit Integer.valueOf.
        positions.add(new Pair<Integer,Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the sampled keys that fall inside the given token range.
 * Backed directly by the index summary's sample list (no copying).
 */
public Iterable<DecoratedKey<?>> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey<?>> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey<?>>()
{
public Iterator<DecoratedKey<?>> iterator()
{
return new Iterator<DecoratedKey<?>>()
{
// walks each inclusive [left, right] sample-index interval in order
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
if (current == null || idx > current.right)
{
// current interval exhausted; advance to the next one, if any
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey<?>)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        long left = getPosition(keyRange.left, Operator.GT);
        if (left == -1)
            // left is past the end of the file
            continue;
        long right = getPosition(keyRange.right, Operator.GT);
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // Parameterized Pair constructor (the raw "new Pair(...)" produced an
        // unchecked warning); autoboxing replaces explicit Long.valueOf.
        positions.add(new Pair<Long,Long>(left, right));
    }
    return positions;
}
/**
 * Records the data-file position of the given key in the shared key cache.
 * No-op when key caching is disabled for this column family or the cache has
 * zero capacity.
 */
public void cacheKey(DecoratedKey key, Long info)
{
CFMetaData.Caching caching = metadata.getCaching();
if (keyCache == null
|| caching == CFMetaData.Caching.NONE
|| caching == CFMetaData.Caching.ROWS_ONLY
|| keyCache.getCapacity() == 0)
return;
// avoid keeping a permanent reference to the original key buffer
keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// Cached data-file position for the key, or null on a miss / disabled cache.
public Long getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
// updateStats == false reads the cache without touching hit-rate statistics.
private Long getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public long getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
 * Finds the data-file position for the key matching the operator, consulting
 * (in order) the bloom filter, the key cache, the sampled in-memory index and
 * finally the on-disk index.
 *
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or -1 if the key is not present
 */
public long getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return -1;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
Long cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, return the 0th position
return op.apply(1) >= 0 ? 0 : -1;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
long dataPosition = input.readLong();
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0)
{
// the operator matched this index entry
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, dataPosition);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return dataPosition;
}
if (v < 0)
{
// the operator can no longer match further along the sorted index
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
}
}
catch (IOException e)
{
// an index read error taints the whole sstable
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// exhausted the index without the operator ever matching
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Attempts to take one reference on this sstable.
 *
 * @return false once the count has already dropped to zero (the files may be
 *         deleted at any moment), true otherwise.
 */
public boolean acquireReference()
{
    // CAS retry loop: only bump the count while it is still positive.
    for (int current = references.get(); current > 0; current = references.get())
    {
        if (references.compareAndSet(current, current + 1))
            return true;
    }
    return false;
}
/**
 * Drops one reference. When the count reaches zero and the sstable has been
 * marked compacted, the underlying files are cleaned up and scheduled for
 * deletion.
 */
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes the "first to mark" decision atomic across threads
if (isCompacted.getAndSet(true))
return false;
try
{
// the marker file tells a restarting node this sstable's data is obsolete
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/**
 * Flags this sstable as suspect (e.g. after an I/O error while reading it) so
 * that callers can blacklist it.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    // set() instead of getAndSet(): the previous value was being discarded anyway.
    isSuspect.set(true);
}
/** @return whether this sstable has been flagged as suspect */
public boolean isMarkedSuspect()
{
    return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
// Positions a data-file input at the row for the given key, or returns null
// when the key is not present in this sstable.
public FileDataInput getFileDataInput(DecoratedKey decoratedKey)
{
long position = getPosition(decoratedKey, Operator.EQ);
if (position < 0)
return null;
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
/**
 * Reads a serialized row size from the given input, honoring the on-disk
 * width used by the sstable version.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    // Old descriptors (hasIntRowSize) stored row sizes as 32-bit ints.
    return d.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot
 * directory, so the snapshot shares the on-disk data without copying it.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File sourceFile = new File(descriptor.filenameFor(component));
File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
CLibrary.createHardLink(sourceFile, targetLink);
}
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 * Keys written by very old sstable versions (hasEncodedKeys) use a
 * partitioner-specific on-disk encoding; newer ones are decorated directly.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
/**
 * TODO: Move someplace reusable
 * Tiny comparison-operator abstraction used while scanning the sorted index:
 * given the result of comparing an index key against the target, apply()
 * reports whether the scan matched, may still match forward, or has gone past.
 */
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
 * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
 * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
 */
public abstract int apply(int comparison);
// matches only on an exact comparison; a positive comparison (index key past
// the target) yields a negative result, stopping the scan
final static class Equals extends Operator
{
public int apply(int comparison) { return -comparison; }
}
// matches the first index key >= the target
final static class GreaterThanOrEqualTo extends Operator
{
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
// matches the first index key strictly > the target; never gives up early
final static class GreaterThan extends Operator
{
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
// --- bloom filter effectiveness counters (delegated to BloomFilterTracker) ---
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
// Shared key cache; null when this reader is not tracked (see setTrackedBy).
public InstrumentingCache<KeyCacheKey, Long> getKeyCache()
{
return keyCache;
}
// --- accessors for the statistics loaded from the STATS component ---
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
/**
 * Opens a fresh reader over the data file; compressed sstables get a reader
 * that transparently decompresses chunks.
 */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
SSTableReader failed = null;
for (SSTableReader sstable : sstables)
{
if (!sstable.acquireReference())
{
failed = sstable;
break;
}
}
if (failed == null)
return true;
// roll back: release only the references acquired before the failure
for (SSTableReader sstable : sstables)
{
if (sstable == failed)
break;
sstable.releaseReference();
}
return false;
}
/**
 * Releases one reference on each sstable; failures are logged so one bad
 * reader cannot prevent the rest from being released.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
for (SSTableReader sstable : sstables)
{
try
{
sstable.releaseReference();
}
catch (Exception ex)
{
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoc and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, Long> keyCache;
private BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables from their index
 * samples: each sampled key stands in for roughly INDEX_INTERVAL real keys.
 *
 * @param sstables sstables to estimate over
 * @return estimated total key count
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen before multiplying: (indexKeyCount + 1) * interval is int arithmetic
        // in the original and can overflow for very large sstables
        count = count + (indexKeyCount + 1) * (long) DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens the sstable named by the descriptor, resolving its CFMetaData from the schema.
 * A column family name of the form "parent.index" denotes a secondary-index sstable,
 * whose metadata is rebuilt from the parent CF's column definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    int dot = descriptor.cfname.indexOf(".");
    if (dot < 0)
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    else
    {
        // "<parent>.<index>": derive the index CF metadata from the parent definition
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname.substring(0, dot));
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(dot + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with known metadata, choosing the partitioner: secondary-index
 * sstables (cfname contains '.') are locally partitioned by their key validator,
 * everything else uses the cluster-wide partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains("."))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
// Opens without first/last key-order validation and with no key-cache preload;
// for callers that must be able to read possibly-invalid sstables.
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
// Convenience overload: open with validation, no tracker, and no key-cache preload.
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, partitioner);
}
// Convenience overload: always validates key ordering (validate = true).
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
/**
 * Shared open path: reads the STATS component (if present), verifies the sstable
 * was written with the same partitioner, then loads the index/data segments and
 * bloom filter, and optionally validates key ordering.
 *
 * @param validate when true, fail if the first key sorts after the last key
 */
private static SSTableReader open(Descriptor descriptor,
Set<Component> components,
Set<DecoratedKey> savedKeys,
DataTracker tracker,
CFMetaData metadata,
IPartitioner partitioner,
boolean validate) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
// ifile/dfile/indexSummary/bf are null here; they are populated by load()/loadBloomFilter() below
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter,
// so the on-disk filter is unusable and must be rebuilt from the index
if (descriptor.hasStringsInBloomFilter)
{
sstable.load(true, savedKeys);
}
else
{
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failed sstable open: a missing component gets a short message,
 * anything else is reported as corruption with the full stack trace.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Opens a set of sstables in parallel (one task per sstable, pool sized to the
 * number of available processors) and returns those that opened successfully;
 * sstables that fail to open are logged and skipped.
 *
 * @return the successfully opened readers; ordering is not meaningful
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final Set<DecoratedKey> savedKeys,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), savedKeys, tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // keep skip-logging consistent with single-file opens
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        Thread.currentThread().interrupt(); // preserve interrupt status for callers above us
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * Unlike the open() paths, nothing is read from disk here: all segments, the index
 * summary and the bloom filter are handed in fully built, and so must be non-null.
 */
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
// Sole constructor; ifile/dfile/indexSummary/bloomFilter may be null when the
// instance will be populated later by load()/loadBloomFilter() (see open()).
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// the deleting task is created eagerly but only scheduled once the reader is
// both compacted and fully unreferenced (see releaseReference())
this.deletingTask = new SSTableDeletingTask(this);
}
// Associates this reader with a DataTracker: enables the shared key cache and
// routes the eventual file deletion through the tracker. A null tracker is a no-op.
public void setTrackedBy(DataTracker tracker)
{
if (tracker != null)
{
keyCache = CacheService.instance.keyCache;
deletingTask.setTracker(tracker);
}
}
/**
 * Reads the bloom filter from the FILTER component, choosing the serializer by
 * sstable version; when no FILTER component exists an empty filter is installed.
 */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        bf = BloomFilter.emptyFilter();
        return;
    }
    DataInputStream stream = null;
    try
    {
        stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        bf = descriptor.usesOldBloomFilter
           ? LegacyBloomFilter.serializer().deserialize(stream)
           : BloomFilter.serializer().deserialize(stream);
    }
    finally
    {
        FileUtils.closeQuietly(stream);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * Performs a single sequential pass over the primary index, simultaneously:
 * sampling keys into the index summary, (optionally) repopulating the bloom
 * filter, preloading saved keys into the key cache, capturing the first and
 * last key, and recording segment boundaries for the index and data files.
 *
 * @param recreatebloom rebuild the bloom filter from the index instead of
 *                      reading the on-disk FILTER component
 * @param keysToLoadInCache keys whose data positions should be pre-cached
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
DecoratedKey left = null, right = null;
try
{
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// last entry iff this key + its long data-position ends exactly at EOF
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// only materialize the key when some consumer needs it; otherwise skip the bytes
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
{
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
}
else
{
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null)
{
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
// Open-time sanity check: a well-formed sstable always has first <= last.
private void validate()
{
if (this.first.compareTo(this.last) > 0)
throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/**
 * Returns the position in the index file to start scanning to find the given key
 * (at most indexInterval keys away), or -1 if the key sorts before every sampled key.
 */
private long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // a negative result encodes the insertion point: the first sample greater
    // than the key; scan from the sample just before it
    int insertionPoint = -index - 1;
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only.
 * Swaps in an always-matching filter so every lookup falls through to the index.
 */
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// Exposes the row-key bloom filter (never null after load()/loadBloomFilter()).
public Filter getBloomFilter()
{
return bf;
}
// Serialized on-disk size of the bloom filter, using the serializer matching
// this sstable's filter format version.
public long getBloomFilterSerializedSize()
{
    return descriptor.usesOldBloomFilter
         ? LegacyBloomFilter.serializer().serializedSize((LegacyBloomFilter) bf)
         : BloomFilter.serializer().serializedSize((BloomFilter) bf);
}
/**
 * @return An estimate of the number of keys in this SSTable, derived from the
 *         index-summary sample count (each sample represents ~INDEX_INTERVAL keys).
 */
public long estimatedKeys()
{
    // widen before multiplying: sample count * interval is int arithmetic in the
    // original and can overflow for very large sstables
    return indexSummary.getKeys().size() * (long) DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges ranges to estimate over
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    long sampledKeys = 0;
    // each (left, right) pair is an inclusive span of index-summary samples
    for (Pair<Integer, Integer> span : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        sampledKeys += span.right - span.left + 1;
    return Math.max(1, sampledKeys * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey<?>> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each normalized range onto the inclusive span of index-summary samples it
 * covers, expressed as (leftIndex, rightIndex) pairs into {@code samples}.
 * Ranges that cover no sample produce no pair.
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey<?>> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized Pair (the original used a raw 'new Pair(...)')
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the index-summary samples falling inside the given range.
 * Returns an empty iterable when no sample intersects the range.
 */
public Iterable<DecoratedKey<?>> getKeySamples(final Range<Token> range)
{
    final List<DecoratedKey<?>> samples = indexSummary.getKeys();
    final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
    if (indexRanges.isEmpty())
        return Collections.emptyList();
    return new Iterable<DecoratedKey<?>>()
    {
        public Iterator<DecoratedKey<?>> iterator()
        {
            return new Iterator<DecoratedKey<?>>()
            {
                private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
                private Pair<Integer, Integer> current;
                private int idx;
                public boolean hasNext()
                {
                    // advance to the next sample span when the current one is exhausted
                    if (current == null || idx > current.right)
                    {
                        if (rangeIter.hasNext())
                        {
                            current = rangeIter.next();
                            idx = current.left;
                            return true;
                        }
                        return false;
                    }
                    return true;
                }
                // declared DecoratedKey<?> to match Iterator<DecoratedKey<?>> (the
                // original used the raw type DecoratedKey here)
                public DecoratedKey<?> next()
                {
                    RowPosition k = samples.get(idx++);
                    // the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
                    assert k instanceof DecoratedKey;
                    return (DecoratedKey<?>)k;
                }
                public void remove()
                {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        long left = getPosition(keyRange.left, Operator.GT);
        if (left == -1)
            // left is past the end of the file
            continue;
        long right = getPosition(keyRange.right, Operator.GT);
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair (the original used a raw 'new Pair(...)')
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Stores the data-file position for a key in the shared key cache, unless key
 * caching is disabled for this column family or the cache has zero capacity.
 */
public void cacheKey(DecoratedKey key, Long info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    boolean keyCachingOff = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
    if (keyCache == null || keyCachingOff || keyCache.getCapacity() == 0)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// Looks up the cached data-file position for a row key, or null on a miss.
// updateStats controls whether the lookup counts toward cache hit/miss metrics.
public Long getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
// Cache lookup shared by the public overloads; returns null when the cache is
// absent or disabled. getInternal() bypasses hit/miss accounting.
private Long getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
    if (keyCache == null || keyCache.getCapacity() <= 0)
        return null;
    return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public long getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or -1 if the key is not present
 *
 * Lookup order: bloom filter (EQ only), then key cache, then the sampled index
 * summary, then a bounded scan of the on-disk primary index.
 */
public long getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return -1;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
Long cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, return the 0th position
return op.apply(1) >= 0 ? 0 : -1;
}
// scan the on-disk index, starting at the nearest sampled position;
// the scan visits at most ~indexInterval entries before deciding
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
long dataPosition = input.readLong();
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0)
{
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, dataPosition);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return dataPosition;
}
if (v < 0)
{
// the operator can no longer match further forward: give up
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
}
}
catch (IOException e)
{
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// exhausted the index without a match
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Atomically takes a reference via CAS retry. Returns false once the count has
 * dropped to zero — a fully released reader can never be resurrected.
 */
public boolean acquireReference()
{
    for (;;)
    {
        int current = references.get();
        if (current <= 0)
            return false;
        if (references.compareAndSet(current, current + 1))
            return true;
    }
}
// Drops one reference; the thread that takes the count to zero on an
// already-compacted reader tears down the mmaps and schedules file deletion.
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes the first-caller decision atomic
if (isCompacted.getAndSet(true))
return false;
try
{
// the marker file makes the compacted state durable across restarts
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/**
 * Flags this sstable as a suspect (e.g. after an index read error) so callers
 * can blacklist it; idempotent.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    // plain set(): the previous value returned by the original getAndSet was never used
    isSuspect.set(true);
}
// True once markSuspect() has been called on this reader.
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens; a null range
 * falls back to a full-table direct scanner.
 *
 * @param range the range of keys to cover
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
    return range == null ? getDirectScanner() : new SSTableBoundedScanner(this, true, range);
}
/**
 * Positions a data-file input at the row for the given key, or returns null
 * when the key is not present in this sstable.
 */
public FileDataInput getFileDataInput(DecoratedKey decoratedKey)
{
    long position = getPosition(decoratedKey, Operator.EQ);
    return position < 0 ? null : dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoch on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
// Reads a serialized row size, honoring the descriptor's on-disk width
// (older versions wrote an int, newer ones a long).
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    return d.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory,
 * preserving each component's file name.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
    for (Component component : components)
    {
        File source = new File(descriptor.filenameFor(component));
        CLibrary.createHardLink(source, new File(snapshotDirectoryPath, source.getName()));
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method:
 * sstables old enough to have encoded keys go through the legacy conversion,
 * everything else is decorated directly.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
/**
 * TODO: Move someplace reusable
 *
 * Strategy objects used by getPosition() while scanning the index in sorted order.
 * apply() receives compare(indexKey, targetKey) and reports whether the scan has
 * found a match, should continue, or can stop (see the contract below).
 */
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
 * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
 * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
 */
public abstract int apply(int comparison);
final static class Equals extends Operator
{
// matches only on comparison == 0; a positive comparison (index key past the
// target) means EQ can never match further forward
public int apply(int comparison) { return -comparison; }
}
final static class GreaterThanOrEqualTo extends Operator
{
// any index key >= target matches; otherwise keep scanning forward
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
final static class GreaterThan extends Operator
{
// only a strictly greater index key matches; never gives up scanning forward
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
// Lifetime count of bloom-filter false positives observed by getPosition().
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
// False positives since the last call to this method (tracker resets on read).
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
// Lifetime count of bloom-filter true positives observed by getPosition().
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
// True positives since the last call to this method (tracker resets on read).
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
// The shared key cache, or null when this reader is not tracked (see setTrackedBy()).
public InstrumentingCache<KeyCacheKey, Long> getKeyCache()
{
return keyCache;
}
// Histogram of serialized row sizes, from the STATS component.
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
// Histogram of per-row column counts, from the STATS component.
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
// Compression ratio recorded at write time, from the STATS component.
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
// Commitlog replay position this sstable's data covers, from the STATS component.
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
// Highest column timestamp in this sstable, from the STATS component.
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
/**
 * Opens a fresh random-access reader over the data file, transparently
 * decompressing when this sstable is compressed.
 */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/**
 * @param sstables readers to reference
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    SSTableReader blocker = null;
    for (SSTableReader sstable : sstables)
    {
        if (!sstable.acquireReference())
        {
            blocker = sstable;
            break;
        }
    }
    if (blocker == null)
        return true;
    // roll back the references taken before the failure
    for (SSTableReader sstable : sstables)
    {
        if (sstable == blocker)
            break;
        sstable.releaseReference();
    }
    return false;
}
// Best-effort bulk release: a failure on one reader is logged and must not
// prevent the remaining readers from being unreferenced.
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
for (SSTableReader sstable : sstables)
{
try
{
sstable.releaseReference();
}
catch (Exception ex)
{
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMillis()) which represents an upper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoch and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, Long> keyCache;
private BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables from their index
 * samples: each sampled key stands in for roughly INDEX_INTERVAL real keys.
 *
 * @param sstables sstables to estimate over
 * @return estimated total key count
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen before multiplying: (indexKeyCount + 1) * interval is int arithmetic
        // in the original and can overflow for very large sstables
        count = count + (indexKeyCount + 1) * (long) DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens the sstable named by the descriptor, resolving its CFMetaData from the schema.
 * A column family name of the form "parent.index" denotes a secondary-index sstable,
 * whose metadata is rebuilt from the parent CF's column definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    int dot = descriptor.cfname.indexOf(".");
    if (dot < 0)
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    else
    {
        // "<parent>.<index>": derive the index CF metadata from the parent definition
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname.substring(0, dot));
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(dot + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with known metadata, choosing the partitioner: secondary-index
 * sstables (cfname contains '.') are locally partitioned by their key validator,
 * everything else uses the cluster-wide partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains("."))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
// Opens without first/last key-order validation and with no key-cache preload;
// for callers that must be able to read possibly-invalid sstables.
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Open with no keys to preload into the key cache and no DataTracker. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, partitioner);
}
/** Open with validation enabled (first key must not sort after last). */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
/**
 * Core open path: deserializes STATS (when present), verifies the sstable was written
 * with the same partitioner, constructs the reader, loads the index/data segments and
 * bloom filter, and optionally validates key ordering.
 *
 * @param savedKeys keys to pre-populate into the key cache while scanning the index
 * @param tracker may be null; when set, enables the key cache and deletion tracking
 * @param validate if true, fail when the sstable's first key sorts after its last
 */
private static SSTableReader open(Descriptor descriptor,
Set<Component> components,
Set<DecoratedKey> savedKeys,
DataTracker tracker,
CFMetaData metadata,
IPartitioner partitioner,
boolean validate) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.hasStringsInBloomFilter)
{
// old format: the on-disk filter is unusable, so rebuild it while scanning the index
sstable.load(true, savedKeys);
}
else
{
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/** Log an open failure: a missing component gets a short message, anything else is reported as corruption. */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Open the given sstables in parallel (one task per sstable, pool sized to the number
 * of available processors). Sstables that fail to open are logged and skipped.
 *
 * @return the readers that opened successfully
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final Set<DecoratedKey> savedKeys,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), savedKeys, tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // reuse the shared helper so missing-component and corruption cases
                    // are reported consistently with the single-open path
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        // effectively "wait forever": opening cannot legitimately take this long
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * Unlike the public open() paths, every component (segments, summary, filter, metadata)
 * must already be built: all arguments are required to be non-null.
 */
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
/**
 * All-args constructor shared by open() (which passes null files/summary/filter and
 * loads them afterwards) and internalOpen() (which passes fully built components).
 */
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// the task that will delete this sstable's files once it is compacted and unreferenced
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Attach this reader to a DataTracker: enables the shared key cache and routes
 * file deletion through the tracker. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Load the bloom filter from the FILTER component; when the component is absent,
 * fall back to BloomFilter.emptyFilter(). The on-disk format (legacy vs current)
 * is chosen from the descriptor version.
 */
void loadBloomFilter() throws IOException
{
if (!components.contains(Component.FILTER))
{
bf = BloomFilter.emptyFilter();
return;
}
DataInputStream stream = null;
try
{
stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
if (descriptor.usesOldBloomFilter)
{
bf = LegacyBloomFilter.serializer().deserialize(stream);
}
else
{
bf = BloomFilter.serializer().deserialize(stream);
}
}
finally
{
// best-effort close; deserialization errors still propagate
FileUtils.closeQuietly(stream);
}
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * A single sequential pass over the primary index: samples keys into the index summary,
 * records potential segment boundaries for both files, captures the first/last key,
 * and optionally rebuilds the filter and pre-warms the key cache.
 *
 * @param recreatebloom rebuild the bloom filter from the index instead of reading the FILTER component
 * @param keysToLoadInCache keys whose data position should be pre-populated into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
DecoratedKey left = null, right = null;
try
{
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// last entry iff this length-prefixed key plus its long position ends exactly at EOF
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// only materialize the key when some consumer needs it; otherwise just skip the bytes
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
{
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
}
else
{
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null)
{
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** Sanity check loaded bounds: the first key must not sort after the last. */
private void validate()
{
    boolean ordered = this.first.compareTo(this.last) <= 0;
    if (!ordered)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
private long getIndexScanPosition(RowPosition key)
{
assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
int index = Collections.binarySearch(indexSummary.getKeys(), key);
if (index < 0)
{
// binary search gives us the first index _greater_ than the key searched for,
// i.e., its insertion position
int greaterThan = (index + 1) * -1;
if (greaterThan == 0)
// key sorts before every sampled key: nowhere to start scanning from
return -1;
return indexSummary.getPosition(greaterThan - 1);
}
else
{
// exact hit on a sampled key: start right there
return indexSummary.getPosition(index);
}
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only.
 * Swaps in LegacyBloomFilter.alwaysMatchingBloomFilter() so bloom-filter checks never reject a key.
 */
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
/** The bloom filter currently in use for this sstable. */
public Filter getBloomFilter()
{
return bf;
}
/** Serialized size of the bloom filter, using the serializer matching the descriptor's filter format. */
public long getBloomFilterSerializedSize()
{
    return descriptor.usesOldBloomFilter
           ? LegacyBloomFilter.serializer().serializedSize((LegacyBloomFilter) bf)
           : BloomFilter.serializer().serializedSize((BloomFilter) bf);
}
/**
 * @return An estimate of the number of keys in this SSTable: one sample per
 *         index-interval keys.
 */
public long estimatedKeys()
{
    // widen before multiplying: the original int * int product could overflow
    // before being assigned to the long return value
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
long sampleKeyCount = 0;
// count the index samples covered by each range; each sample represents ~indexInterval keys
List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
// never report zero keys for a non-empty request
return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey<?>> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Map each (normalized) token range onto the inclusive [left, right] slice of index
 * samples it covers; ranges covering no samples are omitted.
 *
 * @param samples sorted index samples
 * @param ranges token ranges to resolve
 * @return one (left, right) index pair per range that intersects the samples
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey<?>> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range bound is start-exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range end is inclusive, so use the previous index from what binarySearch gives us
            // since that will be the last index we return
            right = (right + 1) * -1;
            if (right == 0)
                // the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized Pair (the original used a raw type); autoboxing replaces Integer.valueOf
        positions.add(new Pair<Integer,Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterate the index samples that fall inside the given token range.
 *
 * @param range token range to cover
 * @return the sampled keys inside the range (empty when none match)
 */
public Iterable<DecoratedKey<?>> getKeySamples(final Range<Token> range)
{
    final List<DecoratedKey<?>> samples = indexSummary.getKeys();
    final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
    if (indexRanges.isEmpty())
        return Collections.emptyList();
    return new Iterable<DecoratedKey<?>>()
    {
        public Iterator<DecoratedKey<?>> iterator()
        {
            return new Iterator<DecoratedKey<?>>()
            {
                private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
                private Pair<Integer, Integer> current;
                private int idx;
                public boolean hasNext()
                {
                    // exhausted the current [left, right] slice: advance to the next one
                    if (current == null || idx > current.right)
                    {
                        if (rangeIter.hasNext())
                        {
                            current = rangeIter.next();
                            idx = current.left;
                            return true;
                        }
                        return false;
                    }
                    return true;
                }
                // declare the parameterized return type (the original used the raw DecoratedKey)
                public DecoratedKey<?> next()
                {
                    RowPosition k = samples.get(idx++);
                    // the index should only contain valid row keys; we only allow RowPosition in KeyPosition for search purposes
                    assert k instanceof DecoratedKey;
                    return (DecoratedKey<?>) k;
                }
                public void remove()
                {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        long left = getPosition(keyRange.left, Operator.GT);
        if (left == -1)
            // left is past the end of the file
            continue;
        long right = getPosition(keyRange.right, Operator.GT);
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair (the original used a raw type); autoboxing replaces Long.valueOf
        positions.add(new Pair<Long,Long>(left, right));
    }
    return positions;
}
/** Insert a key -> data-file-position entry into the key cache, honoring the CF's caching mode. */
public void cacheKey(DecoratedKey key, Long info)
{
CFMetaData.Caching caching = metadata.getCaching();
// skip when key caching is disabled for this CF or the shared cache has no capacity
if (keyCache == null
|| caching == CFMetaData.Caching.NONE
|| caching == CFMetaData.Caching.ROWS_ONLY
|| keyCache.getCapacity() == 0)
return;
// avoid keeping a permanent reference to the original key buffer
keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
/** Key-cache lookup by row key; updateStats controls whether hit/miss accounting is recorded. */
public Long getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
/** Key-cache lookup; returns null when the cache is absent or has zero capacity. */
private Long getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
    if (keyCache == null || keyCache.getCapacity() <= 0)
        return null;
    return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public long getPosition(RowPosition key, Operator op)
{
// convenience overload: always update the key cache and bloom filter statistics
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or -1 if the key is not present
 */
public long getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return -1;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
Long cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, return the 0th position
return op.apply(1) >= 0 ? 0 : -1;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
long dataPosition = input.readLong();
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
// v == 0: the operator accepts this index entry
if (v == 0)
{
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, dataPosition);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return dataPosition;
}
// v < 0: the operator can no longer match any later (greater) index entry
if (v < 0)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
}
}
catch (IOException e)
{
// index read failed: flag this sstable for blacklisting before propagating
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// exhausted the index without a match
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return -1;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Atomically take a reference via CAS, failing once the count has dropped to zero
 * (i.e. the reader has been released for deletion).
 *
 * @return true if a reference was acquired
 */
public boolean acquireReference()
{
    for (int current = references.get(); current > 0; current = references.get())
    {
        if (references.compareAndSet(current, current + 1))
            return true;
    }
    return false;
}
/**
 * Drop a reference; the final release of a compacted sstable cleans up the file
 * segments and schedules the on-disk files for deletion.
 */
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// only the first caller wins; later calls are no-ops returning false
if (isCompacted.getAndSet(true))
return false;
try
{
// persist the compacted state via a marker file; createNewFile() returning false means it already exists
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/** Flag this sstable as having produced I/O errors so callers can blacklist it. */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    // set() rather than getAndSet(): the previous value was computed only to be discarded
    isSuspect.set(true);
}
/** @return true if an I/O error has been observed on this sstable (see markSuspect). */
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner (bypasses the OS page cache) over the whole sstable.
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens; a null range scans the whole sstable.
 *
 * @param range the range of keys to cover
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
    return range == null ? getDirectScanner() : new SSTableBoundedScanner(this, true, range);
}
/** Position the data file at the given key's row, or return null when the key is absent. */
public FileDataInput getFileDataInput(DecoratedKey decoratedKey)
{
    long position = getPosition(decoratedKey, Operator.EQ);
    return position < 0 ? null : dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
/** Read a row-size header: descriptors with hasIntRowSize store it as an int, newer ones as a long. */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    return d.hasIntRowSize ? in.readInt() : in.readLong();
}
/** Hard-link every component of this sstable into the given snapshot directory. */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File sourceFile = new File(descriptor.filenameFor(component));
File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
CLibrary.createHardLink(sourceFile, targetLink);
}
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
/**
 * TODO: Move someplace reusable
 */
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
 * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
 * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
 */
public abstract int apply(int comparison);
// matches only equality; comparison > 0 (index key past target) maps below 0, i.e. stop scanning
final static class Equals extends Operator
{
public int apply(int comparison) { return -comparison; }
}
// matches once the index key is >= the target; otherwise keep scanning forward
final static class GreaterThanOrEqualTo extends Operator
{
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
// matches only strictly-greater keys; never reports "cannot match forward"
final static class GreaterThan extends Operator
{
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
/** Cumulative bloom filter false positives recorded for this sstable. */
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
/** "Recent" false-positive count as maintained by the BloomFilterTracker. */
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** Cumulative bloom filter true positives recorded for this sstable. */
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
/** "Recent" true-positive count as maintained by the BloomFilterTracker. */
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
/** The shared key cache, or null when no DataTracker has been attached. */
public InstrumentingCache<KeyCacheKey, Long> getKeyCache()
{
return keyCache;
}
/** Row-size histogram from the sstable's serialized statistics. */
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
/** Column-count histogram from the sstable's serialized statistics. */
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
/** Compression ratio recorded in the sstable statistics. */
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
/** Commitlog replay position recorded when this sstable was written. */
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
/** Maximum timestamp recorded in the sstable statistics. */
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
/** Open a reader over the data file, compressed or plain depending on this sstable. */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    // remember exactly which readers we referenced: the original rolled back by
    // re-iterating the Iterable up to the failed element, which silently assumes the
    // Iterable yields the same elements in the same order on a second pass
    List<SSTableReader> acquired = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstables)
    {
        if (!sstable.acquireReference())
        {
            for (SSTableReader referenced : acquired)
                referenced.releaseReference();
            return false;
        }
        acquired.add(sstable);
    }
    return true;
}
/** Best-effort release: a failure on one sstable is logged and does not prevent releasing the rest. */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
for (SSTableReader sstable : sstables)
{
try
{
sstable.releaseReference();
}
catch (Exception ex)
{
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoc and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of the primary index
private IndexSummary indexSummary;
// bloom filter over row keys; loaded from the FILTER component or recreated while loading the index
private Filter bf;
// shared key cache keyed by (sstable, row key); populated when a DataTracker is attached
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// starts at 1 (the "live" reference); the files may only be deleted once this reaches 0
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
// set when an I/O error is seen while reading this sstable; callers may blacklist it
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
// deletes this sstable's files once it is compacted and fully unreferenced
private final SSTableDeletingTask deletingTask;
// statistics read from the STATS component, or defaults when absent
private final SSTableMetadata sstableMetadata;
/**
 * Estimate the total number of keys across the given sstables: each sampled index
 * key stands in for roughly one index-interval's worth of keys.
 *
 * @param sstables sstables to sum estimates over
 * @return estimated key count (never negative)
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen to long before multiplying: (indexKeyCount + 1) * indexInterval is
        // otherwise computed in int arithmetic and can overflow for large sstables
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Open an sstable, resolving its metadata from the Schema.
 * A cfname containing the secondary-index separator denotes an index sstable:
 * its metadata is derived from the parent column family's column definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
CFMetaData metadata;
if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
{
// split "<parent><sep><index>" and rebuild the index CF's metadata from the parent's definition
int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
String parentName = descriptor.cfname.substring(0, i);
CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
}
else
{
metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
}
return open(descriptor, metadata);
}
/**
 * Open with explicit metadata; secondary-index sstables are keyed by a
 * LocalPartitioner over the index's key validator.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/** Open with no DataTracker (no key cache / deletion tracking). */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, null, metadata, partitioner);
}
/**
 * Core open path: deserializes STATS (when present), verifies the sstable was written
 * with the same partitioner, constructs the reader, and loads the index/data segments
 * and bloom filter.
 *
 * @param tracker may be null; when set, enables the key cache and deletion tracking
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
// old format: the on-disk filter is unusable, so rebuild it while scanning the index
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failure to open an sstable, distinguishing a missing component
 * (FileNotFoundException) from general corruption.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
    }
    else
    {
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
    }
}
/**
 * Opens a set of sstables in parallel (one task per entry, up to #cores threads).
 * Unreadable sstables are logged and skipped rather than failing the whole batch.
 *
 * @return the readers that opened successfully, in no particular order
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    // thread-safe collection: worker tasks add their results concurrently
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // use the shared logging helper so missing components are reported
                    // distinctly from corruption, consistent with single-file opens
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        // effectively "wait forever" for all open tasks to finish
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        // restore the interrupt flag for callers further up the stack before failing
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * No disk I/O happens here: every file handle, the index summary, the bloom filter
 * and the metadata are supplied by the caller and are required to be non-null.
 */
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
/**
 * Sole constructor: stores the supplied state and registers a deletion task
 * for this reader. Any loading happens afterwards via load()/loadBloomFilter().
 */
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// on-disk cleanup is deferred to this task once the reader is fully released
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker: wires up the shared key cache
 * and tells the deletion task which tracker to notify. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Loads the bloom filter from the FILTER component, or installs an empty
 * (never-rejecting) filter when the component is missing.
 *
 * @throws IOException if the filter file exists but cannot be read
 */
void loadBloomFilter() throws IOException
{
if (!components.contains(Component.FILTER))
{
// no filter on disk: fall back to a filter that never rejects a key
bf = FilterFactory.emptyFilter();
return;
}
DataInputStream stream = null;
try
{
stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
bf = FilterFactory.deserialize(stream, descriptor.version.filterType);
}
finally
{
FileUtils.closeQuietly(stream);
}
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * A saved SUMMARY component is used when readable; otherwise the primary index is
 * scanned once to rebuild the summary (and the filter, when recreatebloom is set),
 * and the rebuilt summary is saved back to disk.
 */
private void load(boolean recreatebloom) throws IOException
{
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try
{
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
// single pass over the index feeds the filter, the summary and the segment boundaries
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
{
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
// the index is ordered, so the first/last keys seen bound the sstable
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded)
{
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
}
finally
{
FileUtils.closeQuietly(primaryIndex);
}
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (readIndex) // save summary information to disk
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to restore the index summary, first/last keys and segment bounds
 * from the SUMMARY component.
 *
 * @return true on success; false when the component is absent or unreadable
 *         (an unreadable summary file is deleted so it can be rebuilt)
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
logger.debug("Cannot deserialize SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
 * Persists the index summary, first/last keys and segment bounds to the
 * SUMMARY component, replacing any existing file. A failed write is logged
 * and the partial file deleted; the summary can always be rebuilt from the index.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try
{
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
}
catch (IOException e)
{
logger.debug("Cannot save SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
}
finally
{
FileUtils.closeQuietly(oStream);
}
}
/**
 * Gets the position in the index file to start scanning to find the given key
 * (at most indexInterval keys away), or -1 if the key sorts before every
 * sampled key.
 */
public long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index); // exact hit on a sampled key
    // miss: binarySearch encodes the insertion point as -(insertionPoint) - 1;
    // step back one sample to the greatest sampled key <= the target
    int insertionPoint = -(index + 1);
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile)dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only.
 */
public void forceFilterFailures()
{
// an always-matching filter forces every lookup through to the index
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// exposes the current bloom filter (may be rebuilt, empty, or always-matching)
public Filter getBloomFilter()
{
return bf;
}
// bytes the current filter would occupy when serialized in this version's format
public long getBloomFilterSerializedSize()
{
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable
 *         (one summary sample stands for roughly one index interval of keys).
 */
public long estimatedKeys()
{
    // widen before multiplying: size() * interval is an int*int product and can
    // overflow for very large sstables before being assigned to the long result
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges to estimate over
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
long sampleKeyCount = 0;
List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
// each (left, right) pair is an inclusive span of summary sample indexes
for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
// each sample stands for roughly one index interval of keys; never report 0
return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 * NOTE(review): returns the summary's internal list directly — callers
 * should treat it as read-only.
 */
public Collection<DecoratedKey> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [left, right] span of
 * index-summary sample indexes that fall inside it.
 *
 * @param samples sorted key samples from the index summary
 * @param ranges  token ranges to cover
 * @return one (left, right) sample-index pair per non-empty range
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = -(left + 1); // recover the insertion point from binarySearch's encoding
        else
            // left range bound is start-exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // ranges are end-inclusive, so we use the previous index from what binarySearch
            // gives us, since that will be the last index we will return
            right = -(right + 1);
            if (right == 0)
                // means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized Pair (the original raw 'new Pair(...)' defeated generic type
        // checking); autoboxing replaces the explicit Integer.valueOf calls
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the index-summary samples that fall inside the given range.
 *
 * @param range token range to restrict the samples to
 * @return an Iterable over the sampled keys in the range (possibly empty)
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>()
{
public Iterator<DecoratedKey> iterator()
{
return new Iterator<DecoratedKey>()
{
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
// advance to the next (inclusive) index span once the current one is exhausted
if (current == null || idx > current.right)
{
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair (the original raw 'new Pair(...)' defeated generic type
        // checking); autoboxing replaces the explicit Long.valueOf calls
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Adds an index entry to the shared key cache, unless key caching is disabled
 * for this column family or the cache is absent/zero-capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    boolean cacheable = keyCache != null
                     && keyCache.getCapacity() != 0
                     && caching != CFMetaData.Caching.NONE
                     && caching != CFMetaData.Caching.ROWS_ONLY;
    if (!cacheable)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// looks up a cached index entry by row key; updateStats controls hit accounting
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
// getInternal presumably skips the cache's hit/request accounting — used when
// the caller does not want this lookup counted (TODO confirm against the cache impl)
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats (delegates with both enabled).
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The index entry corresponding to the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, we'll start at the first
// position. We however need to return the correct index entry for that first position.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0)
{
// operator is satisfied by this index key: deserialize and return its entry
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0)
{
// operator can no longer match further along the (sorted) index: definitive miss
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// not there yet: skip this entry's payload and keep scanning
RowIndexEntry.serializer.skip(input, descriptor.version);
}
}
catch (IOException e)
{
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// ran off the end of the index without a match
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
// logical size as recorded by the segmented data file
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
// physical (possibly compressed) size on disk
return dfile.onDiskLength;
}
/**
 * Atomically increments the reference count, unless the reader has already
 * been fully released (count has dropped to zero or below).
 *
 * @return true if a reference was acquired; false if the reader is already dead
 */
public boolean acquireReference()
{
    for (int n = references.get(); n > 0; n = references.get())
    {
        // CAS may lose a race with another thread; re-read and retry
        if (references.compareAndSet(n, n + 1))
            return true;
    }
    return false;
}
/**
 * Drops one reference; when the count reaches zero on an already-compacted
 * sstable, releases mmap resources and schedules file deletion.
 */
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes the transition atomic: only the first caller proceeds
if (isCompacted.getAndSet(true))
return false;
try
{
// persist the compacted state as a marker file alongside the other components
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
// flags this sstable as possibly corrupt (e.g. after an index read error)
// so higher layers can blacklist it
public void markSuspect()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
isSuspect.getAndSet(true);
}
// true once markSuspect() has been called on this reader
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null falls back to the whole-sstable scanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
// returns a data-file segment positioned at the given offset
public FileDataInput getFileDataInput(long position)
{
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
// older formats (hasIntRowSize) stored row sizes as 4-byte ints rather than longs
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
if (d.version.hasIntRowSize)
return in.readInt();
return in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory.
 *
 * @param snapshotDirectoryPath directory to receive one link per component
 * @throws IOException if a link cannot be created
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
    for (Component component : components)
    {
        File source = new File(descriptor.filenameFor(component));
        CLibrary.createHardLink(source, new File(snapshotDirectoryPath, source.getName()));
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 * Older sstable versions (hasEncodedKeys) stored keys in the partitioner's
 * on-disk encoding; newer versions store raw key bytes.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
if (d.version.hasEncodedKeys)
return p.convertFromDiskFormat(bytes);
return p.decorateKey(bytes);
}
// instance convenience wrapper around the static decodeKey
public DecoratedKey decodeKey(ByteBuffer bytes)
{
return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 */
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
 * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
 * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
 */
public abstract int apply(int comparison);
// matches only on exact equality; a smaller index key means keep scanning forward
final static class Equals extends Operator
{
public int apply(int comparison) { return -comparison; }
}
// matches the first index key that compares equal to or greater than the target
final static class GreaterThanOrEqualTo extends Operator
{
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
// matches the first index key strictly greater than the target
final static class GreaterThan extends Operator
{
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
// bloom filter accuracy counters (lifetime and recent-window views)
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
// shared key cache; null until setTrackedBy wires one up
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
return keyCache;
}
// accessors for the per-sstable statistics captured at write time
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
// opens a reader on the data file, transparently decompressing when compressed
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
// opens a reader on the primary index file
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * Attempts to take one reference on every sstable in the iterable. If any
 * acquisition fails, all references taken so far are released again.
 *
 * @param sstables readers to reference
 * @return true if all desired references were acquired; false after rolling back
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    SSTableReader blocker = null;
    for (SSTableReader reader : sstables)
    {
        if (!reader.acquireReference())
        {
            blocker = reader;
            break;
        }
    }
    if (blocker == null)
        return true;
    // roll back the partial acquisition, stopping at the reader that refused
    for (SSTableReader reader : sstables)
    {
        if (reader == blocker)
            break;
        reader.releaseReference();
    }
    return false;
}
/**
 * Releases one reference on each sstable, logging (but not propagating) any
 * exception so that the remaining sstables are still released.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
    for (SSTableReader reader : sstables)
    {
        try
        {
            reader.releaseReference();
        }
        catch (Exception e)
        {
            logger.error("Failed releasing reference on " + reader, e);
        }
    }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoc and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of the primary index, used to bound index scans
private IndexSummary indexSummary;
private Filter bf;
// shared key cache; null until setTrackedBy associates this reader with a tracker
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// reference count starts at 1: the "owner" reference
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Roughly estimates the total number of keys across the given sstables from
 * the size of each reader's index-summary sample.
 *
 * @return sum over sstables of (samples + 1) * index interval
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen to long before multiplying: the int product
        // (indexKeyCount + 1) * interval can overflow for very large sstables
        count = count + (indexKeyCount + 1) * (long) DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable by descriptor alone, resolving its CFMetaData from the
 * schema. Secondary-index sstables (cfname contains the index separator) get
 * index metadata synthesized from the parent column family's definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
CFMetaData metadata;
if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
{
int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
String parentName = descriptor.cfname.substring(0, i);
CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
}
else
{
metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
}
return open(descriptor, metadata);
}
// secondary-index sstables are partitioned locally by their key validator;
// everything else uses the cluster-wide partitioner
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
IPartitioner p = desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR)
? new LocalPartitioner(metadata.getKeyValidator())
: StorageService.getPartitioner();
return open(desc, componentsFor(desc), metadata, p);
}
/**
 * Opens an sstable without an associated DataTracker (no key-cache wiring);
 * delegates to the five-argument open() with a null tracker.
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, null, metadata, partitioner);
}
/**
 * Opens an sstable from disk: deserializes the metadata (when the STATS component
 * exists), validates the recorded partitioner against the supplied one, then loads
 * the index, summary and bloom filter before returning a usable reader.
 *
 * @throws IOException if any component cannot be read
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS is optional: older sstables without it get default metadata
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
// file handles, summary and filter are null here; load() below fills them in
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
// incompatible on-disk filter format: rebuild the bloom filter from the index
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failure to open an sstable, distinguishing a missing component
 * (FileNotFoundException) from general corruption.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
    }
    else
    {
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
    }
}
/**
 * Opens the given sstables in parallel (one thread per available processor).
 * Unreadable sstables are logged and skipped rather than failing the whole batch.
 *
 * @return the readers that opened successfully, in no particular order
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
final DataTracker tracker,
final CFMetaData metadata,
final IPartitioner partitioner)
{
final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
{
Runnable runnable = new Runnable()
{
public void run()
{
SSTableReader sstable;
try
{
sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
}
catch (IOException ex)
{
// use the shared helper so a FileNotFoundException is reported as a missing
// component rather than corruption (consistent with other open() call sites)
logOpenException(entry.getKey(), ex);
return;
}
sstables.add(sstable);
}
};
executor.submit(runnable);
}
executor.shutdown();
try
{
// effectively "wait forever": opening is local disk i/o and is expected to finish
executor.awaitTermination(7, TimeUnit.DAYS);
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
return sstables;
}
/**
* Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
* All on-disk state (segmented files, summary, bloom filter, metadata) is supplied
* by the caller, so no further i/o happens here; the assert guards against a writer
* handing over a partially-initialized reader.
*/
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
// Private: instances are produced by the static open()/internalOpen() factories.
// Only records the supplied state; actual file loading happens in load()/loadBloomFilter(),
// so ifile/dfile/indexSummary/bloomFilter may legitimately be null here.
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// deletion of the backing files is deferred to this task (scheduled on final release)
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker: enables the global key cache and
 * routes eventual file deletion through the tracker. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker)
{
if (tracker == null)
return;
keyCache = CacheService.instance.keyCache;
deletingTask.setTracker(tracker);
}
/**
 * Deserializes the bloom filter from the FILTER component, or installs an
 * always-empty filter when the component is absent.
 */
void loadBloomFilter() throws IOException
{
if (!components.contains(Component.FILTER))
{
bf = FilterFactory.emptyFilter();
return;
}
DataInputStream in = null;
try
{
FileInputStream file = new FileInputStream(descriptor.filenameFor(Component.FILTER));
in = new DataInputStream(new BufferedInputStream(file));
bf = FilterFactory.deserialize(in, descriptor.version.filterType);
}
finally
{
FileUtils.closeQuietly(in);
}
}
/**
* Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
* Reads the primary index only when the on-disk summary is missing or the filter
* must be rebuilt; otherwise the summary/bounds come straight from the SUMMARY
* component via loadSummary().
*/
private void load(boolean recreatebloom) throws IOException
{
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try
{
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
// single pass over the index: populates first/last, the rebuilt filter and
// (when not loaded from disk) the summary and mmap segment boundaries
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
{
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded)
{
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
}
finally
{
FileUtils.closeQuietly(primaryIndex);
}
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (readIndex) // save summary information to disk
saveSummary(this, ibuilder, dbuilder);
}
/**
* Attempts to restore the reader's index summary, first/last keys and segment
* boundaries from the SUMMARY component.
*
* @return true on success; false when the component is absent or unreadable
*         (a corrupt file is deleted so it will be rebuilt by load()).
*/
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
logger.debug("Cannot deserialize SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
* Persists the reader's index summary, first/last keys and segment boundaries
* to the SUMMARY component so future opens can skip scanning the primary index.
* Failures are non-fatal: a partially-written file is deleted and the summary
* will simply be rebuilt on the next open.
*/
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try
{
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
}
catch (IOException e)
{
logger.debug("Cannot save SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
}
finally
{
FileUtils.closeQuietly(oStream);
}
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key)
{
assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
int index = Collections.binarySearch(indexSummary.getKeys(), key);
if (index >= 0)
return indexSummary.getPosition(index);
// not an exact match: binarySearch encodes the insertion point as -(point) - 1,
// so recover it and step back to the preceding sampled entry; -1 means the key
// sorts before every sample
int insertionPoint = -index - 1;
return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
if (compression)
return ((CompressedSegmentedFile) dfile).metadata;
throw new IllegalStateException(this + " is not compressed");
}
/**
* For testing purposes only: replaces the bloom filter with one that matches
* every key, forcing index lookups for all reads.
*/
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// Exposes the current bloom filter (may be the empty or always-matching variant).
public Filter getBloomFilter()
{
return bf;
}
// Size in bytes the current bloom filter would occupy when serialized.
public long getBloomFilterSerializedSize()
{
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable, computed as
 *         (summary samples) * (index interval).
 */
public long estimatedKeys()
{
// widen to long before multiplying: both factors are ints, so a very large
// summary could overflow 32-bit arithmetic even though the result type is long
return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
* @param ranges the token ranges of interest
* @return An estimate of the number of keys for given ranges in this SSTable,
*         scaled up from the summary samples that fall inside them (never less than 1).
*/
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
long sampleKeyCount = 0;
List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
// each pair is an inclusive [left, right] span of sample indexes
for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
* @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable
*         (the live summary list; callers must not modify it).
*/
public Collection<DecoratedKey> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [left, right] span of
 * index-summary sample positions that could contain keys in that range.
 * Ranges covering no samples are omitted from the result.
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
// use the index to determine a minimal section for each range
List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
if (samples.isEmpty())
return positions;
for (Range<Token> range : Range.normalize(ranges))
{
RowPosition leftPosition = range.left.maxKeyBound();
RowPosition rightPosition = range.right.maxKeyBound();
int left = Collections.binarySearch(samples, leftPosition);
if (left < 0)
left = (left + 1) * -1;
else
// left range are start exclusive
left = left + 1;
if (left == samples.size())
// left is past the end of the sampling
continue;
int right = Range.isWrapAround(range.left, range.right)
? samples.size() - 1
: Collections.binarySearch(samples, rightPosition);
if (right < 0)
{
// range are end inclusive so we use the previous index from what binarySearch give us
// since that will be the last index we will return
right = (right + 1) * -1;
if (right == 0)
// means the first key is already strictly greater than the right bound
continue;
right--;
}
if (left > right)
// empty range
continue;
// parameterized Pair (was a raw-type warning); autoboxing replaces Integer.valueOf
positions.add(new Pair<Integer, Integer>(left, right));
}
return positions;
}
/**
* Lazily iterates the summary samples that fall inside the given token range.
* The spans come from getSampleIndexesForRanges; the iterator walks each
* inclusive [left, right] span in order.
*/
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>()
{
public Iterator<DecoratedKey> iterator()
{
return new Iterator<DecoratedKey>()
{
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
// current span being walked, and the next sample index within it
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
// advance to the next span once idx walks past current.right
if (current == null || idx > current.right)
{
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
// use the index to determine a minimal section for each range
List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
for (Range<Token> range : Range.normalize(ranges))
{
AbstractBounds<RowPosition> keyRange = range.toRowBounds();
RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
long left = idxLeft == null ? -1 : idxLeft.position;
if (left == -1)
// left is past the end of the file
continue;
RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
long right = idxRight == null ? -1 : idxRight.position;
if (right == -1 || Range.isWrapAround(range.left, range.right))
// right is past the end of the file, or it wraps
right = uncompressedLength();
if (left == right)
// empty range
continue;
// parameterized Pair (was a raw-type warning); autoboxing replaces Long.valueOf
positions.add(new Pair<Long, Long>(left, right));
}
return positions;
}
/**
 * Adds the given index entry to the shared key cache, unless key caching is
 * disabled for this column family or the cache has no capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
CFMetaData.Caching caching = metadata.getCaching();
boolean cachingDisabled = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
if (keyCache == null || cachingDisabled || keyCache.getCapacity() == 0)
return;
// avoid keeping a permanent reference to the original key buffer
keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// Cache lookup by row key; updateStats controls whether hit/miss statistics are recorded.
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
// Looks up an entry in the key cache; getInternal skips hit/miss accounting.
// Returns null when the cache is disabled/empty or the key is absent.
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
if (keyCache == null || keyCache.getCapacity() <= 0)
return null;
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
}
/**
* Get position updating key cache and stats.
* Convenience overload of the three-argument form with updateCacheAndStats = true.
* @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
*/
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
* Looks up the index entry for a key. Fast paths first (bloom filter, key cache),
* then a bounded scan of the on-disk primary index starting at the nearest
* summary sample.
*
* @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
* allow key selection by token bounds but only if op != * EQ
* @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
* @param updateCacheAndStats true if updating stats and cache
* @return The index entry corresponding to the key, or null if the key is not present
*/
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, we'll start at the first
// position. We however need to return the correct index entry for that first position.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
// Operator contract: 0 == match here, < 0 == cannot match any further entry
int v = op.apply(comparison);
if (v == 0)
{
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// not a match yet: skip this entry's payload and keep scanning forward
RowIndexEntry.serializer.skip(input, descriptor.version);
}
}
catch (IOException e)
{
// an unreadable index marks the whole sstable as suspect for blacklisting
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
* @return The length in bytes of the data for this SSTable. For
* compressed files, this is not the same thing as the on disk size (see
* onDiskLength())
*/
public long uncompressedLength()
{
return dfile.length;
}
/**
* @return The length in bytes of the on disk size for this SSTable. For
* compressed files, this is not the same thing as the data length (see
* uncompressedLength())
*/
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Atomically takes a reference on this sstable via a CAS retry loop.
 * @return false if the reader has already been fully released (count is 0 or less)
 */
public boolean acquireReference()
{
for (int current = references.get(); current > 0; current = references.get())
{
if (references.compareAndSet(current, current + 1))
return true;
}
return false;
}
// Drops one reference; when the last reference is released on an already-compacted
// sstable, unmaps the segmented files and schedules deletion of the backing files.
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
// file deletion must come after the mmaps are released
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
* Mark the sstable as compacted.
* When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
* except for threads holding a reference.
*
* @return true if the this is the first time the file was marked compacted. With rare exceptions
* (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
*/
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes concurrent callers race safely: only one sees 'false' here
if (isCompacted.getAndSet(true))
return false;
try
{
// persist the compacted state on disk so a restart won't resurrect the sstable
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/**
 * Flags this sstable as suspect (e.g. after an i/o error) so it can be blacklisted.
 */
public void markSuspect()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
// the previous value is irrelevant, so a plain volatile set suffices
isSuspect.set(true);
}
// True once markSuspect() has been called (an i/o error was observed on this sstable).
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
* Scanner restricted by a query filter.
*
* @param filter filter to use when reading the columns
* @return A Scanner for seeking over the rows of the SSTable.
*/
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
* Direct I/O SSTableScanner over the whole sstable.
* @return A Scanner for seeking over the rows of the SSTable.
*/
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens; a null range falls
 * back to scanning the entire sstable.
 *
 * @param range the range of keys to cover
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
return range == null ? getDirectScanner() : new SSTableBoundedScanner(this, true, range);
}
// Returns an input over the data-file segment containing the given position.
public FileDataInput getFileDataInput(long position)
{
return dfile.getSegment(position);
}
/**
* Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
* This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
* @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
* @return True iff this sstable contains data that's newer than the given age parameter.
*/
public boolean newSince(long age)
{
return maxDataAge > age;
}
/**
 * Reads a serialized row size: older sstable versions wrote it as an int,
 * newer ones as a long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File source = new File(descriptor.filenameFor(component));
CLibrary.createHardLink(source, new File(snapshotDirectoryPath, source.getName()));
}
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
return d.version.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
// Instance convenience wrapper around the static decodeKey using this reader's
// partitioner and descriptor.
public DecoratedKey decodeKey(ByteBuffer bytes)
{
return decodeKey(partitioner, descriptor, bytes);
}
/**
* Comparison operators used by getPosition to decide, for each index entry,
* whether it matches or whether the scan can continue/stop.
* TODO: Move someplace reusable
*/
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
* @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
* @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
*/
public abstract int apply(int comparison);
// matches only an exact comparison of 0; a positive comparison means overshot (negative result)
final static class Equals extends Operator
{
public int apply(int comparison) { return -comparison; }
}
// matches the first entry >= the target
final static class GreaterThanOrEqualTo extends Operator
{
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
// matches the first entry strictly > the target; can always keep scanning forward
final static class GreaterThan extends Operator
{
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
// Lifetime count of bloom filter false positives observed by getPosition.
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
// False positives since the last "recent" counter reset.
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
// Lifetime count of bloom filter true positives observed by getPosition.
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
// True positives since the last "recent" counter reset.
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
// The shared key cache set by setTrackedBy; null if this reader is untracked.
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
return keyCache;
}
// Histogram of row sizes recorded in the sstable's statistics component.
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
// Histogram of per-row column counts recorded in the sstable's statistics component.
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
// Fraction of data estimated to be droppable tombstones older than gcBefore.
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
// Compressed/uncompressed size ratio recorded at write time.
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
// Commit log position the sstable's contents are flushed through.
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
// Largest column timestamp recorded in this sstable.
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
// Opens a reader over the data file, transparently decompressing when the
// sstable is compressed; skipIOCache hints the OS not to pollute its page cache.
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
// Opens a reader over the primary index file.
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
SSTableReader firstFailure = null;
for (SSTableReader candidate : sstables)
{
if (!candidate.acquireReference())
{
firstFailure = candidate;
break;
}
}
if (firstFailure == null)
return true;
// roll back the references taken before the failure, stopping at the failed reader
for (SSTableReader acquired : sstables)
{
if (acquired == firstFailure)
break;
acquired.releaseReference();
}
return false;
}
// Releases one reference on each sstable, logging (rather than propagating) any
// failure so every reader still gets its release attempt.
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
for (SSTableReader sstable : sstables)
{
try
{
sstable.releaseReference();
}
catch (Exception ex)
{
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
// ==== MergeMethods: a second, merged variant of the SSTableReader source follows ====
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoc and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables from the size of
 * each reader's index summary: (samples + 1) * index interval, so an sstable
 * never contributes zero.
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables) {
    long count = 0;
    for (SSTableReader sstable : sstables) {
        int indexKeyCount = sstable.getKeySamples().size();
        // promote to long before multiplying: (samples + 1) * interval is int
        // arithmetic otherwise and can overflow for very large summaries
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
// Opens an sstable resolving its CFMetaData from the schema; secondary-index
// sstables (cfname contains the index separator) synthesize index metadata from
// their parent column family's definition.
public static SSTableReader open(Descriptor descriptor) throws IOException {
CFMetaData metadata;
if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR)) {
int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
String parentName = descriptor.cfname.substring(0, i);
CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
// the text after the separator names the index within the parent CF
ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
} else {
metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
}
return open(descriptor, metadata);
}
// Opens an sstable with the appropriate partitioner: secondary-index sstables are
// keyed by the indexed value, so they use a local partitioner over the key
// validator instead of the cluster-wide partitioner.
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException {
    IPartitioner partitioner;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        partitioner = new LocalPartitioner(metadata.getKeyValidator());
    else
        partitioner = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, partitioner);
}
// Opens an sstable skipping post-load validation (used e.g. by offline tooling);
// no tracker and no pre-saved cache keys are supplied.
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException {
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
// Convenience overload without a DataTracker (the reader will be untracked).
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException {
return open(descriptor, components, null, metadata, partitioner);
}
// Opens a validated, tracker-registered sstable.
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
    // 'savedKeys' was referenced here without being defined (compile error).
    // No pre-saved cache keys are available on this path, so pass an empty set,
    // mirroring openNoValidation.
    return open(descriptor, components, Collections.<DecoratedKey>emptySet(), tracker, metadata, partitioner, true);
}
/**
 * Fully opens an sstable: reads (or defaults) its statistics, verifies the
 * partitioner matches, loads index/data files and the bloom filter, and
 * optionally validates the resulting reader.
 */
private static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner, boolean validate) throws IOException {
    assert partitioner != null;
    // Minimum components without which we can't do anything
    assert components.contains(Component.DATA);
    assert components.contains(Component.PRIMARY_INDEX);
    long start = System.currentTimeMillis();
    logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
    SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
    // Check if sstable is created using same partitioner.
    // Partitioner can be null, which indicates older version of sstable or no stats available.
    // In that case, we skip the check.
    String partitionerName = partitioner.getClass().getCanonicalName();
    if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
        throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
    SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
    sstable.setTrackedBy(tracker);
    // versions before 'c' encoded keys as utf-16 before hashing to the filter.
    // The flag lives on the version descriptor (descriptor.version), matching every
    // other version check in this class; the bare 'descriptor.hasStringsInBloomFilter'
    // used here previously did not resolve.
    if (descriptor.version.hasStringsInBloomFilter) {
        sstable.load(true, savedKeys);
    } else {
        sstable.load(false, savedKeys);
        sstable.loadBloomFilter();
    }
    if (validate)
        sstable.validate();
    if (logger.isDebugEnabled())
        logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
    if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
        logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
    return sstable;
}
/**
 * Logs a failure to open an sstable, distinguishing a missing component
 * (FileNotFoundException) from a corrupt one.
 *
 * @param descriptor the sstable whose open failed
 * @param e the failure encountered while opening
 */
public static void logOpenException(Descriptor descriptor, IOException e) {
    // use SLF4J parameterized logging instead of eager string concatenation
    if (e instanceof FileNotFoundException)
        logger.error("Missing sstable component in {}; skipped because of {}", descriptor, e.getMessage());
    else
        logger.error("Corrupt sstable {}; skipped", descriptor, e);
}
/**
 * Opens many sstables in parallel, one task per sstable, using a pool sized to the
 * machine's core count. Unreadable sstables are logged and skipped, not propagated.
 *
 * @return the successfully opened readers (order is not guaranteed)
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries, final DataTracker tracker, final CFMetaData metadata, final IPartitioner partitioner) {
    // LinkedBlockingQueue gives us a thread-safe collection for concurrent adds
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries) {
        Runnable runnable = new Runnable() {
            public void run() {
                SSTableReader sstable;
                try {
                    sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
                } catch (IOException ex) {
                    // reuse the shared helper so missing-component vs corruption is reported consistently
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try {
        // effectively "wait forever"; opening sstables is expected to finish long before this
        executor.awaitTermination(7, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader whose state was already initialized (by SSTableWriter);
 * no components are read from disk here.
 */
static SSTableReader internalOpen(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary isummary, Filter bf, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
    // one assert per argument so a failure pinpoints the missing piece of state
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;
    return new SSTableReader(desc, components, metadata, partitioner, ifile, dfile, isummary, bf, maxDataAge, sstableMetadata);
}
/**
 * Builds a reader around already-known state. ifile/dfile/indexSummary/bloomFilter
 * may be null here; in that case they are populated later by load()/loadBloomFilter().
 */
private SSTableReader(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary indexSummary, Filter bloomFilter, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
    super(desc, components, metadata, partitioner);
    this.sstableMetadata = sstableMetadata;
    this.maxDataAge = maxDataAge;
    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    // actual file deletion is deferred until references allow it; see releaseReference()
    this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker, enabling the shared key cache and
 * routing eventual file deletion through the tracker. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker) {
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Reads the bloom filter from the FILTER component; when that component is absent,
 * installs an empty filter so lookups degrade gracefully instead of failing.
 */
void loadBloomFilter() throws IOException {
    if (!components.contains(Component.FILTER)) {
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try {
        FileInputStream file = new FileInputStream(descriptor.filenameFor(Component.FILTER));
        in = new DataInputStream(new BufferedInputStream(file));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    } finally {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary by scanning the primary index once, and
 * optionally recreates the bloom filter.
 *
 * @param recreatebloom     rebuild the bloom filter from index keys (required for
 *                          pre-'c' formats whose on-disk filter hashed utf-16 keys)
 * @param keysToLoadInCache keys whose data positions are pre-populated into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException {
    boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    // first/last row keys observed while scanning, in index (i.e. token) order
    DecoratedKey left = null, right = null;
    try {
        long indexSize = input.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        // prefer the recorded row-size histogram; fall back to estimating from the index file
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
        SSTable.estimateRowsFromIndex(input);
        indexSummary = new IndexSummary(estimatedKeys);
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        while (true) {
            long indexPosition = input.getFilePointer();
            if (indexPosition == indexSize)
                break;
            DecoratedKey decoratedKey = null;
            int len = ByteBufferUtil.readShortLength(input);
            boolean firstKey = left == null;
            // last entry iff this key's bytes plus its trailing long position exactly exhaust the file
            boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
            boolean shouldAddEntry = indexSummary.shouldAddEntry();
            // only materialize the key when something needs it; otherwise skip the bytes cheaply
            if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey) {
                decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
                if (firstKey)
                    left = decoratedKey;
                if (lastKey)
                    right = decoratedKey;
            } else {
                FileUtils.skipBytesFully(input, len);
            }
            long dataPosition = input.readLong();
            if (decoratedKey != null) {
                if (recreatebloom)
                    bf.add(decoratedKey.key);
                if (shouldAddEntry)
                    indexSummary.addEntry(decoratedKey, indexPosition);
                // if key cache could be used and we have key already pre-loaded
                if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
                    cacheKey(decoratedKey, dataPosition);
            }
            indexSummary.incrementRowid();
            ibuilder.addPotentialBoundary(indexPosition);
            dbuilder.addPotentialBoundary(dataPosition);
        }
        indexSummary.complete();
    } finally {
        FileUtils.closeQuietly(input);
    }
    // record the covered key range, stripped down to a minimal in-memory representation
    this.first = getMinimalKey(left);
    this.last = getMinimalKey(right);
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** Sanity-checks that the sstable's key range is well-formed (first <= last). */
private void validate() {
    if (first.compareTo(last) <= 0)
        return;
    throw new IllegalStateException(String.format("SSTable first key %s > last key %s", first, last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Tries the on-disk SUMMARY component first; the primary index is only scanned when
 * the summary is missing/unreadable or the bloom filter must be rebuilt.
 *
 * @param recreatebloom rebuild the bloom filter from index keys
 */
private void load(boolean recreatebloom) throws IOException {
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    // try to load summaries from the disk and check if we need
    // to read primary index because we should re-create a BloomFilter or pre-load KeyCache
    final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
    final boolean readIndex = recreatebloom || !summaryLoaded;
    try {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        // prefer the recorded row-size histogram; fall back to estimating from the index file
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
        estimateRowsFromIndex(primaryIndex);
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        if (!summaryLoaded)
            indexSummary = new IndexSummary(estimatedKeys);
        long indexPosition;
        // single pass over the primary index; skipped entirely when nothing needs it
        while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize) {
            ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
            RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
            DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
            // entries are in sorted order, so first = first seen, last = last seen
            if (first == null)
                first = decoratedKey;
            last = decoratedKey;
            if (recreatebloom)
                bf.add(decoratedKey.key);
            // if summary was already read from disk we don't want to re-populate it using primary index
            if (!summaryLoaded) {
                indexSummary.maybeAddEntry(decoratedKey, indexPosition);
                ibuilder.addPotentialBoundary(indexPosition);
                dbuilder.addPotentialBoundary(indexEntry.position);
            }
        }
    } finally {
        FileUtils.closeQuietly(primaryIndex);
    }
    first = getMinimalKey(first);
    last = getMinimalKey(last);
    // finalize the load.
    indexSummary.complete();
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
    // save summary information to disk (we only have fresh summary state if we read the index)
    if (readIndex)
        saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to restore the reader's index summary, key range and segment boundaries
 * from the on-disk SUMMARY component.
 *
 * @return true on success; false when the component is absent or unreadable
 *         (an unreadable summary file is deleted so it can be regenerated)
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream iStream = null;
    try {
        // buffer the stream: the summary is deserialized with many small reads
        // (matches the buffered read in loadBloomFilter)
        iStream = new DataInputStream(new BufferedInputStream(new FileInputStream(summariesFile)));
        reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
        reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    } catch (IOException e) {
        logger.debug("Cannot deserialize SSTable Summary: ", e);
        // corrupt summary: delete it so it will be rebuilt from the primary index
        if (summariesFile.exists())
            summariesFile.delete();
        return false;
    } finally {
        FileUtils.closeQuietly(iStream);
    }
    return true;
}
/**
 * Persists the reader's index summary, key range and segment boundaries to the
 * SUMMARY component. Failures are logged (and any partial file deleted), not thrown:
 * the summary is a cache that can always be rebuilt from the primary index.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (summariesFile.exists())
        summariesFile.delete();
    DataOutputStream oStream = null;
    try {
        // buffer the stream: serialization performs many small writes
        oStream = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(summariesFile)));
        IndexSummary.serializer.serialize(reader.indexSummary, oStream);
        ByteBufferUtil.writeWithLength(reader.first.key, oStream);
        ByteBufferUtil.writeWithLength(reader.last.key, oStream);
        ibuilder.serializeBounds(oStream);
        dbuilder.serializeBounds(oStream);
    } catch (IOException e) {
        logger.debug("Cannot save SSTable Summary: ", e);
        // don't leave a truncated summary behind; loadSummary would reject it anyway
        if (summariesFile.exists())
            summariesFile.delete();
    } finally {
        FileUtils.closeQuietly(oStream);
    }
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key) {
    List<DecoratedKey> sampledKeys = indexSummary.getKeys();
    assert sampledKeys != null && sampledKeys.size() > 0;
    int index = Collections.binarySearch(sampledKeys, key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // not found: binarySearch encodes the insertion point as -(point) - 1;
    // scan from the sampled key immediately before that point
    int insertionPoint = -(index + 1);
    // insertion point 0 means every sampled key sorts after the target
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata() {
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only: swaps in a filter that reports every key as present,
 * forcing bloom-filter false positives.
 */
public void forceFilterFailures() {
    bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}

/** @return the bloom filter currently in use for this sstable. */
public Filter getBloomFilter() {
    return bf;
}

/** @return the serialized size in bytes of the current bloom filter. */
public long getBloomFilterSerializedSize() {
    return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable, derived from the
 *         number of sampled index entries times the sampling interval.
 */
public long estimatedKeys() {
    // widen before multiplying: size() and getIndexInterval() are both ints,
    // so the product could overflow int before being assigned to long
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges) {
    long sampledKeys = 0;
    // each inclusive [left, right] pair counts the sampled keys inside one range
    for (Pair<Integer, Integer> indexRange : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        sampledKeys += indexRange.right - indexRange.left + 1;
    // each sampled key stands in for up to indexInterval real keys
    return Math.max(1, sampledKeys * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable
 *         (the in-memory index summary sample).
 */
public Collection<DecoratedKey> getKeySamples() {
    return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [left, right] span of
 * positions in the sorted key-sample list that may fall inside that range.
 * Ranges that intersect no samples produce no pair.
 *
 * @param samples sorted sampled keys from the index summary
 * @param ranges  token ranges to cover
 * @return one inclusive index pair per intersecting range
 */
private static List<Pair<Integer, Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Integer, Integer>> positions = new ArrayList<Pair<Integer, Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges)) {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // ranges are start-exclusive, so an exact match starts at the next sample
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        // a wrapping range covers everything from left to the final sample
        int right = Range.isWrapAround(range.left, range.right) ? samples.size() - 1 : Collections.binarySearch(samples, rightPosition);
        if (right < 0) {
            // ranges are end-inclusive so we use the previous index from what binarySearch gives us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized Pair (the original raw `new Pair(...)` produced an unchecked warning);
        // autoboxing replaces the explicit Integer.valueOf calls
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Returns the sampled keys that fall inside the given token range, as a lazy
 * Iterable over the index-summary sample. Each iteration walks the inclusive
 * [left, right] index pairs produced by getSampleIndexesForRanges.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range) {
    final List<DecoratedKey> samples = indexSummary.getKeys();
    final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
    if (indexRanges.isEmpty())
        return Collections.emptyList();
    return new Iterable<DecoratedKey>() {
        public Iterator<DecoratedKey> iterator() {
            return new Iterator<DecoratedKey>() {
                private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
                // current inclusive index span, and the next sample index to return within it
                private Pair<Integer, Integer> current;
                private int idx;
                public boolean hasNext() {
                    // exhausted the current span (or not started): advance to the next one
                    if (current == null || idx > current.right) {
                        if (rangeIter.hasNext()) {
                            current = rangeIter.next();
                            idx = current.left;
                            return true;
                        }
                        return false;
                    }
                    return true;
                }
                public DecoratedKey next() {
                    RowPosition k = samples.get(idx++);
                    // the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
                    assert k instanceof DecoratedKey;
                    return (DecoratedKey) k;
                }
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset, end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long, Long>> getPositionsForRanges(Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Long, Long>> positions = new ArrayList<Pair<Long, Long>>();
    for (Range<Token> range : Range.normalize(ranges)) {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair (the original raw `new Pair(...)` produced an unchecked warning);
        // autoboxing replaces the explicit Long.valueOf calls
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Stores a key -> index-entry mapping in the shared key cache, unless key caching
 * is disabled for this column family or the cache is absent/zero-sized.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info) {
    CFMetaData.Caching caching = metadata.getCaching();
    boolean keyCachingOff = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
    if (keyCache == null || keyCachingOff || keyCache.getCapacity() == 0)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
/** Looks up a row key in the key cache; see the KeyCacheKey overload for semantics. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats) {
    return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}

/**
 * @param unifiedKey  cache key for this sstable + row key
 * @param updateStats when false, peek without touching hit/miss statistics
 * @return the cached index entry, or null when caching is disabled or the key is absent
 */
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats) {
    if (keyCache == null || keyCache.getCapacity() <= 0)
        return null;
    return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op) {
    // delegate with cache/stats updates enabled
    return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats) {
    // first, check bloom filter
    if (op == Operator.EQ) {
        // EQ only make sense if the key is a valid row key
        assert key instanceof DecoratedKey;
        if (!bf.isPresent(((DecoratedKey) key).key))
            return null;
    }
    // next, the key cache (only make sense for valid row key)
    if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey)) {
        DecoratedKey decoratedKey = (DecoratedKey) key;
        RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
        if (cachedPosition != null)
            return cachedPosition;
    }
    // next, see if the sampled index says it's impossible for the key to be present
    long sampledPosition = getIndexScanPosition(key);
    if (sampledPosition == -1) {
        if (op == Operator.EQ && updateCacheAndStats)
            bloomFilterTracker.addFalsePositive();
        // the key sorts before the first sampled key: for GE/GT scan from the start
        // position. We however need to return the correct index entry for that first position.
        if (op.apply(1) >= 0)
            sampledPosition = 0;
        else
            return null;
    }
    // scan the on-disk index, starting at the nearest sampled position
    Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
    while (segments.hasNext()) {
        FileDataInput input = segments.next();
        try {
            while (!input.isEOF()) {
                // read key & data position from index entry
                DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
                int comparison = indexDecoratedKey.compareTo(key);
                // Operator.apply: 0 = match here, < 0 = went past (no match), > 0 = keep scanning
                int v = op.apply(comparison);
                if (v == 0) {
                    RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
                    if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats) {
                        // key can be == to the index key only if it's a true row key
                        assert key instanceof DecoratedKey;
                        DecoratedKey decoratedKey = (DecoratedKey) key;
                        // store exact match for the key
                        cacheKey(decoratedKey, indexEntry);
                    }
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addTruePositive();
                    return indexEntry;
                }
                if (v < 0) {
                    // scanned past where the key would be: it is not in this sstable
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addFalsePositive();
                    return null;
                }
                // not there yet: skip this entry's position data and keep scanning
                RowIndexEntry.serializer.skip(input, descriptor.version);
            }
        } catch (IOException e) {
            markSuspect();
            throw new IOError(e);
        } finally {
            FileUtils.closeQuietly(input);
        }
    }
    // ran off the end of the index without a match
    if (op == Operator.EQ && updateCacheAndStats)
        bloomFilterTracker.addFalsePositive();
    return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength() {
    return dfile.length;
}

/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength() {
    return dfile.onDiskLength;
}
/**
 * Atomically takes a reference on this sstable via a CAS loop.
 *
 * @return false when the count has already reached zero — the sstable may be
 *         mid-cleanup and must never be revived
 */
public boolean acquireReference() {
    for (;;) {
        int refs = references.get();
        if (refs <= 0)
            return false;
        if (references.compareAndSet(refs, refs + 1))
            return true;
    }
}
/**
 * Drops one reference. When the last reference on an already-compacted sstable is
 * released, its segment files are cleaned up and on-disk deletion is scheduled.
 */
public void releaseReference() {
    if (references.decrementAndGet() == 0 && isCompacted.get()) {
        // Force finalizing mmapping if necessary
        ifile.cleanup();
        dfile.cleanup();
        deletingTask.schedule();
    }
    // a negative count means a release without a matching acquire
    assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted() {
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " compacted");
    // getAndSet makes the "first caller wins" decision atomic
    if (isCompacted.getAndSet(true))
        return false;
    try {
        // the marker component records on disk that this sstable's data is superseded
        if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
            throw new IOException("Compaction marker already exists");
    } catch (IOException e) {
        throw new IOError(e);
    }
    return true;
}
/** Flags this sstable as a suspect (e.g. after an IO error) so it can be blacklisted. */
public void markSuspect() {
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    isSuspect.getAndSet(true);
}

/** @return whether markSuspect() has been called on this sstable. */
public boolean isMarkedSuspect() {
    return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter) {
    return new SSTableScanner(this, filter);
}

/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner() {
    // 'true' requests the direct-I/O path
    return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means scan everything
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range) {
    return range == null ? getDirectScanner() : new SSTableBoundedScanner(this, true, range);
}
/** @return a data-file input positioned at the given byte offset. */
public FileDataInput getFileDataInput(long position) {
    return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the creation time of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoch on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age) {
    return maxDataAge > age;
}
/**
 * Reads a row-size field, which older formats stored as an int and newer ones as a long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException {
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory,
 * so the snapshot shares the data without copying it.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException {
    for (Component component : components) {
        File source = new File(descriptor.filenameFor(component));
        CLibrary.createHardLink(source, new File(snapshotDirectoryPath, source.getName()));
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method,
 * which is required for sstable versions that stored encoded keys.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes) {
    return d.version.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}

/** Instance convenience wrapper around the static decodeKey. */
public DecoratedKey decodeKey(ByteBuffer bytes) {
    return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 */
public abstract static class Operator {
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    /** Matches only an exact comparison; flips the sign so "went past" reads as negative. */
    static final class Equals extends Operator {
        public int apply(int comparison) {
            return -comparison;
        }
    }

    /** Matches the first candidate >= the target. */
    static final class GreaterThanOrEqualTo extends Operator {
        public int apply(int comparison) {
            if (comparison >= 0)
                return 0;
            return -comparison;
        }
    }

    /** Matches the first candidate strictly greater; otherwise always "keep scanning". */
    static final class GreaterThan extends Operator {
        public int apply(int comparison) {
            if (comparison > 0)
                return 0;
            return 1;
        }
    }
}
/** @return lifetime count of bloom filter false positives for this sstable. */
public long getBloomFilterFalsePositiveCount() {
    return bloomFilterTracker.getFalsePositiveCount();
}

/** @return false positives since the last "recent" counter reset. */
public long getRecentBloomFilterFalsePositiveCount() {
    return bloomFilterTracker.getRecentFalsePositiveCount();
}

/** @return lifetime count of bloom filter true positives for this sstable. */
public long getBloomFilterTruePositiveCount() {
    return bloomFilterTracker.getTruePositiveCount();
}

/** @return true positives since the last "recent" counter reset. */
public long getRecentBloomFilterTruePositiveCount() {
    return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the shared key cache, or null if this reader is not tracked. */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache() {
    return keyCache;
}

/** @return the per-sstable histogram of row sizes recorded at write time. */
public EstimatedHistogram getEstimatedRowSize() {
    return sstableMetadata.estimatedRowSize;
}

/** @return the per-sstable histogram of column counts recorded at write time. */
public EstimatedHistogram getEstimatedColumnCount() {
    return sstableMetadata.estimatedColumnCount;
}

/** @return estimated fraction of tombstones droppable before the given grace time. */
public double getEstimatedDroppableTombstoneRatio(int gcBefore) {
    return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}

/** @return compressed/uncompressed ratio recorded in the sstable metadata. */
public double getCompressionRatio() {
    return sstableMetadata.compressionRatio;
}

/** @return the commit log replay position recorded when this sstable was written. */
public ReplayPosition getReplayPosition() {
    return sstableMetadata.replayPosition;
}

/** @return the maximum cell timestamp recorded in this sstable's metadata. */
public long getMaxTimestamp() {
    return sstableMetadata.maxTimestamp;
}
/** Opens a reader over the data file, decompressing transparently when needed. */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException {
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}

/** Opens a reader over the primary index file. */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException {
    return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables readers to reference
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables) {
    SSTableReader firstFailure = null;
    for (SSTableReader sstable : sstables) {
        if (!sstable.acquireReference()) {
            firstFailure = sstable;
            break;
        }
    }
    if (firstFailure == null)
        return true;
    // roll back every reference acquired before the failure
    for (SSTableReader sstable : sstables) {
        if (sstable == firstFailure)
            break;
        sstable.releaseReference();
    }
    return false;
}
/** Releases one reference on each sstable; failures are logged, never propagated. */
public static void releaseReferences(Iterable<SSTableReader> sstables) {
    for (SSTableReader sstable : sstables) {
        try {
            sstable.releaseReference();
        } catch (Exception e) {
            logger.error("Failed releasing reference on " + sstable, e);
        }
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMillis) which represents an upper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoch and is local to this host.
 */
// upper bound (local machine time, ms) on the newest data in this sstable; see class comment
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// in-memory sample of the primary index (roughly one entry per indexInterval keys)
private IndexSummary indexSummary;
// bloom filter over row keys; loaded from the FILTER component or rebuilt (see load/loadBloomFilter)
private Filter bf;
// shared key cache, assigned by setTrackedBy; null until then (e.g. for untracked/offline readers)
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// starts at 1: the creating/owning context holds the initial reference
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Approximates the total key count across the given sstables by scaling each
 * reader's index-summary sample size by the sampling interval.
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables) {
    long count = 0;
    for (SSTableReader sstable : sstables) {
        int indexKeyCount = sstable.getKeySamples().size();
        // use a long literal so the int*int product cannot overflow before widening
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable by descriptor alone, resolving its schema from the live Schema.
 * Secondary-index sstables rebuild their metadata from the parent column family.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException {
    CFMetaData metadata;
    if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR)) {
        // "<parent>SEPARATOR<index>": reconstruct the index cf metadata from its parent
        int separator = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
        String parentName = descriptor.cfname.substring(0, separator);
        String indexName = descriptor.cfname.substring(separator + 1);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(indexName);
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    } else {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with explicit metadata, choosing the partitioner:
 * secondary-index sstables are partitioned locally by the indexed value's
 * validator, everything else uses the cluster-wide partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException {
    IPartitioner partitioner;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        partitioner = new LocalPartitioner(metadata.getKeyValidator());
    else
        partitioner = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, partitioner);
}
/**
 * Opens an sstable without first/last key validation — used by offline tools
 * (e.g. scrub) that must be able to open damaged files. No tracker, so no
 * key-cache preloading occurs.
 */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException {
    Set<DecoratedKey> noCachedKeys = Collections.emptySet();
    return open(descriptor, components, noCachedKeys, null, metadata, StorageService.getPartitioner(), false);
}
// Opens and validates an sstable with no DataTracker (so no key-cache wiring or deletion tracking).
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException {
return open(descriptor, components, null, metadata, partitioner);
}
/**
 * Opens and validates an sstable, registering it with the given tracker
 * (enables key cache and deletion tracking).
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
    // Pass an empty key set rather than null: the load path checks
    // keysToLoadInCache.isEmpty() whenever a key cache is present, and with a
    // non-null tracker the key cache IS present — null would NPE there. (The
    // previous code referenced an undefined 'savedKeys' symbol here.)
    return open(descriptor, components, Collections.<DecoratedKey>emptySet(), tracker, metadata, partitioner, true);
}
/**
 * Core open path: deserializes sstable metadata, verifies the partitioner,
 * constructs the reader, loads index/summary/bloom filter, and optionally
 * validates key ordering.
 *
 * @param savedKeys keys to pre-load into the key cache while scanning the index
 * @param tracker may be null; when present, enables the key cache and deletion tracking
 * @param validate when true, checks that first <= last after loading
 */
private static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner, boolean validate) throws IOException {
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS component is optional; fall back to defaults when absent.
SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
// setTrackedBy must precede load() so key-cache preloading sees the cache.
SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.hasStringsInBloomFilter) {
// old format: recreate the filter from the index rather than loading it
sstable.load(true, savedKeys);
} else {
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs an sstable-open failure. A missing component usually means a partially
 * written or partially deleted sstable; anything else is treated as corruption.
 */
public static void logOpenException(Descriptor descriptor, IOException e) {
    if (!(e instanceof FileNotFoundException)) {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Opens many sstables in parallel (one task per sstable on a fixed-size pool).
 * Unopenable sstables are logged and skipped rather than failing the batch.
 *
 * @return the successfully opened readers; a thread-safe queue since worker
 *         threads add concurrently
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries, final DataTracker tracker, final CFMetaData metadata, final IPartitioner partitioner) {
final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
for (final Map.Entry<Descriptor, Set<Component>> entry : entries) {
Runnable runnable = new Runnable() {
public void run() {
SSTableReader sstable;
try {
sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
} catch (IOException ex) {
// best-effort: skip the bad sstable and keep opening the rest
logger.error("Corrupt sstable " + entry + "; skipped", ex);
return;
}
sstables.add(sstable);
}
};
executor.submit(runnable);
}
executor.shutdown();
try {
// effectively "wait forever"; opening must finish before startup proceeds
executor.awaitTermination(7, TimeUnit.DAYS);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
return sstables;
}
/**
 * Opens a reader whose state (segments, summary, filter) was already built by
 * SSTableWriter — no loading from disk happens here.
 */
static SSTableReader internalOpen(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary isummary, Filter bf, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
    // Everything must be pre-built; individual asserts pinpoint what is missing.
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;
    return new SSTableReader(desc, components, metadata, partitioner, ifile, dfile, isummary, bf, maxDataAge, sstableMetadata);
}
// Private: instances are created only via the open()/internalOpen() factories.
// Segment files, summary, and filter may be null here and filled in by load().
private SSTableReader(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary indexSummary, Filter bloomFilter, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Wires this reader to a DataTracker: enables the shared key cache and lets the
 * deletion task notify the tracker. A null tracker (offline tools) is a no-op.
 */
public void setTrackedBy(DataTracker tracker) {
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Loads the serialized bloom filter from the FILTER component; when the
 * component is absent, installs a pass-through filter instead.
 */
void loadBloomFilter() throws IOException {
    if (!components.contains(Component.FILTER)) {
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try {
        FileInputStream raw = new FileInputStream(descriptor.filenameFor(Component.FILTER));
        in = new DataInputStream(new BufferedInputStream(raw));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    } finally {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Scans the whole primary index once, building the summary, collecting mmap
 * segment boundaries, and (optionally) repopulating the bloom filter and
 * pre-loading requested keys into the key cache.
 *
 * @param recreatebloom rebuild the bloom filter from index keys instead of loading it
 * @param keysToLoadInCache keys whose positions should be pre-cached
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException {
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// left/right track the first and last keys seen, to set this.first/this.last
DecoratedKey left = null, right = null;
try {
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
SSTable.estimateRowsFromIndex(input);
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true) {
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// lastKey: this entry (short length + key bytes + long position) ends exactly at EOF
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// only materialize the key when something actually needs it; otherwise skip the bytes
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey) {
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
} else {
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null) {
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
} finally {
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** Sanity check after load(): the first key must not sort after the last key. */
private void validate() {
    if (first.compareTo(last) <= 0)
        return;
    throw new IllegalStateException(String.format("SSTable first key %s > last key %s", first, last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Tries the on-disk SUMMARY component first; the primary index is only scanned
 * when the summary is missing/unreadable or the filter must be rebuilt.
 *
 * @param recreatebloom rebuild the bloom filter from index keys instead of loading it
 */
private void load(boolean recreatebloom) throws IOException {
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try {
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
estimateRowsFromIndex(primaryIndex);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
// full index scan; loop body is skipped entirely when readIndex is false
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize) {
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
// index is key-ordered, so the first key seen is the minimum and the last is the maximum
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded) {
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
} finally {
FileUtils.closeQuietly(primaryIndex);
}
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
// save summary information to disk
if (readIndex)
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to restore the reader's index summary, first/last keys, and segment
 * bounds from the on-disk SUMMARY component.
 *
 * @return true on success; false when the component is absent or unreadable
 *         (an unreadable summary file is deleted so it gets regenerated)
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream in = null;
    try {
        in = new DataInputStream(new FileInputStream(summariesFile));
        reader.indexSummary = IndexSummary.serializer.deserialize(in, reader.partitioner);
        reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(in));
        reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(in));
        ibuilder.deserializeBounds(in);
        dbuilder.deserializeBounds(in);
        return true;
    } catch (IOException e) {
        logger.debug("Cannot deserialize SSTable Summary: ", e);
        // corrupt or stale summary: drop it so the next load rebuilds it
        if (summariesFile.exists())
            summariesFile.delete();
        return false;
    } finally {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Persists the reader's index summary, first/last keys, and segment bounds to
 * the SUMMARY component. Best-effort: on failure the half-written file is
 * removed so a later load cannot pick it up.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    // replace any stale summary outright
    if (summariesFile.exists())
        summariesFile.delete();
    DataOutputStream out = null;
    try {
        out = new DataOutputStream(new FileOutputStream(summariesFile));
        IndexSummary.serializer.serialize(reader.indexSummary, out);
        ByteBufferUtil.writeWithLength(reader.first.key, out);
        ByteBufferUtil.writeWithLength(reader.last.key, out);
        ibuilder.serializeBounds(out);
        dbuilder.serializeBounds(out);
    } catch (IOException e) {
        logger.debug("Cannot save SSTable Summary: ", e);
        if (summariesFile.exists())
            summariesFile.delete();
    } finally {
        FileUtils.closeQuietly(out);
    }
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key) {
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index); // exact sample hit
    // Not an exact sample: binarySearch returned (-(insertion point) - 1).
    // Step back to the last sample <= key; if the key sorts before every
    // sample, there is nowhere to start scanning from.
    int insertionPoint = -(index + 1);
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata() {
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only.
 * Replaces the bloom filter with one that matches everything, so every read
 * falls through to the index (exercises the false-positive path).
 */
public void forceFilterFailures() {
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// Exposes the row-key bloom filter (never null after load()).
public Filter getBloomFilter() {
return bf;
}
// Serialized size of the current filter in bytes, per this descriptor's filter format.
public long getBloomFilterSerializedSize() {
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable, derived from the
 *         number of index samples (each sample stands for ~indexInterval keys).
 */
public long estimatedKeys() {
    // Widen to long before multiplying: size() * interval is otherwise computed
    // in int arithmetic and can overflow for very large sstables.
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys for given ranges in this SSTable,
 *         scaled up from the index samples falling inside the ranges; never
 *         less than 1.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges) {
    long sampleKeyCount = 0;
    for (Pair<Integer, Integer> sampleIndexRange : getSampleIndexesForRanges(indexSummary.getKeys(), ranges)) {
        // inclusive [left, right] index span
        sampleKeyCount += sampleIndexRange.right - sampleIndexRange.left + 1;
    }
    return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 * Backed directly by the index summary; callers should treat it as read-only.
 */
public Collection<DecoratedKey> getKeySamples() {
return indexSummary.getKeys();
}
/**
 * Maps each normalized token range onto the inclusive span [left, right] of
 * index-sample positions it covers.
 *
 * @param samples sorted index samples
 * @param ranges token ranges (start-exclusive, end-inclusive)
 * @return one (left, right) index pair per range that covers at least one sample
 */
private static List<Pair<Integer, Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Integer, Integer>> positions = new ArrayList<Pair<Integer, Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges)) {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        // a wrapping range covers everything up to the last sample
        int right = Range.isWrapAround(range.left, range.right) ? samples.size() - 1 : Collections.binarySearch(samples, rightPosition);
        if (right < 0) {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized Pair (was a raw 'new Pair(...)'): autoboxing handles int -> Integer
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the index samples that fall inside the given token range.
 * The sample-index spans are computed once up front; the returned Iterable
 * walks them without copying the keys.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range) {
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>() {
public Iterator<DecoratedKey> iterator() {
return new Iterator<DecoratedKey>() {
// walks the [left, right] spans produced above, advancing idx within each
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext() {
// move to the next span once idx runs past the current one
if (current == null || idx > current.right) {
if (rangeIter.hasNext()) {
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next() {
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey) k;
}
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long, Long>> getPositionsForRanges(Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Long, Long>> positions = new ArrayList<Pair<Long, Long>>();
    for (Range<Token> range : Range.normalize(ranges)) {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair (was a raw 'new Pair(...)'): autoboxing handles long -> Long
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Inserts a key -> index-entry mapping into the shared key cache, unless key
 * caching is disabled for this column family or the cache has no capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info) {
    CFMetaData.Caching caching = metadata.getCaching();
    boolean keyCachingOff = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
    if (keyCache == null || keyCachingOff || keyCache.getCapacity() == 0)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// Looks up the cached index entry for a row key; updateStats controls whether
// the lookup counts toward cache hit/miss statistics.
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats) {
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
// Returns null when there is no cache, no capacity, or no entry.
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats) {
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op) {
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats) {
// first, check bloom filter
if (op == Operator.EQ) {
// EQ only make sense if the key is a valid row key
assert key instanceof DecoratedKey;
if (!bf.isPresent(((DecoratedKey) key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey)) {
DecoratedKey decoratedKey = (DecoratedKey) key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1) {
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// the key sorts before the first sampled key; for GE/GT the match could be the very first
// position. We however need to return the correct index entry for that first position.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext()) {
FileDataInput input = segments.next();
try {
while (!input.isEOF()) {
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0) {
// operator matched: deserialize the entry and return it
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats) {
// key can be == to the index key only if it's a true row key
assert key instanceof DecoratedKey;
DecoratedKey decoratedKey = (DecoratedKey) key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0) {
// scanned past any possible match: the key is not in this sstable
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// not there yet: skip this entry's position data and keep scanning
RowIndexEntry.serializer.skip(input, descriptor.version);
}
} catch (IOException e) {
markSuspect();
throw new IOError(e);
} finally {
FileUtils.closeQuietly(input);
}
}
// exhausted the index without a match
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength() {
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength() {
return dfile.onDiskLength;
}
/**
 * Attempts to take a reference on this sstable via a CAS loop.
 *
 * @return false when the count has already dropped to zero (the sstable is
 *         being, or has been, cleaned up) — the caller must not use it.
 */
public boolean acquireReference() {
    for (;;) {
        int current = references.get();
        if (current <= 0)
            return false;
        if (references.compareAndSet(current, current + 1))
            return true;
        // lost a race with another thread; re-read and retry
    }
}
// Drops one reference; when the last reference is released on an
// already-compacted sstable, unmaps its files and schedules deletion.
public void releaseReference() {
if (references.decrementAndGet() == 0 && isCompacted.get()) {
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
// actual file deletion happens asynchronously
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted() {
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes the "first caller wins" decision atomic
if (isCompacted.getAndSet(true))
return false;
try {
// persist the compacted state so a restart won't resurrect this sstable
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
} catch (IOException e) {
throw new IOError(e);
}
return true;
}
// Flags this sstable after an I/O error so reads can route around it.
public void markSuspect() {
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
isSuspect.getAndSet(true);
}
// True once any read against this sstable has hit an I/O error.
public boolean isMarkedSuspect() {
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter) {
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner() {
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range) {
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
// Returns a data-file segment positioned at the given offset.
public FileDataInput getFileDataInput(long position) {
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age) {
return maxDataAge > age;
}
/**
 * Reads a row's serialized size from the data stream; older sstable formats
 * store it as a 32-bit int, newer formats as a long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException {
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot
 * directory (cheap, content-sharing copies).
 */
public void createLinks(String snapshotDirectoryPath) throws IOException {
    for (Component component : components) {
        File source = new File(descriptor.filenameFor(component));
        File link = new File(snapshotDirectoryPath, source.getName());
        CLibrary.createHardLink(source, link);
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 * Old formats stored keys in the partitioner's legacy on-disk encoding; newer
 * ones store the raw key bytes directly.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes) {
if (d.version.hasEncodedKeys)
return p.convertFromDiskFormat(bytes);
return p.decorateKey(bytes);
}
// Instance convenience: decode with this sstable's own partitioner and descriptor.
public DecoratedKey decodeKey(ByteBuffer bytes) {
return decodeKey(partitioner, descriptor, bytes);
}
/**
 * Comparison operator applied while scanning the index for a key.
 * TODO: Move someplace reusable
 */
public abstract static class Operator {
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    static final class Equals extends Operator {
        public int apply(int comparison) {
            // 0 stays a match; past-the-key (positive) flips negative = "cannot match forward"
            return 0 - comparison;
        }
    }

    static final class GreaterThanOrEqualTo extends Operator {
        public int apply(int comparison) {
            if (comparison >= 0)
                return 0;
            return -comparison;
        }
    }

    static final class GreaterThan extends Operator {
        public int apply(int comparison) {
            if (comparison > 0)
                return 0;
            return 1;
        }
    }
}
// --- bloom filter hit/miss statistics, delegated to the tracker ---
public long getBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getFalsePositiveCount();
}
public long getRecentBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getRecentFalsePositiveCount();
}
public long getBloomFilterTruePositiveCount() {
return bloomFilterTracker.getTruePositiveCount();
}
public long getRecentBloomFilterTruePositiveCount() {
return bloomFilterTracker.getRecentTruePositiveCount();
}
// Shared key cache; null when this reader was opened without a tracker.
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache() {
return keyCache;
}
// --- statistics deserialized from the STATS component ---
public EstimatedHistogram getEstimatedRowSize() {
return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount() {
return sstableMetadata.estimatedColumnCount;
}
public double getEstimatedDroppableTombstoneRatio(int gcBefore) {
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
public double getCompressionRatio() {
return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition() {
return sstableMetadata.replayPosition;
}
public long getMaxTimestamp() {
return sstableMetadata.maxTimestamp;
}
// Opens a raw reader over the data file, transparently decompressing when needed.
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException {
return compression ? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache) : RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
// Opens a raw reader over the primary index file.
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException {
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables sstables to reference as a group
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables) {
    // First pass: acquire until one refuses.
    SSTableReader firstFailure = null;
    for (SSTableReader candidate : sstables) {
        if (!candidate.acquireReference()) {
            firstFailure = candidate;
            break;
        }
    }
    if (firstFailure == null)
        return true;
    // Second pass: roll back everything acquired before the failure.
    for (SSTableReader acquired : sstables) {
        if (acquired == firstFailure)
            break;
        acquired.releaseReference();
    }
    return false;
}
/**
 * Releases one reference on each of the given sstables; a failure on one
 * sstable is logged and does not stop the rest from being released.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables) {
    for (SSTableReader sstable : sstables) {
        try {
            sstable.releaseReference();
        } catch (Exception e) {
            logger.error("Failed releasing reference on " + sstable, e);
        }
    }
}
}
KeepBothMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMillis) which represents an upper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoch and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of the primary index, used to bound on-disk index scans
private IndexSummary indexSummary;
// bloom filter over row keys; may be rebuilt from the index or read from the FILTER component
private Filter bf;
// global key cache shared across sstables; remains null until setTrackedBy() is called
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
// counts bloom filter true/false positives for stats reporting
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// reference count; starts at 1 for the owning reference, see acquireReference()/releaseReference()
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
// set when an I/O error was observed reading this sstable, see markSuspect()
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
// deletes the on-disk files once the sstable is both compacted and fully unreferenced
private final SSTableDeletingTask deletingTask;
// statistics (row size / column count histograms, replay position, ...) from the STATS component
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables by scaling each
 * sstable's index-summary sample count by the index interval.
 *
 * @param sstables sstables to estimate over
 * @return approximate total key count
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen to long before multiplying: the original pure-int product
        // (indexKeyCount + 1) * interval could overflow before being added to the long accumulator
        count = count + (indexKeyCount + 1) * (long) DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable given only its descriptor, resolving the CFMetaData from the schema.
 * A secondary-index sstable (cfname contains the index separator) gets synthetic index
 * metadata rebuilt from its parent column family's definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    int separator = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
    CFMetaData metadata;
    if (separator < 0)
    {
        // plain column family: metadata comes straight from the schema
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    else
    {
        // secondary index: derive index CF metadata from the parent definition
        String parentName = descriptor.cfname.substring(0, separator);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(separator + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    return open(descriptor, metadata);
}
/**
 * Opens with explicit metadata. Secondary-index sstables are keyed by the indexed values
 * rather than tokens, so they use a LocalPartitioner; everything else uses the global one.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/**
 * Opens without running validate() (so out-of-order first/last keys do not fail),
 * with no DataTracker attached and no keys pre-loaded into the key cache.
 * NOTE(review): presumably intended for offline/repair tooling — confirm at callers.
 */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException {
    return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
// Convenience overload: delegates with a null DataTracker (no key-cache attachment,
// no deletion tracking).
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException {
    return open(descriptor, components, null, metadata, partitioner);
}
/**
 * Opens with validation enabled, pre-loading any of the given saved keys into the key cache.
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
    return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
/**
 * Does the real work of opening an sstable: asserts the minimum required components,
 * verifies the partitioner recorded in the stats matches the configured one, loads the
 * index summary / segmented files / bloom filter, and optionally validates key ordering.
 *
 * @param savedKeys keys to pre-load into the key cache
 * @param validate  when true, fail if the sstable's first key sorts after its last key
 * @throws RuntimeException if the sstable was written with a different partitioner
 */
private static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner, boolean validate) throws IOException {
    assert partitioner != null;
    // Minimum components without which we can't do anything
    assert components.contains(Component.DATA);
    assert components.contains(Component.PRIMARY_INDEX);
    long start = System.currentTimeMillis();
    logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
    // STATS component is optional; fall back to defaults when missing
    SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
    // Check if sstable is created using same partitioner.
    // Partitioner can be null, which indicates older version of sstable or no stats available.
    // In that case, we skip the check.
    String partitionerName = partitioner.getClass().getCanonicalName();
    if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
        throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
    SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
    sstable.setTrackedBy(tracker);
    // versions before 'c' encoded keys as utf-16 before hashing to the filter,
    // so the on-disk filter is unusable and must be rebuilt from the index.
    // NOTE(review): the other open() variant reads this flag via descriptor.version —
    // confirm which field this revision's Descriptor actually declares.
    if (descriptor.hasStringsInBloomFilter) {
        sstable.load(true, savedKeys);
    } else {
        sstable.load(false, savedKeys);
        sstable.loadBloomFilter();
    }
    if (validate)
        sstable.validate();
    if (logger.isDebugEnabled())
        logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
    if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
        logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
    return sstable;
}
/**
 * Opens an sstable and attaches it to the given tracker. Unlike the savedKeys variant
 * above, this overload neither pre-loads the key cache nor calls validate().
 *
 * @throws RuntimeException if the sstable was written with a different partitioner
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
    assert partitioner != null;
    // Minimum components without which we can't do anything
    assert components.contains(Component.DATA);
    assert components.contains(Component.PRIMARY_INDEX);
    long start = System.currentTimeMillis();
    logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
    // STATS component is optional; fall back to defaults when missing
    SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
    // Check if sstable is created using same partitioner.
    // Partitioner can be null, which indicates older version of sstable or no stats available.
    // In that case, we skip the check.
    String partitionerName = partitioner.getClass().getCanonicalName();
    if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
        throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
    SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
    sstable.setTrackedBy(tracker);
    // versions before 'c' encoded keys as utf-16 before hashing to the filter,
    // so the on-disk filter is unusable and must be rebuilt from the index
    if (descriptor.version.hasStringsInBloomFilter) {
        sstable.load(true);
    } else {
        sstable.load(false);
        sstable.loadBloomFilter();
    }
    if (logger.isDebugEnabled())
        logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
    if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
        logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
    return sstable;
}
/**
 * Logs a failure to open an sstable. A missing component (FileNotFoundException) is
 * reported tersely with just the message; anything else is treated as corruption and
 * logged with the full stack trace.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Opens many sstables in parallel on a temporary fixed-size pool and returns those
 * that opened successfully; unreadable sstables are logged and skipped.
 *
 * @return the successfully opened readers (order unspecified)
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries, final DataTracker tracker, final CFMetaData metadata, final IPartitioner partitioner)
{
    // LinkedBlockingQueue because results are added concurrently from pool threads
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    // use the shared helper so missing-component and corruption failures
                    // are reported consistently with single-sstable opens
                    logOpenException(entry.getKey(), ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        // restore the interrupt flag before converting to an error
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    return sstables;
}
/**
* Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
*/
static SSTableReader internalOpen(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary isummary, Filter bf, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc, components, metadata, partitioner, ifile, dfile, isummary, bf, maxDataAge, sstableMetadata);
}
// Sole constructor. ifile/dfile/indexSummary/bloomFilter may be null here; in that case
// they are populated later by load()/loadBloomFilter() (see the open() factories).
private SSTableReader(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary indexSummary, Filter bloomFilter, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
    super(desc, components, metadata, partitioner);
    this.sstableMetadata = sstableMetadata;
    this.maxDataAge = maxDataAge;
    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    // deletion of the underlying files is deferred until the reader is unreferenced
    this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Attaches this reader to a DataTracker: wires up the (global) key cache and tells the
 * deletion task which tracker to notify. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    // the key cache is a process-wide resource shared by all tracked sstables
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Reads the bloom filter from the FILTER component. When the component is absent,
 * installs an empty stand-in filter instead.
 */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        // no filter on disk: use the empty placeholder
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try
    {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    }
    finally
    {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Performs one sequential pass over the primary index: samples keys into the summary,
 * optionally re-adds every key to a freshly created bloom filter, records first/last
 * keys, and pre-populates the key cache for any of keysToLoadInCache encountered.
 *
 * @param recreatebloom     rebuild the bloom filter from the index instead of reading it from disk
 * @param keysToLoadInCache keys whose data positions should be pre-loaded into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException {
    boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    // first and last keys, in index (i.e. sorted) order
    DecoratedKey left = null, right = null;
    try {
        long indexSize = input.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
        SSTable.estimateRowsFromIndex(input);
        indexSummary = new IndexSummary(estimatedKeys);
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        while (true) {
            long indexPosition = input.getFilePointer();
            if (indexPosition == indexSize)
                break;
            DecoratedKey decoratedKey = null;
            int len = ByteBufferUtil.readShortLength(input);
            boolean firstKey = left == null;
            // an entry is <short len><key bytes><long position>; the last entry ends exactly at indexSize
            boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
            boolean shouldAddEntry = indexSummary.shouldAddEntry();
            if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey) {
                // only materialize the key when something actually needs it
                decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
                if (firstKey)
                    left = decoratedKey;
                if (lastKey)
                    right = decoratedKey;
            } else {
                FileUtils.skipBytesFully(input, len);
            }
            long dataPosition = input.readLong();
            if (decoratedKey != null) {
                if (recreatebloom)
                    bf.add(decoratedKey.key);
                if (shouldAddEntry)
                    indexSummary.addEntry(decoratedKey, indexPosition);
                // if key cache could be used and we have key already pre-loaded
                // NOTE(review): cacheKey() later in this file takes a RowIndexEntry —
                // confirm a (DecoratedKey, long) overload exists in this revision.
                if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
                    cacheKey(decoratedKey, dataPosition);
            }
            indexSummary.incrementRowid();
            ibuilder.addPotentialBoundary(indexPosition);
            dbuilder.addPotentialBoundary(dataPosition);
        }
        indexSummary.complete();
    } finally {
        FileUtils.closeQuietly(input);
    }
    this.first = getMinimalKey(left);
    this.last = getMinimalKey(right);
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/**
 * Sanity check: a well-formed sstable always has its first key ordered at or before
 * its last key.
 *
 * @throws IllegalStateException when the ordering invariant is violated
 */
private void validate()
{
    if (first.compareTo(last) <= 0)
        return;
    throw new IllegalStateException(String.format("SSTable first key %s > last key %s", first, last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Tries to reuse a summary persisted in the SUMMARY component first; the primary index
 * is only scanned when the bloom filter must be rebuilt or no saved summary was readable.
 * After loading, the (possibly rebuilt) summary is saved back to disk.
 */
private void load(boolean recreatebloom) throws IOException {
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    // try to load summaries from the disk and check if we need
    // to read primary index because we should re-create a BloomFilter or pre-load KeyCache
    final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
    final boolean readIndex = recreatebloom || !summaryLoaded;
    try {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
        estimateRowsFromIndex(primaryIndex);
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        if (!summaryLoaded)
            indexSummary = new IndexSummary(estimatedKeys);
        long indexPosition;
        while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize) {
            ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
            RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
            DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
            // the index is in sorted order, so the first key seen is the minimum
            // and the most recent one is the running maximum
            if (first == null)
                first = decoratedKey;
            last = decoratedKey;
            if (recreatebloom)
                bf.add(decoratedKey.key);
            // if summary was already read from disk we don't want to re-populate it using primary index
            if (!summaryLoaded) {
                indexSummary.maybeAddEntry(decoratedKey, indexPosition);
                ibuilder.addPotentialBoundary(indexPosition);
                dbuilder.addPotentialBoundary(indexEntry.position);
            }
        }
    } finally {
        FileUtils.closeQuietly(primaryIndex);
    }
    first = getMinimalKey(first);
    last = getMinimalKey(last);
    // finalize the load.
    indexSummary.complete();
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
    // save summary information to disk
    if (readIndex)
        saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to deserialize the index summary, first/last keys and segment boundaries
 * from the SUMMARY component.
 *
 * @return true if the summary was loaded; false when the file is missing or unreadable.
 * An unreadable summary file is deleted so it will be rebuilt and re-saved by load().
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream iStream = null;
    try {
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
        reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    } catch (IOException e) {
        logger.debug("Cannot deserialize SSTable Summary: ", e);
        // corrupted or truncated: remove it and fall back to rebuilding from the index
        if (summariesFile.exists())
            summariesFile.delete();
        return false;
    } finally {
        FileUtils.closeQuietly(iStream);
    }
    return true;
}
/**
 * Persists the index summary, first/last keys and segment boundaries to the SUMMARY
 * component so subsequent opens can skip scanning the primary index. Best-effort:
 * any failure is logged at debug and the partial file removed.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    // overwrite any stale summary from a previous open
    if (summariesFile.exists())
        summariesFile.delete();
    DataOutputStream oStream = null;
    try {
        oStream = new DataOutputStream(new FileOutputStream(summariesFile));
        IndexSummary.serializer.serialize(reader.indexSummary, oStream);
        ByteBufferUtil.writeWithLength(reader.first.key, oStream);
        ByteBufferUtil.writeWithLength(reader.last.key, oStream);
        ibuilder.serializeBounds(oStream);
        dbuilder.serializeBounds(oStream);
    } catch (IOException e) {
        logger.debug("Cannot save SSTable Summary: ", e);
        // don't leave a truncated summary behind; it would fail to load anyway
        if (summariesFile.exists())
            summariesFile.delete();
    } finally {
        FileUtils.closeQuietly(oStream);
    }
}
/**
 * Returns the position in the index file to start scanning to find the given key
 * (at most indexInterval keys away), or -1 when the key sorts before every sampled key.
 */
public long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // no exact sample hit: binarySearch encodes the insertion point as (-(point) - 1)
    int insertionPoint = -(index + 1);
    if (insertionPoint == 0)
        return -1; // key precedes the first sample entirely
    // start from the sample immediately before the insertion point
    return indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only.
 * Replaces the bloom filter with one that reports every key as present, forcing
 * every read through to the index/data files.
 */
public void forceFilterFailures() {
    bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
// Current bloom filter (may be an empty or always-matching stand-in; see load()/loadBloomFilter()).
public Filter getBloomFilter() {
    return bf;
}
// Number of bytes the filter would occupy serialized in this descriptor's filter format.
public long getBloomFilterSerializedSize() {
    return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable: the number of sampled
 * index entries scaled by the sampling interval.
 */
public long estimatedKeys()
{
    // widen before multiplying so the int product cannot overflow for very large summaries
    return indexSummary.getKeys().size() * (long) DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys this SSTable holds within the given ranges,
 * computed from the summary samples covered by each range (never less than 1).
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    long sampledKeys = 0;
    // each pair is an inclusive [left, right] span of summary sample indexes
    for (Pair<Integer, Integer> span : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        sampledKeys += span.right - span.left + 1;
    return Math.max(1, sampledKeys * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable
 * (the keys sampled into the index summary).
 */
public Collection<DecoratedKey> getKeySamples() {
    return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [start, end] indexes of the
 * summary samples it covers.
 *
 * @return one index pair per range that actually intersects the samples; empty when
 * there are no samples or no range overlaps them
 */
private static List<Pair<Integer, Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Integer, Integer>> positions = new ArrayList<Pair<Integer, Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges)) {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right) ? samples.size() - 1 : Collections.binarySearch(samples, rightPosition);
        if (right < 0) {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterize the Pair (the original used the raw type); autoboxing replaces Integer.valueOf
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * @return the summary key samples falling inside the given token range, as a lazy Iterable.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range) {
    final List<DecoratedKey> samples = indexSummary.getKeys();
    final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
    if (indexRanges.isEmpty())
        return Collections.emptyList();
    return new Iterable<DecoratedKey>() {
        public Iterator<DecoratedKey> iterator() {
            return new Iterator<DecoratedKey>() {
                // walk each inclusive [left, right] sample-index span in turn
                private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
                private Pair<Integer, Integer> current;
                private int idx;
                public boolean hasNext() {
                    // current span exhausted (or not started): advance to the next span if any
                    if (current == null || idx > current.right) {
                        if (rangeIter.hasNext()) {
                            current = rangeIter.next();
                            idx = current.left;
                            return true;
                        }
                        return false;
                    }
                    return true;
                }
                public DecoratedKey next() {
                    RowPosition k = samples.get(idx++);
                    // the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
                    assert k instanceof DecoratedKey;
                    return (DecoratedKey) k;
                }
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long, Long>> getPositionsForRanges(Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Long, Long>> positions = new ArrayList<Pair<Long, Long>>();
    for (Range<Token> range : Range.normalize(ranges)) {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterize the Pair (the original used the raw type); autoboxing replaces Long.valueOf
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Inserts a key -> index-entry mapping into the key cache, unless key caching is
 * disabled for this column family or the cache is absent/zero-capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    boolean cacheable = keyCache != null
                     && caching != CFMetaData.Caching.NONE
                     && caching != CFMetaData.Caching.ROWS_ONLY
                     && keyCache.getCapacity() > 0;
    if (!cacheable)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
// Looks up the cached index entry for a row key; updateStats controls whether the
// lookup counts toward cache hit/miss statistics.
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats) {
    return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
// Cache lookup guarded against an absent or zero-capacity cache; getInternal bypasses
// hit/miss accounting when updateStats is false.
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
    if (keyCache == null || keyCache.getCapacity() <= 0)
        return null;
    if (updateStats)
        return keyCache.get(unifiedKey);
    return keyCache.getInternal(unifiedKey);
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op) {
    return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats) {
    // first, check bloom filter
    if (op == Operator.EQ) {
        // EQ only make sense if the key is a valid row key
        assert key instanceof DecoratedKey;
        if (!bf.isPresent(((DecoratedKey) key).key))
            return null;
    }
    // next, the key cache (only make sense for valid row key)
    if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey)) {
        DecoratedKey decoratedKey = (DecoratedKey) key;
        RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
        if (cachedPosition != null)
            return cachedPosition;
    }
    // next, see if the sampled index says it's impossible for the key to be present
    long sampledPosition = getIndexScanPosition(key);
    if (sampledPosition == -1) {
        if (op == Operator.EQ && updateCacheAndStats)
            bloomFilterTracker.addFalsePositive();
        // the key sorts before the first sampled key, so a GE/GT search may still match the first
        // position. We however need to return the correct index entry for that first position.
        if (op.apply(1) >= 0)
            sampledPosition = 0;
        else
            return null;
    }
    // scan the on-disk index, starting at the nearest sampled position
    Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
    while (segments.hasNext()) {
        FileDataInput input = segments.next();
        try {
            while (!input.isEOF()) {
                // read key & data position from index entry
                DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
                int comparison = indexDecoratedKey.compareTo(key);
                int v = op.apply(comparison);
                if (v == 0) {
                    RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
                    if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats) {
                        // key can be == to the index key only if it's a true row key
                        assert key instanceof DecoratedKey;
                        DecoratedKey decoratedKey = (DecoratedKey) key;
                        // store exact match for the key
                        cacheKey(decoratedKey, indexEntry);
                    }
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addTruePositive();
                    return indexEntry;
                }
                if (v < 0) {
                    // passed the last possible match: the key is not in this sstable
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addFalsePositive();
                    return null;
                }
                // not there yet: skip this entry's position payload and keep scanning
                RowIndexEntry.serializer.skip(input, descriptor.version);
            }
        } catch (IOException e) {
            // remember the failure so this sstable can be blacklisted
            markSuspect();
            throw new IOError(e);
        } finally {
            FileUtils.closeQuietly(input);
        }
    }
    // ran off the end of the index without a match
    if (op == Operator.EQ && updateCacheAndStats)
        bloomFilterTracker.addFalsePositive();
    return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength() {
    return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength() {
    return dfile.onDiskLength;
}
/**
 * Attempts to take a reference on this reader.
 *
 * @return false when the count has already reached zero (the reader is being retired);
 * true once the CAS increment succeeds.
 */
public boolean acquireReference()
{
    // standard compare-and-set retry loop: only increment while the count is still positive
    for (;;)
    {
        int current = references.get();
        if (current <= 0)
            return false;
        if (references.compareAndSet(current, current + 1))
            return true;
    }
}
/**
 * Drops one reference. When the last reference is released and the sstable has been
 * marked compacted, unmaps the segmented files and schedules deletion of the on-disk files.
 */
public void releaseReference() {
    if (references.decrementAndGet() == 0 && isCompacted.get()) {
        // Force finalizing mmapping if necessary
        ifile.cleanup();
        dfile.cleanup();
        deletingTask.schedule();
    }
    assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted() {
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " compacted");
    // getAndSet makes the first caller win; later callers see true and bail out
    if (isCompacted.getAndSet(true))
        return false;
    try {
        // persist the state with a marker file so a restart won't resurrect this sstable
        if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
            throw new IOException("Compaction marker already exists");
    } catch (IOException e) {
        throw new IOError(e);
    }
    return true;
}
/**
 * Flags this sstable as suspect (an I/O error was observed while reading it) so it
 * can be excluded from further reads / blacklisted.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    // the previous value is never used, so a plain set is clearer than getAndSet
    isSuspect.set(true);
}
// True when markSuspect() has recorded an I/O error on this sstable.
public boolean isMarkedSuspect() {
    return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter) {
    return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner() {
    return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover, or null to scan the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
    return range == null ? getDirectScanner() : new SSTableBoundedScanner(this, true, range);
}
// Returns a reader over the data-file segment containing the given position.
public FileDataInput getFileDataInput(long position) {
    return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMillis time).
 * This works in conjunction with maxDataAge which is an upper bound on the creation time of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoch on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age) {
    return maxDataAge > age;
}
/**
 * Reads a serialized row size from the stream; its width (int vs long) depends on
 * the sstable version.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    // readInt() widens to long through the ternary, matching the wide branch
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory,
 * preserving the original file names.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException {
    for (Component component : components) {
        File sourceFile = new File(descriptor.filenameFor(component));
        File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
        CLibrary.createHardLink(sourceFile, targetLink);
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method:
 * older sstable versions stored keys in the partitioner's legacy disk encoding.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.version.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
// Instance convenience wrapper over the static decodeKey using this reader's
// partitioner and descriptor.
public DecoratedKey decodeKey(ByteBuffer bytes) {
    return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 * Encodes how an index scan decides whether to stop, match, or keep going relative
 * to a target key.
 */
public abstract static class Operator {
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();
    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);
    static final class Equals extends Operator {
        // match only on equality; a positive comparison (index key > target) means
        // we've passed the target, yielding a negative (stop) result
        public int apply(int comparison) {
            return -comparison;
        }
    }
    static final class GreaterThanOrEqualTo extends Operator {
        // any index key at or beyond the target matches; before it, keep scanning
        public int apply(int comparison) {
            return comparison >= 0 ? 0 : -comparison;
        }
    }
    static final class GreaterThan extends Operator {
        // only an index key strictly beyond the target matches; otherwise keep scanning
        public int apply(int comparison) {
            return comparison > 0 ? 0 : 1;
        }
    }
}
/** @return total bloom-filter false positives (filter hit, key absent) since startup */
public long getBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getFalsePositiveCount();
}
/** @return bloom-filter false positives over the recent sampling window */
public long getRecentBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** @return total bloom-filter true positives since startup */
public long getBloomFilterTruePositiveCount() {
return bloomFilterTracker.getTruePositiveCount();
}
/** @return bloom-filter true positives over the recent sampling window */
public long getRecentBloomFilterTruePositiveCount() {
return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the shared key cache, or null if this reader is not tracked */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache() {
return keyCache;
}
/** @return histogram of row sizes recorded in this sstable's metadata */
public EstimatedHistogram getEstimatedRowSize() {
return sstableMetadata.estimatedRowSize;
}
/** @return histogram of per-row column counts recorded in this sstable's metadata */
public EstimatedHistogram getEstimatedColumnCount() {
return sstableMetadata.estimatedColumnCount;
}
/** @return estimated ratio of tombstones droppable before the given GC grace time */
public double getEstimatedDroppableTombstoneRatio(int gcBefore) {
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
/** @return compressed/uncompressed size ratio from metadata */
public double getCompressionRatio() {
return sstableMetadata.compressionRatio;
}
/** @return commit log replay position recorded when this sstable was written */
public ReplayPosition getReplayPosition() {
return sstableMetadata.replayPosition;
}
/** @return the highest column timestamp recorded in this sstable's metadata */
public long getMaxTimestamp() {
return sstableMetadata.maxTimestamp;
}
/**
 * Opens a reader over the data file, transparently decompressing when this
 * sstable is compressed.
 *
 * @param skipIOCache whether to advise the OS not to cache the pages read
 */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException {
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}

/**
 * Opens a reader over the primary index file.
 *
 * @param skipIOCache whether to advise the OS not to cache the pages read
 */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException {
    return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * Attempts to acquire a reference on each of the given sstables.
 *
 * @param sstables sstables to reference
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables) {
    List<SSTableReader> acquired = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstables) {
        if (!sstable.acquireReference()) {
            // roll back the references we already took before reporting failure
            for (SSTableReader prior : acquired)
                prior.releaseReference();
            return false;
        }
        acquired.add(sstable);
    }
    return true;
}
/**
 * Releases one reference on each given sstable, logging (never propagating)
 * any failure so the remaining sstables are still released.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables) {
    for (SSTableReader sstable : sstables) {
        try {
            sstable.releaseReference();
        } catch (Exception e) {
            // best-effort: keep going so one bad sstable doesn't pin the rest
            logger.error("Failed releasing reference on " + sstable, e);
        }
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an upper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoch and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of the primary index, kept in memory
private IndexSummary indexSummary;
// bloom filter over row keys; may be rebuilt or loaded from the FILTER component
private Filter bf;
// shared key cache; null until setTrackedBy() wires it up
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// reference count; starts at 1 for the owner, file cleanup happens when it reaches 0
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
// set when an IO error makes this sstable's contents untrustworthy
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total number of keys across the given sstables from their
 * in-memory index samples: each sampled key stands in for roughly one index
 * interval's worth of keys.
 *
 * @param sstables sstables to estimate over
 * @return the approximate key count
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables) {
    long count = 0;
    for (SSTableReader sstable : sstables) {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen to long before multiplying: the int product could silently
        // overflow for very large sstables before being added to 'count'
        count = count + (indexKeyCount + 1L) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable, resolving its CFMetaData from the live schema. Secondary
 * index sstables (whose cf name embeds the parent cf name) are given synthetic
 * index metadata derived from the parent.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException {
    CFMetaData metadata;
    int sep = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
    if (sep < 0) {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    } else {
        String parentName = descriptor.cfname.substring(0, sep);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(sep + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with the given metadata, selecting a LocalPartitioner for
 * secondary-index sstables and the cluster partitioner otherwise.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException {
    IPartitioner p;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/** Opens without post-load key-order validation; no saved keys and no tracker are supplied. */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException {
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Opens with no saved keys to pre-load into the key cache and no tracker. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException {
return open(descriptor, components, null, metadata, partitioner);
}
/** Opens with validation enabled; delegates to the full-argument overload. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
/**
 * Core open path: reads metadata, checks the partitioner matches, loads the
 * index/data segments (optionally rebuilding the bloom filter), and optionally
 * validates key ordering.
 *
 * @param savedKeys keys to pre-load into the key cache during index load
 * @param validate  when true, checks first <= last after load
 */
private static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner, boolean validate) throws IOException {
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS component is optional; fall back to defaults when absent
SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
// NOTE(review): the sibling 5-arg open() reads this flag via descriptor.version;
// here it is read off the descriptor directly — confirm which is correct for this revision
if (descriptor.hasStringsInBloomFilter) {
sstable.load(true, savedKeys);
} else {
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Open path used with a tracker but no saved keys: reads metadata, checks the
 * partitioner, and loads the index/data segments (optionally rebuilding the
 * bloom filter).
 *
 * NOTE(review): largely duplicates the private 7-arg open(); unlike it, this
 * overload performs no validate() call — confirm that is intentional.
 */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException {
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS component is optional; fall back to defaults when absent
SSTableMetadata sstableMetadata = components.contains(Component.STATS) ? SSTableMetadata.serializer.deserialize(descriptor) : SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s", descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor, components, metadata, partitioner, null, null, null, null, System.currentTimeMillis(), sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter) {
sstable.load(true);
} else {
sstable.load(false);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs a failed sstable open: a missing component gets a short message,
 * anything else is treated as corruption and logged with the stack trace.
 */
public static void logOpenException(Descriptor descriptor, IOException e) {
    if (e instanceof FileNotFoundException) {
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
        return;
    }
    logger.error("Corrupt sstable " + descriptor + "; skipped", e);
}
/**
 * Opens a batch of sstables in parallel on a fixed-size pool (one thread per
 * core). Sstables that fail to open are logged and omitted from the result.
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries, final DataTracker tracker, final CFMetaData metadata, final IPartitioner partitioner) {
// LinkedBlockingQueue: thread-safe sink for the concurrently-opened readers
final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
for (final Map.Entry<Descriptor, Set<Component>> entry : entries) {
Runnable runnable = new Runnable() {
public void run() {
SSTableReader sstable;
try {
sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
} catch (IOException ex) {
// skip the broken sstable; the rest of the batch still loads
logger.error("Corrupt sstable " + entry + "; skipped", ex);
return;
}
sstables.add(sstable);
}
};
executor.submit(runnable);
}
executor.shutdown();
try {
// effectively "wait forever" for all opens to finish
executor.awaitTermination(7, TimeUnit.DAYS);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 */
static SSTableReader internalOpen(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary isummary, Filter bf, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc, components, metadata, partitioner, ifile, dfile, isummary, bf, maxDataAge, sstableMetadata);
}
/**
 * Sole constructor; ifile/dfile/indexSummary/bloomFilter may be null when the
 * caller will populate them via load()/loadBloomFilter().
 */
private SSTableReader(Descriptor desc, Set<Component> components, CFMetaData metadata, IPartitioner partitioner, SegmentedFile ifile, SegmentedFile dfile, IndexSummary indexSummary, Filter bloomFilter, long maxDataAge, SSTableMetadata sstableMetadata) throws IOException {
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// task responsible for deleting this sstable's files once it is compacted and unreferenced
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker: enables the shared key cache and
 * wires the deletion task to the tracker. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker) {
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Loads the bloom filter from the FILTER component; when the component is
 * absent, installs an empty filter instead.
 *
 * @throws IOException on deserialization failure
 */
void loadBloomFilter() throws IOException {
    if (!components.contains(Component.FILTER)) {
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    } finally {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * @param recreatebloom rebuild the bloom filter from the index rather than reading it from disk
 * @param keysToLoadInCache keys whose data positions should be pre-populated into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException {
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// first/last keys observed while scanning the index, to bound this sstable's key range
DecoratedKey left = null, right = null;
try {
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
SSTable.estimateRowsFromIndex(input);
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true) {
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// last entry iff its key + position consume exactly the rest of the index file
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// only materialize the key when something needs it; otherwise just skip the bytes
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey) {
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
} else {
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null) {
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
} finally {
FileUtils.closeQuietly(input);
}
// record the key range covered by this sstable
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/** Sanity check after load: the first key must not sort after the last key. */
private void validate() {
    if (this.last.compareTo(this.first) < 0)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * @param recreatebloom rebuild the bloom filter by scanning the primary index
 */
private void load(boolean recreatebloom) throws IOException {
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression ? SegmentedFile.getCompressedBuilder() : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try {
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed() ? histogramCount : // statistics is supposed to be optional
estimateRowsFromIndex(primaryIndex);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
// scan every index entry only when the summary wasn't loaded or the filter must be rebuilt
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize) {
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded) {
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
} finally {
FileUtils.closeQuietly(primaryIndex);
}
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (// save summary information to disk
readIndex)
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to load the reader's index summary, first/last keys and segment
 * boundaries from the SUMMARY component.
 *
 * @return true on success; false (after deleting a corrupt summary file) when
 *         the summary is absent or unreadable, in which case the caller must
 *         rebuild it from the primary index
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try {
iStream = new DataInputStream(new FileInputStream(summariesFile));
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
} catch (IOException e) {
// a broken summary is not fatal: delete it so it gets regenerated
logger.debug("Cannot deserialize SSTable Summary: ", e);
if (summariesFile.exists())
summariesFile.delete();
return false;
} finally {
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
 * Persists the reader's index summary, first/last keys and segment boundaries
 * to the SUMMARY component. Failures are logged and the partial file deleted;
 * the summary can always be rebuilt from the primary index.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) {
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
// overwrite any stale summary
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try {
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
} catch (IOException e) {
// best-effort: remove the partial file so a corrupt summary is never loaded
logger.debug("Cannot save SSTable Summary: ", e);
if (summariesFile.exists())
summariesFile.delete();
} finally {
FileUtils.closeQuietly(oStream);
}
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key) {
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index);
    // no exact sample: binarySearch encodes the insertion point as (-(point) - 1);
    // recover it and step back to the preceding sample (or report -1 if there is none)
    int insertionPoint = -index - 1;
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata() {
if (!compression)
throw new IllegalStateException(this + " is not compressed");
return ((CompressedSegmentedFile) dfile).metadata;
}
/**
 * For testing purposes only: replaces the filter with one that matches everything,
 * forcing every lookup through to the index.
 */
public void forceFilterFailures() {
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
/** @return the bloom filter over this sstable's row keys */
public Filter getBloomFilter() {
return bf;
}
/** @return the serialized size of the bloom filter, in bytes, for this version's filter format */
public long getBloomFilterSerializedSize() {
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable, computed from the
 *         number of index samples times the index interval.
 */
public long estimatedKeys() {
    // widen before multiplying: the int product size() * interval could overflow
    // for very large sstables before being returned as a long
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges) {
    long sampleKeyCount = 0;
    // each (left, right) pair is an inclusive span of sample indexes covered by a range
    for (Pair<Integer, Integer> span : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        sampleKeyCount += span.right - span.left + 1;
    return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey> getKeySamples() {
return indexSummary.getKeys();
}
/**
 * Maps each normalized token range onto the inclusive span of index-sample
 * positions it covers.
 *
 * @param samples sorted key samples from the index summary
 * @param ranges  token ranges of interest
 * @return inclusive (left, right) sample-index pairs, one per range that
 *         actually intersects the samples
 */
private static List<Pair<Integer, Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Integer, Integer>> positions = new ArrayList<Pair<Integer, Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges)) {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right) ? samples.size() - 1 : Collections.binarySearch(samples, rightPosition);
        if (right < 0) {
            // ranges are end inclusive so we use the previous index from what binarySearch gives us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // parameterized constructor instead of the previous raw-typed 'new Pair(...)',
        // which defeated generic type checking; autoboxing replaces the explicit valueOf
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Returns the index-summary keys that fall inside the given range, as a lazy
 * Iterable over the sample spans computed by getSampleIndexesForRanges.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range) {
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>() {
public Iterator<DecoratedKey> iterator() {
return new Iterator<DecoratedKey>() {
// walks each inclusive (left, right) span, yielding samples[idx] for idx in [left, right]
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext() {
// advance to the next span once the current one is exhausted
if (current == null || idx > current.right) {
if (rangeIter.hasNext()) {
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next() {
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey) k;
}
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long, Long>> getPositionsForRanges(Collection<Range<Token>> ranges) {
    // use the index to determine a minimal section for each range
    List<Pair<Long, Long>> positions = new ArrayList<Pair<Long, Long>>();
    for (Range<Token> range : Range.normalize(ranges)) {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized constructor instead of the previous raw-typed 'new Pair(...)',
        // which defeated generic type checking; autoboxing replaces the explicit valueOf
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Adds the given index entry to the key cache, unless key caching is disabled
 * for this column family or the cache has no capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info) {
    CFMetaData.Caching caching = metadata.getCaching();
    boolean keysNotCached = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
    if (keyCache == null || keysNotCached || keyCache.getCapacity() == 0)
        return;
    // clone so the cache doesn't pin the (possibly much larger) original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
/** Looks up the cached index entry for a row key, optionally counting the access in cache stats. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats) {
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
/** @return the cached entry, or null if the cache is disabled or the key is absent */
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats) {
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op) {
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or -1 if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats) {
// first, check bloom filter
if (op == Operator.EQ) {
// EQ only make sense if the key is a valid row key
assert key instanceof DecoratedKey;
if (!bf.isPresent(((DecoratedKey) key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey)) {
DecoratedKey decoratedKey = (DecoratedKey) key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1) {
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// the key is before the first sampled key: a forward-matching operator may still
// match the sstable's first position, so start the scan from the beginning.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext()) {
FileDataInput input = segments.next();
try {
while (!input.isEOF()) {
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0) {
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats) {
// key can be == to the index key only if it's a true row key
assert key instanceof DecoratedKey;
DecoratedKey decoratedKey = (DecoratedKey) key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0) {
// passed the target without a match: no further entry can satisfy the operator
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// might still match forward: skip this entry's position data and keep scanning
RowIndexEntry.serializer.skip(input, descriptor.version);
}
} catch (IOException e) {
// an unreadable index marks the whole sstable as suspect
markSuspect();
throw new IOError(e);
} finally {
FileUtils.closeQuietly(input);
}
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
 * @return The length in bytes of the uncompressed data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength() {
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * uncompressedLength())
 */
public long onDiskLength() {
return dfile.onDiskLength;
}
/**
 * Atomically takes a reference on this sstable unless it has already been fully released.
 *
 * @return true if the reference count was incremented; false if the count has already
 *         reached zero (the sstable may be deleted and must not be revived).
 */
public boolean acquireReference()
{
    for (;;)
    {
        int current = references.get();
        // once released to zero, never resurrect the reference count
        if (current <= 0)
            return false;
        // CAS loop so concurrent acquire/release cannot lose an update
        if (references.compareAndSet(current, current + 1))
            return true;
    }
}
/**
 * Drops one reference; when the final reference is released on an sstable already marked
 * compacted, file mappings are cleaned up and deletion is scheduled.
 */
public void releaseReference()
{
    int remaining = references.decrementAndGet();
    if (remaining == 0 && isCompacted.get())
    {
        // Force finalizing mmapping if necessary
        ifile.cleanup();
        dfile.cleanup();
        deletingTask.schedule();
    }
    assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " compacted");

    // getAndSet makes the transition atomic: only the first caller observes false here
    boolean wasAlreadyCompacted = isCompacted.getAndSet(true);
    if (wasAlreadyCompacted)
        return false;

    try
    {
        File marker = new File(descriptor.filenameFor(Component.COMPACTED_MARKER));
        if (!marker.createNewFile())
            throw new IOException("Compaction marker already exists");
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
    return true;
}
/** Flags this sstable as suspect (e.g. after an I/O error) so it can be blacklisted. */
public void markSuspect() {
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
isSuspect.getAndSet(true);
}
/** @return whether markSuspect() has flagged this sstable. */
public boolean isMarkedSuspect() {
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter) {
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner() {
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means scan the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range) {
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
/** @return a FileDataInput positioned at the given offset into the data file. */
public FileDataInput getFileDataInput(long position) {
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost current-millis time).
 * Works in conjunction with maxDataAge, which is an upper bound on the creation time of data in this sstable.
 *
 * @param age the age to compare against this sstable's maxDataAge; milliseconds since epoch on this host
 * @return true iff this sstable contains data that's newer than the given age parameter
 */
public boolean newSince(long age)
{
    return age < maxDataAge;
}
/**
 * Reads a serialized row size from the stream, honoring the on-disk format version:
 * older versions wrote row sizes as 32-bit ints, newer ones as 64-bit longs.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory.
 *
 * @param snapshotDirectoryPath directory that receives the links
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
    for (Component component : components)
    {
        File source = new File(descriptor.filenameFor(component));
        File link = new File(snapshotDirectoryPath, source.getName());
        CLibrary.createHardLink(source, link);
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 * Older on-disk versions stored keys in a partitioner-encoded form; newer versions
 * store raw key bytes that only need decorating.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes) {
if (d.version.hasEncodedKeys)
return p.convertFromDiskFormat(bytes);
return p.decorateKey(bytes);
}
/** Convenience overload using this sstable's own partitioner and descriptor. */
public DecoratedKey decodeKey(ByteBuffer bytes) {
return decodeKey(partitioner, descriptor, bytes);
}
/**
 * Search operators for scanning the on-disk index: given the result of comparing an
 * index key against the search key, each operator reports whether the scan has matched,
 * may still match further on, or can never match.
 * TODO: Move someplace reusable
 */
public abstract static class Operator {
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    static final class Equals extends Operator {
        public int apply(int comparison) {
            // exact match only: sign-flip turns "index key past target" into "cannot match"
            return -comparison;
        }
    }

    static final class GreaterThanOrEqualTo extends Operator {
        public int apply(int comparison) {
            // any index key >= target matches; otherwise keep scanning forward
            if (comparison >= 0)
                return 0;
            return -comparison;
        }
    }

    static final class GreaterThan extends Operator {
        public int apply(int comparison) {
            // any index key > target matches; otherwise keep scanning forward
            if (comparison > 0)
                return 0;
            return 1;
        }
    }
}
/** @return lifetime count of bloom-filter false positives recorded for this sstable. */
public long getBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getFalsePositiveCount();
}
/** @return recent-window count of bloom-filter false positives. */
public long getRecentBloomFilterFalsePositiveCount() {
return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** @return lifetime count of bloom-filter true positives. */
public long getBloomFilterTruePositiveCount() {
return bloomFilterTracker.getTruePositiveCount();
}
/** @return recent-window count of bloom-filter true positives. */
public long getRecentBloomFilterTruePositiveCount() {
return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the shared key cache this reader uses, or null if untracked. */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache() {
return keyCache;
}
/** @return histogram of row sizes from this sstable's metadata. */
public EstimatedHistogram getEstimatedRowSize() {
return sstableMetadata.estimatedRowSize;
}
/** @return histogram of per-row column counts from this sstable's metadata. */
public EstimatedHistogram getEstimatedColumnCount() {
return sstableMetadata.estimatedColumnCount;
}
/** @return estimated ratio of tombstones droppable before the given gcBefore time. */
public double getEstimatedDroppableTombstoneRatio(int gcBefore) {
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
/** @return compression ratio recorded in the sstable metadata. */
public double getCompressionRatio() {
return sstableMetadata.compressionRatio;
}
/** @return commitlog replay position recorded when this sstable was written. */
public ReplayPosition getReplayPosition() {
return sstableMetadata.replayPosition;
}
/** @return the maximum column timestamp recorded in the sstable metadata. */
public long getMaxTimestamp() {
return sstableMetadata.maxTimestamp;
}
/**
 * Opens a reader over the data file, decompressing transparently when this sstable
 * is compressed.
 *
 * @param skipIOCache whether to bypass the OS page cache for this read
 */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/** @return a RandomAccessReader over the primary index file. */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException {
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * Attempts to take a reference on every sstable in the iterable.
 *
 * @param sstables sstables to reference
 * @return true if all desired references were acquired. Otherwise, any references already
 *         taken are released again and false is returned.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    SSTableReader firstFailure = null;
    for (SSTableReader sstable : sstables)
    {
        if (!sstable.acquireReference())
        {
            firstFailure = sstable;
            break;
        }
    }
    if (firstFailure == null)
        return true;
    // roll back: release everything acquired before the failure
    for (SSTableReader sstable : sstables)
    {
        if (sstable == firstFailure)
            break;
        sstable.releaseReference();
    }
    return false;
}
/**
 * Releases one reference on each sstable; failures are logged rather than propagated
 * so a single bad sstable cannot prevent the remaining releases.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables) {
for (SSTableReader sstable : sstables) {
try {
sstable.releaseReference();
} catch (Exception ex) {
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMillis) which represents an upper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoch and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of index keys, used to bound on-disk index scans
private IndexSummary indexSummary;
// bloom filter over row keys; replaced by an empty filter when the FILTER component is missing
private Filter bf;
// shared key cache; null until setTrackedBy() associates this reader with a tracker
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// reference count; starts at 1 for the owner, files become deletable once it reaches 0
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Estimates the total key count across the given sstables from their index-summary sizes:
 * each summary sample stands in for roughly one index-interval's worth of keys.
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long total = 0;
    for (SSTableReader sstable : sstables)
    {
        int sampleCount = sstable.getKeySamples().size();
        total += (sampleCount + 1) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + total);
    }
    return total;
}
/**
 * Opens an sstable, resolving its CFMetaData from the schema. Secondary-index sstables
 * (cfname containing the index-name separator) get index metadata derived from their
 * parent column family's definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    int separatorIndex = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
    if (separatorIndex >= 0)
    {
        String parentName = descriptor.cfname.substring(0, separatorIndex);
        String indexName = descriptor.cfname.substring(separatorIndex + 1);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(indexName);
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    else
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with known metadata, choosing a LocalPartitioner for secondary-index
 * sstables and the cluster-wide partitioner otherwise.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner partitioner;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        partitioner = new LocalPartitioner(metadata.getKeyValidator());
    else
        partitioner = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, partitioner);
}
/**
 * Opens an sstable without validating its key ordering (validate=false) and without
 * pre-loading any keys into the key cache or registering a tracker.
 */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Opens with no tracker; the reader will not participate in the shared key cache. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, null, metadata, partitioner);
}
<<<<<<< MINE
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
=======
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
>>>>>>> YOURS
/**
 * Core open path: builds the reader, loads index/data segments and bloom filter
 * (pre-warming the key cache with savedKeys while scanning the index), and optionally
 * validates the loaded key ordering.
 */
private static SSTableReader open(Descriptor descriptor,
Set<Component> components,
Set<DecoratedKey> savedKeys,
DataTracker tracker,
CFMetaData metadata,
IPartitioner partitioner,
boolean validate) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
// STATS component is optional (older sstables); fall back to defaults when missing
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
// NOTE(review): the public overload reads descriptor.version.hasStringsInBloomFilter;
// confirm whether 'descriptor.hasStringsInBloomFilter' here is the intended accessor.
if (descriptor.hasStringsInBloomFilter)
{
sstable.load(true, savedKeys);
}
else
{
sstable.load(false, savedKeys);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/**
 * Logs an sstable-open failure: a missing component is reported briefly, anything else
 * is treated as corruption and logged with its stack trace.
 */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Opens many sstables in parallel (one task per sstable on a fixed-size pool) and returns
 * the successfully opened readers; individual failures are logged and skipped.
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
final DataTracker tracker,
final CFMetaData metadata,
final IPartitioner partitioner)
{
// thread-safe sink for results produced by the pool threads
final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
{
Runnable runnable = new Runnable()
{
public void run()
{
SSTableReader sstable;
try
{
sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
}
catch (IOException ex)
{
// a corrupt sstable is skipped; it does not fail the whole batch
logger.error("Corrupt sstable " + entry + "; skipped", ex);
return;
}
sstables.add(sstable);
}
};
executor.submit(runnable);
}
executor.shutdown();
try
{
// effectively unbounded wait for all open tasks to finish
executor.awaitTermination(7, TimeUnit.DAYS);
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * Nothing is loaded from disk here; every piece of reader state must be supplied.
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  Filter bf,
                                  long maxDataAge,
                                  SSTableMetadata sstableMetadata) throws IOException
{
    // individual asserts pinpoint exactly which piece of state is missing
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;
    return new SSTableReader(desc, components, metadata, partitioner,
                             ifile, dfile, isummary, bf, maxDataAge, sstableMetadata);
}
/**
 * Private constructor: records the given state verbatim. Loading from disk happens in
 * load()/loadBloomFilter(), invoked by the static open() factories.
 */
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker: a tracked reader uses the shared key cache
 * and notifies the tracker when its deletion task runs. A null tracker is a no-op.
 */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Deserializes the bloom filter from the FILTER component, or installs an empty filter
 * when that component is absent.
 */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        bf = FilterFactory.emptyFilter();
        return;
    }

    DataInputStream in = null;
    try
    {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    }
    finally
    {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary by scanning the primary index once, and optionally
 * recreates the bloom filter; keys present in keysToLoadInCache are pre-warmed into the
 * key cache as they are encountered.
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// left/right track the smallest and largest keys seen, i.e. the sstable's key range
DecoratedKey left = null, right = null;
try
{
long indexSize = input.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
DecoratedKey decoratedKey = null;
int len = ByteBufferUtil.readShortLength(input);
boolean firstKey = left == null;
// last entry iff this key plus its long data-position fills the remainder of the index
boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// only materialize the key when something (summary, cache, filter, range bounds) needs it
if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
{
decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
if (firstKey)
left = decoratedKey;
if (lastKey)
right = decoratedKey;
}
else
{
FileUtils.skipBytesFully(input, len);
}
long dataPosition = input.readLong();
if (decoratedKey != null)
{
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
// if key cache could be used and we have key already pre-loaded
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/**
 * Sanity-checks the loaded key range.
 *
 * @throws IllegalStateException if the first key sorts after the last, indicating a
 *         corrupt or mis-loaded index
 */
private void validate()
{
    int order = first.compareTo(last);
    if (order > 0)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Tries a previously persisted summary (SUMMARY component) first; the primary index is
 * only scanned when the summary is missing/corrupt or the bloom filter must be rebuilt.
 */
private void load(boolean recreatebloom) throws IOException
{
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try
{
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
// when readIndex is false the loop body never runs and the summary from disk is used as-is
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
{
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
// first/last track the overall key range of the sstable
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded)
{
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
}
finally
{
FileUtils.closeQuietly(primaryIndex);
}
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (readIndex) // save summary information to disk
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to restore the index summary, key bounds and segment boundaries from the
 * SUMMARY component. A corrupt summary file is deleted so it can be rebuilt and re-saved.
 *
 * @return true if the summary was loaded; false if the file is absent or unreadable
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
logger.debug("Cannot deserialize SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
 * Persists the index summary, key bounds and segment boundaries to the SUMMARY component
 * so future opens can skip scanning the primary index. Failures are logged, the partial
 * file is removed, and the error is otherwise swallowed (the summary is a pure cache).
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
// overwrite any stale summary from a previous open
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try
{
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
}
catch (IOException e)
{
logger.debug("Cannot save SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
}
finally
{
FileUtils.closeQuietly(oStream);
}
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index); // exact hit on a sampled key

    // on a miss binarySearch returns -(insertionPoint) - 1; the sample just before the
    // insertion point is the greatest sampled key <= the search key
    int insertionPoint = -index - 1;
    if (insertionPoint == 0)
        return -1; // key sorts before every sampled key
    return indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only: replaces the bloom filter with one that always matches,
 * forcing every lookup through to the index.
 */
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
/** @return the bloom filter over this sstable's row keys. */
public Filter getBloomFilter()
{
return bf;
}
/** @return the serialized size in bytes of the current bloom filter. */
public long getBloomFilterSerializedSize()
{
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable: one index-summary sample
 * stands in for roughly one index-interval's worth of keys.
 */
public long estimatedKeys()
{
return indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys this sstable holds within the given ranges,
 *         derived from how many index-summary samples those ranges cover.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    long samplesInRanges = 0;
    for (Pair<Integer, Integer> span : getSampleIndexesForRanges(indexSummary.getKeys(), ranges))
        samplesInRanges += span.right - span.left + 1;
    // each sample represents ~indexInterval keys; never report fewer than one key
    return Math.max(1, samplesInRanges * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable
 * (the index-summary samples, returned as-is).
 */
public Collection<DecoratedKey> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each normalized token range onto the inclusive [left, right] span of index-summary
 * sample positions it covers.
 *
 * @param samples the sorted index-summary keys
 * @param ranges  token ranges of interest (left-exclusive, right-inclusive)
 * @return one (left, right) sample-index pair per range that covers at least one sample
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                    ? samples.size() - 1
                    : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // typed Pair: was a raw 'new Pair(Integer.valueOf(left), Integer.valueOf(right))',
        // which defeated the generic declaration; autoboxing replaces the explicit valueOf
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the index-summary keys that fall within the given token range, walking
 * the sample-index spans computed by getSampleIndexesForRanges.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>()
{
public Iterator<DecoratedKey> iterator()
{
return new Iterator<DecoratedKey>()
{
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
// current inclusive [left, right] span of sample indexes, and the cursor within it
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
// advance to the next span once the current one is exhausted (or on first call)
if (current == null || idx > current.right)
{
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized Pair: avoids the raw-type unchecked warning of `new Pair(...)`
        positions.add(new Pair<Long,Long>(Long.valueOf(left), Long.valueOf(right)));
    }
    return positions;
}
/**
 * Inserts an index entry for the given row key into the shared key cache,
 * unless caching is disabled for this column family or the cache has no capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    boolean cachingKeys = keyCache != null
                       && keyCache.getCapacity() > 0
                       && caching != CFMetaData.Caching.NONE
                       && caching != CFMetaData.Caching.ROWS_ONLY;
    if (!cachingKeys)
        return;
    // clone the buffer so we don't pin the (possibly much larger) original key buffer
    ByteBuffer keyCopy = ByteBufferUtil.clone(key.key);
    keyCache.put(new KeyCacheKey(descriptor, keyCopy), info);
}
/** Looks up the cached index entry for a row key, optionally recording cache hit/miss stats. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
    KeyCacheKey cacheKey = new KeyCacheKey(descriptor, key.key);
    return getCachedPosition(cacheKey, updateStats);
}
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
    // a missing or zero-capacity cache never yields a hit
    if (keyCache == null || keyCache.getCapacity() <= 0)
        return null;
    return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
    // convenience overload: always update the key cache and bloom filter stats
    return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The index entry locating the key in the data file, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
    // first, check bloom filter
    if (op == Operator.EQ)
    {
        assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
        if (!bf.isPresent(((DecoratedKey)key).key))
            return null;
    }
    // next, the key cache (only make sense for valid row key)
    if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
    {
        DecoratedKey decoratedKey = (DecoratedKey)key;
        RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
        if (cachedPosition != null)
            return cachedPosition;
    }
    // next, see if the sampled index says it's impossible for the key to be present
    long sampledPosition = getIndexScanPosition(key);
    if (sampledPosition == -1)
    {
        if (op == Operator.EQ && updateCacheAndStats)
            bloomFilterTracker.addFalsePositive();
        // we matched the -1th position: if the operator might match forward, we'll start at the first
        // position. We however need to return the correct index entry for that first position.
        if (op.apply(1) >= 0)
            sampledPosition = 0;
        else
            return null;
    }
    // scan the on-disk index, starting at the nearest sampled position
    Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
    while (segments.hasNext())
    {
        FileDataInput input = segments.next();
        try
        {
            while (!input.isEOF())
            {
                // read key & data position from index entry
                DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
                int comparison = indexDecoratedKey.compareTo(key);
                int v = op.apply(comparison);
                if (v == 0)
                {
                    // operator matched at this index entry: deserialize and return it
                    RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
                    if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
                    {
                        assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
                        DecoratedKey decoratedKey = (DecoratedKey)key;
                        // store exact match for the key
                        cacheKey(decoratedKey, indexEntry);
                    }
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addTruePositive();
                    return indexEntry;
                }
                if (v < 0)
                {
                    // operator can no longer match further forward in the index: give up
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addFalsePositive();
                    return null;
                }
                // not there yet: skip over this entry's serialized position and keep scanning
                RowIndexEntry.serializer.skip(input, descriptor.version);
            }
        }
        catch (IOException e)
        {
            markSuspect();
            throw new IOError(e);
        }
        finally
        {
            FileUtils.closeQuietly(input);
        }
    }
    if (op == Operator.EQ && updateCacheAndStats)
        bloomFilterTracker.addFalsePositive();
    return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
    return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * uncompressedLength())
 */
public long onDiskLength()
{
    return dfile.onDiskLength;
}
/**
 * Atomically takes a reference on this sstable.
 * @return false if the sstable has already been fully released (count has reached zero)
 */
public boolean acquireReference()
{
    // classic CAS loop: retry while the count is positive and another thread races us
    for (int refs = references.get(); refs > 0; refs = references.get())
    {
        if (references.compareAndSet(refs, refs + 1))
            return true;
    }
    return false;
}
/**
 * Drops a reference taken by acquireReference(). When the last reference is
 * released on an sstable already marked compacted, its segment files are
 * cleaned up and deletion of the on-disk files is scheduled.
 */
public void releaseReference()
{
    if (references.decrementAndGet() == 0 && isCompacted.get())
    {
        // Force finalizing mmapping if necessary
        ifile.cleanup();
        dfile.cleanup();
        deletingTask.schedule();
    }
    assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " compacted");
    // getAndSet makes the transition atomic: only the first caller sees false
    boolean alreadyCompacted = isCompacted.getAndSet(true);
    if (alreadyCompacted)
        return false;
    File marker = new File(descriptor.filenameFor(Component.COMPACTED_MARKER));
    try
    {
        if (!marker.createNewFile())
            throw new IOException("Compaction marker already exists");
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
    return true;
}
/** Flags this sstable as suspect (e.g. after an IO error) so it can be blacklisted. */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    isSuspect.getAndSet(true);
}
/** @return true if markSuspect() has been called on this sstable */
public boolean isMarkedSuspect()
{
    return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
    return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
    return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means scan the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
    if (range == null)
        return getDirectScanner();
    return new SSTableBoundedScanner(this, true, range);
}
/** @return an input over the data file, positioned at the given offset */
public FileDataInput getFileDataInput(long position)
{
    return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
    return maxDataAge > age;
}
/** Reads a serialized row size from the input, honoring the descriptor's on-disk format. */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    // older sstable versions stored the row size as an int rather than a long
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory.
 * @param snapshotDirectoryPath directory in which to create the links
 * @throws IOException if a hard link cannot be created
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
    for (Component component : components)
    {
        File sourceFile = new File(descriptor.filenameFor(component));
        File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
        CLibrary.createHardLink(sourceFile, targetLink);
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    // older formats stored keys in a partitioner-specific on-disk encoding
    return d.version.hasEncodedKeys ? p.convertFromDiskFormat(bytes) : p.decorateKey(bytes);
}
/** Decodes a key using this reader's own partitioner and descriptor. */
public DecoratedKey decodeKey(ByteBuffer bytes)
{
    return decodeKey(partitioner, descriptor, bytes);
}
/**
 * Comparison operator applied while scanning the index for a target key.
 * TODO: Move someplace reusable
 */
public abstract static class Operator
{
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    static final class Equals extends Operator
    {
        // only an exact hit matches; a positive comparison means we've scanned past the target
        public int apply(int comparison)
        {
            return -comparison;
        }
    }

    static final class GreaterThanOrEqualTo extends Operator
    {
        public int apply(int comparison)
        {
            return comparison >= 0 ? 0 : -comparison;
        }
    }

    static final class GreaterThan extends Operator
    {
        public int apply(int comparison)
        {
            return comparison > 0 ? 0 : 1;
        }
    }
}
/** @return lifetime count of bloom filter false positives for this sstable */
public long getBloomFilterFalsePositiveCount()
{
    return bloomFilterTracker.getFalsePositiveCount();
}
/** @return false positives since the counter was last sampled */
public long getRecentBloomFilterFalsePositiveCount()
{
    return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** @return lifetime count of bloom filter true positives for this sstable */
public long getBloomFilterTruePositiveCount()
{
    return bloomFilterTracker.getTruePositiveCount();
}
/** @return true positives since the counter was last sampled */
public long getRecentBloomFilterTruePositiveCount()
{
    return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the shared key cache this reader uses; null if not tracked */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
    return keyCache;
}
public EstimatedHistogram getEstimatedRowSize()
{
    return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount()
{
    return sstableMetadata.estimatedColumnCount;
}
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
    return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
public double getCompressionRatio()
{
    return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition()
{
    return sstableMetadata.replayPosition;
}
public long getMaxTimestamp()
{
    return sstableMetadata.maxTimestamp;
}
/** Opens a reader over the data file, decompressing transparently if the sstable is compressed. */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
    if (compression)
        return CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache);
    return RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/** Opens a reader over the primary index file. */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
    return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    // remember what we've taken so far so we can roll back on failure
    List<SSTableReader> acquired = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstables)
    {
        if (sstable.acquireReference())
        {
            acquired.add(sstable);
            continue;
        }
        // partial acquisition: release everything we grabbed and report failure
        for (SSTableReader referenced : acquired)
            referenced.releaseReference();
        return false;
    }
    return true;
}
/**
 * Releases one reference on each of the given sstables; a failure on one
 * sstable is logged and does not prevent releasing the rest.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
    for (SSTableReader sstable : sstables)
    {
        try
        {
            sstable.releaseReference();
        }
        catch (Exception ex)
        {
            logger.error("Failed releasing reference on " + sstable, ex);
        }
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an upper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoc and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
// sampled subset of the primary index, used to bound on-disk index scans
private IndexSummary indexSummary;
// bloom filter over row keys; loaded from the FILTER component or rebuilt from the index
private Filter bf;
// shared key cache; null until setTrackedBy() registers this reader with a tracker
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
// reference count guarding the on-disk resources; starts at 1 for the owner
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
// removes the on-disk files once the sstable is compacted and fully released
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Rough upper bound on the total number of keys in the given sstables, derived
 * from the number of index-summary samples (each sample represents roughly
 * INDEX_INTERVAL keys).
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen to long BEFORE multiplying: (indexKeyCount + 1) * interval can overflow int
        count = count + ((long) indexKeyCount + 1) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable, resolving its metadata from the schema. Secondary-index
 * sstables (cfname contains the index separator) get index metadata rebuilt
 * from the parent column family's definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
    {
        // cfname is "<parent><separator><index>": split it and derive index metadata from the parent
        int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
        String parentName = descriptor.cfname.substring(0, i);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    else
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    return open(descriptor, metadata);
}
/** Opens an sstable with the appropriate partitioner (local for secondary-index sstables). */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p;
    if (desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
        // index sstables are partitioned locally by their indexed value
        p = new LocalPartitioner(metadata.getKeyValidator());
    else
        p = StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/** Opens an sstable without validating its first/last key bounds. */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
    return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Opens an sstable with no tracker registration. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
    return open(descriptor, components, null, metadata, partitioner);
}
<<<<<<< MINE
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
=======
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
>>>>>>> YOURS
/**
 * Opens an sstable, loading its index, summary and bloom filter.
 *
 * @param descriptor sstable files to open
 * @param components on-disk components present for this sstable
 * @param savedKeys keys whose positions should be pre-loaded into the key cache
 * @param tracker tracker to register with for key caching and deferred deletion (may be null)
 * @param metadata column family schema for the sstable
 * @param partitioner must match the partitioner the sstable was written with
 * @param validate whether to sanity-check the loaded first/last key bounds
 */
private static SSTableReader open(Descriptor descriptor,
                                  Set<Component> components,
                                  Set<DecoratedKey> savedKeys,
                                  DataTracker tracker,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  boolean validate) throws IOException
{
    assert partitioner != null;
    // Minimum components without which we can't do anything
    assert components.contains(Component.DATA);
    assert components.contains(Component.PRIMARY_INDEX);
    long start = System.currentTimeMillis();
    logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
    SSTableMetadata sstableMetadata = components.contains(Component.STATS)
                                    ? SSTableMetadata.serializer.deserialize(descriptor)
                                    : SSTableMetadata.createDefaultInstance();
    // Check if sstable is created using same partitioner.
    // Partitioner can be null, which indicates older version of sstable or no stats available.
    // In that case, we skip the check.
    String partitionerName = partitioner.getClass().getCanonicalName();
    if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
        throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
                                                 descriptor, partitionerName));
    SSTableReader sstable = new SSTableReader(descriptor,
                                              components,
                                              metadata,
                                              partitioner,
                                              null,
                                              null,
                                              null,
                                              null,
                                              System.currentTimeMillis(),
                                              sstableMetadata);
    sstable.setTrackedBy(tracker);
    // versions before 'c' encoded keys as utf-16 before hashing to the filter.
    // Fixed: the flag lives on descriptor.version, matching every other use in this file
    // (descriptor.hasStringsInBloomFilter did not match the version-flag convention).
    if (descriptor.version.hasStringsInBloomFilter)
    {
        sstable.load(true, savedKeys);
    }
    else
    {
        sstable.load(false, savedKeys);
        sstable.loadBloomFilter();
    }
    if (validate)
        sstable.validate();
    if (logger.isDebugEnabled())
        logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
    if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
        logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
    return sstable;
}
/** Logs an sstable open failure, distinguishing missing components from corruption. */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (e instanceof FileNotFoundException)
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
    else
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
}
/**
 * Opens a batch of sstables in parallel (one task per sstable, pool sized to the
 * number of available processors). Corrupt sstables are logged and skipped
 * rather than failing the whole batch.
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    // thread-safe sink for results produced by the worker tasks
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    logger.error("Corrupt sstable " + entry + "; skipped", ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        // effectively "wait forever" for all open tasks to finish
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  Filter bf,
                                  long maxDataAge,
                                  SSTableMetadata sstableMetadata) throws IOException
{
    // unlike open(), all state must already be fully built by the caller
    assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
    return new SSTableReader(desc,
                             components,
                             metadata,
                             partitioner,
                             ifile, dfile,
                             isummary,
                             bf,
                             maxDataAge,
                             sstableMetadata);
}
/**
 * Builds a reader over the given components. ifile, dfile, indexSummary and
 * bloomFilter may be null here; they are populated later by load() and
 * loadBloomFilter() when opening from disk.
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      Filter bloomFilter,
                      long maxDataAge,
                      SSTableMetadata sstableMetadata)
throws IOException
{
    super(desc, components, metadata, partitioner);
    this.sstableMetadata = sstableMetadata;
    this.maxDataAge = maxDataAge;
    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    this.deletingTask = new SSTableDeletingTask(this);
}
/** Registers this reader with a tracker, enabling key caching and deferred file deletion. */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    // tracked sstables share the global key cache and delegate deletion to the tracker
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/** Loads the serialized bloom filter, or installs a pass-through filter if none is on disk. */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        // no FILTER component: fall back to a filter that reports everything present
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream in = null;
    try
    {
        FileInputStream file = new FileInputStream(descriptor.filenameFor(Component.FILTER));
        in = new DataInputStream(new BufferedInputStream(file));
        bf = FilterFactory.deserialize(in, descriptor.version.filterType);
    }
    finally
    {
        FileUtils.closeQuietly(in);
    }
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * Scans the primary index end-to-end: samples keys into the index summary, re-adds
 * every key to a rebuilt bloom filter when requested, and pre-warms the key cache
 * with any of keysToLoadInCache that are encountered.
 *
 * @param recreatebloom true to rebuild the bloom filter from the index
 * @param keysToLoadInCache keys whose data positions should be pre-loaded into the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
    boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression
                                   ? SegmentedFile.getCompressedBuilder()
                                   : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader input = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    DecoratedKey left = null, right = null;
    try
    {
        long indexSize = input.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                           ? histogramCount
                           : SSTable.estimateRowsFromIndex(input); // statistics is supposed to be optional
        indexSummary = new IndexSummary(estimatedKeys);
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        while (true)
        {
            long indexPosition = input.getFilePointer();
            if (indexPosition == indexSize)
                break;
            DecoratedKey decoratedKey = null;
            int len = ByteBufferUtil.readShortLength(input);
            boolean firstKey = left == null;
            // last entry iff this key plus its long data-position exactly reach EOF
            boolean lastKey = indexPosition + DBConstants.shortSize + len + DBConstants.longSize == indexSize;
            boolean shouldAddEntry = indexSummary.shouldAddEntry();
            if (shouldAddEntry || cacheLoading || recreatebloom || firstKey || lastKey)
            {
                // only materialize the key when some consumer actually needs it
                decoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.read(input, len));
                if (firstKey)
                    left = decoratedKey;
                if (lastKey)
                    right = decoratedKey;
            }
            else
            {
                FileUtils.skipBytesFully(input, len);
            }
            long dataPosition = input.readLong();
            if (decoratedKey != null)
            {
                if (recreatebloom)
                    bf.add(decoratedKey.key);
                if (shouldAddEntry)
                    indexSummary.addEntry(decoratedKey, indexPosition);
                // if key cache could be used and we have key already pre-loaded
                // NOTE(review): cacheKey elsewhere in this file takes a RowIndexEntry, not a long —
                // confirm a (DecoratedKey, long) overload exists in the full class
                if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
                    cacheKey(decoratedKey, dataPosition);
            }
            indexSummary.incrementRowid();
            ibuilder.addPotentialBoundary(indexPosition);
            dbuilder.addPotentialBoundary(dataPosition);
        }
        indexSummary.complete();
    }
    finally
    {
        FileUtils.closeQuietly(input);
    }
    this.first = getMinimalKey(left);
    this.last = getMinimalKey(right);
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
/**
 * Sanity-checks the key bounds loaded for this sstable.
 * @throws IllegalStateException if the bounds are missing or out of order
 */
private void validate()
{
    // first/last remain null when the index was empty or loading failed; fail with a
    // clear message instead of an opaque NPE from compareTo
    if (this.first == null || this.last == null)
        throw new IllegalStateException("SSTable first/last keys were not loaded for " + descriptor);
    if (this.first.compareTo(this.last) > 0)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * Tries the on-disk SUMMARY component first; the primary index is only scanned
 * when the bloom filter must be rebuilt or no usable summary was found, and a
 * freshly built summary is saved back to disk afterwards.
 *
 * @param recreatebloom true to rebuild the bloom filter from the primary index
 */
private void load(boolean recreatebloom) throws IOException
{
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression
                                   ? SegmentedFile.getCompressedBuilder()
                                   : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    // try to load summaries from the disk and check if we need
    // to read primary index because we should re-create a BloomFilter or pre-load KeyCache
    final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
    final boolean readIndex = recreatebloom || !summaryLoaded;
    try
    {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                           ? histogramCount
                           : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
        if (recreatebloom)
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        if (!summaryLoaded)
            indexSummary = new IndexSummary(estimatedKeys);
        long indexPosition;
        while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
        {
            ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
            RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
            DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
            // track the key bounds as we go; the last iteration leaves `last` correct
            if (first == null)
                first = decoratedKey;
            last = decoratedKey;
            if (recreatebloom)
                bf.add(decoratedKey.key);
            // if summary was already read from disk we don't want to re-populate it using primary index
            if (!summaryLoaded)
            {
                indexSummary.maybeAddEntry(decoratedKey, indexPosition);
                ibuilder.addPotentialBoundary(indexPosition);
                dbuilder.addPotentialBoundary(indexEntry.position);
            }
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
    first = getMinimalKey(first);
    last = getMinimalKey(last);
    // finalize the load.
    indexSummary.complete();
    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
    if (readIndex) // save summary information to disk
        saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to load the index summary, key bounds and segment boundaries from the
 * SUMMARY component. A corrupt or unreadable summary file is deleted so a fresh
 * one can be rebuilt from the primary index.
 *
 * @return true if the summary was loaded successfully
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream iStream = null;
    try
    {
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
        reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    }
    catch (IOException e)
    {
        logger.debug("Cannot deserialize SSTable Summary: ", e);
        // corrupted hence delete it and let it load it now.
        if (summariesFile.exists())
            summariesFile.delete();
        return false;
    }
    finally
    {
        FileUtils.closeQuietly(iStream);
    }
    return true;
}
/**
 * Persists the index summary, first/last keys and segment boundaries to the
 * Component.SUMMARY file so future opens can skip scanning the primary index.
 * Any pre-existing (possibly stale) summary file is replaced; a failed write
 * deletes the partial file so it will simply be rebuilt next time.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
    if (summariesFile.exists())
        summariesFile.delete();
    DataOutputStream oStream = null;
    try
    {
        oStream = new DataOutputStream(new FileOutputStream(summariesFile));
        IndexSummary.serializer.serialize(reader.indexSummary, oStream);
        ByteBufferUtil.writeWithLength(reader.first.key, oStream);
        ByteBufferUtil.writeWithLength(reader.last.key, oStream);
        ibuilder.serializeBounds(oStream);
        dbuilder.serializeBounds(oStream);
    }
    catch (IOException e)
    {
        logger.debug("Cannot save SSTable Summary: ", e);
        // corrupted hence delete it and let it load it now.
        if (summariesFile.exists())
            summariesFile.delete();
    }
    finally
    {
        FileUtils.closeQuietly(oStream);
    }
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index < 0)
    {
        // binary search gives us the first index _greater_ than the key searched for,
        // i.e., its insertion position
        int greaterThan = (index + 1) * -1;
        if (greaterThan == 0)
            return -1; // key sorts before every sampled key: no position to scan from
        return indexSummary.getPosition(greaterThan - 1);
    }
    else
    {
        // exact hit on a sampled key
        return indexSummary.getPosition(index);
    }
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (!compression)
        throw new IllegalStateException(this + " is not compressed");
    // when compression is enabled, dfile is always a CompressedSegmentedFile (see load())
    return ((CompressedSegmentedFile)dfile).metadata;
}
/**
 * For testing purposes only.
 * Replaces the bloom filter with one that matches every key, forcing index lookups.
 */
public void forceFilterFailures()
{
    bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
/** @return the bloom filter guarding key lookups on this sstable. */
public Filter getBloomFilter()
{
    return bf;
}
/** @return the on-disk serialized size of this sstable's bloom filter, in bytes. */
public long getBloomFilterSerializedSize()
{
    return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable.
 */
public long estimatedKeys()
{
    // widen to long before multiplying: size() * interval is an int*int product
    // and can overflow for very large sstables before being assigned to long
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    long sampleKeyCount = 0;
    // count sampled keys falling in the ranges, then scale up by the sampling interval
    List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
    for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
        sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
    // never return 0: a non-empty sstable always has at least one key
    return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey> getKeySamples()
{
    return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto an inclusive [left, right] pair of
 * indexes into the sampled-key list, skipping ranges that cover no samples.
 *
 * @param samples index-summary keys, sorted
 * @param ranges  token ranges to intersect with the samples
 * @return inclusive index pairs, one per range that intersects the samples
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;

    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();

        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;

        // a wrapping range covers everything from left to the last sample
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }

        if (left > right)
            // empty range
            continue;
        // use the parameterized constructor (the raw 'new Pair(...)' form generated
        // unchecked warnings); autoboxing replaces the explicit Integer.valueOf calls
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the sampled keys that fall inside the given token range.
 * The returned Iterable walks each [left, right] index pair produced by
 * {@link #getSampleIndexesForRanges} in order; hasNext() advances to the next
 * pair when the current one is exhausted.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
    final List<DecoratedKey> samples = indexSummary.getKeys();
    final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
    if (indexRanges.isEmpty())
        return Collections.emptyList();
    return new Iterable<DecoratedKey>()
    {
        public Iterator<DecoratedKey> iterator()
        {
            return new Iterator<DecoratedKey>()
            {
                private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
                private Pair<Integer, Integer> current; // inclusive index pair being walked
                private int idx;                        // next sample index to return
                public boolean hasNext()
                {
                    if (current == null || idx > current.right)
                    {
                        // current pair exhausted (or first call): move to the next pair
                        if (rangeIter.hasNext())
                        {
                            current = rangeIter.next();
                            idx = current.left;
                            return true;
                        }
                        return false;
                    }
                    return true;
                }
                public DecoratedKey next()
                {
                    RowPosition k = samples.get(idx++);
                    // the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
                    assert k instanceof DecoratedKey;
                    return (DecoratedKey)k;
                }
                public void remove()
                {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // parameterized constructor (raw 'new Pair(...)' generated unchecked warnings);
        // autoboxing replaces the explicit Long.valueOf calls
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Inserts an index entry into the key cache, unless caching is disabled for
 * this column family (NONE / ROWS_ONLY) or the cache has zero capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    if (keyCache == null
        || caching == CFMetaData.Caching.NONE
        || caching == CFMetaData.Caching.ROWS_ONLY
        || keyCache.getCapacity() == 0)
        return;
    // avoid keeping a permanent reference to the original key buffer
    keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
/** Looks up the cached index entry for the given row key, optionally recording cache stats. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
    return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
/** Cache lookup; getInternal bypasses hit/miss accounting when updateStats is false. */
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
    if (keyCache != null && keyCache.getCapacity() > 0)
        return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
    return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
    return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The position in the data file to find the key, or -1 if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
    // first, check bloom filter
    if (op == Operator.EQ)
    {
        assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
        if (!bf.isPresent(((DecoratedKey)key).key))
            return null;
    }
    // next, the key cache (only make sense for valid row key)
    if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
    {
        DecoratedKey decoratedKey = (DecoratedKey)key;
        RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
        if (cachedPosition != null)
            return cachedPosition;
    }
    // next, see if the sampled index says it's impossible for the key to be present
    long sampledPosition = getIndexScanPosition(key);
    if (sampledPosition == -1)
    {
        if (op == Operator.EQ && updateCacheAndStats)
            bloomFilterTracker.addFalsePositive();
        // we matched the -1th position: if the operator might match forward, we'll start at the first
        // position. We however need to return the correct index entry for that first position.
        if (op.apply(1) >= 0)
            sampledPosition = 0;
        else
            return null;
    }
    // scan the on-disk index, starting at the nearest sampled position
    Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
    while (segments.hasNext())
    {
        FileDataInput input = segments.next();
        try
        {
            while (!input.isEOF())
            {
                // read key & data position from index entry
                DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
                int comparison = indexDecoratedKey.compareTo(key);
                int v = op.apply(comparison);
                if (v == 0)
                {
                    // operator satisfied: deserialize the winning entry
                    RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
                    if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
                    {
                        assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
                        DecoratedKey decoratedKey = (DecoratedKey)key;
                        // store exact match for the key
                        cacheKey(decoratedKey, indexEntry);
                    }
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addTruePositive();
                    return indexEntry;
                }
                if (v < 0)
                {
                    // operator can no longer match as keys only grow from here
                    if (op == Operator.EQ && updateCacheAndStats)
                        bloomFilterTracker.addFalsePositive();
                    return null;
                }
                // not there yet: skip over this entry's serialized position data
                RowIndexEntry.serializer.skip(input, descriptor.version);
            }
        }
        catch (IOException e)
        {
            // a read failure here means the sstable may be corrupt; flag it for blacklisting
            markSuspect();
            throw new IOError(e);
        }
        finally
        {
            FileUtils.closeQuietly(input);
        }
    }
    if (op == Operator.EQ && updateCacheAndStats)
        bloomFilterTracker.addFalsePositive();
    return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
    return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
    return dfile.onDiskLength;
}
/**
 * Increments the reference count via a CAS loop.
 * @return false if the count has already dropped to zero (reader is being retired).
 */
public boolean acquireReference()
{
    while (true)
    {
        int n = references.get();
        if (n <= 0)
            return false; // already released; must not resurrect
        if (references.compareAndSet(n, n + 1))
            return true;
        // CAS lost a race with another thread; retry
    }
}
/**
 * Drops one reference; when the last reference is released on an
 * already-compacted sstable, tears down the mmapped segments and schedules
 * file deletion.
 */
public void releaseReference()
{
    if (references.decrementAndGet() == 0 && isCompacted.get())
    {
        // Force finalizing mmapping if necessary
        ifile.cleanup();
        dfile.cleanup();
        deletingTask.schedule();
    }
    assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " compacted");
    // getAndSet makes the first-caller check atomic
    if (isCompacted.getAndSet(true))
        return false;
    try
    {
        // the marker file signals to startup code that the data files are obsolete
        if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
            throw new IOException("Compaction marker already exists");
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
    return true;
}
/**
 * Flags this sstable as suspect (e.g. after an I/O error) so it can be
 * considered for blacklisting by higher-level code.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
    isSuspect.getAndSet(true);
}
/** @return true if this sstable has been flagged as possibly corrupt. */
public boolean isMarkedSuspect()
{
    return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
    return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
    return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means scan everything
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
    if (range == null)
        return getDirectScanner();
    return new SSTableBoundedScanner(this, true, range);
}
/** @return a data-file input positioned at the given byte offset. */
public FileDataInput getFileDataInput(long position)
{
    return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
    return maxDataAge > age;
}
/**
 * Reads a row-size field from the given input, honoring the on-disk format
 * version: older versions wrote it as an int, newer ones as a long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    // the int branch is implicitly widened to long by the conditional expression
    return d.version.hasIntRowSize ? in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot directory,
 * so the snapshot shares data with the live files at zero copy cost.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
    for (Component component : components)
    {
        File sourceFile = new File(descriptor.filenameFor(component));
        File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
        CLibrary.createHardLink(sourceFile, targetLink);
    }
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 * Sstables written by old versions stored keys in a partitioner-specific encoding;
 * newer versions store the raw key bytes and only need decorating.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
    return d.version.hasEncodedKeys
         ? p.convertFromDiskFormat(bytes)
         : p.decorateKey(bytes);
}
/** Convenience overload using this reader's own partitioner and descriptor. */
public DecoratedKey decodeKey(ByteBuffer bytes)
{
    return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 *
 * Strategy objects used by getPosition to decide, for each index key compared
 * against the target, whether the scan has matched, may still match, or can stop.
 */
public abstract static class Operator {
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    final static class Equals extends Operator {
        // exact match: 0 stays 0; any mismatch flips sign so the scan stops or continues correctly
        public int apply(int comparison)
        {
            return -comparison;
        }
    }

    final static class GreaterThanOrEqualTo extends Operator {
        // index key at or past the target is a match; otherwise keep scanning forward
        public int apply(int comparison)
        {
            if (comparison >= 0)
                return 0;
            return -comparison;
        }
    }

    final static class GreaterThan extends Operator {
        // only a strictly greater index key matches; equal or smaller means keep going
        public int apply(int comparison)
        {
            if (comparison > 0)
                return 0;
            return 1;
        }
    }
}
/** @return lifetime count of bloom filter false positives for this sstable. */
public long getBloomFilterFalsePositiveCount()
{
    return bloomFilterTracker.getFalsePositiveCount();
}
/** @return bloom filter false positives since this counter was last read. */
public long getRecentBloomFilterFalsePositiveCount()
{
    return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** @return lifetime count of bloom filter true positives for this sstable. */
public long getBloomFilterTruePositiveCount()
{
    return bloomFilterTracker.getTruePositiveCount();
}
/** @return bloom filter true positives since this counter was last read. */
public long getRecentBloomFilterTruePositiveCount()
{
    return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the shared key cache, or null if this reader is untracked (see setTrackedBy). */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
    return keyCache;
}
/** @return histogram of row sizes recorded in this sstable's metadata. */
public EstimatedHistogram getEstimatedRowSize()
{
    return sstableMetadata.estimatedRowSize;
}
/** @return histogram of per-row column counts recorded in this sstable's metadata. */
public EstimatedHistogram getEstimatedColumnCount()
{
    return sstableMetadata.estimatedColumnCount;
}
/** @return estimated ratio of tombstones droppable before the given gc grace time. */
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
    return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
/** @return compressed/uncompressed size ratio from this sstable's metadata. */
public double getCompressionRatio()
{
    return sstableMetadata.compressionRatio;
}
/** @return the commit log replay position recorded when this sstable was written. */
public ReplayPosition getReplayPosition()
{
    return sstableMetadata.replayPosition;
}
/** @return the highest column timestamp contained in this sstable, per its metadata. */
public long getMaxTimestamp()
{
    return sstableMetadata.maxTimestamp;
}
/**
 * Opens a reader over the data file, transparently decompressing if this
 * sstable is compressed.
 * @param skipIOCache when true, hint the OS not to pollute the page cache
 */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
    return compression
           ? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
           : RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/** Opens a reader over the primary index file (never compressed). */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
    return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    SSTableReader failed = null;
    for (SSTableReader sstable : sstables)
    {
        if (!sstable.acquireReference())
        {
            failed = sstable;
            break;
        }
    }
    if (failed == null)
        return true;
    // roll back: release only the references acquired before the failure point
    for (SSTableReader sstable : sstables)
    {
        if (sstable == failed)
            break;
        sstable.releaseReference();
    }
    return false;
}
/**
 * Releases one reference on each sstable; a failure on one sstable is logged
 * and does not prevent releasing the rest.
 */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
    for (SSTableReader sstable : sstables)
    {
        try
        {
            sstable.releaseReference();
        }
        catch (Exception ex)
        {
            logger.error("Failed releasing reference on " + sstable, ex);
        }
    }
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
 * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
 * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
 * later than maxDataAge.
 *
 * The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
 *
 * When a new sstable is flushed, maxDataAge is set to the time of creation.
 * When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
 *
 * The age is in milliseconds since epoc and is local to this host.
 */
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary; // sampled keys, populated by load() or loadSummary()
private Filter bf;                 // bloom filter over row keys
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache; // shared cache, set by setTrackedBy()
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Rough total key count across the given sstables, scaling each sstable's
 * sampled key count up by the index interval.
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long count = 0;
    for (SSTableReader sstable : sstables)
    {
        int indexKeyCount = sstable.getKeySamples().size();
        // widen before multiplying: (indexKeyCount + 1) * interval is an int*int
        // product that can overflow before being added to the long accumulator
        count = count + ((long) indexKeyCount + 1) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + count);
    }
    return count;
}
/**
 * Opens an sstable, resolving its CFMetaData from the schema. Secondary-index
 * sstables (cfname contains the index separator) synthesize index metadata
 * from their parent column family's definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
    CFMetaData metadata;
    if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
    {
        int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
        String parentName = descriptor.cfname.substring(0, i);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    }
    else
    {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
    }
    return open(descriptor, metadata);
}
/**
 * Opens an sstable with explicit metadata; secondary-index sstables use a
 * LocalPartitioner over the key validator instead of the global partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
    IPartitioner p = desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR)
                   ? new LocalPartitioner(metadata.getKeyValidator())
                   : StorageService.getPartitioner();
    return open(desc, componentsFor(desc), metadata, p);
}
/** Opens an sstable without the first/last key sanity check (used by scrub-style tooling). */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
    return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Opens an sstable with no DataTracker (null tracker: key cache stays unset). */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
    return open(descriptor, components, null, metadata, partitioner);
}
<<<<<<< MINE
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
private static SSTableReader open(Descriptor descriptor,
Set<Component> components,
Set<DecoratedKey> savedKeys,
DataTracker tracker,
CFMetaData metadata,
IPartitioner partitioner,
boolean validate) throws IOException
=======
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
>>>>>>> YOURS
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/** Logs an open failure: a missing component gets a terse message, anything else a full stack. */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (e instanceof FileNotFoundException)
        logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
    else
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
}
/**
 * Opens many sstables in parallel (one task each, pool sized to the CPU count).
 * Corrupt sstables are logged and skipped rather than failing the whole batch.
 * Blocks until all opens have completed.
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                  final DataTracker tracker,
                                                  final CFMetaData metadata,
                                                  final IPartitioner partitioner)
{
    // thread-safe collection: opened readers are added from pool threads
    final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
    ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
    for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                SSTableReader sstable;
                try
                {
                    sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
                }
                catch (IOException ex)
                {
                    logger.error("Corrupt sstable " + entry + "; skipped", ex);
                    return;
                }
                sstables.add(sstable);
            }
        };
        executor.submit(runnable);
    }
    executor.shutdown();
    try
    {
        // effectively "wait forever" for all submitted opens to finish
        executor.awaitTermination(7, TimeUnit.DAYS);
    }
    catch (InterruptedException e)
    {
        throw new AssertionError(e);
    }
    return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 * Unlike the public open() paths, nothing is loaded from disk here: all state is
 * supplied by the caller, so every argument must be non-null.
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  Filter bf,
                                  long maxDataAge,
                                  SSTableMetadata sstableMetadata) throws IOException
{
    assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
    return new SSTableReader(desc,
                             components,
                             metadata,
                             partitioner,
                             ifile, dfile,
                             isummary,
                             bf,
                             maxDataAge,
                             sstableMetadata);
}
/**
 * Private constructor; use open()/internalOpen(). ifile/dfile/indexSummary/bloomFilter
 * may be null here and be populated later by load()/loadBloomFilter().
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      Filter bloomFilter,
                      long maxDataAge,
                      SSTableMetadata sstableMetadata)
throws IOException
{
    super(desc, components, metadata, partitioner);
    this.sstableMetadata = sstableMetadata;
    this.maxDataAge = maxDataAge;
    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    // deletion of the underlying files is deferred until the last reference is released
    this.deletingTask = new SSTableDeletingTask(this);
}
/**
 * Associates this reader with a DataTracker (may be null for untracked readers);
 * tracked readers share the global key cache and route deletion through the tracker.
 */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker != null)
    {
        keyCache = CacheService.instance.keyCache;
        deletingTask.setTracker(tracker);
    }
}
/**
 * Deserializes the bloom filter from the Component.FILTER file; if the component
 * is absent, falls back to an empty filter (every lookup proceeds to the index).
 */
void loadBloomFilter() throws IOException
{
    if (!components.contains(Component.FILTER))
    {
        bf = FilterFactory.emptyFilter();
        return;
    }
    DataInputStream stream = null;
    try
    {
        stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
        bf = FilterFactory.deserialize(stream, descriptor.version.filterType);
    }
    finally
    {
        FileUtils.closeQuietly(stream);
    }
}
/**
* Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
*/
private void load(boolean recreatebloom) throws IOException
{
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try
{
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
{
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded)
{
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
}
finally
{
FileUtils.closeQuietly(primaryIndex);
}
<<<<<<< MINE
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
=======
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
>>>>>>> YOURS
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (readIndex) // save summary information to disk
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to load the index summary, first/last keys and segmented-file
 * boundaries from the saved SUMMARY component, avoiding a full scan of the
 * primary index.
 *
 * @return true if everything deserialized successfully; false if the summary
 *         file is absent or corrupt (a corrupt file is deleted so it can be
 *         rebuilt and re-saved by the caller).
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
// fields are written in this exact order by saveSummary()
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
logger.debug("Cannot deserialize SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
 * Persists the in-memory index summary, first/last keys and segment
 * boundaries to the SUMMARY component so future opens can skip reading the
 * primary index. A failure is logged and the partial file deleted; saving is
 * best-effort, not required for correctness.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
// always rewrite from scratch; the old file may be stale
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try
{
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
// write order must mirror loadSummary()'s read order
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
}
catch (IOException e)
{
logger.debug("Cannot save SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
summariesFile.delete();
}
finally
{
FileUtils.closeQuietly(oStream);
}
}
/** Sanity-check that the recorded key range of this sstable is ordered. */
private void validate()
{
    boolean ordered = this.first.compareTo(this.last) <= 0;
    if (!ordered)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/** get the position in the index file to start scanning to find the given key (at most indexInterval keys away) */
public long getIndexScanPosition(RowPosition key)
{
assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
int index = Collections.binarySearch(indexSummary.getKeys(), key);
if (index < 0)
{
// binary search gives us the first index _greater_ than the key searched for,
// i.e., its insertion position
int greaterThan = (index + 1) * -1;
// no sampled key <= target: -1 tells the caller the key, if present, precedes the file
if (greaterThan == 0)
return -1;
// start from the greatest sampled key that is <= the target
return indexSummary.getPosition(greaterThan - 1);
}
else
{
// exact hit in the sample: scan from that entry directly
return indexSummary.getPosition(index);
}
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
if (!compression)
throw new IllegalStateException(this + " is not compressed");
// when compressed, dfile is built by the compressed builder, so the cast is safe
return ((CompressedSegmentedFile)dfile).metadata;
}
/**
 * For testing purposes only.
 */
public void forceFilterFailures()
{
// an always-matching filter forces every lookup through to the index
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
public Filter getBloomFilter()
{
return bf;
}
public long getBloomFilterSerializedSize()
{
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable.
 */
public long estimatedKeys()
{
// each sampled key stands in for roughly one index-interval's worth of keys
return indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
long sampleKeyCount = 0;
List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
// count the sampled keys covered, then scale by the sampling interval
for (Pair<Integer, Integer> sampleIndexRange : sampleIndexes)
sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left + 1);
// never return 0: callers treat this as "at least one key"
return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public Collection<DecoratedKey> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each requested token range onto the inclusive [start, end] index pair
 * of the in-memory key sample that falls inside it.
 *
 * Ranges are start-exclusive / end-inclusive; both ends are converted to the
 * greatest RowPosition for their token before binary-searching the samples.
 *
 * @param samples sorted key sample (one entry per index interval)
 * @param ranges  token ranges to cover; normalized (deoverlapped) first
 * @return sample index pairs, one per normalized range that covers >= 1 sample
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // left range are start exclusive
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        // a wrapping range covers everything from `left` to the final sample
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // range are end inclusive so we use the previous index from what binarySearch give us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // Means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // typed Pair (the raw `new Pair(...)` triggered an unchecked warning);
        // autoboxing replaces the explicit Integer.valueOf calls
        positions.add(new Pair<Integer,Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the sampled keys that fall within the given token range.
 * The iterator walks the sample-index pairs produced by
 * getSampleIndexesForRanges, yielding each sampled key in order.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>()
{
public Iterator<DecoratedKey> iterator()
{
return new Iterator<DecoratedKey>()
{
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
// current sample-index pair being walked; null until the first hasNext()
private Pair<Integer, Integer> current;
// next sample index to yield within `current` (inclusive bounds)
private int idx;
public boolean hasNext()
{
// exhausted the current pair (or not started): advance to the next pair
if (current == null || idx > current.right)
{
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        // GT: first entry strictly after the (exclusive) left bound
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // typed Pair (the raw `new Pair(...)` triggered an unchecked warning);
        // autoboxing replaces the explicit Long.valueOf calls
        positions.add(new Pair<Long,Long>(left, right));
    }
    return positions;
}
/**
 * Stores a key -> index-entry mapping in the global key cache, unless key
 * caching is disabled for this column family or the cache has zero capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
CFMetaData.Caching caching = metadata.getCaching();
if (keyCache == null
|| caching == CFMetaData.Caching.NONE
|| caching == CFMetaData.Caching.ROWS_ONLY
|| keyCache.getCapacity() == 0)
return;
// avoid keeping a permanent reference to the original key buffer
keyCache.put(new KeyCacheKey(descriptor, ByteBufferUtil.clone(key.key)), info);
}
/** Looks up the cached index entry for a key, optionally counting the access in cache stats. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
// getInternal skips hit/miss accounting; used when the caller is only probing
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
// convenience overload: always update cache and bloom-filter stats
return getPosition(key, op, true);
}
/**
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != * EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The index entry corresponding to the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, we'll start at the first
// position. We however need to return the correct index entry for that first position.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0)
{
// operator satisfied: deserialize the entry we just positioned at
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0)
{
// operator can no longer match anything further ahead: definitive miss
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// not there yet: skip this entry's payload and keep scanning forward
RowIndexEntry.serializer.skip(input, descriptor.version);
}
}
catch (IOException e)
{
// an index read failure is grounds for blacklisting this sstable
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
// exhausted every candidate segment without a match
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * length())
 */
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Atomically takes a reference on this reader via CAS.
 * @return false if the reader has already been fully released (count <= 0).
 */
public boolean acquireReference()
{
while (true)
{
int n = references.get();
// once the count has hit zero the sstable may be deleted; never resurrect it
if (n <= 0)
return false;
if (references.compareAndSet(n, n + 1))
return true;
}
}
/** Drops a reference; the last release of a compacted sstable triggers file cleanup and deletion. */
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if the this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet makes the first caller win; everyone else sees true and backs off
if (isCompacted.getAndSet(true))
return false;
try
{
// the on-disk marker records compaction across restarts
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/** Flags this sstable as suspected-corrupt so reads can route around it. */
public void markSuspect()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
isSuspect.getAndSet(true);
}
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
// second argument enables skipping the OS page cache
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
// null range means "everything": fall back to the unbounded scanner
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
/** Returns a positioned input over the data file segment containing `position`. */
public FileDataInput getFileDataInput(long position)
{
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the create of data in this sstable.
 * @param age The age to compare the maxDataAre of this sstable. Measured in millisec since epoc on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
/** Reads a serialized row size; older formats (hasIntRowSize) used 4 bytes instead of 8. */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
if (d.version.hasIntRowSize)
return in.readInt();
return in.readLong();
}
/** Hard-links every component of this sstable into the given snapshot directory. */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File sourceFile = new File(descriptor.filenameFor(component));
File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
CLibrary.createHardLink(sourceFile, targetLink);
}
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
// legacy sstable versions stored keys in a partitioner-specific disk encoding
if (d.version.hasEncodedKeys)
return p.convertFromDiskFormat(bytes);
return p.decorateKey(bytes);
}
/** Instance convenience wrapper using this reader's partitioner and descriptor. */
public DecoratedKey decodeKey(ByteBuffer bytes)
{
return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 */
public abstract static class Operator
{
    public static final Operator EQ = new Equals();
    public static final Operator GE = new GreaterThanOrEqualTo();
    public static final Operator GT = new GreaterThan();

    /**
     * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
     * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
     */
    public abstract int apply(int comparison);

    /** Matches only an exact comparison of 0; the sign is flipped to keep the forward/backward signal. */
    final static class Equals extends Operator
    {
        public int apply(int comparison)
        {
            return 0 - comparison;
        }
    }

    /** Matches any candidate >= the target (comparison of 0 or more). */
    final static class GreaterThanOrEqualTo extends Operator
    {
        public int apply(int comparison)
        {
            if (comparison < 0)
                return 0 - comparison;
            return 0;
        }
    }

    /** Matches only strictly-greater candidates; anything else may still match further forward. */
    final static class GreaterThan extends Operator
    {
        public int apply(int comparison)
        {
            return comparison <= 0 ? 1 : 0;
        }
    }
}
/** Lifetime count of bloom-filter hits that turned out not to exist. */
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
return keyCache;
}
/** Histogram of serialized row sizes, from the sstable's STATS component. */
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
/** Opens a reader over the data file, decompressing transparently if the sstable is compressed. */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
    // remember what we have acquired so a partial acquisition can be rolled back
    List<SSTableReader> acquired = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstables)
    {
        if (sstable.acquireReference())
        {
            acquired.add(sstable);
            continue;
        }
        // one acquisition failed: release everything taken so far and report failure
        for (SSTableReader referenced : acquired)
            referenced.releaseReference();
        return false;
    }
    return true;
}
/** Releases one reference per sstable, logging (not propagating) any failure so all are attempted. */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
    for (Iterator<SSTableReader> iter = sstables.iterator(); iter.hasNext();)
    {
        SSTableReader sstable = iter.next();
        try
        {
            sstable.releaseReference();
        }
        catch (Exception ex)
        {
            logger.error("Failed releasing reference on " + sstable, ex);
        }
    }
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.*;
import org.apache.cassandra.cache.KeyCacheKey;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.index.keys.KeysIndex;
import org.apache.cassandra.dht.LocalPartitioner;
import org.apache.cassandra.io.compress.CompressedRandomAccessReader;
import org.apache.cassandra.service.CacheService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.cache.InstrumentingCache;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.compress.CompressionMetadata;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
/**
* SSTableReaders are open()ed by Table.onStart; after that they are created by SSTableWriter.renameAndOpen.
* Do not re-call open() on existing SSTable files; use the references kept by ColumnFamilyStore post-start instead.
*/
public class SSTableReader extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
// guesstimated size of INDEX_INTERVAL index entries
private static final int INDEX_FILE_BUFFER_BYTES = 16 * DatabaseDescriptor.getIndexInterval();
/**
* maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an uppper bound
* to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
* later than maxDataAge.
*
* The field is not serialized to disk, so relying on it for more than what truncate does is not advised.
*
* When a new sstable is flushed, maxDataAge is set to the time of creation.
* When a sstable is created from compaction, maxDataAge is set to max of all merged tables.
*
* The age is in milliseconds since epoc and is local to this host.
*/
public final long maxDataAge;
// indexfile and datafile: might be null before a call to load()
private SegmentedFile ifile;
private SegmentedFile dfile;
private IndexSummary indexSummary;
private Filter bf;
private InstrumentingCache<KeyCacheKey, RowIndexEntry> keyCache;
private final BloomFilterTracker bloomFilterTracker = new BloomFilterTracker();
private final AtomicInteger references = new AtomicInteger(1);
// technically isCompacted is not necessary since it should never be unreferenced unless it is also compacted,
// but it seems like a good extra layer of protection against reference counting bugs to not delete data based on that alone
private final AtomicBoolean isCompacted = new AtomicBoolean(false);
private final AtomicBoolean isSuspect = new AtomicBoolean(false);
private final SSTableDeletingTask deletingTask;
private final SSTableMetadata sstableMetadata;
/**
 * Rough total key count across the given sstables, derived from each
 * reader's key sample scaled by the index interval.
 */
public static long getApproximateKeyCount(Iterable<SSTableReader> sstables)
{
    long total = 0;
    for (SSTableReader sstable : sstables)
    {
        // each sampled key stands in for ~indexInterval real keys; +1 avoids zero for tiny tables
        total = total + (sstable.getKeySamples().size() + 1) * DatabaseDescriptor.getIndexInterval();
        if (logger.isDebugEnabled())
            logger.debug("index size for bloom filter calc for file : " + sstable.getFilename() + " : " + total);
    }
    return total;
}
/**
 * Opens an sstable, resolving its CFMetaData from the live schema. A name
 * containing the secondary-index separator is treated as an index sstable:
 * the index metadata is rebuilt from the parent CF's column definition.
 */
public static SSTableReader open(Descriptor descriptor) throws IOException
{
CFMetaData metadata;
if (descriptor.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR))
{
// cfname is "<parent><sep><indexName>": split it to find the parent CF
int i = descriptor.cfname.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
String parentName = descriptor.cfname.substring(0, i);
CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
}
else
{
metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
}
return open(descriptor, metadata);
}
/**
 * Opens an sstable with explicit metadata, choosing the partitioner:
 * secondary-index sstables are keyed by indexed value, so they use a
 * LocalPartitioner over the key validator instead of the global partitioner.
 */
public static SSTableReader open(Descriptor desc, CFMetaData metadata) throws IOException
{
IPartitioner p = desc.cfname.contains(SECONDARY_INDEX_NAME_SEPARATOR)
? new LocalPartitioner(metadata.getKeyValidator())
: StorageService.getPartitioner();
return open(desc, componentsFor(desc), metadata, p);
}
/**
 * Opens an sstable without validating first/last key ordering
 * (presumably for offline tools such as scrub -- confirm with callers).
 */
public static SSTableReader openNoValidation(Descriptor descriptor, Set<Component> components, CFMetaData metadata) throws IOException
{
return open(descriptor, components, Collections.<DecoratedKey>emptySet(), null, metadata, StorageService.getPartitioner(), false);
}
/** Opens an sstable with no DataTracker (null): the reader is not registered for cleanup tracking. */
public static SSTableReader open(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, null, metadata, partitioner);
}
<<<<<<< MINE
public static SSTableReader open(Descriptor descriptor, Set<Component> components, Set<DecoratedKey> savedKeys, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
{
return open(descriptor, components, savedKeys, tracker, metadata, partitioner, true);
}
private static SSTableReader open(Descriptor descriptor,
Set<Component> components,
Set<DecoratedKey> savedKeys,
DataTracker tracker,
CFMetaData metadata,
IPartitioner partitioner,
boolean validate) throws IOException
=======
public static SSTableReader open(Descriptor descriptor, Set<Component> components, DataTracker tracker, CFMetaData metadata, IPartitioner partitioner) throws IOException
>>>>>>> YOURS
{
assert partitioner != null;
// Minimum components without which we can't do anything
assert components.contains(Component.DATA);
assert components.contains(Component.PRIMARY_INDEX);
long start = System.currentTimeMillis();
logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(COMPONENT_DATA)).length());
SSTableMetadata sstableMetadata = components.contains(Component.STATS)
? SSTableMetadata.serializer.deserialize(descriptor)
: SSTableMetadata.createDefaultInstance();
// Check if sstable is created using same partitioner.
// Partitioner can be null, which indicates older version of sstable or no stats available.
// In that case, we skip the check.
String partitionerName = partitioner.getClass().getCanonicalName();
if (sstableMetadata.partitioner != null && !partitionerName.equals(sstableMetadata.partitioner))
throw new RuntimeException(String.format("Cannot open %s because partitioner does not match %s",
descriptor, partitionerName));
SSTableReader sstable = new SSTableReader(descriptor,
components,
metadata,
partitioner,
null,
null,
null,
null,
System.currentTimeMillis(),
sstableMetadata);
sstable.setTrackedBy(tracker);
// versions before 'c' encoded keys as utf-16 before hashing to the filter
if (descriptor.version.hasStringsInBloomFilter)
{
sstable.load(true);
}
else
{
sstable.load(false);
sstable.loadBloomFilter();
}
if (validate)
sstable.validate();
if (logger.isDebugEnabled())
logger.debug("INDEX LOAD TIME for " + descriptor + ": " + (System.currentTimeMillis() - start) + " ms.");
if (logger.isDebugEnabled() && sstable.getKeyCache() != null)
logger.debug(String.format("key cache contains %s/%s keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity()));
return sstable;
}
/** Logs an open failure: missing components get a terse message, everything else a full stack trace. */
public static void logOpenException(Descriptor descriptor, IOException e)
{
    if (!(e instanceof FileNotFoundException))
    {
        logger.error("Corrupt sstable " + descriptor + "; skipped", e);
        return;
    }
    logger.error("Missing sstable component in " + descriptor + "; skipped because of " + e.getMessage());
}
/**
 * Opens many sstables in parallel (one task per entry, pool sized to the CPU
 * count). Unopenable sstables are logged and skipped, not propagated.
 *
 * @return the successfully opened readers, in nondeterministic order
 */
public static Collection<SSTableReader> batchOpen(Set<Map.Entry<Descriptor, Set<Component>>> entries,
final DataTracker tracker,
final CFMetaData metadata,
final IPartitioner partitioner)
{
// thread-safe collection: worker tasks add results concurrently
final Collection<SSTableReader> sstables = new LinkedBlockingQueue<SSTableReader>();
ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", Runtime.getRuntime().availableProcessors());
for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
{
Runnable runnable = new Runnable()
{
public void run()
{
SSTableReader sstable;
try
{
sstable = open(entry.getKey(), entry.getValue(), tracker, metadata, partitioner);
}
catch (IOException ex)
{
// skip just this sstable; the rest of the batch still opens
logger.error("Corrupt sstable " + entry + "; skipped", ex);
return;
}
sstables.add(sstable);
}
};
executor.submit(runnable);
}
executor.shutdown();
try
{
// effectively "wait forever" for all open tasks to finish
executor.awaitTermination(7, TimeUnit.DAYS);
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
return sstables;
}
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 */
static SSTableReader internalOpen(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary isummary,
Filter bf,
long maxDataAge,
SSTableMetadata sstableMetadata) throws IOException
{
// unlike open(), every piece of state must already be built by the writer
assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
return new SSTableReader(desc,
components,
metadata,
partitioner,
ifile, dfile,
isummary,
bf,
maxDataAge,
sstableMetadata);
}
/**
 * Sole constructor; ifile/dfile/indexSummary/bloomFilter may be null when
 * opened via open(), in which case load() populates them afterwards.
 */
private SSTableReader(Descriptor desc,
Set<Component> components,
CFMetaData metadata,
IPartitioner partitioner,
SegmentedFile ifile,
SegmentedFile dfile,
IndexSummary indexSummary,
Filter bloomFilter,
long maxDataAge,
SSTableMetadata sstableMetadata)
throws IOException
{
super(desc, components, metadata, partitioner);
this.sstableMetadata = sstableMetadata;
this.maxDataAge = maxDataAge;
this.ifile = ifile;
this.dfile = dfile;
this.indexSummary = indexSummary;
this.bf = bloomFilter;
// each reader owns a deletion task, scheduled when the last reference is released
this.deletingTask = new SSTableDeletingTask(this);
}
/** Registers this reader with a DataTracker (no-op when tracker is null). */
public void setTrackedBy(DataTracker tracker)
{
    if (tracker == null)
        return;
    // tracked readers share the global key cache and route deletion through the tracker
    keyCache = CacheService.instance.keyCache;
    deletingTask.setTracker(tracker);
}
/**
 * Deserializes the bloom filter from the FILTER component; when the
 * component is absent, installs an empty filter so lookups still work.
 */
void loadBloomFilter() throws IOException
{
if (!components.contains(Component.FILTER))
{
bf = FilterFactory.emptyFilter();
return;
}
DataInputStream stream = null;
try
{
stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER))));
// filter wire format depends on the sstable version
bf = FilterFactory.deserialize(stream, descriptor.version.filterType);
}
finally
{
FileUtils.closeQuietly(stream);
}
}
/**
* Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
*/
private void load(boolean recreatebloom) throws IOException
{
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
// try to load summaries from the disk and check if we need
// to read primary index because we should re-create a BloomFilter or pre-load KeyCache
final boolean summaryLoaded = loadSummary(this, ibuilder, dbuilder);
final boolean readIndex = recreatebloom || !summaryLoaded;
try
{
long indexSize = primaryIndex.length();
long histogramCount = sstableMetadata.estimatedRowSize.count();
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
? histogramCount
: estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
if (recreatebloom)
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
if (!summaryLoaded)
indexSummary = new IndexSummary(estimatedKeys);
long indexPosition;
while (readIndex && (indexPosition = primaryIndex.getFilePointer()) != indexSize)
{
ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
if (first == null)
first = decoratedKey;
last = decoratedKey;
if (recreatebloom)
bf.add(decoratedKey.key);
// if summary was already read from disk we don't want to re-populate it using primary index
if (!summaryLoaded)
{
indexSummary.maybeAddEntry(decoratedKey, indexPosition);
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(indexEntry.position);
}
}
}
finally
{
FileUtils.closeQuietly(primaryIndex);
}
<<<<<<< MINE
this.first = getMinimalKey(left);
this.last = getMinimalKey(right);
=======
first = getMinimalKey(first);
last = getMinimalKey(last);
// finalize the load.
indexSummary.complete();
>>>>>>> YOURS
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
if (readIndex) // save summary information to disk
saveSummary(this, ibuilder, dbuilder);
}
/**
 * Attempts to restore the reader's indexSummary, first/last keys and segment
 * boundaries from the serialized SUMMARY component, so a full primary-index
 * scan can be skipped.
 *
 * Side effects: on success, mutates reader.indexSummary, reader.first and
 * reader.last, and fills boundary data into the two builders.
 *
 * @return true on success; false if the summary file is absent or corrupt
 *         (a corrupt file is deleted so it can be regenerated later).
 */
public static boolean loadSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
// Read order matters: summary, first key, last key, then the two builders'
// bounds -- mirrored exactly by the write order in saveSummary below.
reader.indexSummary = IndexSummary.serializer.deserialize(iStream, reader.partitioner);
reader.first = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
reader.last = decodeKey(reader.partitioner, reader.descriptor, ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
logger.debug("Cannot deserialize SSTable Summary: ", e);
// corrupted; delete it and fall back to rebuilding from the primary index.
if (summariesFile.exists())
summariesFile.delete();
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
/**
 * Serializes the reader's index summary, first/last keys and segment
 * boundaries to the SUMMARY component. Best-effort: on failure the partial
 * file is deleted and the error is only logged, never propagated.
 */
public static void saveSummary(SSTableReader reader, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(reader.descriptor.filenameFor(Component.SUMMARY));
if (summariesFile.exists())
summariesFile.delete();
DataOutputStream oStream = null;
try
{
oStream = new DataOutputStream(new FileOutputStream(summariesFile));
// Write order must match the read order in loadSummary above.
IndexSummary.serializer.serialize(reader.indexSummary, oStream);
ByteBufferUtil.writeWithLength(reader.first.key, oStream);
ByteBufferUtil.writeWithLength(reader.last.key, oStream);
ibuilder.serializeBounds(oStream);
dbuilder.serializeBounds(oStream);
}
catch (IOException e)
{
logger.debug("Cannot save SSTable Summary: ", e);
// remove the partial file so a later loadSummary doesn't trip over it.
if (summariesFile.exists())
summariesFile.delete();
}
finally
{
FileUtils.closeQuietly(oStream);
}
}
/** Sanity-checks key ordering: the sstable's first key must not sort after its last. */
private void validate()
{
    boolean ordered = this.first.compareTo(this.last) <= 0;
    if (!ordered)
        throw new IllegalStateException(String.format("SSTable first key %s > last key %s", this.first, this.last));
}
/**
 * Get the position in the index file to start scanning to find the given key
 * (at most indexInterval keys away).
 *
 * @return the position of the sampled entry at or before {@code key}, or -1
 *         if the key sorts before every sampled key.
 */
public long getIndexScanPosition(RowPosition key)
{
    assert indexSummary.getKeys() != null && indexSummary.getKeys().size() > 0;
    int index = Collections.binarySearch(indexSummary.getKeys(), key);
    if (index >= 0)
        return indexSummary.getPosition(index); // exact hit on a sampled key
    // A negative result encodes (-(insertionPoint) - 1); recover the insertion point.
    int insertionPoint = -(index + 1);
    // insertionPoint == 0 means the key sorts before the first sample.
    return insertionPoint == 0 ? -1 : indexSummary.getPosition(insertionPoint - 1);
}
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (compression)
        return ((CompressedSegmentedFile) dfile).metadata;
    throw new IllegalStateException(this + " is not compressed");
}
/**
 * For testing purposes only: swaps in a filter that reports every key as
 * present, forcing bloom-filter false positives on lookups.
 */
public void forceFilterFailures()
{
bf = LegacyBloomFilter.alwaysMatchingBloomFilter();
}
/** @return the bloom filter guarding index lookups for this sstable. */
public Filter getBloomFilter()
{
return bf;
}
/** @return the serialized size in bytes of this sstable's bloom filter. */
public long getBloomFilterSerializedSize()
{
return FilterFactory.serializedSize(bf, descriptor.version.filterType);
}
/**
 * @return An estimate of the number of keys in this SSTable: the number of
 *         sampled index entries times the sampling interval.
 */
public long estimatedKeys()
{
    // FIX: cast before multiplying. size() and the index interval are both ints,
    // so the product was computed in 32-bit arithmetic and could overflow before
    // being widened to the long return type.
    return (long) indexSummary.getKeys().size() * DatabaseDescriptor.getIndexInterval();
}
/**
 * @param ranges token ranges of interest
 * @return An estimate of the number of keys for given ranges in this SSTable.
 */
public long estimatedKeysForRanges(Collection<Range<Token>> ranges)
{
    List<Pair<Integer, Integer>> sampleIndexes = getSampleIndexesForRanges(indexSummary.getKeys(), ranges);
    long sampleKeyCount = 0;
    for (Pair<Integer, Integer> bounds : sampleIndexes)
        sampleKeyCount += bounds.right - bounds.left + 1;
    // Never report fewer than one key.
    return Math.max(1, sampleKeyCount * DatabaseDescriptor.getIndexInterval());
}
/**
 * @return Approximately 1/INDEX_INTERVALth of the keys in this SSTable.
 * Note: returns the summary's live sample list, not a defensive copy.
 */
public Collection<DecoratedKey> getKeySamples()
{
return indexSummary.getKeys();
}
/**
 * Maps each (normalized) token range onto the inclusive [left, right] indexes
 * of the sampled keys it covers.
 *
 * @param samples sorted list of sampled keys from the index summary
 * @param ranges token ranges of interest
 * @return one (left, right) sample-index pair per range that actually
 *         intersects the sampling; non-intersecting ranges are skipped.
 */
private static List<Pair<Integer,Integer>> getSampleIndexesForRanges(List<DecoratedKey> samples, Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Integer,Integer>> positions = new ArrayList<Pair<Integer,Integer>>();
    if (samples.isEmpty())
        return positions;
    for (Range<Token> range : Range.normalize(ranges))
    {
        RowPosition leftPosition = range.left.maxKeyBound();
        RowPosition rightPosition = range.right.maxKeyBound();
        int left = Collections.binarySearch(samples, leftPosition);
        if (left < 0)
            left = (left + 1) * -1;
        else
            // ranges are start-exclusive, so skip past the matching sample itself
            left = left + 1;
        if (left == samples.size())
            // left is past the end of the sampling
            continue;
        int right = Range.isWrapAround(range.left, range.right)
                  ? samples.size() - 1
                  : Collections.binarySearch(samples, rightPosition);
        if (right < 0)
        {
            // ranges are end-inclusive so we use the previous index from what binarySearch gives us
            // since that will be the last index we will return
            right = (right + 1) * -1;
            if (right == 0)
                // means the first key is already strictly greater than the right bound
                continue;
            right--;
        }
        if (left > right)
            // empty range
            continue;
        // FIX: use explicit type arguments instead of the raw Pair type (which
        // produced an unchecked warning); autoboxing replaces Integer.valueOf.
        positions.add(new Pair<Integer, Integer>(left, right));
    }
    return positions;
}
/**
 * Lazily iterates the sampled keys that fall inside the given range.
 * Backed directly by the summary's sample list; no keys are copied.
 */
public Iterable<DecoratedKey> getKeySamples(final Range<Token> range)
{
final List<DecoratedKey> samples = indexSummary.getKeys();
final List<Pair<Integer, Integer>> indexRanges = getSampleIndexesForRanges(samples, Collections.singletonList(range));
if (indexRanges.isEmpty())
return Collections.emptyList();
return new Iterable<DecoratedKey>()
{
public Iterator<DecoratedKey> iterator()
{
return new Iterator<DecoratedKey>()
{
// walks the inclusive [left, right] sample-index intervals in order
private Iterator<Pair<Integer, Integer>> rangeIter = indexRanges.iterator();
private Pair<Integer, Integer> current;
private int idx;
public boolean hasNext()
{
if (current == null || idx > current.right)
{
// current interval not started or exhausted: advance to the next one
if (rangeIter.hasNext())
{
current = rangeIter.next();
idx = current.left;
return true;
}
return false;
}
return true;
}
public DecoratedKey next()
{
RowPosition k = samples.get(idx++);
// the index should only contain valid row key, we only allow RowPosition in KeyPosition for search purposes
assert k instanceof DecoratedKey;
return (DecoratedKey)k;
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
}
};
}
/**
 * Determine the minimal set of sections that can be extracted from this SSTable to cover the given ranges.
 * @return A sorted list of (offset,end) pairs that cover the given ranges in the datafile for this SSTable.
 */
public List<Pair<Long,Long>> getPositionsForRanges(Collection<Range<Token>> ranges)
{
    // use the index to determine a minimal section for each range
    List<Pair<Long,Long>> positions = new ArrayList<Pair<Long,Long>>();
    for (Range<Token> range : Range.normalize(ranges))
    {
        AbstractBounds<RowPosition> keyRange = range.toRowBounds();
        RowIndexEntry idxLeft = getPosition(keyRange.left, Operator.GT);
        long left = idxLeft == null ? -1 : idxLeft.position;
        if (left == -1)
            // left is past the end of the file
            continue;
        RowIndexEntry idxRight = getPosition(keyRange.right, Operator.GT);
        long right = idxRight == null ? -1 : idxRight.position;
        if (right == -1 || Range.isWrapAround(range.left, range.right))
            // right is past the end of the file, or it wraps
            right = uncompressedLength();
        if (left == right)
            // empty range
            continue;
        // FIX: use explicit type arguments instead of the raw Pair type (which
        // produced an unchecked warning); autoboxing replaces Long.valueOf.
        positions.add(new Pair<Long, Long>(left, right));
    }
    return positions;
}
/**
 * Adds an entry to the key cache, unless key caching is disabled for this
 * column family or the cache has no capacity.
 */
public void cacheKey(DecoratedKey key, RowIndexEntry info)
{
    CFMetaData.Caching caching = metadata.getCaching();
    boolean cachingDisabled = caching == CFMetaData.Caching.NONE || caching == CFMetaData.Caching.ROWS_ONLY;
    if (keyCache == null || cachingDisabled || keyCache.getCapacity() == 0)
        return;
    // avoid keeping a permanent reference to the original key buffer
    ByteBuffer keyCopy = ByteBufferUtil.clone(key.key);
    keyCache.put(new KeyCacheKey(descriptor, keyCopy), info);
}
/** Key-cache lookup by row key; delegates to the KeyCacheKey-based overload. */
public RowIndexEntry getCachedPosition(DecoratedKey key, boolean updateStats)
{
return getCachedPosition(new KeyCacheKey(descriptor, key.key), updateStats);
}
/**
 * @param updateStats when false, reads the cache without updating hit/miss
 *        statistics (uses getInternal instead of get).
 * @return the cached index entry, or null on miss or when caching is disabled.
 */
private RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
if (keyCache != null && keyCache.getCapacity() > 0)
return updateStats ? keyCache.get(unifiedKey) : keyCache.getInternal(unifiedKey);
return null;
}
/**
 * Get position updating key cache and stats.
 * @see #getPosition(org.apache.cassandra.db.RowPosition, org.apache.cassandra.io.sstable.SSTableReader.Operator, boolean)
 */
public RowIndexEntry getPosition(RowPosition key, Operator op)
{
return getPosition(key, op, true);
}
/**
 * Finds the data-file index entry for the given key, consulting (in order)
 * the bloom filter, the key cache, the sampled index summary, and finally a
 * linear scan of the on-disk primary index starting at the sampled position.
 *
 * @param key The key to apply as the rhs to the given Operator. A 'fake' key is allowed to
 * allow key selection by token bounds but only if op != EQ
 * @param op The Operator defining matching keys: the nearest key to the target matching the operator wins.
 * @param updateCacheAndStats true if updating stats and cache
 * @return The index entry corresponding to the key, or null if the key is not present
 */
public RowIndexEntry getPosition(RowPosition key, Operator op, boolean updateCacheAndStats)
{
// first, check bloom filter
if (op == Operator.EQ)
{
assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
if (!bf.isPresent(((DecoratedKey)key).key))
return null;
}
// next, the key cache (only make sense for valid row key)
if ((op == Operator.EQ || op == Operator.GE) && (key instanceof DecoratedKey))
{
DecoratedKey decoratedKey = (DecoratedKey)key;
RowIndexEntry cachedPosition = getCachedPosition(new KeyCacheKey(descriptor, decoratedKey.key), updateCacheAndStats);
if (cachedPosition != null)
return cachedPosition;
}
// next, see if the sampled index says it's impossible for the key to be present
long sampledPosition = getIndexScanPosition(key);
if (sampledPosition == -1)
{
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
// we matched the -1th position: if the operator might match forward, we'll start at the first
// position. We however need to return the correct index entry for that first position.
if (op.apply(1) >= 0)
sampledPosition = 0;
else
return null;
}
// scan the on-disk index, starting at the nearest sampled position
Iterator<FileDataInput> segments = ifile.iterator(sampledPosition, INDEX_FILE_BUFFER_BYTES);
while (segments.hasNext())
{
FileDataInput input = segments.next();
try
{
while (!input.isEOF())
{
// read key & data position from index entry
DecoratedKey indexDecoratedKey = decodeKey(partitioner, descriptor, ByteBufferUtil.readWithShortLength(input));
int comparison = indexDecoratedKey.compareTo(key);
int v = op.apply(comparison);
if (v == 0)
{
// operator matched: now deserialize the full entry (skipped for non-matches)
RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(input, descriptor.version);
if (comparison == 0 && keyCache != null && keyCache.getCapacity() > 0 && updateCacheAndStats)
{
assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
DecoratedKey decoratedKey = (DecoratedKey)key;
// store exact match for the key
cacheKey(decoratedKey, indexEntry);
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addTruePositive();
return indexEntry;
}
if (v < 0)
{
// operator can no longer match any later (larger) key: give up
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
// not there yet: skip this entry's payload and keep scanning forward
RowIndexEntry.serializer.skip(input, descriptor.version);
}
}
catch (IOException e)
{
// index read failure: flag the sstable for blacklisting before bailing out
markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(input);
}
}
if (op == Operator.EQ && updateCacheAndStats)
bloomFilterTracker.addFalsePositive();
return null;
}
/**
 * @return The length in bytes of the data for this SSTable. For
 * compressed files, this is not the same thing as the on disk size (see
 * onDiskLength())
 */
public long uncompressedLength()
{
return dfile.length;
}
/**
 * @return The length in bytes of the on disk size for this SSTable. For
 * compressed files, this is not the same thing as the data length (see
 * uncompressedLength())
 */
public long onDiskLength()
{
return dfile.onDiskLength;
}
/**
 * Atomically increments the reference count, failing if the count has already
 * dropped to zero (the reader has been released).
 *
 * @return true if a reference was acquired, false if the reader is defunct.
 */
public boolean acquireReference()
{
    for (;;)
    {
        int current = references.get();
        if (current <= 0)
            return false; // already fully released; cannot resurrect
        boolean swapped = references.compareAndSet(current, current + 1);
        if (swapped)
            return true;
        // CAS lost a race with another thread; re-read and retry.
    }
}
/**
 * Drops one reference; when the last reference is gone and the sstable has
 * been marked compacted, cleans up the mmapped files and schedules deletion.
 */
public void releaseReference()
{
if (references.decrementAndGet() == 0 && isCompacted.get())
{
// Force finalizing mmapping if necessary
ifile.cleanup();
dfile.cleanup();
deletingTask.schedule();
}
assert references.get() >= 0 : "Reference counter " + references.get() + " for " + dfile.path;
}
/**
 * Mark the sstable as compacted.
 * When calling this function, the caller must ensure that the SSTableReader is not referenced anywhere
 * except for threads holding a reference.
 *
 * @return true if this is the first time the file was marked compacted. With rare exceptions
 * (see DataTracker.unmarkCompacted) calling this multiple times would be buggy.
 */
public boolean markCompacted()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " compacted");
// getAndSet guarantees exactly one caller ever observes the false -> true transition
if (isCompacted.getAndSet(true))
return false;
try
{
if (!new File(descriptor.filenameFor(Component.COMPACTED_MARKER)).createNewFile())
throw new IOException("Compaction marker already exists");
}
catch (IOException e)
{
throw new IOError(e);
}
return true;
}
/** Flags this sstable as potentially corrupt so it can be blacklisted. */
public void markSuspect()
{
if (logger.isDebugEnabled())
logger.debug("Marking " + getFilename() + " as a suspect for blacklisting.");
isSuspect.getAndSet(true);
}
/** @return whether this sstable has been marked suspect. */
public boolean isMarkedSuspect()
{
return isSuspect.get();
}
/**
 *
 * @param filter filter to use when reading the columns
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getScanner(QueryFilter filter)
{
return new SSTableScanner(this, filter);
}
/**
 * Direct I/O SSTableScanner
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner()
{
return new SSTableScanner(this, true);
}
/**
 * Direct I/O SSTableScanner over a defined range of tokens.
 *
 * @param range the range of keys to cover; null means the whole sstable
 * @return A Scanner for seeking over the rows of the SSTable.
 */
public SSTableScanner getDirectScanner(Range<Token> range)
{
if (range == null)
return getDirectScanner();
return new SSTableBoundedScanner(this, true, range);
}
/** @return a data-file input positioned at the given offset. */
public FileDataInput getFileDataInput(long position)
{
return dfile.getSegment(position);
}
/**
 * Tests if the sstable contains data newer than the given age param (in localhost currentMilli time).
 * This works in conjunction with maxDataAge which is an upper bound on the creation time of data in this sstable.
 * @param age The age to compare the maxDataAge of this sstable. Measured in millisec since epoch on this host
 * @return True iff this sstable contains data that's newer than the given age parameter.
 */
public boolean newSince(long age)
{
return maxDataAge > age;
}
/**
 * Reads a row size from the given input, honoring the on-disk format version:
 * older formats store it as a 32-bit int, newer ones as a 64-bit long.
 */
public static long readRowSize(DataInput in, Descriptor d) throws IOException
{
    return d.version.hasIntRowSize ? (long) in.readInt() : in.readLong();
}
/**
 * Hard-links every component of this sstable into the given snapshot
 * directory, preserving the component file names.
 */
public void createLinks(String snapshotDirectoryPath) throws IOException
{
for (Component component : components)
{
File sourceFile = new File(descriptor.filenameFor(component));
File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
CLibrary.createHardLink(sourceFile, targetLink);
}
}
/**
 * Conditionally use the deprecated 'IPartitioner.convertFromDiskFormat' method.
 */
public static DecoratedKey decodeKey(IPartitioner p, Descriptor d, ByteBuffer bytes)
{
// legacy format versions stored keys in a partitioner-specific on-disk encoding
if (d.version.hasEncodedKeys)
return p.convertFromDiskFormat(bytes);
return p.decorateKey(bytes);
}
/** Convenience overload using this reader's own partitioner and descriptor. */
public DecoratedKey decodeKey(ByteBuffer bytes)
{
return decodeKey(partitioner, descriptor, bytes);
}
/**
 * TODO: Move someplace reusable
 *
 * Encodes how an index-scan key comparison should be interpreted for a given
 * lookup operator (EQ / GE / GT). Callers test only the sign of apply().
 */
public abstract static class Operator
{
public static final Operator EQ = new Equals();
public static final Operator GE = new GreaterThanOrEqualTo();
public static final Operator GT = new GreaterThan();
/**
 * @param comparison The result of a call to compare/compareTo, with the desired field on the rhs.
 * @return less than 0 if the operator cannot match forward, 0 if it matches, greater than 0 if it might match forward.
 */
public abstract int apply(int comparison);
final static class Equals extends Operator
{
// matches only equality; sign flip makes a "scanned key too small" result keep scanning
public int apply(int comparison) { return -comparison; }
}
final static class GreaterThanOrEqualTo extends Operator
{
// any scanned key >= target matches; otherwise keep scanning forward
public int apply(int comparison) { return comparison >= 0 ? 0 : -comparison; }
}
final static class GreaterThan extends Operator
{
// only a strictly greater scanned key matches; equal keys keep scanning
public int apply(int comparison) { return comparison > 0 ? 0 : 1; }
}
}
/** @return lifetime count of bloom-filter false positives for this sstable. */
public long getBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getFalsePositiveCount();
}
/** @return recent-window count of bloom-filter false positives. */
public long getRecentBloomFilterFalsePositiveCount()
{
return bloomFilterTracker.getRecentFalsePositiveCount();
}
/** @return lifetime count of bloom-filter true positives for this sstable. */
public long getBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getTruePositiveCount();
}
/** @return recent-window count of bloom-filter true positives. */
public long getRecentBloomFilterTruePositiveCount()
{
return bloomFilterTracker.getRecentTruePositiveCount();
}
/** @return the key cache; may be null (callers elsewhere null-check it). */
public InstrumentingCache<KeyCacheKey, RowIndexEntry> getKeyCache()
{
return keyCache;
}
/** @return histogram of row sizes from the sstable metadata. */
public EstimatedHistogram getEstimatedRowSize()
{
return sstableMetadata.estimatedRowSize;
}
/** @return histogram of per-row column counts from the sstable metadata. */
public EstimatedHistogram getEstimatedColumnCount()
{
return sstableMetadata.estimatedColumnCount;
}
/** @return estimated droppable-tombstone ratio for the given gc grace cutoff. */
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
return sstableMetadata.getEstimatedDroppableTombstoneRatio(gcBefore);
}
/** @return the compression ratio recorded in the sstable metadata. */
public double getCompressionRatio()
{
return sstableMetadata.compressionRatio;
}
/** @return the commitlog replay position recorded in the sstable metadata. */
public ReplayPosition getReplayPosition()
{
return sstableMetadata.replayPosition;
}
/** @return the max column timestamp recorded in the sstable metadata. */
public long getMaxTimestamp()
{
return sstableMetadata.maxTimestamp;
}
/** Opens a reader over the data file, decompressing transparently if compressed. */
public RandomAccessReader openDataReader(boolean skipIOCache) throws IOException
{
return compression
? CompressedRandomAccessReader.open(getFilename(), getCompressionMetadata(), skipIOCache)
: RandomAccessReader.open(new File(getFilename()), skipIOCache);
}
/** Opens a reader over the primary index file. */
public RandomAccessReader openIndexReader(boolean skipIOCache) throws IOException
{
return RandomAccessReader.open(new File(getIndexFilename()), skipIOCache);
}
/**
 * @param sstables
 * @return true if all desired references were acquired. Otherwise, it will unreference any partial acquisition, and return false.
 */
public static boolean acquireReferences(Iterable<SSTableReader> sstables)
{
SSTableReader failed = null;
for (SSTableReader sstable : sstables)
{
if (!sstable.acquireReference())
{
failed = sstable;
break;
}
}
if (failed == null)
return true;
// roll back: release everything acquired before the failure point
for (SSTableReader sstable : sstables)
{
if (sstable == failed)
break;
sstable.releaseReference();
}
return false;
}
/** Releases one reference on each sstable, logging (not propagating) failures. */
public static void releaseReferences(Iterable<SSTableReader> sstables)
{
for (SSTableReader sstable : sstables)
{
try
{
sstable.releaseReference();
}
catch (Exception ex)
{
logger.error("Failed releasing reference on " + sstable, ex);
}
}
}
}
Diff Result
No diff
Case 6 - java_cassandra.rev_29a8b_c5ee0..Message.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
/**
 * An internode message: a routing/metadata {@code Header} plus an opaque,
 * already-serialized body. The header and body references are immutable;
 * header details may still be mutated via set/removeHeader.
 */
public class Message
{
    // Final field with an initializer replaces the old non-final field + static
    // init block: same eager initialization, stronger immutability guarantee.
    private static final ICompactSerializer<Message> serializer_ = new MessageSerializer();

    public static ICompactSerializer<Message> serializer()
    {
        return serializer_;
    }

    final Header header_;
    private final byte[] body_;

    Message(Header header, byte[] body)
    {
        assert header != null;
        assert body != null;
        header_ = header;
        body_ = body;
    }

    public Message(InetAddress from, StorageService.Verb verb, byte[] body)
    {
        this(new Header(from, verb), body);
    }

    public byte[] getHeader(String key)
    {
        return header_.getDetail(key);
    }

    public void setHeader(String key, byte[] value)
    {
        header_.setDetail(key, value);
    }

    public void removeHeader(String key)
    {
        header_.removeDetail(key);
    }

    public byte[] getMessageBody()
    {
        return body_;
    }

    public InetAddress getFrom()
    {
        return header_.getFrom();
    }

    public Stage getMessageType()
    {
        return StorageService.verbStages.get(getVerb());
    }

    public StorageService.Verb getVerb()
    {
        return header_.getVerb();
    }

    public String getMessageId()
    {
        return header_.getMessageId();
    }

    // TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
    public Message getReply(InetAddress from, byte[] args)
    {
        Header header = new Header(getMessageId(), from, StorageService.Verb.REQUEST_RESPONSE);
        return new Message(header, args);
    }

    public Message getInternalReply(byte[] body)
    {
        Header header = new Header(getMessageId(), FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
        return new Message(header, body);
    }

    @Override
    public String toString()
    {
        String separator = System.getProperty("line.separator");
        // Chained appends avoid the intermediate Strings the old '+'-inside-append
        // form created; the produced text is identical.
        StringBuilder sbuf = new StringBuilder();
        sbuf.append("ID:").append(getMessageId())
            .append(separator)
            .append("FROM:").append(getFrom())
            .append(separator)
            .append("TYPE:").append(getMessageType())
            .append(separator)
            .append("VERB:").append(getVerb())
            .append(separator);
        return sbuf.toString();
    }

    /** Wire format: serialized header, then an int body length, then the raw body bytes. */
    private static class MessageSerializer implements ICompactSerializer<Message>
    {
        public void serialize(Message t, DataOutputStream dos) throws IOException
        {
            Header.serializer().serialize(t.header_, dos);
            byte[] bytes = t.getMessageBody();
            dos.writeInt(bytes.length);
            dos.write(bytes);
        }

        public Message deserialize(DataInputStream dis) throws IOException
        {
            Header header = Header.serializer().deserialize(dis);
            int size = dis.readInt();
            byte[] bytes = new byte[size];
            dis.readFully(bytes);
            return new Message(header, bytes);
        }
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
/**
 * An internode message: a routing/metadata Header plus an opaque serialized
 * body. (This is a second, byte-identical copy of the Message class appearing
 * earlier in this dump; left as-is.)
 */
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
/** @return the singleton wire serializer for Message. */
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
Message(Header header, byte[] body)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body)
{
this(new Header(from, verb), body);
}
/** Reads a named detail from the header. */
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
/** Sets a named detail on the header (mutates the shared Header). */
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
/** @return the processing stage registered for this message's verb. */
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
public String getMessageId()
{
return header_.getMessageId();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
/** Builds a REQUEST_RESPONSE reply carrying this message's id. */
public Message getReply(InetAddress from, byte[] args)
{
Header header = new Header(getMessageId(), from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
/** Builds an INTERNAL_RESPONSE reply from the local address, reusing this message's id. */
public Message getInternalReply(byte[] body)
{
Header header = new Header(getMessageId(), FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("ID:" + getMessageId())
.append(separator)
.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
/** Wire format: serialized header, int body length, raw body bytes. */
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos) throws IOException
{
Header.serializer().serialize( t.header_, dos);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis) throws IOException
{
Header header = Header.serializer().deserialize(dis);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
// return new Message(header.getMessageId(), header.getFrom(), header.getMessageType(), header.getVerb(), new Object[]{bytes});
return new Message(header, bytes);
}
}
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
/**
 * An internode message: a routing/metadata Header plus an opaque serialized
 * body. (This "Left" variant of the Message class has no message-id support:
 * getMessageId is gone, replies construct Headers without an id, and toString
 * omits the ID line.)
 */
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
/** @return the singleton wire serializer for Message. */
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
Message(Header header, byte[] body)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body)
{
this(new Header(from, verb), body);
}
/** Reads a named detail from the header. */
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
/** Sets a named detail on the header (mutates the shared Header). */
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
/** @return the processing stage registered for this message's verb. */
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
/** Builds a REQUEST_RESPONSE reply addressed from the given endpoint. */
public Message getReply(InetAddress from, byte[] args)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
/** Builds an INTERNAL_RESPONSE reply from the local address. */
public Message getInternalReply(byte[] body)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
/** Wire format: serialized header, int body length, raw body bytes. */
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos) throws IOException
{
Header.serializer().serialize( t.header_, dos);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis) throws IOException
{
Header header = Header.serializer().deserialize(dis);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
// return new Message(header.getMessageId(), header.getFrom(), header.getMessageType(), header.getVerb(), new Object[]{bytes});
return new Message(header, bytes);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
Message(Header header, byte[] body)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body)
{
this(new Header(from, verb), body);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] args)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
public Message getInternalReply(byte[] body)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
public String toString()
{
    // Render the routing metadata (sender address, delivery stage, verb),
    // one "KEY:value" field per platform-specific line.
    String newline = System.getProperty("line.separator");
    StringBuilder out = new StringBuilder();
    out.append("FROM:" + getFrom());
    out.append(newline);
    out.append("TYPE:" + getMessageType());
    out.append(newline);
    out.append("VERB:" + getVerb());
    out.append(newline);
    return out.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos) throws IOException
{
Header.serializer().serialize( t.header_, dos);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis) throws IOException
{
Header header = Header.serializer().deserialize(dis);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
// return new Message(header.getMessageId(), header.getFrom(), header.getMessageType(), header.getVerb(), new Object[]{bytes});
return new Message(header, bytes);
}
}
}
Right
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
MergeMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static {
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer() {
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version) {
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version) {
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key) {
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value) {
header_.setDetail(key, value);
}
public void removeHeader(String key) {
header_.removeDetail(key);
}
public byte[] getMessageBody() {
return body_;
}
public int getVersion() {
return version;
}
public InetAddress getFrom() {
return header_.getFrom();
}
public Stage getMessageType() {
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb() {
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version) {
    // Build a REQUEST_RESPONSE reply carrying the given payload.
    Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
    // Bug fix: the original returned `new Message(header, args)` — `args` is
    // undefined in this scope and the two-argument constructor does not exist
    // in this versioned variant of the class. Forward `body` and `version`.
    return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version) {
    // Build an INTERNAL_RESPONSE reply originating from the local node.
    Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
    // Bug fix: the original dropped `version` and called a two-argument
    // Message constructor that does not exist in this versioned variant;
    // propagate the protocol version to the three-argument constructor.
    return new Message(header, body, version);
}
public String toString() {
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom()).append(separator).append("TYPE:" + getMessageType()).append(separator).append("VERB:" + getVerb()).append(separator);
return sbuf.toString();
}
// Wire codec for Message: a versioned Header followed by a length-prefixed
// raw body. serialize/deserialize must stay exact mirrors of each other,
// since the peer node decodes with the same format.
private static class MessageSerializer implements ICompactSerializer<Message> {
// Writes the header (version-aware), then the body length as a 4-byte int,
// then the body bytes verbatim.
public void serialize(Message t, DataOutputStream dos, int version) throws IOException {
// indicates programmer error.
assert t.getVersion() == version : "internode protocol version mismatch";
Header.serializer().serialize(t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
// Reads back in the same order: header, 4-byte length, exactly that many
// body bytes (readFully blocks until all arrive or throws EOFException).
public Message deserialize(DataInputStream dis, int version) throws IOException {
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
// Tag the reconstructed message with the stream's protocol version.
return new Message(header, bytes, version);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static {
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer() {
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version) {
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version) {
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key) {
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value) {
header_.setDetail(key, value);
}
public void removeHeader(String key) {
header_.removeDetail(key);
}
public byte[] getMessageBody() {
return body_;
}
public int getVersion() {
return version;
}
public InetAddress getFrom() {
return header_.getFrom();
}
public Stage getMessageType() {
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb() {
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version) {
    // Build a REQUEST_RESPONSE reply carrying the given payload.
    Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
    // Bug fix: the original returned `new Message(header, args)` — `args` is
    // undefined in this scope and the two-argument constructor does not exist
    // in this versioned variant of the class. Forward `body` and `version`.
    return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version) {
    // Build an INTERNAL_RESPONSE reply originating from the local node.
    Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
    // Bug fix: the original dropped `version` and called a two-argument
    // Message constructor that does not exist in this versioned variant;
    // propagate the protocol version to the three-argument constructor.
    return new Message(header, body, version);
}
public String toString() {
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom()).append(separator).append("TYPE:" + getMessageType()).append(separator).append("VERB:" + getVerb()).append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message> {
public void serialize(Message t, DataOutputStream dos, int version) throws IOException {
// indicates programmer error.
assert t.getVersion() == version : "internode protocol version mismatch";
Header.serializer().serialize(t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException {
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
KeepBothMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static {
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer() {
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version) {
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version) {
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key) {
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value) {
header_.setDetail(key, value);
}
public void removeHeader(String key) {
header_.removeDetail(key);
}
public byte[] getMessageBody() {
return body_;
}
public int getVersion() {
return version;
}
public InetAddress getFrom() {
return header_.getFrom();
}
public Stage getMessageType() {
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb() {
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] args) {
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
public Message getInternalReply(byte[] body) {
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version) {
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version) {
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString() {
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom()).append(separator).append("TYPE:" + getMessageType()).append(separator).append("VERB:" + getVerb()).append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message> {
public void serialize(Message t, DataOutputStream dos, int version) throws IOException {
// indicates programmer error.
assert t.getVersion() == version : "internode protocol version mismatch";
Header.serializer().serialize(t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException {
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static {
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer() {
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version) {
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version) {
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key) {
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value) {
header_.setDetail(key, value);
}
public void removeHeader(String key) {
header_.removeDetail(key);
}
public byte[] getMessageBody() {
return body_;
}
public int getVersion() {
return version;
}
public InetAddress getFrom() {
return header_.getFrom();
}
public Stage getMessageType() {
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb() {
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] args) {
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
public Message getInternalReply(byte[] body) {
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
public Message getReply(InetAddress from, byte[] body, int version) {
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
public Message getInternalReply(byte[] body, int version) {
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString() {
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom()).append(separator).append("TYPE:" + getMessageType()).append(separator).append("VERB:" + getVerb()).append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message> {
public void serialize(Message t, DataOutputStream dos, int version) throws IOException {
// indicates programmer error.
assert t.getVersion() == version : "internode protocol version mismatch";
Header.serializer().serialize(t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException {
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
<<<<<<< MINE
public Message getReply(InetAddress from, byte[] body, int version)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
=======
public Message getReply(InetAddress from, byte[] args)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
>>>>>>> YOURS
public Message getInternalReply(byte[] body)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message> {
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message {
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
<<<<<<< MINE
public Message getReply(InetAddress from, byte[] body, int version)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, body, version);
}
=======
public Message getReply(InetAddress from, byte[] args)
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
return new Message(header, args);
}
>>>>>>> YOURS
public Message getInternalReply(byte[] body)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body);
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
return new Message(header, body, version);
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message> {
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
Unstructured
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
<<<<<<< MINE
public Message getReply(InetAddress from, byte[] args)
=======
public Message getReply(InetAddress from, byte[] body, int version)
>>>>>>> YOURS
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
<<<<<<< MINE
return new Message(header, args);
=======
return new Message(header, body, version);
>>>>>>> YOURS
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
<<<<<<< MINE
return new Message(header, body);
=======
return new Message(header, body, version);
>>>>>>> YOURS
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.io.ICompactSerializer;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
public class Message
{
private static ICompactSerializer<Message> serializer_;
static
{
serializer_ = new MessageSerializer();
}
public static ICompactSerializer<Message> serializer()
{
return serializer_;
}
final Header header_;
private final byte[] body_;
private final transient int version;
private Message(Header header, byte[] body, int version)
{
assert header != null;
assert body != null;
header_ = header;
body_ = body;
this.version = version;
}
public Message(InetAddress from, StorageService.Verb verb, byte[] body, int version)
{
this(new Header(from, verb), body, version);
}
public byte[] getHeader(String key)
{
return header_.getDetail(key);
}
public void setHeader(String key, byte[] value)
{
header_.setDetail(key, value);
}
public void removeHeader(String key)
{
header_.removeDetail(key);
}
public byte[] getMessageBody()
{
return body_;
}
public int getVersion()
{
return version;
}
public InetAddress getFrom()
{
return header_.getFrom();
}
public Stage getMessageType()
{
return StorageService.verbStages.get(getVerb());
}
public StorageService.Verb getVerb()
{
return header_.getVerb();
}
// TODO should take byte[] + length so we don't have to copy to a byte[] of exactly the right len
// TODO make static
<<<<<<< MINE
public Message getReply(InetAddress from, byte[] args)
=======
public Message getReply(InetAddress from, byte[] body, int version)
>>>>>>> YOURS
{
Header header = new Header(from, StorageService.Verb.REQUEST_RESPONSE);
<<<<<<< MINE
return new Message(header, args);
=======
return new Message(header, body, version);
>>>>>>> YOURS
}
public Message getInternalReply(byte[] body, int version)
{
Header header = new Header(FBUtilities.getLocalAddress(), StorageService.Verb.INTERNAL_RESPONSE);
<<<<<<< MINE
return new Message(header, body);
=======
return new Message(header, body, version);
>>>>>>> YOURS
}
public String toString()
{
StringBuilder sbuf = new StringBuilder("");
String separator = System.getProperty("line.separator");
sbuf.append("FROM:" + getFrom())
.append(separator)
.append("TYPE:" + getMessageType())
.append(separator)
.append("VERB:" + getVerb())
.append(separator);
return sbuf.toString();
}
private static class MessageSerializer implements ICompactSerializer<Message>
{
public void serialize(Message t, DataOutputStream dos, int version) throws IOException
{
assert t.getVersion() == version : "internode protocol version mismatch"; // indicates programmer error.
Header.serializer().serialize( t.header_, dos, version);
byte[] bytes = t.getMessageBody();
dos.writeInt(bytes.length);
dos.write(bytes);
}
public Message deserialize(DataInputStream dis, int version) throws IOException
{
Header header = Header.serializer().deserialize(dis, version);
int size = dis.readInt();
byte[] bytes = new byte[size];
dis.readFully(bytes);
return new Message(header, bytes, version);
}
}
}
Diff Result
No diff
Case 7 - java_cassandra.rev_2ce7b_e863c..AlterTableStatement.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.util.*;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.CompositeType;
import org.apache.cassandra.db.marshal.CounterColumnType;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import org.apache.cassandra.thrift.InvalidRequestException;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final String validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps = new CFPropDefs();
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, Map<String, String> propertyMap)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps.addAll(propertyMap);
}
public void checkAccess(ClientState state) throws InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws InvalidRequestException, ConfigurationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
CFPropDefs.parseType(validator),
null,
null,
null,
cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = CFPropDefs.parseType(validator);
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
cfm.keyValidator(newType);
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, CFPropDefs.parseType(validator));
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(CFPropDefs.parseType(validator));
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(CFPropDefs.parseType(validator));
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.util.*;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.CompositeType;
import org.apache.cassandra.db.marshal.CounterColumnType;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import org.apache.cassandra.thrift.InvalidRequestException;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final String validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps = new CFPropDefs();
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, Map<String, String> propertyMap)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps.addAll(propertyMap);
}
public void checkAccess(ClientState state) throws InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws InvalidRequestException, ConfigurationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
CFPropDefs.parseType(validator),
null,
null,
null,
cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = CFPropDefs.parseType(validator);
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
cfm.keyValidator(newType);
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, CFPropDefs.parseType(validator));
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(CFPropDefs.parseType(validator));
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(CFPropDefs.parseType(validator));
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.util.*;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import org.apache.cassandra.thrift.InvalidRequestException;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final String validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
public void checkAccess(ClientState state) throws InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws InvalidRequestException, ConfigurationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
CFPropDefs.parseType(validator),
null,
null,
null,
cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = CFPropDefs.parseType(validator);
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
cfm.keyValidator(newType);
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, CFPropDefs.parseType(validator));
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(CFPropDefs.parseType(validator));
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(CFPropDefs.parseType(validator));
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.util.*;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import org.apache.cassandra.thrift.InvalidRequestException;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final String validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
public void checkAccess(ClientState state) throws InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws InvalidRequestException, ConfigurationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
CFPropDefs.parseType(validator),
null,
null,
null,
cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = CFPropDefs.parseType(validator);
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
cfm.keyValidator(newType);
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, CFPropDefs.parseType(validator));
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(CFPropDefs.parseType(validator));
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(CFPropDefs.parseType(validator));
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
 * CQL3 {@code ALTER TABLE} statement: performs an ADD, ALTER, DROP or OPTS
 * change against a column family's metadata and announces the resulting
 * schema update through {@link MigrationManager}.
 */
public class AlterTableStatement extends SchemaAlteringStatement
{
/** The kind of alteration this statement performs. */
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final ParsedType validator; // new/updated column type; used only for ADD/ALTER
public final ColumnIdentifier columnName; // target column; not consulted for OPTS (see announceMigration)
private final CFPropDefs cfProps; // table options; used only for OPTS
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
/** Requires ALTER permission on the target column family. */
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
/**
 * Validates the requested change against the live schema and announces an
 * updated CFMetaData. All mutation happens on a clone ({@code cfm}); the
 * live metadata ({@code meta}) is only read.
 *
 * @throws RequestValidationException if the column family does not exist
 *         or the alteration is invalid for its current layout
 */
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
// OPTS does not target a column, so skip the name lookup in that case.
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
// deliberate fall-through: both alias kinds are PRIMARY KEY parts
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
// For composite comparators the new column maps to the last component;
// non-composite CFs use a null component index.
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
// The trailing comparator component is (or becomes) the collection map,
// so the column name sits one component earlier.
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
// Replace the existing collection component, or append one if this is
// the CF's first collection column.
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
// Composite keys: swap only the altered component of the key validator.
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
// Mutate the existing definition in place and re-register it.
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
// Find the registered definition matching the requested name.
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
 * CQL3 {@code ALTER TABLE} statement: performs an ADD, ALTER, DROP or OPTS
 * change against a column family's metadata and announces the resulting
 * schema update through {@link MigrationManager}.
 */
public class AlterTableStatement extends SchemaAlteringStatement
{
/** The kind of alteration this statement performs. */
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final ParsedType validator; // new/updated column type; used only for ADD/ALTER
public final ColumnIdentifier columnName; // target column; not consulted for OPTS (see announceMigration)
private final CFPropDefs cfProps; // table options; used only for OPTS
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
/** Requires ALTER permission on the target column family. */
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
/**
 * Validates the requested change against the live schema and announces an
 * updated CFMetaData. All mutation happens on a clone ({@code cfm}); the
 * live metadata ({@code meta}) is only read.
 *
 * @throws RequestValidationException if the column family does not exist
 *         or the alteration is invalid for its current layout
 */
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
// OPTS does not target a column, so skip the name lookup in that case.
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
// deliberate fall-through: both alias kinds are PRIMARY KEY parts
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
// For composite comparators the new column maps to the last component;
// non-composite CFs use a null component index.
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
// The trailing comparator component is (or becomes) the collection map,
// so the column name sits one component earlier.
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
// Replace the existing collection component, or append one if this is
// the CF's first collection column.
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
// Composite keys: swap only the altered component of the key validator.
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
// Mutate the existing definition in place and re-register it.
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
// Find the registered definition matching the requested name.
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
MergeMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
 * CQL3 {@code ALTER TABLE} statement: performs an ADD, ALTER, DROP or OPTS
 * change against a column family's metadata and announces the resulting
 * schema update through {@link MigrationManager}.
 *
 * NOTE(review): this unit still contains unresolved merge conflict markers
 * around the constructor (MINE takes a String validator, YOURS a ParsedType);
 * it cannot compile until the conflict is resolved. The markers below are
 * preserved verbatim -- this appears to be merge-scenario corpus data.
 */
public class AlterTableStatement extends SchemaAlteringStatement {
/** The kind of alteration this statement performs. */
public static enum Type {
ADD , ALTER , DROP , OPTS}
public final Type oType;
public final ParsedType validator; // new/updated column type; used only for ADD/ALTER
public final ColumnIdentifier columnName; // target column; not consulted for OPTS
private final CFPropDefs cfProps; // table options; used only for OPTS
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
>>>>>>> YOURS
/** Requires ALTER permission on the target column family. */
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
/**
 * Validates the requested change against the live schema and announces an
 * updated CFMetaData. All mutation happens on a clone ({@code cfm}); the
 * live metadata ({@code meta}) is only read.
 *
 * @throws RequestValidationException if the column family does not exist
 *         or the alteration is invalid for its current layout
 */
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
// OPTS does not target a column, so skip the name lookup in that case.
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
// deliberate fall-through: both alias kinds are PRIMARY KEY parts
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
// For composite comparators the new column maps to the last component;
// non-composite CFs use a null component index.
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
// The trailing comparator component is (or becomes) the collection map,
// so the column name sits one component earlier.
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
// Replace the existing collection component, or append one if this is
// the CF's first collection column.
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
// Composite keys: swap only the altered component of the key validator.
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
// Mutate the existing definition in place and re-register it.
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
// Find the registered definition matching the requested name.
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
 * CQL3 {@code ALTER TABLE} statement: performs an ADD, ALTER, DROP or OPTS
 * change against a column family's metadata and announces the resulting
 * schema update through {@link MigrationManager}.
 *
 * NOTE(review): this unit still contains unresolved merge conflict markers
 * around the constructor (MINE takes a String validator, YOURS a ParsedType);
 * it cannot compile until the conflict is resolved. The markers below are
 * preserved verbatim -- this appears to be merge-scenario corpus data.
 */
public class AlterTableStatement extends SchemaAlteringStatement {
/** The kind of alteration this statement performs. */
public static enum Type {
ADD , ALTER , DROP , OPTS}
public final Type oType;
public final ParsedType validator; // new/updated column type; used only for ADD/ALTER
public final ColumnIdentifier columnName; // target column; not consulted for OPTS
private final CFPropDefs cfProps; // table options; used only for OPTS
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
>>>>>>> YOURS
/** Requires ALTER permission on the target column family. */
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
/**
 * Validates the requested change against the live schema and announces an
 * updated CFMetaData. All mutation happens on a clone ({@code cfm}); the
 * live metadata ({@code meta}) is only read.
 *
 * @throws RequestValidationException if the column family does not exist
 *         or the alteration is invalid for its current layout
 */
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
// OPTS does not target a column, so skip the name lookup in that case.
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
// deliberate fall-through: both alias kinds are PRIMARY KEY parts
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
// For composite comparators the new column maps to the last component;
// non-composite CFs use a null component index.
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
// The trailing comparator component is (or becomes) the collection map,
// so the column name sits one component earlier.
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
// Replace the existing collection component, or append one if this is
// the CF's first collection column.
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
// Composite keys: swap only the altered component of the key validator.
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
// Mutate the existing definition in place and re-register it.
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
// Find the registered definition matching the requested name.
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
KeepBothMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
 * CQL3 {@code ALTER TABLE} statement: performs an ADD, ALTER, DROP or OPTS
 * change against a column family's metadata and announces the resulting
 * schema update through {@link MigrationManager}.
 *
 * NOTE(review): this looks like a "keep both methods" merge resolution --
 * it retains two constructor overloads, and the String-validator overload
 * assigns a String to the ParsedType field, so this unit cannot compile
 * as written. Preserved verbatim as merge-scenario corpus data.
 */
public class AlterTableStatement extends SchemaAlteringStatement {
/** The kind of alteration this statement performs. */
public static enum Type {
ADD(), ALTER(), DROP(), OPTS()
}
public final Type oType;
public final ParsedType validator; // new/updated column type; used only for ADD/ALTER
public final ColumnIdentifier columnName; // target column; not consulted for OPTS
private final CFPropDefs cfProps; // table options; used only for OPTS
// NOTE(review): this overload takes a String but assigns it to the
// ParsedType field above -- type mismatch; needs manual repair.
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps) {
super(name);
this.oType = type;
this.columnName = columnName;
// used only for ADD/ALTER commands
this.validator = validator;
this.cfProps = cfProps;
}
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps) {
super(name);
this.oType = type;
this.columnName = columnName;
// used only for ADD/ALTER commands
this.validator = validator;
this.cfProps = cfProps;
}
/** Requires ALTER permission on the target column family. */
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException {
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
/**
 * Validates the requested change against the live schema and announces an
 * updated CFMetaData. All mutation happens on a clone ({@code cfm}); the
 * live metadata ({@code meta}) is only read.
 *
 * @throws RequestValidationException if the column family does not exist
 *         or the alteration is invalid for its current layout
 */
public void announceMigration() throws RequestValidationException {
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
// OPTS does not target a column, so skip the name lookup in that case.
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch(oType) {
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null) {
switch(name.kind) {
// deliberate fall-through: both alias kinds are PRIMARY KEY parts
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
// For composite comparators the new column maps to the last component;
// non-composite CFs use a null component index.
Integer componentIndex = cfDef.isComposite ? ((CompositeType) meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType) {
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
// The trailing comparator component is (or becomes) the collection map,
// so the column name sits one component earlier.
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections ? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined) : new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType) type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
// Replace the existing collection component, or append one if this is
// the CF's first collection column.
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key, type, null, null, null, componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch(name.kind) {
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
// Composite keys: swap only the altered component of the key validator.
if (cfDef.hasCompositeKey) {
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
} else {
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
// Mutate the existing definition in place and re-register it.
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch(name.kind) {
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
// Find the registered definition matching the requested name.
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values()) {
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString() {
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)", cfName, oType, columnName, validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement {
public static enum Type {
ADD(), ALTER(), DROP(), OPTS()
}
public final Type oType;
public final ParsedType validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps) {
super(name);
this.oType = type;
this.columnName = columnName;
// used only for ADD/ALTER commands
this.validator = validator;
this.cfProps = cfProps;
}
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps) {
super(name);
this.oType = type;
this.columnName = columnName;
// used only for ADD/ALTER commands
this.validator = validator;
this.cfProps = cfProps;
}
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException {
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws RequestValidationException {
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch(oType) {
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null) {
switch(name.kind) {
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
Integer componentIndex = cfDef.isComposite ? ((CompositeType) meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType) {
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections ? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined) : new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType) type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key, type, null, null, null, componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch(name.kind) {
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
if (cfDef.hasCompositeKey) {
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
} else {
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch(name.kind) {
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values()) {
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString() {
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)", cfName, oType, columnName, validator);
}
}
Safe
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement {
public static enum Type {
ADD , ALTER , DROP , OPTS}
public final Type oType;
public final ParsedType validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
>>>>>>> YOURS
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement {
public static enum Type {
ADD , ALTER , DROP , OPTS}
public final Type oType;
public final ParsedType validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
>>>>>>> YOURS
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
<<<<<<< MINE
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.*;
=======
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
>>>>>>> YOURS
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final ParsedType validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
>>>>>>> YOURS
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql3.*;
<<<<<<< MINE
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.marshal.*;
=======
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
>>>>>>> YOURS
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.MigrationManager;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
public class AlterTableStatement extends SchemaAlteringStatement
{
public static enum Type
{
ADD, ALTER, DROP, OPTS
}
public final Type oType;
public final ParsedType validator;
public final ColumnIdentifier columnName;
private final CFPropDefs cfProps;
<<<<<<< MINE
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, String validator, CFPropDefs cfProps)
=======
public AlterTableStatement(CFName name, Type type, ColumnIdentifier columnName, ParsedType validator, CFPropDefs cfProps)
>>>>>>> YOURS
{
super(name);
this.oType = type;
this.columnName = columnName;
this.validator = validator; // used only for ADD/ALTER commands
this.cfProps = cfProps;
}
public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
{
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
}
public void announceMigration() throws RequestValidationException
{
CFMetaData meta = validateColumnFamily(keyspace(), columnFamily());
CFMetaData cfm = meta.clone();
CFDefinition cfDef = meta.getCfDef();
CFDefinition.Name name = this.oType == Type.OPTS ? null : cfDef.get(columnName);
switch (oType)
{
case ADD:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot add new column to a compact CF");
if (name != null)
{
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
}
}
Integer componentIndex = cfDef.isComposite ? ((CompositeType)meta.comparator).types.size() - 1 : null;
AbstractType<?> type = validator.getType();
if (type instanceof CollectionType)
{
if (!cfDef.isComposite)
throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
componentIndex--;
Map<ByteBuffer, CollectionType> collections = cfDef.hasCollections
? new HashMap<ByteBuffer, CollectionType>(cfDef.getCollectionType().defined)
: new HashMap<ByteBuffer, CollectionType>();
collections.put(columnName.key, (CollectionType)type);
ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
if (cfDef.hasCollections)
ctypes.set(ctypes.size() - 1, newColType);
else
ctypes.add(newColType);
cfm.comparator = CompositeType.getInstance(ctypes);
}
cfm.addColumnDefinition(new ColumnDefinition(columnName.key,
type,
null,
null,
null,
componentIndex));
break;
case ALTER:
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
AbstractType<?> newType = validator.getType();
if (newType instanceof CounterColumnType)
throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", columnName));
if (cfDef.hasCompositeKey)
{
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.getKeyValidator()).types);
newTypes.set(name.position, newType);
cfm.keyValidator(CompositeType.getInstance(newTypes));
}
else
{
cfm.keyValidator(newType);
}
break;
case COLUMN_ALIAS:
assert cfDef.isComposite;
List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(((CompositeType) cfm.comparator).types);
newTypes.set(name.position, validator.getType());
cfm.comparator = CompositeType.getInstance(newTypes);
break;
case VALUE_ALIAS:
cfm.defaultValidator(validator.getType());
break;
case COLUMN_METADATA:
ColumnDefinition column = cfm.getColumnDefinition(columnName.key);
column.setValidator(validator.getType());
cfm.addColumnDefinition(column);
break;
}
break;
case DROP:
if (cfDef.isCompact)
throw new InvalidRequestException("Cannot drop columns from a compact CF");
if (name == null)
throw new InvalidRequestException(String.format("Column %s was not found in CF %s", columnName, columnFamily()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
case COLUMN_METADATA:
ColumnDefinition toDelete = null;
for (ColumnDefinition columnDef : cfm.getColumn_metadata().values())
{
if (columnDef.name.equals(columnName.key))
toDelete = columnDef;
}
assert toDelete != null;
cfm.removeColumnDefinition(toDelete);
break;
}
break;
case OPTS:
if (cfProps == null)
throw new InvalidRequestException(String.format("ALTER COLUMNFAMILY WITH invoked, but no parameters found"));
cfProps.validate();
cfProps.applyToCFMetadata(cfm);
break;
}
MigrationManager.announceColumnFamilyUpdate(cfm);
}
public String toString()
{
return String.format("AlterTableStatement(name=%s, type=%s, column=%s, validator=%s)",
cfName,
oType,
columnName,
validator);
}
}
Diff Result
No diff
Case 8 - java_cassandra.rev_446e3_783ce..CassandraServer.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Captures the request scheduler configured in cassandra.yaml; used by schedule()/release().
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-connection session state (keyspace, authentication) for this thread. */
public ClientState state()
{
    final ClientState session = clientState.get();
    return session;
}
/**
 * Runs the given read commands at the requested consistency level and returns the
 * results keyed by decorated key. ConsistencyLevel.ANY is rejected up front since it
 * is not valid for reads. TimeoutException surfaces to clients as TimedOutException;
 * IOException is treated as fatal and rethrown as RuntimeException.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// enter the request scheduler; release() must always run, hence the inner finally
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
// row.cf is stored as-is; a missing row may map to a null ColumnFamily
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns into thrift Columns, skipping tombstones and
 * carrying TTLs over for expiring columns. Null/empty input yields the shared
 * EMPTY_SUBCOLUMNS list.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    final ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn col : columns)
    {
        // tombstones are never exposed to clients
        if (col.isMarkedForDelete())
            continue;

        Column c = new Column(col.name(), col.value(), col.timestamp());
        if (col instanceof ExpiringColumn)
            c.setTtl(((ExpiringColumn) col).getTimeToLive());
        converted.add(c);
    }
    return converted;
}
/**
 * Converts live internal columns into thrift ColumnOrSuperColumn wrappers.
 * Reversal is applied here because ColumnFamily always keeps its columns in
 * the comparator's "natural" order.
 * TODO this is inconvenient for direct users of StorageProxy
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        // tombstones are never exposed to clients
        if (col.isMarkedForDelete())
            continue;

        Column c = new Column(col.name(), col.value(), col.timestamp());
        if (col instanceof ExpiringColumn)
            c.setTtl(((ExpiringColumn) col).getTimeToLive());
        result.add(new ColumnOrSuperColumn().setColumn(c));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts supercolumns into thrift ColumnOrSuperColumn wrappers. A supercolumn
 * whose subcolumns are all deleted is dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> children = thriftifySubColumns(sc.getSubColumns());
        if (!children.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), children)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Executes the read commands and thriftifies each result, keyed by the client's
 * original (un-decorated) key. Reversal only applies to SliceFromReadCommand,
 * since by-names reads carry no ordering flag.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily keys by decorated key, so re-decorate to look results up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts one ColumnFamily read result into thrift form. subcolumnsOnly means the
 * query targeted a single supercolumn, so its children are exposed as plain columns.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    // nothing found for this key
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> children = superColumn.getSubColumns();
        return (children == null || children.isEmpty())
               ? EMPTY_COLUMNS
               : thriftifyColumns(children, reverseOrder);
    }

    return cf.isSuper()
           ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
           : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/** Reads one key's slice; delegates to multigetSliceInternal with a singleton key list. */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/** Reads slices for several keys at once; authorization is checked once for the whole batch. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the parent and predicate, builds one read command per key (by-names
 * when column_names is set, otherwise a range slice), and executes them.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);

    final boolean byNames = predicate.column_names != null;
    List<ReadCommand> commands = new ArrayList<ReadCommand>();
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(key);
        if (byNames)
        {
            commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
        }
        else
        {
            SliceRange r = predicate.slice_range;
            commands.add(new SliceFromReadCommand(keyspace, key, column_parent, r.start, r.finish, r.reversed, r.count));
        }
    }
    return getSlice(commands, consistency_level);
}
/**
 * Reads a single column or supercolumn by exact path, implemented as a by-names
 * slice of one name. When column_path.column is null the supercolumn itself is
 * the target (path carries no supercolumn component and nameAsList holds the
 * supercolumn name); otherwise the column under column_path.super_column is fetched.
 * Throws NotFoundException when the row or the named (super)column is absent.
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// exactly one name was requested, so at most one result can come back
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/** Counts live columns matching the predicate by materializing get_slice and taking its size. */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/** Per-key column counts, computed by materializing the slices and sizing each result. */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices =
        multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);

    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Writes a single column. For super column families the supercolumn name is
 * mandatory. Marshalling failures from column validation are reported back to
 * the client as InvalidRequestException.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnParent(state().getKeyspace(), column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (DatabaseDescriptor.getColumnFamilyType(state().getKeyspace(), column_parent.column_family) == ColumnFamilyType.Super
&& column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(state().getKeyspace(), column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(state().getKeyspace(), column_parent.column_family, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Applies a batch of mutations: one RowMutation per key, each aggregating all of
 * that key's per-CF mutation lists. Write access is authorized once per distinct
 * column family, and every mutation is validated before anything is applied.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // Track already-authorized CFs in a set: the original List.contains() made the
    // de-duplication O(n) per lookup (O(n^2) over a batch touching many CFs).
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // add() returns false if the CF was already seen — avoids unneeded authorizations
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Deletes at the given path and timestamp: a single column, a supercolumn, or
 * the whole row, depending on how much of column_path is populated.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Applies the mutations through StorageProxy under the request scheduler.
 * Timeouts surface as TimedOutException; the outer finally guarantees the
 * scheduler slot is released even when mutate() fails.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Builds a thrift KsDef (including thrift-converted CF definitions and strategy
 * options) for the named keyspace, or throws NotFoundException if undefined.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);

    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), ksm.replicationFactor, cfDefs);
    def.setStrategy_options(ksm.strategyOptions);
    return def;
}
/**
 * Slices over a range of keys. A token-only range (start_key == null) becomes a
 * Range built from the parsed tokens; a key range becomes Bounds built from the
 * keys' tokens. Timeouts surface as TimedOutException; IOException is fatal.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
// scheduler slot is held only for the actual storage call
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Wraps each row into a KeySlice of thriftified columns; reversal follows the
 * predicate's slice_range.reversed flag (by-names predicates are never reversed).
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    final boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
/**
 * Secondary-index scan: returns key slices for rows matching the index clause.
 * NOTE(review): unlike the other read paths, this does not call schedule()/release()
 * around the StorageProxy call — confirm whether that is intentional.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists definitions for every known keyspace. A keyspace that vanishes between
 * enumeration and lookup is logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);

    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ksName : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ksName));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ksName + "'. Continuing... ");
        }
    }
    return defs;
}
/** Returns the cluster name from configuration. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Returns the Thrift API version string (Constants.VERSION from the generated thrift code). */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a (non-system) keyspace: one TokenRange per
 * range-to-endpoints mapping, with tokens rendered as strings.
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the configured snitch class name; when the dynamic snitch wrapper is
 * in use, reports the wrapped subsnitch's class instead.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Computes split tokens for the given token range so that each split covers
 * roughly keys_per_split keys, and returns them as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range target = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, target, keys_per_split);

    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tf.toString(token));
    return splits;
}
/** Authenticates this connection's session state with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Queues the thread with the configured request scheduler using the session's
 * scheduling value; each schedule() must be paired with a release().
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Returns this request's slot to the scheduler; the counterpart of schedule().
 */
private void release()
{
requestScheduler.release();
}
/**
 * Applies a schema migration on the MIGRATION stage and blocks until done.
 * Typical migration failures throw InvalidRequestException (unwrapped by callers
 * from the ExecutionException); atypical failures throw RuntimeException.
 */
private static void applyMigrationOnStage(final Migration m)
{
    // typed Callable instead of the raw type, to avoid unchecked warnings
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt status before failing so callers further up
        // the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Creates a new column family via a migration on the MIGRATION stage, after
 * verifying cluster-wide schema agreement. Returns the new schema version.
 * Configuration/IO failures are reported as InvalidRequestException with the
 * original exception attached as the cause.
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace via a migration, after
 * verifying schema agreement. Returns the new schema version; failures are
 * reported as InvalidRequestException with the cause attached.
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Creates a new keyspace (with its column families) via a migration, after
 * verifying schema agreement and that every CfDef names this keyspace.
 * Returns the new schema version; failures are reported as
 * InvalidRequestException with the cause attached.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();

    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // fixed typo in the client-facing message: "CsDef" -> "CfDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor,
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace via a migration, after verifying schema agreement.
 * Returns the new schema version; failures are reported as
 * InvalidRequestException with the cause attached.
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing keyspace's replication settings, but does not allow
 * column family modifications (a KsDef carrying any CfDefs is rejected).
 * Returns the new schema version; failures are reported as
 * InvalidRequestException with the cause attached.
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
KSMetaData ksm = new KSMetaData(
ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor);
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition via a migration. The CfDef must
 * name both its keyspace and CF, and an existing definition must be found.
 * Returns the new schema version; failures are reported as
 * InvalidRequestException with the cause attached.
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// unset optional fields are filled with defaults before converting to the Avro form
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** Rejects schema-mutating calls while reachable nodes still disagree on schema version. */
private void validateSchemaAgreement() throws InvalidRequestException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> liveVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (liveVersions.size() > 1)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
/**
 * Translates a thrift CfDef into a CFMetaData. Unset optional fields are filled
 * in two ways: applyImplicitDefaults() mutates the CfDef first, then each
 * isSet…() check below substitutes a CFMetaData default for fields still unset.
 * Compaction thresholds and memtable settings are validated before construction.
 * @see CFMetaData.applyImplicitDefaults().
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Truncates the named CF in the current keyspace, blocking until the cluster-wide
 * truncate completes. Timeouts and IO failures surface as UnavailableException
 * with the cause attached; the scheduler slot is always released.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Validates the keyspace exists and binds it to this connection's session. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Returns the cluster's schema-version-to-endpoints map, unfiltered — unreachable
 * hosts may appear under StorageProxy.UNREACHABLE (see validateSchemaAgreement).
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Captures the request scheduler configured in cassandra.yaml; used by schedule()/release().
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-connection session state (keyspace, authentication) for this thread. */
public ClientState state()
{
return clientState.get();
}
/**
 * Runs the given read commands at the requested consistency level and returns the
 * results keyed by decorated key. ConsistencyLevel.ANY is rejected since it is
 * not valid for reads; timeouts surface as TimedOutException and IOException is
 * rethrown as RuntimeException.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// enter the request scheduler; release() must always run, hence the inner finally
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns to Thrift {@link Column}s, skipping tombstones
 * and copying the TTL for expiring columns.
 *
 * @param columns internal columns; may be null or empty
 * @return live columns in iteration order, or a shared empty list
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_SUBCOLUMNS;
}
ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
for (IColumn column : columns)
{
// deleted columns are never returned to clients
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts internal columns to Thrift {@link ColumnOrSuperColumn} wrappers,
 * skipping tombstones and optionally reversing the result order.
 *
 * @param columns internal columns (must be non-null)
 * @param reverseOrder true when the client asked for a reversed slice
 * @return live columns wrapped for Thrift, in requested order
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/**
 * Converts internal super columns to Thrift {@link SuperColumn} wrappers.
 * Super columns whose sub-columns are all deleted are omitted entirely.
 *
 * @param columns internal super columns
 * @param reverseOrder true when the client asked for a reversed slice
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Runs the given read commands and converts each resulting row to Thrift columns,
 * keyed by the raw (undecorated) row key the client supplied.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily keys its result by decorated key, so re-decorate to look up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a column family to Thrift results, dispatching to the sub-column,
 * super-column, or plain-column conversion as appropriate.
 *
 * @param cf row contents; null or empty yields the shared empty list
 * @param subcolumnsOnly true when the query targeted a single super column,
 *                       so only that column's sub-columns are returned
 * @param reverseOrder true when the client asked for a reversed slice
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// the query path named a super column, so the CF holds exactly that one column
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder);
else
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Thrift API: returns a slice of columns for a single key, implemented as a
 * single-key multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Thrift API: returns a slice of columns for each of the given keys,
 * after checking READ permission on the column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request and builds one read command per key: by-name commands
 * when the predicate lists explicit column names, range commands otherwise.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Thrift API: returns exactly one column (or super column) identified by its
 * full path, or throws {@link NotFoundException} if it does not exist.
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
// when a subcolumn is requested, the super column becomes part of the path;
// when the super column itself is requested, it is the name being fetched
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Thrift API: counts the columns matching the predicate for one key.
 * Implemented by materializing the full slice and taking its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift API: per-key column counts for the given predicate, implemented by
 * fetching the full slices and taking their sizes.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
counts.put(cf.getKey(), cf.getValue().size());
}
return counts;
}
/**
 * Thrift API: inserts or updates a single column after validating permissions,
 * the key, the parent, the column name, and the column data.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnParent(state().getKeyspace(), column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (DatabaseDescriptor.getColumnFamilyType(state().getKeyspace(), column_parent.column_family) == ColumnFamilyType.Super
&& column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(state().getKeyspace(), column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(state().getKeyspace(), column_parent.column_family, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// surface a value-validation failure as a client error, not a server error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift API: applies a batch of mutations, grouped by row key and then by
 * column family. WRITE permission is checked once per distinct column family,
 * and every mutation is validated before any row mutation is applied.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
// use a Set for O(1) membership checks instead of List.contains (O(n) per CF)
Set<String> cfamsSeen = new HashSet<String>();
List<RowMutation> rowMutations = new ArrayList<RowMutation>();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
ThriftValidation.validateKey(key);
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations: add() returns false when already seen
if (cfamsSeen.add(cfName))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
}
}
rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
}
doInsert(consistency_level, rowMutations);
}
/**
 * Thrift API: deletes the column, super column, or entire row identified by
 * the (possibly partial) column path, using the given timestamp.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Applies mutations through StorageProxy under the request scheduler,
 * translating replica timeouts to {@link TimedOutException}. The scheduler
 * slot is always released.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift API: returns the definition of the named keyspace, including all of
 * its column family definitions and strategy options.
 *
 * @throws NotFoundException when no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), ksm.replicationFactor, cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift API: returns slices for a contiguous range of keys. The range may be
 * expressed either as tokens (start_key == null) or as raw keys; token ranges
 * exclude the left endpoint while key bounds include it.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-based range
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-based bounds
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows to Thrift {@link KeySlice}s, honoring a reversed
 * slice_range if the predicate specified one.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
List<KeySlice> keySlices = new ArrayList<KeySlice>(rows.size());
boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
for (Row row : rows)
{
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
keySlices.add(new KeySlice(row.key.key, thriftifiedColumns));
}
return keySlices;
}
/**
 * Thrift API: returns rows matching a secondary-index clause, with each row's
 * columns filtered by the given predicate.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift API: returns definitions for every known keyspace. A keyspace whose
 * metadata disappears between listing and lookup is logged and skipped.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
Set<String> keyspaces = DatabaseDescriptor.getTables();
List<KsDef> ksset = new ArrayList<KsDef>();
for (String ks : keyspaces)
{
try
{
ksset.add(describe_keyspace(ks));
}
catch (NotFoundException nfe)
{
// best effort: skip a keyspace that vanished rather than failing the whole call
logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
}
}
return ksset;
}
/** Thrift API: returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift API: returns the Thrift API version string. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift API: describes the token ring for a non-system keyspace as a list of
 * (start token, end token, endpoints) ranges.
 *
 * @throws InvalidRequestException for null, unknown, or system keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Thrift API: returns the fully qualified class name of the partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift API: returns the snitch class name; when the dynamic snitch wrapper
 * is in use, the wrapped (underlying) snitch class is reported instead.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift API: splits the given token range into sub-ranges of roughly
 * keys_per_split keys each, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/** Thrift API: authenticates this connection's session with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * May block until the request scheduler grants a slot; must be paired
 * with a call to {@link #release()}.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources; the counterpart of {@link #schedule()}.
 */
private void release()
{
requestScheduler.release();
}
/**
 * Applies a schema migration on the MIGRATION stage and blocks until it
 * completes. Typical migration failures surface as a wrapped
 * ConfigurationException via ExecutionException; atypical failures become a
 * RuntimeException.
 */
private static void applyMigrationOnStage(final Migration m)
{
// parameterize instead of using raw Callable/Future types
Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
f.get();
}
catch (InterruptedException e)
{
// restore the interrupt flag before surfacing the unexpected interruption
Thread.currentThread().interrupt();
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Thrift API: adds a new column family after validating the definition and
 * requiring cluster-wide schema agreement.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// rethrow as a client error, preserving the cause for diagnostics
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: drops a column family from the current keyspace, requiring
 * cluster-wide schema agreement first.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: creates a new keyspace (and any contained column families)
 * after verifying schema agreement and that every CfDef names this keyspace.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
// fixed message: the offending definition is a CfDef, not "CsDef"
throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(convertToCFMetaData(cfDef));
}
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor,
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: drops an entire keyspace, requiring cluster-wide schema
 * agreement first.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * update an existing keyspace, but do not allow column family modifications.
 *
 * @return the new schema version UUID as a string
 * @throws InvalidRequestException if the KsDef carries any CfDefs or the
 *         keyspace does not exist
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
KSMetaData ksm = new KSMetaData(
ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor);
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: updates an existing column family definition in place.
 * Requires schema agreement and that the CF already exists.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage; defaults are filled in here
// so the migration carries a fully specified definition
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-mutating calls while live nodes disagree on schema version.
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a Thrift CfDef to internal CFMetaData, filling unset optional
 * fields with the CFMetaData defaults and validating thresholds and
 * memtable settings first.
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
// positional constructor: isSetX() guards substitute defaults for unset Thrift optionals
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Thrift API: removes all data from the named column family, blocking until
 * all live replicas have truncated. Timeouts and I/O failures are reported
 * as {@link UnavailableException} with the cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/**
 * Thrift API: binds this connection's session to the given (validated) keyspace.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Thrift API: returns a map of schema version to the hosts reporting it,
 * so clients can check for cluster-wide schema agreement.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
public ClientState state()
{
return clientState.get();
}
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_SUBCOLUMNS;
}
ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Executes the given read commands and converts each resulting row to
 * Thrift columns, keyed by the raw (undecorated) command key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by DecoratedKey, so re-decorate the command key to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// only SliceFromReadCommand carries a client-requested reverse ordering
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily into Thrift columns, dispatching on whether the
 * caller asked for the sub-columns of a single super column, a super CF,
 * or a standard CF.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        // the query named a single super column, so there is exactly one top-level column
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = superColumn.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
               ? EMPTY_COLUMNS
               : thriftifyColumns(subcolumns, reverseOrder);
    }

    return cf.isSuper()
           ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
           : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Fetches a slice of columns for a single key by delegating to
 * multigetSliceInternal with a singleton key list.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Fetches slices for multiple keys; authorization is checked once here,
 * then the work is delegated to multigetSliceInternal.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request, builds one ReadCommand per key (by-name when the
 * predicate lists explicit column names, otherwise a range slice), and
 * executes them via getSlice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);

    boolean byNames = predicate.column_names != null;
    SliceRange range = predicate.slice_range;
    List<ReadCommand> commands = new ArrayList<ReadCommand>();
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(key);
        if (byNames)
            commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
        else
            commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
    }
    return getSlice(commands, consistency_level);
}
/**
 * Reads a single column (or super column) addressed by the given path.
 *
 * @throws NotFoundException when the row or the named column does not exist
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
// When a sub-column is named, the super column becomes part of the query path;
// when only a super column is named, the path has no super column component and
// the super column itself is the name we read.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// a by-name read of a single name yields at most one live column
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Counts the columns matching the predicate for one key. Implemented by
 * materializing the slice and taking its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Counts matching columns for each of the given keys in a single
 * multiget, returning a key-to-count map.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Inserts (or overwrites) a single column under the given parent.
 * Validation failures surface as InvalidRequestException.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnParent(state().getKeyspace(), column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (DatabaseDescriptor.getColumnFamilyType(state().getKeyspace(), column_parent.column_family) == ColumnFamilyType.Super
&& column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(state().getKeyspace(), column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(state().getKeyspace(), column_parent.column_family, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// a value that fails comparator/validator marshalling is a client error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Applies a batch of mutations grouped by row key and then by column family.
 * Each distinct column family is authorized at most once, every mutation is
 * validated, and the whole batch is submitted through doInsert at the
 * requested consistency level.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // HashSet gives O(1) membership checks; the previous ArrayList made the
    // "already authorized?" test O(n) per column family occurrence.
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: add() returns true only the first
            // time this column family is seen.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Deletes the column, super column, or entire row identified by the path,
 * by writing a tombstone with the given client-supplied timestamp.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Common write path: validates the consistency level, acquires a scheduler
 * slot, applies the mutations via StorageProxy, and always releases the slot.
 * A StorageProxy timeout is surfaced to the client as TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// release must pair with schedule() even on failure
release();
}
}
/**
 * Returns the Thrift definition of the given keyspace, including all of its
 * column family definitions.
 *
 * @throws NotFoundException when the keyspace is not defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), ksm.replicationFactor, cfDefs);
    def.setStrategy_options(ksm.strategyOptions);
    return def;
}
/**
 * Returns slices for a contiguous range of rows. The range may be given
 * either as tokens (exclusive-start Range) or as keys (inclusive Bounds).
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range: ends are parsed with the partitioner's token factory
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range: both endpoints inclusive
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
// always release the scheduler slot acquired by schedule()
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows into Thrift KeySlices, honoring the predicate's
 * reversed flag when a slice range was supplied.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
/**
 * Queries rows via a secondary-index clause and returns the matching
 * slices. Timeouts map to TimedOutException; I/O failures are fatal.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns definitions for every known keyspace. A keyspace whose metadata
 * cannot be found is logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Returns the Thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: one TokenRange per token range,
 * listing the endpoints responsible for it.
 *
 * @throws InvalidRequestException for null, unknown, or system keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/** Returns the fully qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch. When the dynamic snitch
 * wrapper is in use, the wrapped (underlying) snitch's class is reported.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Computes split points (as token strings) for the given token range so
 * that each split holds roughly keys_per_split keys.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);

    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        splits.add(tf.toString(t));
    return splits;
}
/** Authenticates this connection's session with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks until the request scheduler grants a slot; must be paired with
 * a release() call (typically in a finally block).
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release the scheduler slot acquired by schedule(), allowing a queued
 * request to proceed. Must be called exactly once per schedule() call.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
// apply + announce must run serially on the MIGRATION stage
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// block until the migration has been applied and announced
f.get();
}
catch (InterruptedException e)
{
// NOTE(review): interrupt status is not restored before throwing — confirm intentional
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Creates a new column family cluster-wide and returns the new schema
 * version. Requires full schema agreement before proceeding.
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// surface configuration problems to the client, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace cluster-wide and returns
 * the new schema version. Requires full schema agreement before proceeding.
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Creates a new keyspace (and any contained column families) cluster-wide
 * and returns the new schema version. Every CfDef must name the keyspace
 * being created; full schema agreement is required before proceeding.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // fixed typo: the message previously said "CsDef" instead of "CfDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
        ks_def.strategy_options,
        ks_def.replication_factor,
        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // surface configuration problems to the client, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace cluster-wide and returns the new schema
 * version. Requires full schema agreement before proceeding.
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing keyspace's replication settings and returns the new
 * schema version. Column family modifications are explicitly rejected; use
 * system_update_column_family for those.
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
// note: no cf defs are passed, so existing column families are untouched
KSMetaData ksm = new KSMetaData(
ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor);
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition cluster-wide and returns
 * the new schema version. The CF must already exist; full schema agreement
 * is required before proceeding.
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// fill in defaults here so the migration carries a fully-specified definition
// (ideally, this would happen on the migration stage alongside apply())
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-modifying calls while live nodes disagree on the schema
 * version, to avoid concurrent divergent migrations.
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a Thrift CfDef into internal CFMetaData, filling in defaults
 * for every optional field that the client did not set and validating
 * compaction/memtable settings.
 *
 * @throws InvalidRequestException for an unrecognized column_type
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
// mutates cf_def in place to fill unset optional fields
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Truncates (deletes all data from) a column family, blocking until all
 * live replicas have acknowledged. Timeouts and I/O errors surface as
 * UnavailableException with the original cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// release must pair with schedule() even on failure
release();
}
}
/**
 * Binds this connection's session to the given keyspace, after verifying
 * the keyspace exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Returns the schema version reported by each node, keyed by version
 * string, so clients can check for cluster-wide schema agreement.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared immutable empty results, returned instead of null
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (login, keyspace) per connection thread
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates a server using the request scheduler from the node's configuration. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-thread session state for the calling connection. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes read commands through StorageProxy under the request scheduler
 * and returns the resulting column families keyed by decorated key.
 * ConsistencyLevel.ANY is write-only and is rejected up front.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// always release the scheduler slot acquired by schedule()
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns to their Thrift representation.
 * Tombstoned columns are skipped; expiring columns carry their TTL.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_SUBCOLUMNS;
}
ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
for (IColumn column : columns)
{
// tombstones are invisible to clients
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts internal columns to Thrift ColumnOrSuperColumn wrappers,
 * dropping tombstones and reversing the result when requested.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
// tombstones are invisible to clients
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/**
 * Converts internal super columns to Thrift wrappers; a super column with
 * no live sub-columns is omitted entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Executes the given read commands and converts each resulting row to
 * Thrift columns, keyed by the raw (undecorated) command key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by DecoratedKey, so re-decorate the command key to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// only SliceFromReadCommand carries a client-requested reverse ordering
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily into Thrift columns: sub-columns of a single
 * named super column when subcolumnsOnly is set, otherwise super or
 * standard columns depending on the CF type.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// the query named a single super column, so there is exactly one top-level column
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder);
else
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Thrift get_slice: returns the columns matching the predicate for a single key.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    // delegate to the multiget path with a single-element key list
    List<ByteBuffer> keys = Collections.singletonList(key);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result = multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
    return result.get(key);
}
/**
 * Thrift multiget_slice: per-key slices for a batch of keys, after an
 * authorization check against the target column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request and builds one read command per key: by-name reads
 * when the predicate lists column names, range reads otherwise.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);

    boolean byNames = predicate.column_names != null;
    SliceRange range = predicate.slice_range;
    List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(key);
        // range is only dereferenced on the slice branch, where validation guarantees it is set
        commands.add(byNames
                     ? new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names)
                     : new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
    }
    return getSlice(commands, consistency_level);
}
/**
 * Thrift get: fetches the single column (or whole super column) named by column_path.
 *
 * @throws NotFoundException when the row is missing or the named column is absent.
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
// when column is null the super column itself is the thing being fetched, so it becomes
// the queried "name" and the path carries no super column component
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
// readColumnFamily keys results by decorated key
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// a by-names read for exactly one name yields at most one column
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Thrift get_count: counts the columns a get_slice with the same arguments
 * would return (implemented by materializing the slice and sizing it).
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Thrift multiget_count: per-key column counts, computed by running the
 * multiget slice and sizing each result list.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Thrift insert: writes a single column under the given parent, wrapping any
 * marshalling failure as an InvalidRequestException.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnParent(state().getKeyspace(), column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (DatabaseDescriptor.getColumnFamilyType(state().getKeyspace(), column_parent.column_family) == ColumnFamilyType.Super
&& column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(state().getKeyspace(), column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(state().getKeyspace(), column_parent.column_family, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// a value that fails comparator/validator marshalling is a client error, not a server fault
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift batch_mutate: validates and applies a batch of mutations, one
 * RowMutation per key, in a single doInsert call.
 *
 * Fix: the seen-column-family de-duplication used List.contains(), an O(n)
 * scan per CF entry; a HashSet makes the membership test O(1) and Set.add's
 * return value replaces the separate contains/add pair.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // column families already authorized for WRITE in this request
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry : mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // avoid unneeded authorizations: add() returns false if already seen
            if (cfamsSeen.add(cfName))
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            for (Mutation mutation : columnFamilyMutations.getValue())
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift remove: deletes the column, super column, or row addressed by
 * column_path at the given timestamp.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);

    // a deletion is just a mutation carrying a tombstone at the given path
    RowMutation mutation = new RowMutation(state().getKeyspace(), key);
    mutation.delete(new QueryPath(column_path), timestamp);
    doInsert(consistency_level, Collections.singletonList(mutation));
}
/**
 * Applies the given mutations through StorageProxy under the request
 * scheduler, converting storage-layer timeouts into Thrift TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
// queue this thread with the request scheduler before doing the write
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// always release the scheduler slot, even on timeout
release();
}
}
/**
 * Thrift describe_keyspace: converts the stored keyspace metadata, including
 * all of its column families, into a Thrift KsDef.
 *
 * @throws NotFoundException when no keyspace by that name is defined.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    List<CfDef> cfDefs = new ArrayList<CfDef>(ksm.cfMetaData().size());
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), ksm.replicationFactor, cfDefs);
    def.setStrategy_options(ksm.strategyOptions);
    return def;
}
/**
 * Thrift get_range_slices: reads a contiguous range of rows (by token or by
 * key bounds) and returns their thriftified slices.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range: a Range is start-exclusive / end-inclusive
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range: Bounds includes both endpoints
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
// hold a request-scheduler slot only for the duration of the storage read
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Wraps each internal Row as a Thrift KeySlice, reversing column order when
 * the predicate's slice range asked for reversed results.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed)));
    return slices;
}
/**
 * Thrift get_indexed_slices: runs a secondary-index scan matching the index
 * clause and returns the resulting rows as KeySlices.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
// unexpected storage failure; surface as an unchecked error
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift describe_keyspaces: returns a KsDef for every defined keyspace,
 * skipping (with a log message) any whose metadata vanished mid-iteration.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            // a keyspace can disappear between listing and lookup; best-effort skip
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return defs;
}
/** Thrift describe_cluster_name: the cluster name from node configuration. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift describe_version: the Thrift API version constant, not the server release. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift describe_ring: maps each token range of the keyspace to the
 * endpoints that hold it, with tokens rendered as strings.
 *
 * @throws InvalidRequestException for null or non-user keyspaces.
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/** Thrift describe_partitioner: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift describe_snitch: class name of the configured snitch; when the
 * dynamic snitch wrapper is active, reports the wrapped snitch instead.
 */
public String describe_snitch() throws TException
{
    if (!(DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch))
        return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
    // unwrap the dynamic snitch to expose the underlying implementation
    return ((DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
}
/**
 * Thrift describe_splits: splits the given token range into sub-ranges of
 * roughly keys_per_split keys, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range target = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, target, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        splits.add(tf.toString(t));
    return splits;
}
/** Thrift login: authenticates this connection's ClientState with the given credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks in the request scheduler until this request may proceed; must be
 * paired with release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Returns this request's slot to the scheduler; counterpart of schedule().
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
//
// Fixes: use parameterized Callable/Future instead of raw types, and restore
// the thread's interrupt flag before failing on InterruptedException so
// callers further up the stack can still observe the interruption.
private static void applyMigrationOnStage(final Migration m)
{
    // the task's return value is irrelevant; only its completion matters
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration has been applied (or failed)
        f.get();
    }
    catch (InterruptedException e)
    {
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // unwrap-free rethrow: callers treat any migration failure as fatal
        throw new RuntimeException(e);
    }
}
/**
 * Thrift system_add_column_family: adds a column family via a schema migration.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
// refuse schema changes while live nodes disagree on schema version
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_drop_column_family: drops a column family from the current
 * keyspace via a schema migration.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
// refuse schema changes while live nodes disagree on schema version
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_add_keyspace: creates a keyspace (with any embedded column
 * family definitions) via a schema migration.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
// validate and convert every CF before building the keyspace metadata
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(convertToCFMetaData(cfDef));
}
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor,
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_drop_keyspace: drops an entire keyspace via a schema migration.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
// refuse schema changes while live nodes disagree on schema version
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Update an existing keyspace, but do not allow column family modifications.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// column family changes must go through the dedicated CF calls
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
KSMetaData ksm = new KSMetaData(
ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor);
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_update_column_family: modifies an existing column family
 * definition via a schema migration.
 * @return the resulting schema version id as a string.
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-changing requests until all reachable nodes report the same
 * schema version.
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> live = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                                                     Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    boolean agreed = live.size() <= 1;
    if (!agreed)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a Thrift CfDef into internal CFMetaData, filling in defaults for
 * every unset optional field. The CFMetaData constructor is positional, so
 * the argument order below must not be disturbed.
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
// populate implicit defaults before validating thresholds that depend on them
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Thrift truncate: blocking cluster-wide truncation of the given column
 * family in the current keyspace; failures surface as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
// hold a request-scheduler slot for the duration of the truncation
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
// truncation needs every node; a timeout means some were unavailable
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Thrift set_keyspace: binds this connection's session to the given (validated) keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift describe_schema_versions: schema version id -> list of endpoints reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
// ----- Right: post-merge revision of this file follows -----
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared immutable empty results, returned instead of null lists
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (keyspace, auth) per connection thread
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates the Thrift server facade with the configured request scheduler. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's session state (created lazily per thread). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the read commands through StorageProxy under the request scheduler
 * and indexes the resulting rows by decorated key.
 *
 * @throws InvalidRequestException for CL.ANY, which is write-only.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// hold a request-scheduler slot only for the duration of the storage read
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns to Thrift Column structs, skipping tombstones
 * and carrying a TTL only for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> thrifted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        // tombstones are never returned to clients
        if (c.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
        thrifted.add(thriftCol);
    }
    return thrifted;
}
/**
 * Converts internal counter sub-columns to Thrift CounterColumn structs with
 * their total value, skipping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> thrifted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        // only counter columns may appear here
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        thrifted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return thrifted;
}
/**
 * Converts internal columns to Thrift ColumnOrSuperColumn structs, emitting a
 * counter_column for counter columns and a regular column (with TTL when
 * expiring) otherwise. Tombstones are skipped.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> thrifted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                col.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(col);
        }
        thrifted.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(thrifted);
    return thrifted;
}
/**
 * Dispatches super-column conversion: counter CFs need the counter-specific
 * Thrift structs, everything else uses the plain super column path.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
           ? thriftifyCounterSuperColumns(columns, reverseOrder)
           : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift ColumnOrSuperColumn structs;
 * super columns with no live sub-columns are omitted.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> children = thriftifySubColumns(sc.getSubColumns());
        if (children.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), children)));
    }
    // internal order is natural sort; reverse only at this boundary
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter super columns to Thrift CounterSuperColumn
 * structs; super columns with no live counter children are omitted.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> children = thriftifyCounterSubColumns(sc.getSubColumns());
        if (!children.isEmpty())
        {
            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
            cosc.setCounter_super_column(new CounterSuperColumn(sc.name(), children));
            result.add(cosc);
        }
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Executes the given read commands and maps each command's raw key to its
 * thriftified slice result.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily keys its results by decorated key, so decorate again to look up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts an internal ColumnFamily to its thrift form.
 *
 * @param subcolumnsOnly when true, the query targeted a single super column and
 *                       only that super column's subcolumns are returned
 * @param reverseOrder   whether the client asked for a reversed slice
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    // Nothing was read (or everything was filtered out): shared empty result.
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = superColumn.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }

    if (!cf.isSuper())
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);

    // Super CF: whether it is a counter CF decides which thrift struct wraps the data.
    boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
    return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
/**
 * Thrift endpoint: slice of a single row, implemented via the multiget path.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    // Authorize before doing any read work.
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    List<ByteBuffer> keyAsList = Collections.singletonList(key);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result =
        multigetSliceInternal(keyspace, keyAsList, column_parent, predicate, consistency_level);
    return result.get(key);
}
/**
 * Thrift endpoint: slice of multiple rows under a single column parent.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    // Authorize once up front; the internal helper does the per-key validation.
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request, builds one ReadCommand per key (by-names or by-range
 * depending on the predicate), and executes them via getSlice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);

    // A predicate carries either an explicit column-name list or a slice range.
    boolean byNames = predicate.column_names != null;
    SliceRange range = predicate.slice_range;
    List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(metadata, key);
        ReadCommand command = byNames
            ? new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names)
            : new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count);
        commands.add(command);
    }
    return getSlice(commands, consistency_level);
}
/**
 * Reads a single column or super column at the given path.
 *
 * @throws NotFoundException if the row does not exist or the named column is absent
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// If a concrete column is named, its (possibly null) super column parent goes in
// the query path; otherwise the super column itself is the requested name.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
// readColumnFamily keys its result by DecoratedKey, so re-decorate for lookup.
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// Exactly one name was requested, so at most one column can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Thrift endpoint: fetch a single column or super column. Thin wrapper that
 * only adds the debug log line around internal_get.
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift endpoint: count columns matching the predicate. Implemented by
 * materializing the slice and returning its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Thrift endpoint: per-key column counts. Reuses the multiget slice path and
 * keeps only the sizes of the resulting column lists.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices =
        multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 *
 * @throws InvalidRequestException on validation failure or unmarshalable value
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// Surface a value that does not marshal under the CF's comparator/validator
// as a client error rather than a server failure.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: insert one column. Thin wrapper adding only the debug log.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations grouped by row key and column family.
 * Counter and standard mutations for the same key accumulate into separate
 * RowMutations because they take different write paths.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Column families whose WRITE permission was already checked. A HashSet makes
    // the membership test O(1); the original List.contains was O(n) per CF.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: Set.add returns true only on first sight.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            // Lazily create the RowMutation of the right flavor for this key.
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        // Only ship mutations that actually contain work.
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift endpoint: batched mutations. Thin wrapper adding only the debug log.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path.
 *
 * @param isCommutativeOp true for counter deletes: adds commutative-write
 *        validation and wraps the mutation in a CounterMutation
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
// Counter deletes must travel through the counter write path.
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: delete at a path. Delegates to internal_remove with
 * isCommutativeOp=false (standard, non-counter delete).
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies mutations through StorageProxy under the request scheduler.
 * schedule()/release() must bracket the work exactly once, hence the
 * try/finally; a proxy timeout is translated to the thrift TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
// An empty batch is a no-op, not an error.
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift endpoint: returns the schema definition of one keyspace.
 *
 * @throws NotFoundException if no keyspace with that name is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    // Convert each CF definition to its thrift form.
    List<CfDef> cfDefs = new ArrayList<CfDef>(ksm.cfMetaData().size());
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    ksdef.setStrategy_options(ksm.strategyOptions);
    return ksdef;
}
/**
 * Thrift endpoint: reads a contiguous range of rows and slices each one.
 * The range is expressed either as tokens (start_key == null) or as keys.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// Token-addressed range: a Range excludes its left token.
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key-addressed range: Bounds includes both endpoints.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
// schedule()/release() must bracket the proxy call exactly once.
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows to thrift KeySlices, applying the predicate's
 * reversal flag and the super-column-targeting of the column parent.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subColumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subColumnsOnly, reversed)));
    return slices;
}
/**
 * Thrift endpoint: secondary-index scan. Validates the clause, delegates the
 * scan to StorageProxy, and converts the rows to KeySlices.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
// I/O failures during a scan are unexpected server errors.
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: returns definitions for every known keyspace. Keyspaces
 * whose metadata cannot be found are logged and skipped rather than failing
 * the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            // Table list and schema can be momentarily out of sync; skip this one.
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return defs;
}
/** Thrift endpoint: the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift endpoint: the thrift API version constant (not the server release). */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift endpoint: the token ring for a keyspace as (start token, end token,
 * endpoint list) triples.
 *
 * @throws InvalidRequestException for null or system-only keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    // System keyspaces have no user-visible ring.
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    List<TokenRange> ranges = new ArrayList<TokenRange>(rangeToEndpoints.size());
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range range = entry.getKey();
        ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), entry.getValue()));
    }
    return ranges;
}
/** Thrift endpoint: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: class name of the configured snitch. If the snitch is
 * wrapped in a DynamicEndpointSnitch, report the wrapped (sub)snitch instead.
 */
public String describe_snitch() throws TException
{
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        snitch = ((DynamicEndpointSnitch) snitch).subsnitch;
    return snitch.getClass().getName();
}
/**
 * Thrift endpoint: split the given token range into sub-ranges of roughly
 * keys_per_split keys each, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tf.toString(token));
    return splits;
}
/** Thrift endpoint: authenticate this connection's session state. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * May block until the request scheduler admits this request; must be paired
 * with a release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); must be called exactly once per schedule() call.
 */
private void release()
{
requestScheduler.release();
}
/**
 * Applies a schema migration on the MIGRATION stage and blocks until it
 * completes, so schema changes are serialized through one executor. Typical
 * migration failures surface (wrapped) as RuntimeException whose cause the
 * callers inspect; atypical failures also throw RuntimeException.
 */
private static void applyMigrationOnStage(final Migration m)
{
    // Typed Callable/Future instead of raw types; no semantic difference.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt flag before failing so pool threads observe it.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift endpoint: create a column family. Synchronized so concurrent schema
 * changes from this server are serialized. Returns the new schema version.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Expected migration failures become client-visible errors, cause preserved.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drop a column family from the current keyspace.
 * Synchronized to serialize schema changes; returns the new schema version.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface expected migration failures as client errors, keeping the cause.
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/**
 * Thrift endpoint: create a keyspace with its column families. Synchronized
 * to serialize schema changes; returns the new schema version.
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // BUG FIX: message previously said "CsDef"; the type is CfDef.
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Expected migration failures become client-visible errors, cause preserved.
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drop an entire keyspace. Synchronized to serialize schema
 * changes; returns the new schema version.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Expected migration failures become client-visible errors, cause preserved.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: update an existing keyspace's replication settings, but do
 * not allow column family modifications (CF changes go through
 * system_update_column_family). Returns the new schema version.
 *
 * @throws SchemaDisagreementException if cluster schema versions disagree
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Expected migration failures become client-visible errors, cause preserved.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: update an existing column family definition. Returns the
 * new schema version.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// defaults filled in there too; for now they are applied here first.
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Expected migration failures become client-visible errors, cause preserved.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-changing operations while live nodes disagree on the schema
 * version, to avoid compounding a split.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: truncate a column family in the current keyspace. Blocks
 * until the truncation completes cluster-wide; failures and timeouts are
 * reported as UnavailableException with the cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// Always pair the schedule() above, even on failure.
release();
}
}
/** Thrift endpoint: bind this connection's session to a keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift endpoint: map of schema version to the hosts reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increment a counter column. Always routed through the
 * counter write path (CounterMutation).
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// A name/value that does not marshal is a client error, not a server failure.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: delete a counter. Uses the server's current wall-clock time
 * as the tombstone timestamp and the commutative (counter) delete path.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: execute a CQL query. The query string arrives either raw
 * (UTF-8) or GZIP/deflate-compressed; it is decoded, then handed to the
 * QueryProcessor with this connection's session state.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // BUG FIX: the whole refill must be guarded by needsInput().
                    // Previously only the lenRead assignment was inside the if, so
                    // query.get/setInput executed unconditionally and could consume
                    // bytes and replace input the Inflater had not yet requested.
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // ANTLR parse failure: report as a malformed query, cause preserved.
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of allocating per request.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (one ClientState per
// server thread: keyspace binding, authentication, scheduling id)
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Constructs the server, taking the request scheduler from configuration. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's session state (see {@link #clientState}). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes read commands through StorageProxy under the request scheduler and
 * returns the resulting column families keyed by decorated row key.
 *
 * @throws InvalidRequestException if consistency level ANY is requested (reads
 *         cannot use ANY)
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
// schedule()/release() must bracket the proxy read exactly once.
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to their Thrift {@code Column} representation,
 * omitting any column marked as deleted. Expiring columns carry their TTL.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        // tombstones are invisible to clients
        if (!c.isMarkedForDelete())
        {
            Column thriftified = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                thriftified.setTtl(((ExpiringColumn) c).getTimeToLive());
            converted.add(thriftified);
        }
    }
    return converted;
}
/**
 * Converts internal counter subcolumns to Thrift {@code CounterColumn}s,
 * skipping tombstones. The counter value is totalled from its context.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        // only counter columns are expected inside a counter CF
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return converted;
}
/**
 * Converts internal columns into Thrift {@code ColumnOrSuperColumn} wrappers,
 * skipping tombstones. Counter columns are totalled via CounterContext; regular
 * columns carry value/timestamp and, for expiring columns, a TTL.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                col.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(col);
        }
        converted.add(cosc);
    }
    // ColumnFamily always hands us columns in "natural" order, so a reversed
    // query has to be flipped here before returning to the client.
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Dispatches super-column conversion to the counter or regular variant,
 * depending on whether the column family is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift form. A super column whose
 * subcolumns are all deleted is dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn superCol : columns)
    {
        List<Column> children = thriftifySubColumns(superCol.getSubColumns());
        if (!children.isEmpty())
        {
            SuperColumn sc = new SuperColumn(superCol.name(), children);
            converted.add(new ColumnOrSuperColumn().setSuper_column(sc));
        }
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Converts internal counter super columns to Thrift form, dropping any
 * super column whose subcolumns are all deleted.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn superCol : columns)
    {
        List<CounterColumn> children = thriftifyCounterSubColumns(superCol.getSubColumns());
        if (!children.isEmpty())
        {
            CounterSuperColumn sc = new CounterSuperColumn(superCol.name(), children);
            converted.add(new ColumnOrSuperColumn().setCounter_super_column(sc));
        }
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Performs the reads and thriftifies the resulting column families,
 * keyed by the raw (undecorated) row key of each command.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily keys results by DecoratedKey, so re-decorate to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily to the appropriate Thrift representation:
 * subcolumns only (when a super column was named in the query path),
 * super columns, or plain columns.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// the query named a specific super column, so only one top-level column is expected
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
// commutative default validator marks a counter CF
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
// Thrift API: slice of a single row, implemented as a one-key multiget.
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
// Thrift API: slice of multiple rows.
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request and builds one ReadCommand per key: by-names commands
 * when the predicate lists column names, otherwise range-slice commands.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Reads exactly one column (or super column) at the given path.
 * Throws NotFoundException when the row or column does not exist.
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when only a super column is named, query it as the target name with no super parent
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// a by-names read for a single name yields at most one result
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
// Thrift API: single-column read.
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
// Thrift API: count of live columns in one row.
// NOTE(review): implemented by materializing the full slice and taking its size,
// so the cost is proportional to the number of columns counted.
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
// Thrift API: per-key live-column counts for multiple rows (same
// materialize-then-count approach as get_count).
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
counts.put(cf.getKey(), cf.getValue().size());
}
return counts;
}
/**
 * Validates and applies a single-column insert as a one-row mutation.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// surface value/name marshalling problems as client errors, not server faults
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift API: single-column insert.
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations grouped by row key and column family.
 * Standard and counter mutations for the same key are accumulated into separate
 * RowMutations because counter writes take a different code path (CounterMutation).
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Column families already authorized for this request. A Set gives O(1)
    // membership checks where the previous List.contains() was O(n) per entry.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry : mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // add() returns true only the first time a CF is seen, so each
            // column family is authorized exactly once per request
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        // wrap counter rows so the counter write path is used
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
// Thrift API: batch mutation entry point.
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path. When isCommutativeOp is
 * true the delete targets a counter CF and is wrapped in a CounterMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift API: non-counter delete.
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies mutations through the request scheduler and StorageProxy.
 * release() is guaranteed to run once schedule() has been entered.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Returns the Thrift definition of one keyspace, including its column
 * family definitions and replication strategy options.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData meta = DatabaseDescriptor.getTableDefinition(table);
    if (meta == null)
        throw new NotFoundException();

    List<CfDef> cfDefs = new ArrayList<CfDef>(meta.cfMetaData().size());
    for (CFMetaData cfMeta : meta.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfMeta));

    KsDef def = new KsDef(meta.name, meta.strategyClass.getName(), cfDefs);
    def.setStrategy_options(meta.strategyOptions);
    return def;
}
/**
 * Thrift API: slice over a range of rows. The range is expressed either as
 * tokens (start_key null) or as keys; the read runs through the scheduler.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token range: ends are exclusive-start/inclusive-end (Range)
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key range: both ends inclusive (Bounds)
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Wraps each internal Row as a Thrift KeySlice, honoring a reversed
 * slice predicate when one was supplied.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;

    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return slices;
}
/**
 * Thrift API: secondary-index scan. Validates the clause against the CF's
 * indexes, then delegates to StorageProxy.scan.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns definitions for every known keyspace. A keyspace whose metadata
 * cannot be found is logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String keyspace : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(keyspace));
        }
        catch (NotFoundException nfe)
        {
            // best-effort listing: skip keyspaces with missing metadata
            logger.info("Failed to find metadata for keyspace '" + keyspace + "'. Continuing... ");
        }
    }
    return definitions;
}
// Thrift API: configured cluster name.
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
// Thrift API: Thrift interface version constant.
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift API: token ranges and their endpoint lists for a user keyspace.
 * System-only or unknown keyspaces are rejected.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
// Thrift API: fully-qualified partitioner class name.
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
// Thrift API: snitch class name; a dynamic snitch reports its wrapped subsnitch.
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift API: split tokens for a token range, roughly keys_per_split apart,
 * rendered as strings for client (e.g. Hadoop) consumption.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
// Thrift API: authenticates this connection's session state.
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Use parameterized Future/Callable instead of raw types; the task's
    // result is unused, we only block for completion and failures.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // restore the interrupt status before surfacing the failure, so
        // callers up the stack can still observe the interruption
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // unwrap is left to the caller; preserve the cause chain
        throw new RuntimeException(e);
    }
}
/**
 * Thrift API: schema change — add a column family. Requires schema agreement
 * across reachable nodes; returns the new schema version on success.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// rethrow as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: schema change — drop a column family from the current keyspace.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: schema change — add a keyspace with its column families.
 * Every contained CfDef must name the keyspace being created. Requires
 * schema agreement; returns the new schema version on success.
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // fixed typo in the message: "CsDef" -> "CfDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rethrow as a client-visible error, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift API: schema change — drop an entire keyspace.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// rethrow as a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
* @throws SchemaDisagreementException */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through the dedicated column-family calls
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift API: schema change — update an existing column family definition.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws if more than one schema version is live among reachable hosts;
 * schema-changing calls refuse to run during disagreement.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Thrift API: truncate a column family, blocking until all live replicas
 * acknowledge. Timeouts and I/O errors surface as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
// Thrift API: bind this connection to a keyspace after validating it exists.
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
// Thrift API: schema versions per host, for client-side agreement checks.
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift API: increments (or decrements) a counter column.
 * The mutation is wrapped in a CounterMutation for the counter write path.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// marshalling problems are client errors, not server faults
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift API: deletes a counter. Uses the server's current time as the
 * deletion timestamp, since counter deletes are not client-timestamped.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift API: executes a CQL query. The query string may arrive GZIP-compressed
 * (inflated here) or uncompressed (decoded as UTF-8), then is handed to the
 * QueryProcessor with this connection's session state.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // Only refill the inflater when it has drained its input.
                    // Previously the missing braces made query.get()/setInput()
                    // run on every iteration, which could consume bytes from the
                    // buffer and discard input the inflater had not yet used.
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // ANTLR parse failure -> client error, with the cause preserved
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
// MergeMethods (merge-tool artifact marking the boundary between concatenated files)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of allocating new lists.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
// Returns the per-thread session state (keyspace, auth, scheduling info).
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands via StorageProxy and indexes the resulting
 * rows by decorated key.
 *
 * @throws InvalidRequestException if consistency level is ANY (reads cannot use it)
 * @throws TimedOutException if the replicas did not respond in time
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// Acquire a scheduler slot for this thread; always released in the finally.
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns into thrift Column objects, skipping any
 * column that is marked for deletion and carrying over TTLs for expiring
 * columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    List<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(c.name());
        thriftCol.setValue(c.value());
        thriftCol.setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
        converted.add(thriftCol);
    }
    return converted;
}
/**
 * Converts internal counter subcolumns into thrift CounterColumn objects,
 * resolving each counter context to its total value and skipping deleted
 * columns.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    List<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        // The stored value is a counter context; total() folds it into a long.
        long total = CounterContext.instance().total(c.value());
        converted.add(new CounterColumn(c.name(), total));
    }
    return converted;
}
/**
 * Converts internal columns into thrift ColumnOrSuperColumn objects,
 * distinguishing counter columns from regular ones, skipping deleted
 * columns, and optionally reversing the result order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            // Counter columns expose their context's total value.
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column thriftCol = new Column(c.name());
            thriftCol.setValue(c.value());
            thriftCol.setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(thriftCol);
        }
        converted.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Dispatches super-column conversion: counter column families produce
 * CounterSuperColumns, all others produce plain SuperColumns.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns into thrift SuperColumn wrappers.
 * Super columns whose subcolumns are all deleted are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn superCol : columns)
    {
        List<Column> subs = thriftifySubColumns(superCol.getSubColumns());
        if (!subs.isEmpty())
        {
            SuperColumn sc = new SuperColumn(superCol.name(), subs);
            converted.add(new ColumnOrSuperColumn().setSuper_column(sc));
        }
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Converts internal counter super columns into thrift CounterSuperColumn
 * wrappers, dropping super columns whose subcolumns are all deleted.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn superCol : columns)
    {
        List<CounterColumn> subs = thriftifyCounterSubColumns(superCol.getSubColumns());
        if (!subs.isEmpty())
        {
            CounterSuperColumn sc = new CounterSuperColumn(superCol.name(), subs);
            converted.add(new ColumnOrSuperColumn().setCounter_super_column(sc));
        }
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Executes the given read commands and converts each resulting row's
 * column family to thrift objects, keyed by the command's raw key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily indexes by DecoratedKey, so re-decorate the raw key to look up.
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a column family's contents to thrift objects.
 *
 * When subcolumnsOnly is set, only the subcolumns of the first (and only)
 * super column are converted. Otherwise super and standard column families
 * take their respective conversion paths.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // The query targeted a single super column, so cf holds exactly one column.
        IColumn onlyColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subs = onlyColumn.getSubColumns();
        return (subs == null || subs.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subs, reverseOrder);
    }
    if (!cf.isSuper())
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
    boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
    return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
/**
 * Returns a slice of columns for a single key, implemented as a
 * single-key multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns a slice of columns for each requested key, after checking
 * read permission on the column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Fetches a single column (or super column) by exact path.
 *
 * @throws NotFoundException if the row or the addressed column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// When column is null the path addresses a whole super column; otherwise the
// super_column (possibly null) is the parent and column is the target name.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// Exactly one name was requested, so at most one column can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 * Note: fetches the full slice and counts client-side; cost scales with the
 * slice size, not just the count.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Returns, for each key, the number of columns a multiget_slice with the
 * same arguments would return. The slices are fetched in full and counted
 * client-side.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 *
 * @throws InvalidRequestException on failed validation, including a missing
 *         super column name for a super column family
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// Value failed type validation against the column's comparator/validator.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a single-column insert; delegates to internal_insert.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations keyed by row key and column
 * family name.
 *
 * Counter and standard column families take different write paths, so each
 * key's mutations are split into up to two RowMutations: the standard one is
 * applied directly, the counter one is wrapped in a CounterMutation.
 *
 * @throws InvalidRequestException on failed validation or authorization
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // HashSet gives O(1) membership checks; the original List made the
    // per-CF authorization dedup O(n).
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: add() returns true only the first time we see a CF.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift entry point for batch mutations; delegates to internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path.
 *
 * @param isCommutativeOp true for counter deletions, which get extra
 *        commutative-write validation and a CounterMutation wrapper
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a non-counter deletion; delegates to internal_remove
 * with isCommutativeOp=false.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Validates the consistency level and applies the mutations via StorageProxy,
 * bracketing the work with scheduler queue/release.
 *
 * @throws TimedOutException if replicas did not acknowledge in time
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// Always release the scheduler slot acquired by schedule().
release();
}
}
/**
 * Builds the thrift KsDef describing the given keyspace, including one
 * CfDef per column family.
 *
 * @throws NotFoundException if the keyspace is not defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksMeta = DatabaseDescriptor.getTableDefinition(table);
    if (ksMeta == null)
        throw new NotFoundException();
    List<CfDef> columnFamilyDefs = new ArrayList<CfDef>(ksMeta.cfMetaData().size());
    for (CFMetaData cfm : ksMeta.cfMetaData().values())
        columnFamilyDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef def = new KsDef(ksMeta.name, ksMeta.strategyClass.getName(), columnFamilyDefs);
    def.setStrategy_options(ksMeta.strategyOptions);
    return def;
}
/**
 * Returns a slice of columns for each key in a contiguous key (or token)
 * range.
 *
 * If start_key is null the range is token-based (Range), otherwise it is
 * key-based (Bounds).
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// Token-range query: parse the start/end tokens with the partitioner's factory.
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key-range query: derive tokens from the raw keys.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows into thrift KeySlices, honoring a reversed slice
 * range in the predicate.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns KsDefs for every defined keyspace. Keyspaces whose metadata
 * cannot be found are logged and skipped rather than failing the listing.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return definitions;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Returns the thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a non-system keyspace: one TokenRange per
 * range-to-endpoints entry, with tokens rendered via the partitioner's
 * token factory.
 *
 * @throws InvalidRequestException for null, unknown, or system keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range tokenSpan = entry.getKey();
        ranges.add(new TokenRange(factory.toString(tokenSpan.left), factory.toString(tokenSpan.right), entry.getValue()));
    }
    return ranges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured endpoint snitch; for a dynamic
 * snitch, the wrapped (sub) snitch's class is reported instead.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Computes split tokens for the given column family over a token range,
 * aiming for roughly keys_per_split keys per split, and returns them as
 * strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Range span = new Range(factory.fromString(start_token), factory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, span, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(factory.toString(token));
    return splits;
}
/**
 * Authenticates this connection's session using the supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * May block until the request scheduler grants a slot; each call must be
 * paired with release() (callers use try/finally).
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); invoked from finally blocks.
 */
private void release()
{
requestScheduler.release();
}
/**
 * Applies a schema migration on the MIGRATION stage and blocks until it has
 * been applied and announced. Typical migration failures surface as
 * InvalidRequestException from the caller; atypical ones as RuntimeException.
 *
 * Improvements over the original: the raw Future/Callable types are
 * parameterized, and the interrupt flag is restored before rethrowing on
 * InterruptedException.
 */
private static void applyMigrationOnStage(final Migration m)
{
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt status so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Adds a new column family after validating its definition and checking
 * schema agreement across the cluster.
 *
 * @return the new schema version as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Wrap as InvalidRequestException, preserving the original cause.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace after checking schema
 * agreement across the cluster.
 *
 * @return the new schema version as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Wrap as InvalidRequestException, preserving the original cause.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Adds a new keyspace, including any column families it defines, after
 * validating the definitions and checking schema agreement.
 *
 * Fix: the error message for a mismatched keyspace name referred to the
 * (nonexistent) type "CsDef"; corrected to "CfDef".
 *
 * @return the new schema version as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Wrap as InvalidRequestException, preserving the original cause.
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace and returns the resulting schema version.
 * Requires WRITE access on the keyspace list and schema agreement.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // report migration failures to the client, preserving the cause
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/** update an existing keyspace, but do not allow column family modifications.
 * Returns the resulting schema version string. Requires WRITE access on the
 * keyspace list and schema agreement across reachable nodes. */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through system_update_column_family instead
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap server-side failures as a client-visible error, keeping the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition and returns the resulting
 * schema version. The CF must already exist; keyspace and name are mandatory.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// NOTE(review): this sentence is truncated in the original source;
// fill in defaults the client omitted before building the migration
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// wrap server-side failures as a client-visible error, keeping the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws SchemaDisagreementException when more than one schema version is
 * reported by the reachable nodes in the cluster.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (reachableVersions.size() > 1)
        throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
// NOTE(review): the @see above looks misplaced (it relates to the CF-update
// path, not to truncate) — confirm before relying on it.
/**
 * Truncates (deletes all data from) a column family in the caller's current
 * keyspace, blocking until the truncation completes cluster-wide.
 * Timeouts and IO failures are reported as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
// schedule()/release() bracket the work for the request scheduler
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/**
 * Binds the connection's session state to the given keyspace after
 * validating that the keyspace exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/**
 * Returns a map of schema version -> hosts reporting that version, as seen
 * by StorageProxy. Used by clients to check for schema agreement.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Increments a counter column. The super column name is mandatory when the
 * target CF is a super column family. The write is routed through
 * CounterMutation rather than the plain mutation path.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
// 'true' flags this as a counter operation for CF validation
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// bad column name/value encoding is a client error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Removes a counter column (or counter super column). Delegates to the shared
 * remove path with the commutative flag set and a server-side timestamp.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query supplied as a (possibly GZIP/deflate-compressed) UTF-8
 * byte buffer and returns its result.
 *
 * Fixes over the previous version:
 * - the inflate loop read from {@code query} and called setInput()
 *   unconditionally with a stale {@code lenRead}, which could underflow the
 *   buffer or drop unconsumed inflater input; input is now only fed when the
 *   inflater reports needsInput().
 * - the DataFormatException message said "deflating" although this code path
 *   inflates.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                while (true)
                {
                    if (decompressor.needsInput())
                    {
                        // feed the next chunk (at most 1024 bytes) only when the
                        // inflater has consumed its previous input
                        int lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error inflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // ANTLR parse failure -> malformed CQL
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared immutable empty results, returned instead of null collections
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
// one ClientState per server thread; holds keyspace/auth for that connection
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates the server, picking up the configured request scheduler. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's session state (keyspace, permissions). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands at the requested consistency level and
 * returns the resulting rows keyed by decorated key. CL.ANY is rejected for
 * reads; timeouts surface as TimedOutException.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// schedule()/release() bracket the read for the request scheduler
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
// IO failures on the read path are not client errors
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, skipping tombstones.
 * Returns the shared empty list for null/empty input.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        // only expiring columns carry a TTL
        if (c instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
        converted.add(thriftCol);
    }
    return converted;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns, skipping
 * tombstones. The column value is resolved to its total via CounterContext.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return converted;
}
/**
 * Converts internal columns to Thrift ColumnOrSuperColumn wrappers, skipping
 * tombstones. Counter columns become counter_column entries; everything else
 * becomes a regular column (with TTL for expiring columns).
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            CounterColumn counter = new CounterColumn(c.name(), CounterContext.instance().total(c.value()));
            converted.add(new ColumnOrSuperColumn().setCounter_column(counter));
        }
        else
        {
            Column regular = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                regular.setTtl(((ExpiringColumn) c).getTimeToLive());
            converted.add(new ColumnOrSuperColumn().setColumn(regular));
        }
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Dispatches super-column conversion to the counter or regular variant
 * depending on whether the CF is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift super_column wrappers. Super
 * columns whose live subcolumns are all deleted are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        converted.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Converts internal counter super columns to Thrift counter_super_column
 * wrappers. Super columns with no live subcolumns are dropped.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        converted.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Runs the given read commands and converts each row's column family into
 * Thrift ColumnOrSuperColumn lists, keyed by the original (undecorated) key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// readColumnFamily keys results by DecoratedKey, so re-decorate to look up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result into Thrift ColumnOrSuperColumn objects.
 * With subcolumnsOnly set, only the subcolumns of the (single) super column
 * in the result are returned; otherwise super/standard conversion is chosen
 * from the CF type.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// the query targeted a specific super column, so the result holds exactly
// one top-level column whose subcolumns are the answer
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/**
 * Returns the slice of columns for a single key, implemented as a
 * single-key multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ByteBuffer> singleKey = Collections.singletonList(key);
    return multigetSliceInternal(state().getKeyspace(), singleKey, column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns slices for multiple keys at once, after checking READ access on
 * the target column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Fetches a single (super)column by exact path. Throws NotFoundException when
 * the row or the named column does not exist (or is deleted).
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when 'column' is set we descend into the super column (if any); otherwise
// the super column itself is the target name
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// a by-names read of one name yields at most one live column
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns matching a predicate for one key, implemented as a
 * get_slice followed by a size() — the full slice is materialized.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Counts matching columns for each of several keys by materializing the
 * slices via multigetSliceInternal and taking their sizes.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert. The super column name is
 * mandatory when the target CF is a super column family.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// 'false' rejects counter CFs: counters must go through add()
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// bad name/value encoding is a client error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a single-column insert; delegates to the shared
 * internal implementation.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations. Per row key, mutations are
 * split into one RowMutation for standard CFs and one CounterMutation-wrapped
 * RowMutation for counter CFs, since the two take different write paths.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
if (metadata.getDefaultValidator().isCommutative())
{
// counter CF: mutations accumulate in the (lazily created) counter RowMutation
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
// a Mutation may carry a deletion, an insertion, or both
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/**
 * Thrift entry point for batched mutations; delegates to the shared
 * internal implementation.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path. With isCommutativeOp
 * set the target must be a counter CF and the mutation is wrapped in a
 * CounterMutation; otherwise it goes down the standard write path.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
// path may name a CF, a super column, or a single column; all are valid delete targets
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a non-counter deletion; delegates to the shared
 * remove path with the commutative flag cleared.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies the given mutations through StorageProxy at the requested
 * consistency level, bracketed by the request scheduler. Timeouts surface
 * as TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// always give the scheduler slot back, even on timeout
release();
}
}
/**
 * Returns the Thrift definition of a keyspace (including its column
 * families), or throws NotFoundException when no such keyspace exists.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Returns slices of columns for a contiguous range of keys. The range may be
 * given either as tokens (start_key == null) or as keys; token ranges are
 * end-inclusive Ranges, key ranges are Bounds.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
// schedule()/release() bracket the read for the request scheduler
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows into Thrift KeySlices, honoring the predicate's
 * slice-range reversal flag when ordering each row's columns.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> keySlices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        keySlices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return keySlices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns a definition for every known keyspace. Keyspaces whose metadata
 * cannot be found are logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    Set<String> tableNames = DatabaseDescriptor.getTables();
    for (String table : tableNames)
    {
        try
        {
            definitions.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            // best-effort: skip keyspaces with missing metadata instead of aborting
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return definitions;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
    String clusterName = DatabaseDescriptor.getClusterName();
    return clusterName;
}
/** Returns the Thrift API version string. */
public String describe_version() throws TException
{
    String apiVersion = Constants.VERSION;
    return apiVersion;
}
/**
 * Describes the token ring for the given (non-system) keyspace: one TokenRange
 * per range, each listing the endpoints responsible for it.
 *
 * @throws InvalidRequestException for null or system/unknown keyspaces
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> tokenRanges = new ArrayList<TokenRange>();
    Map<Range, List<String>> rangeMap = StorageService.instance.getRangeToEndpointMap(keyspace);
    for (Map.Entry<Range, List<String>> mapping : rangeMap.entrySet())
    {
        Range tokenRange = mapping.getKey();
        String start = factory.toString(tokenRange.left);
        String end = factory.toString(tokenRange.right);
        tokenRanges.add(new TokenRange(start, end, mapping.getValue()));
    }
    return tokenRanges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    Class<?> partitionerClass = StorageService.getPartitioner().getClass();
    return partitionerClass.getName();
}
/**
 * Returns the class name of the configured snitch. When the snitch is a
 * DynamicEndpointSnitch wrapper, the wrapped (underlying) snitch's class
 * name is reported instead.
 */
public String describe_snitch() throws TException
{
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        snitch = ((DynamicEndpointSnitch) snitch).subsnitch;
    return snitch.getClass().getName();
}
/**
 * Computes split points over the given token range such that each split covers
 * roughly keys_per_split keys, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Range target = new Range(factory.fromString(start_token), factory.fromString(end_token));
    List<Token> splitTokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, target, keys_per_split);
    List<String> result = new ArrayList<String>(splitTokens.size());
    for (Token t : splitTokens)
        result.add(factory.toString(t));
    return result;
}
/**
 * Authenticates this connection's session with the supplied credentials.
 *
 * @throws AuthenticationException if the credentials are rejected
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    Map<String, String> credentials = auth_request.getCredentials();
    state().login(credentials);
}
/**
 * Queues the calling thread on the request scheduler before it may touch
 * storage services. Must be paired with {@link #release()} in a finally block.
 */
private void schedule()
{
    Thread caller = Thread.currentThread();
    requestScheduler.queue(caller, state().getSchedulingValue());
}
/**
 * Returns the scheduler slot acquired by {@link #schedule()}.
 * Always called from a finally block so slots are never leaked.
 */
private void release()
{
    requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
// Submit apply+announce as a unit on the MIGRATION stage so schema changes
// are serialized through a single executor.
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// block until the migration has been applied and announced
f.get();
}
catch (InterruptedException e)
{
// interruption here is unexpected; treat as a programming error
throw new AssertionError(e);
}
catch (ExecutionException e)
{
// unwrap-and-rethrow: callers translate this into a client-facing error
throw new RuntimeException(e);
}
}
/**
 * Creates a new column family via a schema migration and returns the resulting
 * schema version. Synchronized so concurrent schema changes from this server
 * are serialized.
 *
 * @throws InvalidRequestException if validation or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        CFMetaData newCf = CFMetaData.convertToCFMetaData(cf_def);
        applyMigrationOnStage(new AddColumnFamily(newCf));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
    catch (IOException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
/**
 * Drops a column family from the current keyspace via a schema migration and
 * returns the resulting schema version.
 *
 * @throws InvalidRequestException if the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        DropColumnFamily drop = new DropColumnFamily(state().getKeyspace(), column_family);
        applyMigrationOnStage(drop);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
    catch (IOException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
/**
 * Creates a new keyspace (plus any column families bundled in the definition)
 * via a schema migration and returns the resulting schema version.
 *
 * @throws InvalidRequestException if validation or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
            throw new InvalidRequestException("CsDef (" + cf.getName() + ") had a keyspace definition that did not match KsDef");
    }
    try
    {
        Collection<CFMetaData> cfMetas = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfMetas.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksMeta = new KSMetaData(ks_def.name,
                                           AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                           KSMetaData.backwardsCompatibleOptions(ks_def),
                                           cfMetas.toArray(new CFMetaData[cfMetas.size()]));
        applyMigrationOnStage(new AddKeyspace(ksMeta));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
    catch (IOException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
/**
 * Drops an entire keyspace via a schema migration and returns the resulting
 * schema version.
 *
 * @throws InvalidRequestException if the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
    catch (IOException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
/**
 * Updates an existing keyspace's settings (replication strategy/options) via a
 * schema migration. Column family definitions are explicitly rejected here:
 * they must be changed through system_update_column_family.
 *
 * @throws InvalidRequestException if validation or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksMeta = new KSMetaData(ks_def.name,
                                           AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                           KSMetaData.backwardsCompatibleOptions(ks_def));
        applyMigrationOnStage(new UpdateKeyspace(ksMeta));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
    catch (IOException e)
    {
        InvalidRequestException wrapped = new InvalidRequestException(e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
/**
 * Updates an existing column family definition via a schema migration and
 * returns the resulting schema version.
 *
 * @throws InvalidRequestException if validation fails, the CF does not exist,
 *         or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
// the CF must already exist; updates cannot create it
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// (NOTE(review): original comment is truncated here in the source)
// fill in any unset CfDef fields with their implicit defaults before converting
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Ensures all reachable nodes agree on the schema version before allowing a
 * schema-changing operation. Unreachable hosts are excluded from the check.
 *
 * @throws SchemaDisagreementException if more than one live schema version exists
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> liveVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (liveVersions.size() > 1)
        throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Blocking truncate of a column family in the session's keyspace.
 * Timeouts and I/O failures are both surfaced as UnavailableException
 * (with the original cause attached).
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
// acquire a scheduler slot for the duration of the (blocking) truncate
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// always return the scheduler slot, even on failure
release();
}
}
/**
 * Binds this connection's session to the given keyspace after validating
 * that it exists.
 *
 * @throws InvalidRequestException if the keyspace is unknown
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    ClientState session = state();
    session.setKeyspace(keyspace);
}
/** Returns a map of schema version -> list of hosts reporting that version. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    Map<String, List<String>> versions = StorageProxy.describeSchemaVersions();
    return versions;
}
// counter methods
/**
 * Increments a counter column by the given delta at the requested consistency
 * level. The target column family must be a counter CF; for super CFs the
 * super column name is mandatory.
 *
 * @throws InvalidRequestException on validation failure
 * @throws UnavailableException if not enough replicas are live
 * @throws TimedOutException if the counter write times out
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
// "true" requires the CF to be a counter (commutative) column family
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// bad column name/value encoding -> client error, not server error
throw new InvalidRequestException(e.getMessage());
}
// counter writes go through CounterMutation rather than plain RowMutation
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Removes a counter column (or counter super column). Delegates to the shared
 * removal path with the current wall-clock time as the deletion timestamp and
 * the counter flag set.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    long deletionTimestamp = System.currentTimeMillis();
    internal_remove(key, path, deletionTimestamp, consistency_level, true);
}
/**
 * Executes a CQL query supplied as a (possibly GZIP/deflate-compressed) byte
 * buffer. Decompresses/decodes the query to a UTF-8 string, then hands it to
 * the CQL QueryProcessor.
 *
 * @throws InvalidRequestException on malformed compression, encoding, or CQL
 * @throws TimedOutException / UnavailableException propagated from execution
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// refill the input buffer from the ByteBuffer in <=1024-byte chunks
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
// NOTE(review): query.get(...) runs every iteration, even when the
// inflater did NOT ask for more input (the if only guards lenRead).
// Looks suspicious — confirm against the inflate loop's intent.
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
// drain all currently-inflatable bytes into the output accumulator
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
// hand the decoded CQL text to the query processor with this session's state
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure -> client error with the parser error as cause
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
// KeepBothMethods (NOTE: stray non-Java marker between concatenated file revisions; commented out so it cannot be parsed as code)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
/**
 * Thrift RPC implementation of the Cassandra service interface: translates
 * Thrift requests into internal read/write/schema operations.
 */
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared immutable empty results, returned to avoid allocating empty lists per call
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Wires this server to the configured request scheduler. */
public CassandraServer()
{
    this.requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-thread session state for the current connection. */
public ClientState state()
{
    ClientState session = clientState.get();
    return session;
}
/**
 * Executes a batch of read commands under the request scheduler and returns
 * the results keyed by decorated row key.
 *
 * @throws InvalidRequestException if consistency level ANY is used (reads
 *         require a real consistency level)
 * @throws TimedOutException if the read times out
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// scheduler slot brackets only the storage read itself
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
// index the result rows by their decorated key for per-command lookup by callers
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, skipping tombstones and
 * propagating TTLs from expiring columns. Returns the shared empty list when
 * there is nothing to convert.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn internal : columns)
    {
        // tombstones are invisible to clients
        if (internal.isMarkedForDelete())
            continue;
        Column out = new Column(internal.name()).setValue(internal.value()).setTimestamp(internal.timestamp());
        if (internal instanceof ExpiringColumn)
            out.setTtl(((ExpiringColumn) internal).getTimeToLive());
        converted.add(out);
    }
    return converted;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns, skipping
 * tombstones. The client-visible value is the aggregated total of the
 * counter's context.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn internal : columns)
    {
        if (internal.isMarkedForDelete())
            continue;
        assert internal instanceof org.apache.cassandra.db.CounterColumn;
        long total = CounterContext.instance().total(internal.value());
        converted.add(new CounterColumn(internal.name(), total));
    }
    return converted;
}
/**
 * Converts internal columns to Thrift ColumnOrSuperColumn wrappers, skipping
 * tombstones, handling counter and expiring columns, and reversing the output
 * when the client asked for a reversed slice.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn internal : columns)
    {
        if (internal.isMarkedForDelete())
            continue;
        if (internal instanceof org.apache.cassandra.db.CounterColumn)
        {
            long total = CounterContext.instance().total(internal.value());
            CounterColumn counter = new CounterColumn(internal.name(), total);
            converted.add(new ColumnOrSuperColumn().setCounter_column(counter));
        }
        else
        {
            Column regular = new Column(internal.name()).setValue(internal.value()).setTimestamp(internal.timestamp());
            if (internal instanceof ExpiringColumn)
                regular.setTtl(((ExpiringColumn) internal).getTimeToLive());
            converted.add(new ColumnOrSuperColumn().setColumn(regular));
        }
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Dispatches super-column conversion to the counter or regular variant
 * depending on whether the column family is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift SuperColumn wrappers. Super
 * columns whose subcolumns are all deleted are omitted entirely; output is
 * reversed when requested.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn internal : columns)
    {
        List<Column> children = thriftifySubColumns(internal.getSubColumns());
        if (children.isEmpty())
            continue;
        SuperColumn wrapper = new SuperColumn(internal.name(), children);
        converted.add(new ColumnOrSuperColumn().setSuper_column(wrapper));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Counter-CF analogue of thriftifySuperColumns: wraps surviving counter
 * subcolumns in CounterSuperColumn structures, reversing when requested.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn internal : columns)
    {
        List<CounterColumn> children = thriftifyCounterSubColumns(internal.getSubColumns());
        if (children.isEmpty())
            continue;
        CounterSuperColumn wrapper = new CounterSuperColumn(internal.name(), children);
        converted.add(new ColumnOrSuperColumn().setCounter_super_column(wrapper));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Runs the given read commands and converts each result into Thrift columns,
 * keyed by the raw request key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by decorated key internally; re-decorate to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// only SliceFromReadCommand carries a reversed flag; by-name reads are never reversed
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result to Thrift columns. When subcolumnsOnly is
 * set (the request targeted a specific super column), only the subcolumns of
 * the single returned super column are converted; otherwise the whole CF is
 * converted via the super/regular/counter paths.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // the read targeted one super column, so the CF holds exactly that column
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> children = superColumn.getSubColumns();
        return (children == null || children.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(children, reverseOrder);
    }
    if (cf.isSuper())
    {
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Thrift endpoint: returns the selected columns of a single row.
 * Implemented as a one-key multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ByteBuffer> singleKey = Collections.singletonList(key);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result =
        multigetSliceInternal(state().getKeyspace(), singleKey, column_parent, predicate, consistency_level);
    return result.get(key);
}
/**
 * Thrift endpoint: returns the selected columns for each of the given row
 * keys, keyed by the request key.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Shared implementation behind get(): reads exactly one column (or one super
 * column) identified by a ColumnPath.
 *
 * @throws NotFoundException if the row or the requested column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when a concrete column is named, its super column (if any) goes into the query path;
// when only a super column is named, the path stays at CF level and the super column is the "name"
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// a by-name read for one name yields at most one column
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Thrift endpoint: returns the number of columns matching the predicate in a
 * single row, implemented by counting the result of a full get_slice.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Thrift endpoint: returns per-key column counts for the given predicate,
 * implemented by sizing the results of a multiget slice.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices =
        multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> countsByKey = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> sliceEntry : slices.entrySet())
        countsByKey.put(sliceEntry.getKey(), sliceEntry.getValue().size());
    return countsByKey;
}
/**
 * Shared implementation behind insert(): validates and applies a single-column
 * write (non-counter column families only).
 *
 * @throws InvalidRequestException on validation failure or bad column encoding
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// "false" rejects counter column families; counters must use add()
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// bad column name/value encoding -> client error, not server error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: writes a single column. Delegates to the shared
 * internal insert path after logging.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Shared implementation behind {@link #batch_mutate}: validates and groups the
 * per-key, per-CF mutation map into at most one standard RowMutation and one
 * counter RowMutation per key (counters take a different write path), then
 * submits everything in a single doInsert call.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// caches CFs already permission-checked so each CF is authorized only once per batch
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
// lazily create the per-key mutation of the right flavor on first use
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
// a Mutation may carry a deletion, an insertion, or both
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/** Thrift entry point for batched mutations; delegates to {@link #internal_batch_mutate}. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Shared implementation behind {@link #remove} and {@link #remove_counter}:
 * deletes the path (column, supercolumn, or whole row) at the given timestamp.
 * When isCommutativeOp is true the deletion targets a counter CF and is routed
 * through a CounterMutation instead of a plain RowMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift entry point for non-counter deletes; delegates to {@link #internal_remove}. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies RowMutations through StorageProxy, validating the consistency level
 * first. Scheduling brackets the write so the request scheduler can throttle.
 * NOTE(review): this largely duplicates the IMutation overload below, except
 * it validates the consistency level and does not skip empty mutation lists —
 * consider consolidating once the behavioral difference is confirmed intended.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// always release the scheduler slot, even on timeout
release();
}
}
/**
 * Applies arbitrary IMutations (including CounterMutations) through
 * StorageProxy under request-scheduler control. Empty batches are a no-op.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
// always release the scheduler slot, even on timeout
release();
}
}
/**
 * Builds the thrift KsDef for one keyspace, including a CfDef for each of its
 * column families. Requires READ access on the keyspace list.
 *
 * @throws NotFoundException when no keyspace of that name is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData meta = DatabaseDescriptor.getTableDefinition(table);
    if (meta == null)
        throw new NotFoundException();
    List<CfDef> columnFamilyDefs = new ArrayList<CfDef>();
    for (CFMetaData cf : meta.cfMetaData().values())
        columnFamilyDefs.add(CFMetaData.convertToThrift(cf));
    KsDef def = new KsDef(meta.name, meta.strategyClass.getName(), columnFamilyDefs);
    def.setStrategy_options(meta.strategyOptions);
    return def;
}
/**
 * Returns a slice of columns for every row in the requested key/token range.
 * Token-based ranges (start_key == null) become a Range; key-based ranges
 * become a Bounds. The read is bracketed by the request scheduler.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
// start_key == null signals a token range (validated above); otherwise use key bounds
if (range.start_key == null)
{
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
// release the scheduler slot regardless of outcome
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlices, reversing column order when
 * the predicate's slice range asked for reversed results.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row r : rows)
    {
        List<ColumnOrSuperColumn> converted = thriftifyColumnFamily(r.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(r.key.key, converted));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists KsDefs for every known keyspace. A keyspace whose metadata cannot be
 * found mid-iteration is logged and skipped rather than failing the call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> result = new ArrayList<KsDef>();
    for (String name : DatabaseDescriptor.getTables())
    {
        try
        {
            result.add(describe_keyspace(name));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + name + "'. Continuing... ");
        }
    }
    return result;
}
/** @return the cluster name configured in cassandra.yaml. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** @return the thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a non-system keyspace: one TokenRange (left
 * token, right token, endpoint list) per owned range.
 *
 * @throws InvalidRequestException for null or system/unknown keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> result = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range owned = entry.getKey();
        result.add(new TokenRange(factory.toString(owned.left), factory.toString(owned.right), entry.getValue()));
    }
    return result;
}
/** @return the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch; when the snitch is wrapped
 * in a DynamicEndpointSnitch, reports the underlying subsnitch instead.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Splits the given token range of a column family into sub-ranges of roughly
 * keys_per_split keys and returns the split tokens as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Range span = new Range(factory.fromString(start_token), factory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, span, keys_per_split);
    List<String> result = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        result.add(factory.toString(t));
    return result;
}
/** Authenticates this connection's session with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * May block, depending on the configured IRequestScheduler; must be paired
 * with {@link #release()} (callers use try/finally).
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of {@link #schedule()}; called from finally blocks.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Parameterized Callable/Future instead of raw types (behavior unchanged,
    // removes unchecked-conversion warnings).
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            // apply() mutates the local schema; announce() propagates the change
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration finishes so callers observe the new schema version
        f.get();
    }
    catch (InterruptedException e)
    {
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // keep the cause chained; callers translate via getMessage() where needed
        throw new RuntimeException(e);
    }
}
/**
 * Creates a new column family via a schema migration.
 * Synchronized so concurrent schema changes on this server are serialized.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// surface config problems as client-visible errors, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace via a schema migration.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Creates a new keyspace (and any contained column families) via a schema
 * migration. Each CfDef must name the keyspace being created.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
// validate and convert every CF definition before building the migration
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops an entire keyspace via a schema migration.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Update an existing keyspace, but do not allow column family modifications.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through system_update_column_family instead
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition via a schema migration.
 * The CF identified by (keyspace, name) must already exist.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema changes while live nodes disagree on the schema version.
 * @throws SchemaDisagreementException if more than one version is reported
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Truncates (empties) a column family cluster-wide, blocking until complete.
 * Timeouts and I/O failures are reported as UnavailableException with the
 * original cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// release the scheduler slot even on failure
release();
}
}
/** Binds this connection's session to a keyspace after validating it exists. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** @return map of schema version -> list of hosts reporting it (cluster-wide). */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Increments a counter column by column.value. Only valid on counter column
 * families; the write is wrapped in a CounterMutation.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Deletes a counter column. Uses the server's current time as the deletion
 * timestamp since counter deletes have no client-supplied timestamp.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query supplied either as plain UTF-8 bytes (NONE) or as a
 * deflate-compressed payload (GZIP), decompressing before handing off to the
 * QueryProcessor.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// NOTE(review): when needsInput() is false, lenRead keeps its previous value
// and more bytes are still consumed from 'query' before setInput() replaces
// the inflater's input — looks fragile for payloads spanning multiple 1024-byte
// chunks; confirm against the thrift client's compression behavior.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure -> client-visible syntax error
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null when a slice yields nothing.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Pulls the request scheduler configured for this node.
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** @return the per-connection (thread-local) client session state. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy (under scheduler
 * control) and returns the results keyed by decorated row key.
 * ConsistencyLevel.ANY is rejected since it is meaningless for reads.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// always release the scheduler slot
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts live internal subcolumns to thrift Columns. Columns marked for
 * delete are skipped; TTLs are carried over for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column converted = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            converted.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(converted);
    }
    return result;
}
/**
 * Converts live internal counter subcolumns to thrift CounterColumns, summing
 * each counter context into its total. Deleted columns are skipped.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return result;
}
/**
 * Converts live internal columns into thrift ColumnOrSuperColumn wrappers,
 * distinguishing counter columns from regular (possibly expiring) columns.
 * Reverses the result when the caller asked for reversed slice order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
if (column instanceof org.apache.cassandra.db.CounterColumn)
{
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(new ColumnOrSuperColumn().setCounter_column(thrift_column));
}
else
{
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/** Routes supercolumn conversion to the counter or standard variant by CF type. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal supercolumns to thrift SuperColumns. Supercolumns whose
 * subcolumns are all deleted (empty after conversion) are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Counter-CF analogue of thriftifySuperColumns: converts internal
 * supercolumns to thrift CounterSuperColumns, dropping empty ones.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
CounterSuperColumn superColumn = new CounterSuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Runs the read commands and converts each row's ColumnFamily into thrift
 * ColumnOrSuperColumn lists, keyed by the (raw) command key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by decorated key internally; re-decorate to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily to thrift results. With subcolumnsOnly set, the CF
 * is expected to hold a single supercolumn whose subcolumns are returned;
 * otherwise regular or super conversion is chosen by the CF type.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// the query targeted one supercolumn, so the map holds exactly that column
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/** Thrift entry point: slice of a single row, implemented as a 1-key multiget. */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/** Thrift entry point: slices for multiple rows; delegates to {@link #multigetSliceInternal}. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Reads exactly one column (or super column) identified by column_path.
 * Throws NotFoundException when the row or the named column is absent.
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// When column is null we are fetching a whole super column: the query path has
// no super-column component and the super column's name becomes the target name.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// We asked for exactly one name, so at most one column can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Thrift entry point: count the columns matching a predicate for one key.
 * Implemented by fetching the full slice and returning its size, so the cost
 * is the same as get_slice.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift entry point: per-key column counts for a set of keys.
 * Fetches the full slices via multigetSliceInternal and reports only their sizes.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    // Reduce each slice to its length.
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
    {
        counts.put(slice.getKey(), slice.getValue().size());
    }
    return counts;
}
/**
 * Validates and applies a single-column insert as one RowMutation.
 * Rejects inserts into super CFs that omit the super column name.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// 'false': this is not a counter operation.
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// Surface bad column values as an invalid request rather than a server error.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift entry point: single-column insert; delegates to internal_insert. */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations grouped by key and column family.
 * Standard and counter mutations for the same key are collected into separate
 * RowMutations because counters take a different write path (CounterMutation).
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
// Commutative default validator marks a counter CF; route to the counter mutation.
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
// A Mutation is a union of deletion and/or insertion; apply whichever is set.
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
// Only submit non-empty mutations; counters are wrapped for their own path.
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/** Thrift entry point: batch mutation; delegates to internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path and timestamp.
 * isCommutativeOp selects the counter deletion path (CounterMutation wrapper)
 * and the extra commutative-write consistency check.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift entry point: non-counter deletion; delegates to internal_remove. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies plain RowMutations through StorageProxy under the request scheduler.
 * Unlike the IMutation overload, this one also validates the consistency level
 * (and may therefore throw InvalidRequestException).
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
// Queue this thread with the request scheduler before doing the write.
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// Always release the scheduler slot, even on failure.
release();
}
}
/**
 * Applies arbitrary IMutations (including CounterMutations) through
 * StorageProxy under the request scheduler. Empty batches are a no-op.
 * NOTE(review): unlike the RowMutation overload above, this one performs no
 * consistency-level validation — presumably callers validate earlier; confirm.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
// Always release the scheduler slot, even on failure.
release();
}
}
/**
 * Thrift entry point: return the thrift KsDef for one keyspace, including all
 * of its column family definitions. Throws NotFoundException if unknown.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift entry point: slice columns across a contiguous range of keys.
 * The range may be expressed either as tokens (start_key == null) or as keys;
 * token ranges become a Range, key ranges a Bounds.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// Token-addressed range: parse both endpoints with the partitioner's factory.
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key-addressed range: convert keys to tokens and use inclusive Bounds.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlices, preserving row order.
 * A slice is rendered reversed iff the predicate carries a reversed slice_range.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    SliceRange sliceRange = predicate.slice_range;
    boolean reversed = sliceRange != null && sliceRange.reversed;
    boolean isSuper = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, isSuper, reversed)));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift entry point: return KsDefs for every known keyspace.
 * A keyspace whose metadata cannot be found is logged and skipped rather
 * than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String name : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(name));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + name + "'. Continuing... ");
        }
    }
    return defs;
}
/** Thrift entry point: the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift entry point: the thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift entry point: describe the token ring for a keyspace as a list of
 * (start token, end token, endpoints). Only non-system keyspaces have a ring.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
// Tokens are serialized with the partitioner's token factory for the wire.
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Thrift entry point: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift entry point: class name of the configured snitch.
 * A DynamicEndpointSnitch is unwrapped so the underlying snitch is reported.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift entry point: split the token range [start_token, end_token] of a CF
 * into sub-ranges of roughly keys_per_split keys, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/** Thrift entry point: authenticate this connection's ClientState. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Must be paired with release() in a finally block by every caller.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the resources acquired by schedule().
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
// Run apply + announce on the dedicated MIGRATION stage so schema changes are serialized.
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// Block until the migration completes so callers observe the result.
f.get();
}
catch (InterruptedException e)
{
// NOTE(review): interrupt status is not restored here; treated as impossible.
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Thrift entry point: add a column family. Requires schema agreement across
 * reachable nodes. Returns the new schema version UUID as a string.
 * Synchronized so concurrent schema changes on this server are serialized.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift entry point: drop a column family from the current keyspace.
 * Requires schema agreement; returns the new schema version UUID.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift entry point: create a keyspace together with its column families.
 * Each CfDef must name the keyspace being created. Requires schema agreement;
 * returns the new schema version UUID.
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift entry point: drop an entire keyspace.
 * Requires schema agreement; returns the new schema version UUID.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * Requires schema agreement; returns the new schema version UUID. */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through system_update_column_family instead.
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift entry point: modify an existing column family definition.
 * The CF must already exist; requires schema agreement; returns the new
 * schema version UUID.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// implicit defaults filled in before conversion (see CFMetaData.applyImplicitDefaults).
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-throw as InvalidRequestException, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws SchemaDisagreementException when reachable nodes report more than one
 * schema version; unreachable hosts are excluded from the comparison.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// NOTE(review): the @see below appears misplaced — it references
// CFMetaData.applyImplicitDefaults(), which is unrelated to truncation; confirm intent.
// @see CFMetaData.applyImplicitDefaults().
/**
 * Thrift entry point: blocking truncate of a column family in the current
 * keyspace. Timeouts and I/O failures surface as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Thrift entry point: bind this connection's ClientState to a keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift entry point: map of schema version to the hosts reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift entry point: increment a counter column.
 * The target CF must be a counter CF ('true' flag to validateColumnFamily);
 * super counter CFs require the super column name.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// Surface bad column values as an invalid request rather than a server error.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift entry point: delete a counter column. Counter deletes have no client
 * timestamp, so the server's current time is used; delegates to internal_remove
 * on the counter path.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift entry point: execute a CQL query shipped either as plain UTF-8 bytes
 * or GZIP(zlib)-compressed bytes, per the compression flag.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// Feed the inflater up to 1024 bytes at a time from the ByteBuffer.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
// NOTE(review): if the input is truncated (inflater never finishes and the
// buffer is exhausted), this loop would spin — presumably clients always send
// complete streams; confirm.
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure: report as a malformed query, keeping the cause.
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null collections.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
// Each thrift connection thread gets its own ClientState (keyspace, auth).
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Builds a server using the request scheduler configured in cassandra.yaml. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's ClientState (session keyspace and auth). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy under the request
 * scheduler and returns the results keyed by decorated row key.
 * ConsistencyLevel.ANY is rejected: it is a write-only consistency level.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// Always release the scheduler slot, even on failure.
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, dropping columns that are
 * marked deleted and propagating TTLs for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftified = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftified.setTtl(((ExpiringColumn) c).getTimeToLive());
        converted.add(thriftified);
    }
    return converted;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns, resolving each
 * counter context to its total and dropping deleted columns.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return converted;
}
/**
 * Converts live internal columns (counter or regular, including TTL'd ones) into
 * Thrift ColumnOrSuperColumn wrappers, optionally reversing the output order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (col instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
        }
        else
        {
            Column c = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
            if (col instanceof ExpiringColumn)
                c.setTtl(((ExpiringColumn) col).getTimeToLive());
            cosc.setColumn(c);
        }
        result.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Dispatches supercolumn conversion to the counter or regular variant depending
 * on whether the column family is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal supercolumns to Thrift SuperColumn wrappers, optionally
 * reversing the output order.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        // a supercolumn whose subcolumns are all deleted is omitted entirely
        List<Column> children = thriftifySubColumns(sc.getSubColumns());
        if (!children.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), children)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter supercolumns to Thrift CounterSuperColumn wrappers,
 * optionally reversing the output order.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        // a supercolumn whose subcolumns are all deleted is omitted entirely
        List<CounterColumn> children = thriftifyCounterSubColumns(sc.getSubColumns());
        if (!children.isEmpty())
            result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), children)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Executes the read commands and converts each resulting column family into its
 * Thrift representation, keyed by the raw (undecorated) row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by DecoratedKey, so re-decorate the command key to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// only SliceFromReadCommand carries the client's reversal flag; by-name reads are never reversed
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a column family read result into Thrift form: either the subcolumns
 * of a single supercolumn, whole supercolumns (counter-aware), or plain columns.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // caller asked for the children of one supercolumn; the read returned exactly that column
        IColumn first = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> children = first.getSubColumns();
        return (children == null || children.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(children, reverseOrder);
    }
    if (!cf.isSuper())
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
    boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
    return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
/**
 * Thrift endpoint: reads a slice of columns for a single key by delegating to the
 * multiget implementation with a singleton key list.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Thrift endpoint: reads slices for multiple keys after checking read permission
 * on the target column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Shared implementation for get(): validates the column path, issues a by-name
 * read for the single requested (super)column and returns it.
 *
 * @throws NotFoundException if the row or the named column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when a plain column is requested the supercolumn (if any) goes into the path;
// otherwise the supercolumn itself is the name being read
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Thrift endpoint: counts the live columns a get_slice with the same arguments
 * would return.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Thrift endpoint: per-key live column counts, computed from a multiget slice
 * with the same predicate.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Shared implementation for insert(): validates the target column family, parent
 * and column, then applies a single-column RowMutation.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// `false` forbids inserting into counter CFs via this path
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// surface value-encoding problems as a client error rather than a server fault
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: inserts a single column; delegates to internal_insert().
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Shared implementation for batch_mutate(): validates every mutation, splits each
 * row's mutations into a standard RowMutation and a counter RowMutation (the two
 * take different write paths), then applies everything in one doInsert() call.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// remember which column families were already permission-checked this call
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
// lazily create one counter RowMutation per row key
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
// lazily create one standard RowMutation per row key
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/**
 * Thrift endpoint: applies a batch of mutations; delegates to internal_batch_mutate().
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Shared implementation for remove()/remove_counter(): deletes the given path at
 * the given timestamp; counter deletes are wrapped in a CounterMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
// isCommutativeOp selects whether the CF must (or must not) be a counter CF
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: deletes a (super)column or row; delegates to internal_remove()
 * with the non-counter code path.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
=======
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
>>>>>>> YOURS
/**
 * Thrift endpoint: returns the schema definition of the given keyspace.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift endpoint: scans a range of rows (addressed by token or by key) and
 * returns each row's slice as defined by the predicate.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range -> Range bounds
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range -> Bounds on the keys' tokens
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
// acquire a request-scheduler slot; released in the finally
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts raw rows from a range/index scan into Thrift KeySlices, honoring the
 * predicate's slice-range reversal flag.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: returns definitions for every known keyspace; keyspaces whose
 * metadata cannot be found are logged and skipped.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
/**
 * Thrift endpoint: the configured cluster name.
 */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/**
 * Thrift endpoint: the Thrift API version constant.
 */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift endpoint: the token ring for a keyspace as (start token, end token,
 * endpoint list) triples.
 *
 * @throws InvalidRequestException for null, unknown, or system keyspaces
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/**
 * Thrift endpoint: fully-qualified class name of the configured partitioner.
 */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: fully-qualified class name of the configured snitch; for a
 * dynamic snitch, reports the wrapped subsnitch's class.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift endpoint: split points for the given token range of a column family,
 * sized so each split holds roughly keys_per_split keys.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range r = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, r, keys_per_split);
    List<String> result = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        result.add(tf.toString(t));
    return result;
}
/**
 * Thrift endpoint: authenticates this connection's session with the supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks until the request scheduler grants a slot; must be paired with release().
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); called from a finally block so slots are never leaked.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
// run apply+announce on the single-threaded MIGRATION stage to serialize schema changes
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// block until the migration has completed (or failed)
f.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Thrift endpoint: creates a column family after validating the definition and
 * cluster-wide schema agreement.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drops a column family from the current keyspace after checking
 * cluster-wide schema agreement.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: creates a keyspace (and its column families) after validating
 * the definitions and cluster-wide schema agreement.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drops a keyspace after checking cluster-wide schema agreement.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through system_update_column_family instead
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: updates an existing column family definition after checking it
 * exists and the cluster agrees on the schema.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws if the reachable live nodes report more than one schema version.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Thrift endpoint: blocking truncate of a column family in the current keyspace;
 * a timeout from replicas is reported as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
// acquire a request-scheduler slot; released in the finally
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/**
 * Binds this client session to the given keyspace after validating that it exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Returns a map of schema version to the hosts reporting that version,
 * as observed via StorageProxy.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Increments a counter column. Requires WRITE access on the column family.
 * Unlike regular inserts, the super column name is mandatory when the target
 * CF is a super column family, and the consistency level must be valid for
 * commutative (counter) writes.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
// counters are wrapped in a CounterMutation so they take the commutative write path
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Deletes a counter column or path. Delegates to internal_remove with
 * isCommutativeOp=true so counter-specific validation and CounterMutation apply.
 * NOTE(review): the server's current time is used as the delete timestamp rather
 * than a client-supplied one - counter deletes carry no client timestamp.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query supplied as (optionally GZIP-compressed) UTF-8 bytes.
 *
 * Fix: in the GZIP branch, the original guarded only the {@code lenRead}
 * assignment with {@code if (decompressor.needsInput())}; the subsequent
 * {@code query.get(...)} and {@code setInput(...)} ran on every loop iteration,
 * consuming buffer bytes and resetting the inflater's input even when it did
 * not need more input, corrupting the decompressed stream. All three statements
 * now execute only when the inflater actually needs input (per
 * java.util.zip.Inflater's needsInput/inflate contract).
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    if (decompressor.needsInput())
                    {
                        // feed the inflater the next chunk (at most 1024 bytes)
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of allocating per request.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (keyspace, auth) per client connection
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/**
 * Creates the Thrift server facade, obtaining the configured request scheduler.
 */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/**
 * Returns the per-connection session state for the calling thread.
 */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy under the request
 * scheduler and returns the results keyed by decorated row key.
 * Rejects ConsistencyLevel.ANY, which is write-only. Timeouts become
 * TimedOutException; I/O failures are treated as fatal (RuntimeException).
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// always release the scheduler slot, even on failure
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns to Thrift Columns, dropping tombstones and
 * carrying over the TTL for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    List<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            col.setTtl(((ExpiringColumn) c).getTimeToLive());
        converted.add(col);
    }
    return converted;
}
/**
 * Converts internal counter sub-columns to Thrift CounterColumns, dropping
 * tombstones. Each value is reduced to its total via the counter context.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    List<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return converted;
}
/**
 * Converts live columns into ColumnOrSuperColumn wrappers: counter columns
 * become counter_column entries, everything else becomes a regular column
 * (with TTL for expiring columns). The list is reversed afterwards for
 * reversed slices, because ColumnFamily always stores columns in natural order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                col.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(col);
        }
        result.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
// Dispatches supercolumn conversion to the counter or standard variant
// depending on whether the CF is a counter column family.
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts super columns to Thrift, skipping any whose live sub-columns are
 * all deleted. Reverses the output for reversed slices.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subs = thriftifySubColumns(sc.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Counter-CF analogue of thriftifySuperColumns: converts super columns whose
 * children are counters, skipping empties, reversing for reversed slices.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subs = thriftifyCounterSubColumns(sc.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and converts each result into Thrift columns,
 * keyed by the raw (undecorated) row key of the originating command.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// a reversed SliceFromReadCommand needs its thrift output reversed too
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result into Thrift columns. When subcolumnsOnly is
 * set (the query targeted a single super column), only the first column's
 * children are converted; otherwise super and standard CFs take their
 * respective conversion paths.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        IColumn first = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subs = first.getSubColumns();
        return (subs == null || subs.isEmpty()) ? EMPTY_COLUMNS : thriftifyColumns(subs, reverseOrder);
    }
    if (cf.isSuper())
    {
        // counter super CFs need the counter-specific conversion
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Returns a slice of columns for a single key; implemented as a one-key
 * multiget against the session keyspace. Requires READ access.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns slices for several keys at once, keyed by row key.
 * Requires READ access on the column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Fetches a single column or super column by exact path. Performs access and
 * metadata validation, issues a by-name read for the one target name, and
 * throws NotFoundException when the row or the column is absent.
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when a subcolumn is requested, the super column becomes part of the query path
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns matching the predicate for one key.
 * Implemented by materializing the full slice and taking its size, so cost is
 * proportional to the number of matching columns.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Counts matching columns for several keys at once. Like get_count, this
 * materializes each slice and reports its size.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 * The super column name is mandatory when the target CF is a super CF.
 * MarshalException from value validation surfaces as InvalidRequestException.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a single-column insert; delegates to internal_insert.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations. Per row key, standard-CF and
 * counter-CF mutations are accumulated into separate RowMutations (counters
 * are wrapped in a CounterMutation because they take a different write path).
 * Authorization is checked once per distinct column family.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
// lazily create one counter RowMutation per row key
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/**
 * Thrift entry point for batched mutations; delegates to internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion of the given column path at the given
 * timestamp. Counter deletes (isCommutativeOp) get commutative-write
 * validation and are wrapped in a CounterMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a (non-counter) delete; delegates to internal_remove.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
=======
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
>>>>>>> YOURS
/**
 * Returns the thrift KsDef for the named keyspace, including all of its
 * column family definitions and strategy options. Requires READ list access.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Returns slices for a contiguous range of rows. The range is expressed either
 * as tokens (start_key == null) or as keys; token ranges are exclusive-start
 * Ranges, key ranges are inclusive Bounds. Executed under the request scheduler.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows to thrift KeySlices, honoring a reversed slice_range
 * in the predicate when ordering each row's columns.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns KsDefs for every known keyspace. A keyspace whose metadata cannot be
 * found is logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
/**
 * Returns the configured cluster name.
 */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/**
 * Returns the Thrift API version constant.
 */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Returns the token ring for a non-system keyspace: each entry maps a token
 * range to the endpoints holding it.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/**
 * Returns the fully-qualified class name of the configured partitioner.
 */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch; when wrapped in a
 * DynamicEndpointSnitch, the underlying subsnitch class is reported instead.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Returns split tokens for the given CF so each split contains roughly
 * keys_per_split keys, over the token range (start_token, end_token].
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/**
 * Authenticates this session with the supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Must be paired with release() (typically in a finally block).
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources; counterpart of schedule().
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
/**
 * Submits the migration to the MIGRATION stage and blocks until it has been
 * applied and announced. Interruption is treated as a programming error.
 */
private static void applyMigrationOnStage(final Migration m)
{
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
f.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Adds a new column family via a schema migration.
 *
 * @return the new schema version UUID as a string
 * @throws InvalidRequestException if the definition is invalid or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema versions
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
// Both failure modes are surfaced to the client as InvalidRequestException,
// preserving the original cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace via a schema migration.
 *
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException if live nodes disagree on schema versions
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Adds a new keyspace (and any contained column families) via a schema migration.
 *
 * @return the new schema version UUID as a string
 * @throws InvalidRequestException if any definition is invalid or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema versions
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // Fixed typo in the error message: "CsDef" -> "CfDef".
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace via a schema migration.
 *
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException if live nodes disagree on schema versions
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing keyspace's replication settings, but does not allow
 * column family modifications (those must go through update_column_family).
 *
 * @return the new schema version UUID as a string
 * @throws InvalidRequestException if the request includes CF definitions or validation fails
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition via a schema migration.
 *
 * @return the new schema version UUID as a string
 * @throws InvalidRequestException if the CF does not exist or validation fails
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the rest of the defaults;
// see CFMetaData.applyImplicitDefaults().
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws if the live members of the cluster report more than one schema version.
 * @throws SchemaDisagreementException when reachable nodes disagree on schema
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Truncates (removes all data from) the named column family in the session's
 * keyspace, blocking until all replicas have acknowledged.
 *
 * @throws UnavailableException if the truncate times out or an I/O error occurs
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Binds this connection's session to the given (existing) keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    // Reject unknown keyspaces before mutating session state.
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/** @return map of schema version -> list of endpoints reporting that version. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    Map<String, List<String>> versions = StorageProxy.describeSchemaVersions();
    return versions;
}
// counter methods
/**
 * Increments a counter column by column.value at the given path.
 *
 * @throws InvalidRequestException if the CF is not a counter CF, the super
 *         column is missing for a super CF, or the value fails marshalling
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
// Counter writes must be wrapped in a CounterMutation.
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/** Removes a counter column; shares the standard removal path with the commutative flag set. */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    long deletionTimestamp = System.currentTimeMillis();
    internal_remove(key, path, deletionTimestamp, consistency_level, true);
}
/**
 * Executes a CQL query supplied as a (possibly GZIP/deflate-compressed) UTF-8
 * byte buffer and returns the result.
 *
 * @throws InvalidRequestException on bad compression data, bad encoding, or a
 *         malformed/invalid CQL statement
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// NOTE(review): when needsInput() is false, the previous lenRead is reused
// and query.get() still consumes bytes — confirm this matches the intended
// streaming-inflate protocol; also, an exhausted buffer with an unfinished
// stream would loop with lenRead == 0.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure -> client error with the parse exception as cause.
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
Unstructured
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of allocating new lists.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (keyspace, auth);
// one Thrift connection is served by one worker thread at a time.
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Captures the request scheduler configured for this node. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** @return the per-connection (thread-local) session state. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy (gated by the request
 * scheduler) and maps each resulting row's key to its column family.
 *
 * @throws InvalidRequestException if consistency level is ANY (writes-only level)
 * @throws TimedOutException if the replicas did not respond in time
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts live internal subcolumns to their Thrift representation,
 * skipping tombstones; returns a shared empty list for no input.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
        converted.add(thriftCol);
    }
    return converted;
}
/**
 * Converts live internal counter subcolumns to Thrift CounterColumns,
 * resolving each counter context to its total; tombstones are skipped.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return converted;
}
/**
 * Converts live internal columns (regular or counter) to Thrift
 * ColumnOrSuperColumn structs, optionally reversing the result.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column thriftCol = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(thriftCol);
        }
        converted.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/** Dispatches supercolumn conversion: counter CFs need CounterSuperColumn structs. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal supercolumns to Thrift; a supercolumn whose live
 * subcolumns are all deleted is omitted entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        SuperColumn thriftSc = new SuperColumn(sc.name(), subcolumns);
        converted.add(new ColumnOrSuperColumn().setSuper_column(thriftSc));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Converts internal counter supercolumns to Thrift; a supercolumn whose live
 * subcolumns are all deleted is omitted entirely.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        CounterSuperColumn thriftSc = new CounterSuperColumn(sc.name(), subcolumns);
        converted.add(new ColumnOrSuperColumn().setCounter_super_column(thriftSc));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Runs the given read commands and converts each row's result to Thrift
 * structs, keyed by the raw (undecorated) row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
// Reversed slices must be un-reversed here since ColumnFamily stores natural order.
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result to Thrift structs, choosing the conversion
 * based on whether the query targeted a supercolumn's subcolumns, a super CF,
 * or a standard CF.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// Query was scoped to one supercolumn: the CF holds exactly that supercolumn.
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/** Single-key slice: implemented as a multiget over a one-element key list. */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ByteBuffer> singleKey = Collections.singletonList(key);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result = multigetSliceInternal(state().getKeyspace(), singleKey, column_parent, predicate, consistency_level);
    return result.get(key);
}
/** Thrift entry point for multi-key slices; delegates after the access check. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
>>>>>>> YOURS
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/** Thrift entry point; all work happens in internal_get(). */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    logger.debug("get");
    ColumnOrSuperColumn result = internal_get(key, column_path, consistency_level);
    return result;
}
/** Counts columns matching the predicate; implemented on top of get_slice. */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/** Per-key column counts: reuses the multiget slice path, then sizes each list. */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
    {
        counts.put(entry.getKey(), entry.getValue().size());
    }
    return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 *
 * @throws InvalidRequestException if validation fails, the super column is
 *         missing for a super CF, or the value fails marshalling
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift entry point; all work happens in internal_insert(). */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations, splitting each row's mutations
 * into a standard RowMutation and a CounterMutation (counter CFs take a
 * different write path).
 *
 * Improvement: column families already authorized are tracked in a HashSet
 * instead of a List, avoiding O(n) contains() per mutation entry.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: check each CF only once per batch.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/** Thrift entry point; all work happens in internal_batch_mutate(). */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path.
 *
 * @param isCommutativeOp true for counter deletes, which require commutative
 *        validation and a CounterMutation wrapper
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift entry point for non-counter deletes; delegates to internal_remove(). */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
=======
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
>>>>>>> YOURS
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Returns the Thrift definition of the named keyspace, including all of its
 * column family definitions and strategy options.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift endpoint: fetch slices of columns for a contiguous range of rows.
 * The range may be expressed either as tokens (start_key == null) or as keys.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-based range: a Range is start-exclusive/end-inclusive
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-based range: a Bounds includes both endpoints
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into Thrift KeySlices, honoring a reversed slice range
 * so columns come back in the order the client requested.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
List<KeySlice> keySlices = new ArrayList<KeySlice>(rows.size());
boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
for (Row row : rows)
{
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
keySlices.add(new KeySlice(row.key.key, thriftifiedColumns));
}
return keySlices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: return schema definitions for all defined keyspaces.
 * A keyspace whose metadata cannot be found is logged and skipped rather than failing the call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
Set<String> keyspaces = DatabaseDescriptor.getTables();
List<KsDef> ksset = new ArrayList<KsDef>();
for (String ks : keyspaces)
{
try
{
ksset.add(describe_keyspace(ks));
}
catch (NotFoundException nfe)
{
logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
}
}
return ksset;
}
/** Thrift endpoint: the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift endpoint: the Thrift API version string (not the server release version). */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift endpoint: the token ring for a keyspace, as (start_token, end_token, endpoints) triples.
 * Only non-system keyspaces have a ring; others are rejected.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Thrift endpoint: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: class name of the configured snitch.
 * For a DynamicEndpointSnitch, reports the wrapped (sub) snitch's class instead.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift endpoint: split the token range (start_token, end_token] into sub-ranges of
 * roughly keys_per_split keys each, returned as token strings (used by Hadoop input splits).
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/** Thrift endpoint: authenticate this connection's thread-local ClientState. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks in the request scheduler until a slot is granted; must be paired with release().
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Must be called (typically from a finally block) after every successful schedule().
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Run apply+announce on the MIGRATION stage so schema changes are serialized;
    // parameterized Callable/Future replace the raw types used previously.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration has completed on the stage
        f.get();
    }
    catch (InterruptedException e)
    {
        // restore the interrupt flag before converting to an error so callers
        // further up the stack can still observe the interruption
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift endpoint: create a new column family.
 * Synchronized so concurrent schema changes from this coordinator are serialized.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drop a column family from the current keyspace.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: create a new keyspace together with any contained column families.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drop an entire keyspace.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: modify an existing column family definition.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-changing requests while live nodes disagree on the schema version.
 * @throws SchemaDisagreementException if more than one schema version is reported by reachable nodes
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: truncate (delete all data from) a column family, blocking until
 * all live replicas have acknowledged. Failures surface as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Thrift endpoint: bind this connection's ClientState to the given (validated) keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift endpoint: map of schema version -> hosts reporting it (includes unreachable marker). */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increment a counter column.
 * The target CF must be a counter CF (validateColumnFamily with isCommutativeOp=true),
 * and the mutation is wrapped in a CounterMutation for the counter write path.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: delete a counter column. Uses the server's wall clock as the
 * deletion timestamp and the commutative (counter) removal path.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: execute a CQL query, optionally GZIP-compressed (zlib/Deflate stream).
 * The query bytes are decompressed/decoded to a UTF-8 string and handed to QueryProcessor.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// NOTE(review): when needsInput() is false, lenRead keeps its previous value and
// query.get() is still called — this assumes the inflater always consumes its
// input fully between iterations; confirm against Inflater semantics.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure -> client error, with the parser exception as the cause
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
// main method moved to CassandraDaemon
}/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null from the thriftify* helpers.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
// Accessor for the per-connection (thread-local) session state.
public ClientState state()
{
return clientState.get();
}
/**
 * Executes read commands through StorageProxy (inside a scheduler slot) and returns
 * the results keyed by decorated row key. ConsistencyLevel.ANY is a write-only level
 * and is rejected up front.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts subcolumns of a supercolumn into Thrift Columns, skipping tombstones
 * and carrying over TTLs from expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_SUBCOLUMNS;
}
ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts counter subcolumns into Thrift CounterColumns, skipping tombstones.
 * The stored counter context is reduced to its total value for the client.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_COUNTER_SUBCOLUMNS;
}
ArrayList<CounterColumn> thriftColumns = new ArrayList<CounterColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
assert column instanceof org.apache.cassandra.db.CounterColumn;
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts standard or counter columns into Thrift ColumnOrSuperColumn wrappers,
 * skipping tombstones and reversing the list when the client asked for reversed order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
if (column instanceof org.apache.cassandra.db.CounterColumn)
{
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(new ColumnOrSuperColumn().setCounter_column(thrift_column));
}
else
{
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/**
 * Converts supercolumns to Thrift form, dispatching to the counter-specific
 * conversion when the column family holds counters.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts standard supercolumns into Thrift SuperColumns, dropping supercolumns
 * whose subcolumns are all deleted.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Converts counter supercolumns into Thrift CounterSuperColumns, dropping
 * supercolumns whose subcolumns are all deleted.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
CounterSuperColumn superColumn = new CounterSuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Runs the read commands and converts each result to Thrift form, keyed by the
 * original (undecorated) row key of its command.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result to Thrift form. With subcolumnsOnly set the query
 * targeted a single supercolumn, so only that supercolumn's children are returned;
 * otherwise dispatches on whether the CF is super or standard.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/**
 * Thrift endpoint: slice of columns for a single key; implemented as a
 * single-key multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Thrift endpoint: slices of columns for multiple keys in one round trip.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
>>>>>>> YOURS
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/** Thrift endpoint: fetch a single column or supercolumn by path. */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift endpoint: count of live columns matching a predicate for one key.
 * Implemented by fetching the slice and counting client-side.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift endpoint: per-key live-column counts for multiple keys, computed from
 * a multiget slice.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
counts.put(cf.getKey(), cf.getValue().size());
}
return counts;
}
/**
 * Validates and applies a single standard-column insert (non-counter CF only:
 * validateColumnFamily is called with isCommutativeOp=false).
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// bad column value for the CF's validator -> client error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: inserts one column; all validation lives in internal_insert. */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations, keyed by row key and column family.
 * Standard and counter mutations for the same row are split into separate
 * RowMutation objects because counters take a different write path.
 *
 * Improvement: cfamsSeen was a List probed with contains() (O(n) per lookup);
 * a HashSet gives O(1) membership checks with identical semantics.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: only check each CF once per batch
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        // counter mutations are wrapped so the counter-specific write path is taken
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/** Thrift endpoint: batch mutation; all work happens in internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion; shared by remove() and remove_counter().
 * isCommutativeOp toggles counter-specific validation and CounterMutation wrapping.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
// counter deletes must go through the CounterMutation write path
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: deletes a column/super column/row at the given timestamp. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    // false: this is not a counter operation
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
=======
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
>>>>>>> YOURS
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/** Returns the thrift KsDef for the given keyspace, or NotFoundException if unknown. */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();
    // convert each internal CF definition to its thrift counterpart
    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef result = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    result.setStrategy_options(ksm.strategyOptions);
    return result;
}
/**
 * Thrift endpoint: reads a contiguous range of rows. Token-based ranges (start_key
 * null) use Range; key-based ranges use Bounds, which includes both endpoints.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token range: half-open (left, right]
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key range: closed on both ends
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
// release the scheduler slot even if the read fails
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/** Converts internal rows into thrift KeySlices, honoring slice reversal. */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/** Lists KsDefs for all known keyspaces; keyspaces dropped mid-iteration are skipped. */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> result = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            result.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            // race with a concurrent drop: log and continue with the rest
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return result;
}
/** Thrift endpoint: the configured cluster name. */
public String describe_cluster_name() throws TException
{
    String name = DatabaseDescriptor.getClusterName();
    return name;
}
/** Thrift endpoint: the thrift API version string. */
public String describe_version() throws TException
{
    String version = Constants.VERSION;
    return version;
}
/** Thrift endpoint: token ranges and their replica endpoints for a non-system keyspace. */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/** Thrift endpoint: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    Class<?> partitionerClass = StorageService.getPartitioner().getClass();
    return partitionerClass.getName();
}
/** Thrift endpoint: class name of the snitch, unwrapping DynamicEndpointSnitch to its delegate. */
public String describe_snitch() throws TException
{
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        snitch = ((DynamicEndpointSnitch) snitch).subsnitch;
    return snitch.getClass().getName();
}
/** Thrift endpoint: split tokens inside (start_token, end_token] with ~keys_per_split keys each. */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tf.toString(token));
    return splits;
}
/** Thrift endpoint: authenticates this connection's thread-local client state. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    Map<String, String> credentials = auth_request.getCredentials();
    state().login(credentials);
}
/**
 * Schedule the current thread for access to the required services,
 * queueing it under this client's scheduling id.
 */
private void schedule()
{
    Thread current = Thread.currentThread();
    requestScheduler.queue(current, state().getSchedulingValue());
}
/**
 * Release count for the used up resources; must pair with a prior schedule().
 */
private void release()
{
    requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
//
// Improvements: parameterized the raw Callable/Future types, and restored the
// interrupt flag before failing so callers/thread pools can observe it.
private static void applyMigrationOnStage(final Migration m)
{
    Future<Object> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration has been applied and announced
        f.get();
    }
    catch (InterruptedException e)
    {
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift endpoint: creates a column family cluster-wide via a migration.
 * Synchronized so concurrent schema changes from this server are serialized.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate server-side config errors into client-visible request errors, keeping the cause
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drops a column family from the current keyspace via a migration.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: creates a keyspace (and any contained CFs) via a migration.
 * Fix: the mismatch error message said "CsDef"; the type is CfDef.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // surface as a client error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drops an entire keyspace via a migration.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through the dedicated CF endpoints
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: updates an existing column family definition via a migration.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// translate to a client-visible error, preserving the cause chain
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** Throws if live nodes report more than one schema version; unreachable hosts are ignored. */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (reachableVersions.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: blocking cluster-wide truncate of a column family.
 * Failures are surfaced as UnavailableException with the original cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// always release the scheduler slot acquired by schedule()
release();
}
}
/** Thrift endpoint: validates the keyspace name and binds it to this connection's state. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    ClientState cs = state();
    cs.setKeyspace(keyspace);
}
/** Thrift endpoint: schema version -> hosts map, as reported by StorageProxy. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    Map<String, List<String>> versions = StorageProxy.describeSchemaVersions();
    return versions;
}
// counter methods
/**
 * Thrift endpoint: increments a counter column. Counter writes always go
 * through CounterMutation rather than the plain RowMutation path.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// report marshalling problems as a client error
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/** Thrift endpoint: deletes a counter; server time is used since counters carry no client timestamp. */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    long now = System.currentTimeMillis();
    internal_remove(key, path, now, consistency_level, true);
}
/**
 * Thrift endpoint: decompresses (if requested) and executes a CQL query string.
 *
 * Fix: in the GZIP path only the lenRead assignment was guarded by
 * needsInput() — query.get()/setInput() ran unconditionally every iteration,
 * which could consume buffer bytes and clobber input the inflater had not yet
 * processed. The three statements are now braced under the guard.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // feed the inflater only when it has exhausted its current input
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // ANTLR parse failure -> client error, cause preserved
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
Diff Result
No diff
Case 9 - java_cassandra.rev_446e3_fed37..CassandraServer.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
// class-wide logger for request tracing
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared immutable empty results, returned to avoid allocating for misses
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Pulls the configured scheduler from DatabaseDescriptor at construction time.
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
// Accessor for this connection's thread-local session state.
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy and maps each resulting
 * row's key to its column family. CL.ANY is rejected because it is write-only.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// release the scheduler slot whether or not the read succeeded
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/** Converts live subcolumns to thrift Columns; tombstoned columns are skipped. */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    List<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftColumn = new Column(c.name(), c.value(), c.timestamp());
        // surface the TTL only for expiring columns
        if (c instanceof ExpiringColumn)
            thriftColumn.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(thriftColumn);
    }
    return result;
}
/** Wraps live columns in ColumnOrSuperColumn, reversing at the end when requested. */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftColumn = new Column(c.name(), c.value(), c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftColumn.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(new ColumnOrSuperColumn().setColumn(thriftColumn));
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/** Converts super columns to thrift, dropping any whose subcolumns are all deleted. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn superColumnIn : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(superColumnIn.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        SuperColumn sc = new SuperColumn(superColumnIn.name(), subcolumns);
        result.add(new ColumnOrSuperColumn().setSuper_column(sc));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the read commands and converts each resulting column family to thrift,
 * keyed by the raw request key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// results are keyed by DecoratedKey internally; re-decorate to look them up
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/** Converts a column family to thrift; subcolumnsOnly unwraps a single super column's children. */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // caller addressed a single super column: return its subcolumns directly
        IColumn first = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = first.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }
    return cf.isSuper()
         ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
         : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/** Thrift endpoint: single-key slice, implemented as a one-key multiget. */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ByteBuffer> singleKey = Collections.singletonList(key);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result =
        multigetSliceInternal(state().getKeyspace(), singleKey, column_parent, predicate, consistency_level);
    return result.get(key);
}
/** Thrift endpoint: multi-key slice; delegates to multigetSliceInternal after access check. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
/**
 * Builds one read command per key — by-name or by-range depending on the
 * predicate — and executes them via getSlice().
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
// explicit column names -> by-names reads
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
// otherwise a contiguous range of columns
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Fetches a single column or super column named by {@code column_path}.
 *
 * @throws NotFoundException when the row is missing or the named (super) column
 *         is absent or deleted
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
// column == null means the client named a super column itself: query it by name
// with no super-column scope. Otherwise scope the path to the super column (may
// be null for a standard CF) and look up the leaf column by name.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// Exactly one name was queried, so at most one result can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    // Counting is implemented by fetching the slice and measuring it.
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Returns, per requested key, the number of columns matching the predicate.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices =
        multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
    // Reduce each key's slice to its size.
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Inserts (or overwrites) one column under the given key and parent.
 *
 * @throws InvalidRequestException on validation failure, including a missing
 *         super column name when the target CF is a super column family
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    // The super_column field is usually optional, but not when inserting into a super CF.
    if (DatabaseDescriptor.getColumnFamilyType(keyspace, column_parent.column_family) == ColumnFamilyType.Super
        && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(keyspace, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(keyspace, column_parent.column_family, column);

    RowMutation mutation = new RowMutation(keyspace, key);
    try
    {
        mutation.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // Value failed the CF's validator; report it as a client error.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(mutation));
}
/**
 * Applies a batch of mutations, grouped by row key and then column family.
 * Every mutation is authorized and validated before any row mutation is applied.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // Column families already authorized; a Set makes the membership test O(1)
    // (the previous List.contains was O(n) inside a nested loop).
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry : mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: add() returns false once the CF has been seen.
            if (cfamsSeen.add(cfName))
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Deletes the column, super column, or row named by {@code column_path}
 * by writing a tombstone at the given timestamp.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
    // A deletion is just a mutation carrying a tombstone for the given path.
    RowMutation mutation = new RowMutation(state().getKeyspace(), key);
    mutation.delete(new QueryPath(column_path), timestamp);
    doInsert(consistency_level, Arrays.asList(mutation));
}
/**
 * Applies the given mutations through StorageProxy under the request
 * scheduler, translating an internal timeout into a thrift TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
// Wait for our turn under the configured request scheduler.
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// Always release the scheduler slot, even on failure.
release();
}
}
/**
 * Returns the thrift definition of one keyspace, including its column families.
 *
 * @throws NotFoundException when no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();
    // Convert each internal CF definition to its thrift form.
    List<CfDef> cfDefs = new ArrayList<CfDef>(ksm.cfMetaData().size());
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), ksm.replicationFactor, cfDefs);
    def.setStrategy_options(ksm.strategyOptions);
    return def;
}
/**
 * Reads rows over a key or token range and converts them to thrift KeySlices.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// No start key: the client supplied a token range (start/end tokens).
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key range: map the keys to tokens under the current partitioner.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
// Run the range scan under the request scheduler.
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows to thrift KeySlices, honoring a reversed slice range.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    // Reversal only applies when the predicate is a reversed slice range.
    boolean reversed = predicate.slice_range == null ? false : predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> columns = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, columns));
    }
    return slices;
}
/**
 * Queries rows via a secondary index clause and returns matching slices.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("scan");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
    ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);

    List<Row> rows;
    try
    {
        rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
    }
    catch (TimeoutException e)
    {
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        // Unexpected storage error; not part of the thrift contract.
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists the definitions of all known keyspaces.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            // Skip keyspaces whose metadata is missing instead of failing the listing.
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return defs;
}
/**
 * Returns the cluster name from configuration.
 */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/**
 * Returns the API version string from the generated thrift Constants.
 */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: each token range with the
 * endpoints that hold replicas for it.
 *
 * @throws InvalidRequestException for a null or non-user keyspace
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/**
 * Returns the fully-qualified class name of the configured partitioner.
 */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch. A DynamicEndpointSnitch
 * is a wrapper, so its wrapped (sub) snitch is reported instead.
 */
public String describe_snitch() throws TException
{
    // Fetch the snitch once instead of calling the config getter repeatedly.
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        return ((DynamicEndpointSnitch) snitch).subsnitch.getClass().getName();
    return snitch.getClass().getName();
}
/**
 * Computes split tokens for the given CF and token range, aiming for roughly
 * keys_per_split keys per split, and returns them as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tf.toString(token));
    return splits;
}
/**
 * Authenticates this connection's session with the supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Paired with release(); callers invoke release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release the scheduler slot acquired by schedule().
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Submit apply+announce to the MIGRATION stage and block until it completes.
    // Generic Future<?>/Callable<Object> replace the raw types used previously.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt flag before failing so the interruption
        // is not silently swallowed (the previous code dropped it).
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Adds a new column family to the current keyspace.
 * Returns the resulting schema version id as a string.
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        Migration migration = new AddColumnFamily(convertToCFMetaData(cf_def));
        applyMigrationOnStage(migration);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface migration problems as client errors, preserving the cause.
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/**
 * Drops a column family from the current keyspace.
 * Returns the resulting schema version id as a string.
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        Migration migration = new DropColumnFamily(state().getKeyspace(), column_family);
        applyMigrationOnStage(migration);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface migration problems as client errors, preserving the cause.
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/**
 * Creates a new keyspace together with any column families defined with it.
 * Returns the resulting schema version id as a string.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // Message previously said "CsDef"; fixed to "CfDef".
            throw new InvalidRequestException("CfDef (" + cf.getName() + ") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor,
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface migration problems as client errors, preserving the cause.
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace.
 * Returns the resulting schema version id as a string.
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        Migration migration = new DropKeyspace(keyspace);
        applyMigrationOnStage(migration);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface migration problems as client errors, preserving the cause.
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/** update an existing keyspace, but do not allow column family modifications. */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        // No CF metadata is passed: only keyspace-level attributes are updated.
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor);
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Surface migration problems as client errors, preserving the cause.
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
    catch (IOException e)
    {
        InvalidRequestException ire = new InvalidRequestException(e.getMessage());
        ire.initCause(e);
        throw ire;
    }
}
/**
 * Updates an existing column family definition.
 * Returns the resulting schema version id as a string.
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// Fill unset optional fields with defaults before converting; ideally this
// would happen on the migration stage along with apply().
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Surface migration problems as client errors, preserving the cause.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Rejects schema-changing requests while live nodes disagree on the schema.
 *
 * @throws InvalidRequestException when more than one schema version is reported
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
    // Hosts that cannot be reached do not count towards disagreement.
    Map<String, List<String>> reachableVersions = Maps.filterKeys(
        StorageProxy.describeSchemaVersions(),
        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    boolean agreed = reachableVersions.size() <= 1;
    if (!agreed)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a thrift CfDef into internal CFMetaData: fills unset optional
 * fields with CFMetaData defaults, validates compaction thresholds and
 * memtable settings, and resolves comparator/validator classes.
 *
 * @throws InvalidRequestException for an unrecognized column_type
 * @throws ConfigurationException when comparator or validator classes cannot be resolved
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
// Optional thrift fields fall back to CFMetaData defaults when unset.
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Truncates a column family in the current keyspace, blocking until done.
 *
 * @throws UnavailableException when truncation times out or fails with an I/O error
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (TimeoutException e)
    {
        // Not every replica acknowledged in time; report as unavailable.
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/**
 * Validates the keyspace name and binds it to this connection's session.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Returns schema versions reported across the cluster: a map from schema
 * version id to the endpoints reporting that version.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (keyspace, login credentials)
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/**
 * Creates the server, picking up the request scheduler from configuration.
 */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/**
 * Returns the per-connection (thread-local) client session state.
 */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands under the request scheduler and returns
 * the resulting rows keyed by decorated key.
 *
 * @throws InvalidRequestException for ConsistencyLevel.ANY (write-only level)
 * @throws TimedOutException when the read times out internally
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// Wait for our turn under the request scheduler before reading.
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// Always release the scheduler slot.
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to thrift Columns, dropping tombstones and
 * carrying over TTLs from expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn column : columns)
    {
        // Deleted columns are never returned to clients.
        if (column.isMarkedForDelete())
            continue;
        Column c = new Column(column.name(), column.value(), column.timestamp());
        if (column instanceof ExpiringColumn)
            c.setTtl(((ExpiringColumn) column).getTimeToLive());
        result.add(c);
    }
    return result;
}
/**
 * Converts internal columns to thrift ColumnOrSuperColumn wrappers, dropping
 * tombstones, carrying over TTLs, and reversing when requested.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        // Deleted columns are never returned to clients.
        if (column.isMarkedForDelete())
            continue;
        Column c = new Column(column.name(), column.value(), column.timestamp());
        if (column instanceof ExpiringColumn)
            c.setTtl(((ExpiringColumn) column).getTimeToLive());
        result.add(new ColumnOrSuperColumn().setColumn(c));
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal super columns to thrift form; super columns whose
 * subcolumns are all deleted are omitted entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
        // A super column with no live subcolumns is not returned.
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(column.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Executes the read commands and thriftifies each command's resulting row,
 * keyed by the original (undecorated) key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> results = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand command : commands)
    {
        // Look up the row that was read for this command's key.
        ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
        boolean reversed = command instanceof SliceFromReadCommand && ((SliceFromReadCommand) command).reversed;
        results.put(command.key, thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reversed));
    }
    return results;
}
/**
 * Converts a row to thrift results: subcolumns of the queried super column
 * when subcolumnsOnly is set, otherwise the row's (super) columns.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        // A single super column was queried; return its subcolumns only.
        IColumn first = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = first.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }

    return cf.isSuper()
         ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
         : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Returns the slice of columns for a single key, per the given predicate.
 * Implemented as a one-element multiget.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result =
        multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level);
    return result.get(key);
}
/**
 * Returns, for each requested key, the slice of columns matching the predicate.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    // Authorize before doing any work, then delegate to the shared implementation.
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
/**
 * Builds one ReadCommand per key (by-name or by-range, depending on the
 * predicate) and executes them as a batch via getSlice().
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Validate the target and the predicate before building any commands.
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);

    boolean byNames = predicate.column_names != null;
    SliceRange sliceRange = predicate.slice_range;
    List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(key);
        if (byNames)
            commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
        else
            commands.add(new SliceFromReadCommand(keyspace, key, column_parent, sliceRange.start, sliceRange.finish, sliceRange.reversed, sliceRange.count));
    }
    return getSlice(commands, consistency_level);
}
/**
 * Fetches a single column or super column named by {@code column_path}.
 *
 * @throws NotFoundException when the row is missing or the named (super) column
 *         is absent or deleted
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
// column == null means the client named a super column itself: query it by name
// with no super-column scope. Otherwise scope the path to the super column (may
// be null for a standard CF) and look up the leaf column by name.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// Exactly one name was queried, so at most one result can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    // Counting is implemented by fetching the slice and measuring it.
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Per-key column counts for a set of rows: runs one multiget slice and maps
 * each key to the size of its returned column list.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
    {
        counts.put(slice.getKey(), slice.getValue().size());
    }
    return counts;
}
/**
 * Inserts (or overwrites) a single column under the given parent.
 *
 * Improvement: the original re-read {@code state().getKeyspace()} (a
 * ThreadLocal lookup) six separate times; it is hoisted into a local once,
 * matching the style of the other read/write methods in this class.
 *
 * @throws InvalidRequestException on validation failure, a missing super
 *         column name for a super CF, or a MarshalException from the mutation
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (DatabaseDescriptor.getColumnFamilyType(keyspace, column_parent.column_family) == ColumnFamilyType.Super
        && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(keyspace, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(keyspace, column_parent.column_family, column);
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // Surface marshalling problems as a client error rather than a server fault.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Applies a batch of mutations, grouped key -> column family -> mutations,
 * as one RowMutation per key.
 *
 * Improvements: the seen-CF dedup used {@code List.contains}, making the
 * authorization check O(n^2) across column families — replaced with a
 * {@code HashSet} whose {@code add} return value doubles as the membership
 * test; the repeated {@code state().getKeyspace()} lookup is hoisted.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    String keyspace = state().getKeyspace();
    // Avoid unneeded authorizations: each CF is checked at most once per request.
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry : mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Set.add returns true only the first time a CF name is seen.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(keyspace, cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(keyspace, key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
// Deletes the column, super column, or entire row addressed by column_path
// (validateColumnPathOrParent allows the path to stop at any level) by writing
// a tombstone at the given timestamp.
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
ThriftValidation.validateKey(key);
ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
// Deletes go through the same write path (and scheduler) as inserts.
doInsert(consistency_level, Arrays.asList(rm));
}
// Common write path: applies the mutations via StorageProxy at the requested
// consistency level. The scheduler slot taken by schedule() is always returned
// by release() in the finally block, even on timeout; TimeoutException is
// translated into thrift's TimedOutException for the client.
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Returns the thrift KsDef for the named keyspace, including a thrift CfDef
 * for each of its column families.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData metadata = DatabaseDescriptor.getTableDefinition(table);
    if (metadata == null)
        throw new NotFoundException();
    List<CfDef> thriftCfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : metadata.cfMetaData().values())
    {
        thriftCfDefs.add(CFMetaData.convertToThrift(cfm));
    }
    KsDef result = new KsDef(metadata.name, metadata.strategyClass.getName(), metadata.replicationFactor, thriftCfDefs);
    result.setStrategy_options(metadata.strategyOptions);
    return result;
}
// Returns a contiguous range of rows, each with the columns selected by the
// predicate. The range is either token-addressed (start_key == null, using a
// Range) or key-addressed (using a Bounds); the two bounds types presumably
// differ in endpoint inclusivity/wrapping — confirm against the dht package.
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// Token-addressed range: parse the client-supplied token strings.
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key-addressed range: map keys onto tokens via the partitioner.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
// Acquire a scheduler slot only around the actual storage read.
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlices, honoring a reversed slice
 * range and restricting to subcolumns when a super column parent was given.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    }
    return slices;
}
// Secondary-index scan: returns rows matching the index clause, each carrying
// the columns selected by column_predicate. Unlike get_range_slices this path
// does not go through schedule()/release() — NOTE(review): confirm whether
// that is intentional or an oversight in this revision.
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Describes every defined keyspace. A keyspace that disappears between the
 * listing and its individual lookup is logged and skipped rather than failing
 * the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return definitions;
}
// Returns the cluster name from the node's configuration.
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
// Returns the thrift API version constant (not the Cassandra release version).
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: one TokenRange (start token, end
 * token, endpoint list) per range-to-endpoint mapping.
 *
 * @throws InvalidRequestException for null or non-user keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> tokenRanges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        tokenRanges.add(new TokenRange(tokenFactory.toString(r.left), tokenFactory.toString(r.right), entry.getValue()));
    }
    return tokenRanges;
}
// Returns the fully-qualified class name of the configured partitioner.
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch. When the node runs the
 * dynamic snitch wrapper, the wrapped (sub)snitch's class is reported instead.
 *
 * Improvement: the original called DatabaseDescriptor.getEndpointSnitch()
 * up to three times; it is now fetched once into a local.
 */
public String describe_snitch() throws TException
{
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        return ((DynamicEndpointSnitch) snitch).subsnitch.getClass().getName();
    return snitch.getClass().getName();
}
/**
 * Splits the (start_token, end_token] range of the given column family into
 * sub-ranges of roughly keys_per_split keys, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory();
    Range tokenRange = new Range(tokenFactory.fromString(start_token), tokenFactory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, tokenRange, keys_per_split);
    List<String> result = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
    {
        result.add(tokenFactory.toString(token));
    }
    return result;
}
// Authenticates the current connection's ClientState with the supplied
// credentials; failures surface as AuthenticationException/AuthorizationException.
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
* Schedule the current thread for access to the required services.
* Must be paired with a matching release() (callers use try/finally).
*/
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
* Release count for the used up resources.
* Counterpart to schedule(); invoked from finally blocks so the scheduler
* slot is returned even when the request times out or fails.
*/
private void release()
{
requestScheduler.release();
}
/**
 * Helper to apply a schema migration on the MIGRATION stage and block until
 * it completes. Typical migration failures throw an InvalidRequestException
 * from the caller; atypical failures surface here as RuntimeException.
 *
 * Improvement: the original used raw {@code Future}/{@code Callable} types;
 * they are now parameterized, eliminating unchecked warnings.
 */
private static void applyMigrationOnStage(final Migration m)
{
    Future<Object> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Migration submission is not expected to be interrupted; treat as a bug.
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // Preserve the underlying cause for diagnosis.
        throw new RuntimeException(e);
    }
}
// Adds a column family to the current keyspace. synchronized serializes schema
// migrations within this server; validateSchemaAgreement refuses to start a
// migration while the cluster's schema versions still disagree. Returns the
// new schema version UUID as a string.
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-wrap as a client-visible error, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Drops a column family from the current keyspace. Serialized and
// agreement-gated like the other schema mutations; returns the new schema
// version UUID as a string.
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-wrap as a client-visible error, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Creates a keyspace (and any column families it declares). Serialized and
 * agreement-gated like the other schema mutations; returns the new schema
 * version UUID as a string.
 *
 * Fix: the mismatched-keyspace error message said "CsDef" — corrected to
 * "CfDef", the actual thrift type name.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CfDef (" + cf.getName() + ") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor,
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Re-wrap as a client-visible error, preserving the cause chain.
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
// Drops an entire keyspace. Serialized and agreement-gated like the other
// schema mutations; returns the new schema version UUID as a string.
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-wrap as a client-visible error, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** Update an existing keyspace's replication settings, but do not allow column
* family modifications (any cf_defs in the request are rejected up front).
* Serialized and agreement-gated like the other schema mutations; returns the
* new schema version UUID as a string. */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
// Note: the CF-less KSMetaData constructor — only keyspace-level settings change.
KSMetaData ksm = new KSMetaData(
ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
ks_def.strategy_options,
ks_def.replication_factor);
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-wrap as a client-visible error, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Updates an existing column family definition in place. Requires both
// keyspace and CF name to be set, and the CF to already exist. Serialized and
// agreement-gated like the other schema mutations; returns the new schema
// version UUID as a string.
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// Defaults are filled in here on the request thread; ideally apply() would
// also happen on the migration stage (original comment was truncated).
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Re-wrap as a client-visible error, preserving the cause chain.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Refuses to proceed with a schema migration while more than one schema
 * version is live among reachable hosts; unreachable hosts don't count
 * towards disagreement.
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (reachableVersions.size() > 1)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
// Converts a thrift CfDef to internal CFMetaData. Unset optional thrift fields
// fall back to the CFMetaData.DEFAULT_* constants via the isSet* ternaries.
// The argument list is positional and must match the CFMetaData constructor
// exactly — take care when editing.
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
if (cfType == null)
{
throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
}
CFMetaData.applyImplicitDefaults(cf_def);
CFMetaData.validateMinMaxCompactionThresholds(cf_def);
CFMetaData.validateMemtableSettings(cf_def);
return new CFMetaData(cf_def.keyspace,
cf_def.name,
cfType,
DatabaseDescriptor.getComparator(cf_def.comparator_type),
cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
cf_def.comment,
cf_def.row_cache_size,
cf_def.key_cache_size,
cf_def.read_repair_chance,
cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
DatabaseDescriptor.getComparator(cf_def.default_validation_class),
cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
// Truncates (empties) a column family cluster-wide, blocking until complete.
// Runs under the request scheduler; both timeouts and I/O failures are
// surfaced as UnavailableException with the cause attached.
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// Scheduler slot is returned even on failure.
release();
}
}
// Binds this connection's ClientState to the given (validated) keyspace;
// subsequent operations on this connection are scoped to it.
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
// Maps each live schema version UUID (as a string) to the list of hosts
// reporting it; used by clients to check cluster-wide schema agreement.
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
// ===== Left: revision of org.apache.cassandra.thrift.CassandraServer (merge-comparison marker) =====
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
// Logger plus shared empty result lists; the emptyList() singletons are
// immutable, so returning them directly is safe.
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
// (one ClientState per server thread: keyspace binding, auth, scheduling value)
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Picks up the request scheduler configured for this node.
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
// Returns the per-thread session state (keyspace, auth) for the current request.
public ClientState state()
{
return clientState.get();
}
// Executes the given read commands through StorageProxy under the request
// scheduler and indexes the results by decorated key. ConsistencyLevel.ANY is
// a write-only level and is rejected up front. TimeoutException becomes the
// thrift TimedOutException; schedule()/release() always pair via finally.
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
// NOTE: row.cf may be null for rows with no data — callers treat a missing
// entry and a null value the same way (see get()).
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to thrift Columns, dropping tombstones and
 * carrying the TTL across for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
    {
        return EMPTY_SUBCOLUMNS;
    }
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn column : columns)
    {
        // Deleted columns are never surfaced to clients.
        if (column.isMarkedForDelete())
            continue;
        Column translated = new Column(column.name(), column.value(), column.timestamp());
        if (column instanceof ExpiringColumn)
            translated.setTtl(((ExpiringColumn) column).getTimeToLive());
        result.add(translated);
    }
    return result;
}
/**
 * Converts internal columns to thrift ColumnOrSuperColumn wrappers, dropping
 * tombstones and carrying the TTL across for expiring columns.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        // Deleted columns are never surfaced to clients.
        if (column.isMarkedForDelete())
            continue;
        Column translated = new Column(column.name(), column.value(), column.timestamp());
        if (column instanceof ExpiringColumn)
            translated.setTtl(((ExpiringColumn) column).getTimeToLive());
        result.add(new ColumnOrSuperColumn().setColumn(translated));
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal super columns to thrift ColumnOrSuperColumn wrappers.
 * A super column whose subcolumns are all dead is dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(column.name(), subcolumns)));
    }
    // Internal order is always "natural"; flip here for reversed queries.
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
// Runs the given read commands and converts each row's ColumnFamily into a
// thrift column list, keyed by the raw command key. Reversal is only honored
// for SliceFromReadCommand (named reads have no ordering option).
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// Results are keyed by DecoratedKey internally; re-decorate to look them up.
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Dispatches a ColumnFamily to the right thriftify helper: subcolumns of a
 * single targeted super column, super columns, or plain columns. Null/empty
 * CFs yield the shared empty list.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // The query targeted one super column, so the CF holds exactly that
        // column; expose only its subcolumns.
        IColumn targeted = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = targeted.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }
    return cf.isSuper()
         ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
         : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
// Single-key slice: delegates to the multiget path with a singleton key list
// and extracts this key's result from the returned map.
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
// Multi-key slice: permission check here, validation and execution in
// multigetSliceInternal.
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
// Shared implementation behind get_slice/multiget_slice: validates inputs
// (including the consistency level), builds one ReadCommand per key — by-name
// when the predicate lists column names, by-range otherwise — and executes
// them via getSlice().
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
// Fetches a single column (or super column) identified by column_path, as a
// one-name by-names read. Throws NotFoundException both when the row is
// missing and when the named column is absent or deleted.
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
// For a standard column the path names the column; for a sub-column read the
// query path carries the super column and the name list carries the column.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// Exactly one name was requested, so at most one live column can come back.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Returns the number of columns the given predicate selects for {@code key}.
 * Implemented as the size of the corresponding get_slice result.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Per-key column counts for a set of rows: runs one multiget slice and maps
 * each key to the size of its returned column list.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
    {
        counts.put(slice.getKey(), slice.getValue().size());
    }
    return counts;
}
/**
 * Inserts (or overwrites) a single column under the given parent.
 *
 * Improvement: the original re-read {@code state().getKeyspace()} (a
 * ThreadLocal lookup) six separate times; it is hoisted into a local once,
 * matching the style of the other read/write methods in this class.
 *
 * @throws InvalidRequestException on validation failure, a missing super
 *         column name for a super CF, or a MarshalException from the mutation
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (DatabaseDescriptor.getColumnFamilyType(keyspace, column_parent.column_family) == ColumnFamilyType.Super
        && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(keyspace, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(keyspace, column_parent.column_family, column);
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // Surface marshalling problems as a client error rather than a server fault.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Validates and applies a batch of mutations grouped by key and column
 * family. Each column family is authorized only once per request.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // HashSet: O(1) membership; the previous List made the seen-check O(n) per CF
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>(mutation_map.size());
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: authorize each CF only the first time we see it
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Deletes the column, supercolumn, or entire row addressed by column_path,
 * using the supplied client timestamp for the tombstone.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
    RowMutation mutation = new RowMutation(state().getKeyspace(), key);
    mutation.delete(new QueryPath(column_path), timestamp);
    doInsert(consistency_level, Arrays.asList(mutation));
}
/**
 * Applies the given mutations through StorageProxy at the requested
 * consistency level, with request-scheduler accounting.
 *
 * @throws TimedOutException if the internal write times out
 * @throws InvalidRequestException if the consistency level is invalid for this keyspace
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
    ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
    try
    {
        // queue this thread with the request scheduler before performing the write
        schedule();
        try
        {
            StorageProxy.mutate(mutations, consistency_level);
        }
        catch (TimeoutException e)
        {
            // translate the internal timeout into the thrift-visible exception
            logger.debug("... timed out");
            throw new TimedOutException();
        }
    }
    finally
    {
        // always release the scheduler slot, even on failure
        release();
    }
}
/**
 * Builds the thrift KsDef for the given keyspace, including one CfDef per
 * column family it contains.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData metadata = DatabaseDescriptor.getTableDefinition(table);
    if (metadata == null)
        throw new NotFoundException();
    List<CfDef> columnFamilies = new ArrayList<CfDef>();
    for (CFMetaData cfMetaData : metadata.cfMetaData().values())
    {
        columnFamilies.add(CFMetaData.convertToThrift(cfMetaData));
    }
    KsDef def = new KsDef(metadata.name, metadata.strategyClass.getName(), metadata.replicationFactor, columnFamilies);
    def.setStrategy_options(metadata.strategyOptions);
    return def;
}
/**
 * Fetches a slice of columns for each row in the given key or token range.
 * The range may be addressed either by tokens (start_key == null) or by
 * keys; the two produce different bound semantics below.
 *
 * @return one KeySlice per row in the range
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // token-addressed request: build a token Range from the string tokens
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // key-addressed request: Bounds over the keys' tokens
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            // scheduler accounting wraps only the actual read
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        // translate internal timeout into the thrift-visible exception
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlices, honoring a reversed slice
 * predicate and unwrapping supercolumn contents when a super_column parent
 * was specified.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    }
    return slices;
}
/**
 * Runs a secondary-index scan: returns, for each row matching the index
 * clause, the columns selected by column_predicate.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("scan");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
    ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    catch (TimeoutException e)
    {
        // translate internal timeout into the thrift-visible exception
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists the definitions of every keyspace known to this node. A keyspace
 * whose metadata cannot be found is logged and skipped rather than failing
 * the whole listing.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return definitions;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
/** Returns the thrift API version constant, not the server release version. */
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: one TokenRange per range, each
 * listing the endpoints that hold replicas for it. Only non-system
 * keyspaces have a ring.
 *
 * @throws InvalidRequestException for null or system/unknown keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> tokenRanges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range range = entry.getKey();
        tokenRanges.add(new TokenRange(tokenFactory.toString(range.left), tokenFactory.toString(range.right), entry.getValue()));
    }
    return tokenRanges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    IPartitioner partitioner = StorageService.getPartitioner();
    return partitioner.getClass().getName();
}
/**
 * Returns the class name of the configured endpoint snitch. When the
 * dynamic snitch wrapper is active, reports the wrapped (sub) snitch,
 * i.e. what the operator actually configured.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Splits the given token range of a column family into subranges holding
 * roughly keys_per_split keys each, returning the split tokens as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory();
    Range tokenRange = new Range(tokenFactory.fromString(start_token), tokenFactory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, tokenRange, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tokenFactory.toString(token));
    return splits;
}
/**
 * Authenticates this client session with the supplied credentials.
 * @throws AuthenticationException if the credentials are rejected
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks in the request scheduler until this request may proceed;
 * must be paired with release() in a finally block.
 */
private void schedule()
{
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); frees the scheduler slot taken by this request.
 */
private void release()
{
    requestScheduler.release();
}
// Helper method to apply a schema migration on the migration stage and block
// until it completes. Typical migration failures will throw an
// InvalidRequestException (from the caller's catch of ConfigurationException);
// atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // parameterized Callable/Future instead of the raw types used before
    Future<Object> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // not expected here; restore the interrupt flag before failing
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Creates a new column family from the given definition.
 * Requires cluster-wide schema agreement first.
 *
 * @return the new schema version on success
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops a column family from the session's current keyspace.
 * Requires cluster-wide schema agreement first.
 *
 * @return the new schema version on success
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Creates a new keyspace along with all column families it declares.
 * Requires cluster-wide schema agreement first.
 *
 * @return the new schema version on success
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // message typo fixed: was "CsDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor,
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace and everything in it.
 * Requires cluster-wide schema agreement first.
 *
 * @return the new schema version on success
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Updates an existing keyspace's replication settings, but does not allow
 * column family modifications (the request must carry no CfDefs).
 *
 * @return the new schema version on success
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        KSMetaData ksm = new KSMetaData(
                ks_def.name,
                AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                ks_def.strategy_options,
                ks_def.replication_factor);
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Updates an existing column family's definition in place.
 * Requires cluster-wide schema agreement first.
 *
 * @return the new schema version on success
 * @throws InvalidRequestException if the target CF does not exist
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // ideally, apply() would happen on the stage with the rest of the
        // migration work; fill in defaults here before converting
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap as a client-visible validation error, preserving the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Ensures all reachable nodes agree on a single schema version before a
 * migration is attempted.
 *
 * @throws InvalidRequestException if more than one live schema version exists
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                                                         Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (versions.size() > 1)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a thrift CfDef into internal CFMetaData, filling in implicit
 * defaults and validating compaction/memtable settings first. Optional
 * thrift fields fall back to the CFMetaData DEFAULT_* constants.
 *
 * @throws InvalidRequestException if the column type is unrecognized
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
    ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
    if (cfType == null)
    {
        throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
    }
    CFMetaData.applyImplicitDefaults(cf_def);
    CFMetaData.validateMinMaxCompactionThresholds(cf_def);
    CFMetaData.validateMemtableSettings(cf_def);
    // NOTE: argument order must match the CFMetaData constructor exactly
    return new CFMetaData(cf_def.keyspace,
                          cf_def.name,
                          cfType,
                          DatabaseDescriptor.getComparator(cf_def.comparator_type),
                          cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
                          cf_def.comment,
                          cf_def.row_cache_size,
                          cf_def.key_cache_size,
                          cf_def.read_repair_chance,
                          cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
                          DatabaseDescriptor.getComparator(cf_def.default_validation_class),
                          cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
                          cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
                          cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
                          cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
                          cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
                          cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
                          cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
                          ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Truncates (empties) a column family cluster-wide, blocking until done.
 * Internal timeouts and I/O failures surface as UnavailableException.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        // scheduler accounting wraps the blocking truncate
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        // preserve the cause on the thrift-visible exception
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/**
 * Attaches the given keyspace to this client session after validating
 * that it exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/**
 * Reports the schema versions seen across the cluster, mapping each
 * version to the hosts carrying it (useful for detecting disagreement).
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates the server, wiring in the configured request scheduler. */
public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the thread-local session state (keyspace, auth) for this client. */
public ClientState state()
{
    return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy and maps each
 * returned row's key to its column family.
 *
 * @throws InvalidRequestException for ConsistencyLevel.ANY, which is write-only
 * @throws TimedOutException if the internal read times out
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
    if (consistency_level == ConsistencyLevel.ANY)
    {
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");
    }
    List<Row> rows;
    try
    {
        try
        {
            // scheduler accounting wraps only the actual read
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            release();
        }
    }
    catch (TimeoutException e)
    {
        // translate internal timeout into the thrift-visible exception
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    for (Row row: rows)
    {
        columnFamilyKeyMap.put(row.key, row.cf);
    }
    return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns into thrift Columns, dropping tombstoned
 * entries and carrying over TTLs on expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        // deleted columns are never visible to clients
        if (c.isMarkedForDelete())
            continue;
        Column thriftified = new Column(c.name(), c.value(), c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftified.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(thriftified);
    }
    return result;
}
/**
 * Converts internal columns into thrift ColumnOrSuperColumn wrappers,
 * dropping tombstones and optionally reversing the result.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        // deleted columns are never visible to clients
        if (c.isMarkedForDelete())
            continue;
        Column thriftified = new Column(c.name(), c.value(), c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftified.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(new ColumnOrSuperColumn().setColumn(thriftified));
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal supercolumns into thrift SuperColumn wrappers.
 * Supercolumns whose live subcolumns all filtered out are omitted.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(c.getSubColumns());
        // a supercolumn with no surviving subcolumns is effectively empty
        if (subcolumns.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(c.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and thriftifies the results, keyed by the
 * raw command key. Reversal is applied when the command is a reversed
 * SliceFromReadCommand.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand command: commands)
    {
        // re-decorate the key to look up the row returned for this command
        ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
        boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
        List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
        columnFamiliesMap.put(command.key, thriftifiedColumns);
    }
    return columnFamiliesMap;
}
/**
 * Thriftifies a whole ColumnFamily. When subcolumnsOnly is set the CF is
 * expected to hold the single requested supercolumn, whose children are
 * unwrapped and returned directly.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // unwrap the contents of the (single) supercolumn in the CF
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = superColumn.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }
    return cf.isSuper()
         ? thriftifySuperColumns(cf.getSortedColumns(), reverseOrder)
         : thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Fetches a slice of columns for a single key: the one-key special case of
 * multiget_slice.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ByteBuffer> keys = Collections.singletonList(key);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level).get(key);
}
/**
 * Fetches a slice of columns for each of the given keys, keyed by the
 * request key. Thin authorization wrapper over multigetSliceInternal.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request and builds one ReadCommand per key — by-names
 * commands when the predicate lists column names, range commands when it
 * carries a slice range — then executes them via getSlice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    // presized: exactly one command per key
    List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
    if (predicate.column_names != null)
    {
        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(key);
            commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
        }
    }
    else
    {
        SliceRange range = predicate.slice_range;
        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(key);
            commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
        }
    }
    return getSlice(commands, consistency_level);
}
/**
 * Fetches a single column or supercolumn by exact path, as a by-names read
 * of one name.
 *
 * @throws NotFoundException if the row or the named (super)column is absent
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    logger.debug("get");
    state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateColumnPath(keyspace, column_path);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    // when a subcolumn is named, super_column qualifies the path; when only a
    // supercolumn is named, it is itself the name to fetch at the top level
    QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
    List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
    ThriftValidation.validateKey(key);
    ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
    Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
    ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
    if (cf == null)
        throw new NotFoundException();
    List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
    if (tcolumns.isEmpty())
        throw new NotFoundException();
    // a by-names read of one name yields at most one live column
    assert tcolumns.size() == 1;
    return tcolumns.get(0);
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 * Implemented by delegating to get_slice and sizing the result, so the
 * full slice is materialized server-side before counting.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
    return slice.size();
}
/**
 * Returns, for each requested key, the number of columns matching the
 * predicate. Delegates the reads to multigetSliceInternal and counts the
 * per-key results.
 *
 * @return map from each input key to its matching column count
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    // presized: exactly one count entry per returned slice
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>(columnFamiliesMap.size());
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet())
    {
        counts.put(cf.getKey(), cf.getValue().size());
    }
    return counts;
}
/**
 * Writes a single column (or subcolumn of a supercolumn) after validating
 * key, parent, column name and value. For a super CF the super_column field
 * of the parent is mandatory.
 *
 * @throws InvalidRequestException on any validation or marshalling failure
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    ThriftValidation.validateKey(key);
    // hoisted: the session keyspace was previously re-fetched five times below
    String keyspace = state().getKeyspace();
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (DatabaseDescriptor.getColumnFamilyType(keyspace, column_parent.column_family) == ColumnFamilyType.Super
        && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(keyspace, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(keyspace, column_parent.column_family, column);
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // surface marshalling problems as a client-visible validation error
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Validates and applies a batch of mutations grouped by key and column
 * family. Each column family is authorized only once per request.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    // HashSet: O(1) membership; the previous List made the seen-check O(n) per CF
    Set<String> cfamsSeen = new HashSet<String>();
    List<RowMutation> rowMutations = new ArrayList<RowMutation>(mutation_map.size());
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        ThriftValidation.validateKey(key);
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: authorize each CF only the first time we see it
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(state().getKeyspace(), cfName, mutation);
            }
        }
        rowMutations.add(RowMutation.getRowMutationFromMutations(state().getKeyspace(), key, columnFamilyToMutations));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift API: deletes the column(s) addressed by column_path in the given row,
 * with the supplied client timestamp, via a single deletion RowMutation.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    ThriftValidation.validateKey(key);
    // path may address a whole CF, a super column, or a single (sub)column
    ThriftValidation.validateColumnPathOrParent(state().getKeyspace(), column_path);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    rm.delete(new QueryPath(column_path), timestamp);
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Common write path: validates the consistency level, then performs the mutations
 * under the request scheduler (queue before, release after — always, via finally).
 *
 * @throws TimedOutException if the replicas did not respond in time
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
    ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
    try
    {
        schedule();
        try
        {
            StorageProxy.mutate(mutations, consistency_level);
        }
        catch (TimeoutException e)
        {
            logger.debug("... timed out");
            throw new TimedOutException();
        }
    }
    finally
    {
        // must always release the scheduler slot acquired by schedule()
        release();
    }
}
/**
 * Thrift API: returns the definition (KsDef) of the named keyspace,
 * including Thrift-converted definitions of all its column families.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData metadata = DatabaseDescriptor.getTableDefinition(table);
    if (metadata == null)
    {
        throw new NotFoundException();
    }
    List<CfDef> columnFamilies = new ArrayList<CfDef>();
    for (CFMetaData cfm : metadata.cfMetaData().values())
    {
        columnFamilies.add(CFMetaData.convertToThrift(cfm));
    }
    KsDef definition = new KsDef(metadata.name, metadata.strategyClass.getName(), metadata.replicationFactor, columnFamilies);
    definition.setStrategy_options(metadata.strategyOptions);
    return definition;
}
/**
 * Thrift API: returns a slice of columns for each row in the requested key/token range.
 * A null start_key means the range is expressed in tokens (wrapping Range);
 * otherwise keys are converted to tokens and a non-wrapping Bounds is used.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // token-addressed range (may wrap around the ring)
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // key-addressed range: inclusive bounds over the keys' tokens
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows to Thrift KeySlices, honoring a reversed slice
 * predicate and sub-column extraction when a super column was addressed.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    }
    return slices;
}
/**
 * Thrift API: secondary-index scan — returns a slice of columns for each row
 * matching the index clause, as KeySlices.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("scan");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    ThriftValidation.validateColumnParent(keyspace, column_parent);
    ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
    ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    catch (TimeoutException e)
    {
        logger.debug("... timed out");
        throw new TimedOutException();
    }
    return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift API: returns the definitions of all known keyspaces.
 * Keyspaces whose metadata cannot be found are logged and skipped.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            // best-effort: a racing drop can make a keyspace vanish mid-iteration
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return definitions;
}
/** Thrift API: returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
/** Thrift API: returns the Thrift API version string (not the Cassandra release version). */
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Thrift API: describes the token ring for a keyspace as a list of
 * (start token, end token, endpoints) triples.
 *
 * @throws InvalidRequestException for null or non-user keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
/** Thrift API: returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift API: returns the class name of the configured snitch; if the snitch is
 * dynamic, reports the wrapped (underlying) snitch instead of the wrapper.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift API: splits the given token range of a column family into roughly
 * equal-sized chunks of about keys_per_split keys, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws TException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
    {
        splits.add(tf.toString(token));
    }
    return splits;
}
/** Thrift API: authenticates this connection's session with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Queues on the request scheduler; must be paired with release().
 */
private void schedule()
{
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); called from a finally block on every scheduled path.
 */
private void release()
{
    requestScheduler.release();
}
/**
 * Helper to apply a schema migration on the MIGRATION stage and block until done.
 * Typical migration failures surface as InvalidRequestException from the callers;
 * atypical failures are rethrown here as RuntimeException.
 */
private static void applyMigrationOnStage(final Migration m)
{
    // Parameterized Future/Callable instead of the original raw types.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration has been applied and announced
        f.get();
    }
    catch (InterruptedException e)
    {
        // migration stage waits are not expected to be interrupted
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift API: adds a new column family via a schema migration.
 * Requires cluster-wide schema agreement first; returns the new schema version.
 * Synchronized so concurrent schema changes from this server are serialized.
 */
public synchronized String system_add_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // rewrap with cause preserved so the client sees a meaningful message
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift API: drops a column family from the session's keyspace via a schema
 * migration. Requires schema agreement; returns the new schema version.
 */
public synchronized String system_drop_column_family(String column_family) throws InvalidRequestException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift API: creates a new keyspace (and any contained column families) via a
 * schema migration. Requires schema agreement; returns the new schema version.
 */
public synchronized String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // fixed typo in the original message: "CsDef" -> "CfDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(convertToCFMetaData(cfDef));
        }
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        ks_def.strategy_options,
                                        ks_def.replication_factor,
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift API: drops an entire keyspace via a schema migration.
 * Requires schema agreement; returns the new schema version.
 */
public synchronized String system_drop_keyspace(String keyspace) throws InvalidRequestException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * update an existing keyspace, but do not allow column family modifications.
 * Requires schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException if the KsDef carries any CF definitions
 */
public synchronized String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        KSMetaData ksm = new KSMetaData(
                ks_def.name,
                AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                ks_def.strategy_options,
                ks_def.replication_factor);
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift API: updates an existing column family's definition via a schema migration.
 * The CF must already exist; requires schema agreement; returns the new schema version.
 */
public synchronized String system_update_column_family(CfDef cf_def) throws InvalidRequestException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // ideally, apply() would happen on the stage with the
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Rejects schema-changing requests unless all reachable nodes report the same
 * schema version.
 *
 * @throws InvalidRequestException when more than one live schema version exists
 */
private void validateSchemaAgreement() throws InvalidRequestException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                                                         Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (versions.size() > 1)
        throw new InvalidRequestException("Cluster schema does not yet agree");
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Converts a Thrift CfDef to an internal CFMetaData, filling in defaults for
 * any unset optional fields and validating thresholds/memtable settings.
 * NOTE: the CFMetaData constructor is positional — argument order matters.
 *
 * @throws InvalidRequestException for an unknown column_type
 */
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException
{
    ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type);
    if (cfType == null)
    {
        throw new InvalidRequestException("Invalid column type " + cf_def.column_type);
    }
    CFMetaData.applyImplicitDefaults(cf_def);
    CFMetaData.validateMinMaxCompactionThresholds(cf_def);
    CFMetaData.validateMemtableSettings(cf_def);
    return new CFMetaData(cf_def.keyspace,
                          cf_def.name,
                          cfType,
                          DatabaseDescriptor.getComparator(cf_def.comparator_type),
                          cf_def.subcomparator_type == null ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type),
                          cf_def.comment,
                          cf_def.row_cache_size,
                          cf_def.key_cache_size,
                          cf_def.read_repair_chance,
                          cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS,
                          DatabaseDescriptor.getComparator(cf_def.default_validation_class),
                          cf_def.isSetMin_compaction_threshold() ? cf_def.min_compaction_threshold : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD,
                          cf_def.isSetMax_compaction_threshold() ? cf_def.max_compaction_threshold : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD,
                          cf_def.isSetRow_cache_save_period_in_seconds() ? cf_def.row_cache_save_period_in_seconds : CFMetaData.DEFAULT_ROW_CACHE_SAVE_PERIOD_IN_SECONDS,
                          cf_def.isSetKey_cache_save_period_in_seconds() ? cf_def.key_cache_save_period_in_seconds : CFMetaData.DEFAULT_KEY_CACHE_SAVE_PERIOD_IN_SECONDS,
                          cf_def.isSetMemtable_flush_after_mins() ? cf_def.memtable_flush_after_mins : CFMetaData.DEFAULT_MEMTABLE_LIFETIME_IN_MINS,
                          cf_def.isSetMemtable_throughput_in_mb() ? cf_def.memtable_throughput_in_mb : CFMetaData.DEFAULT_MEMTABLE_THROUGHPUT_IN_MB,
                          cf_def.isSetMemtable_operations_in_millions() ? cf_def.memtable_operations_in_millions : CFMetaData.DEFAULT_MEMTABLE_OPERATIONS_IN_MILLIONS,
                          ColumnDefinition.fromColumnDef(cf_def.column_metadata));
}
/**
 * Thrift API: blocking truncate of a column family in the session's keyspace.
 * Timeouts and I/O failures are reported to the client as UnavailableException
 * (with the original cause attached).
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/** Thrift API: binds this connection's session to the given (validated) keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/** Thrift API: maps each known schema version to the endpoints reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// main method moved to CassandraDaemon
}
Right
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates the server, wiring in the configured request scheduler. */
public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's session state (keyspace, auth). */
public ClientState state()
{
    return clientState.get();
}
/**
 * Executes the given read commands under the request scheduler and indexes the
 * resulting rows by decorated key. CL.ANY is rejected — it is write-only.
 *
 * @throws TimedOutException if replicas did not respond in time
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
    if (consistency_level == ConsistencyLevel.ANY)
    {
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");
    }
    List<Row> rows;
    try
    {
        try
        {
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            // always release the scheduler slot, even on failure
            release();
        }
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    for (Row row: rows)
    {
        columnFamilyKeyMap.put(row.key, row.cf);
    }
    return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, skipping tombstones and
 * carrying the TTL through for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    List<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue; // tombstone: not visible to clients
        Column converted = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
        if (col instanceof ExpiringColumn)
            converted.setTtl(((ExpiringColumn) col).getTimeToLive());
        result.add(converted);
    }
    return result;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns (total value
 * computed from the counter context), skipping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    List<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue; // tombstone: not visible to clients
        assert col instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
    }
    return result;
}
/**
 * Converts internal columns (regular or counter) to Thrift ColumnOrSuperColumn
 * wrappers, skipping tombstones, and reverses the list if the client asked for
 * a reversed slice.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue; // tombstone: not visible to clients
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (col instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
        }
        else
        {
            Column converted = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
            if (col instanceof ExpiringColumn)
                converted.setTtl(((ExpiringColumn) col).getTimeToLive());
            cosc.setColumn(converted);
        }
        result.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Dispatches super-column conversion to the counter or regular variant
 * depending on the column family type.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF ? thriftifyCounterSuperColumns(columns, reverseOrder)
                       : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift SuperColumns wrapped in
 * ColumnOrSuperColumn, dropping any super column with no live subcolumns.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(col.getSubColumns());
        // a super column whose subcolumns were all tombstones is invisible
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(col.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter super columns to Thrift CounterSuperColumns wrapped
 * in ColumnOrSuperColumn, dropping any super column with no live subcolumns.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(col.getSubColumns());
        // a super column whose subcolumns were all tombstones is invisible
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(col.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the read commands and converts each row's ColumnFamily into Thrift
 * ColumnOrSuperColumn lists, keyed by the command's (raw) row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand command: commands)
    {
        // readColumnFamily keys its result by decorated key; re-decorate to look up
        ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
        boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
        List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
        columnFamiliesMap.put(command.key, thriftifiedColumns);
    }
    return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily read result into Thrift columns.
 * When subcolumnsOnly is set (a super column was addressed), unwraps the single
 * super column and returns its subcolumns; otherwise dispatches on CF type.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // query addressed a specific super column, so the CF holds exactly one
        IColumn column = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = column.getSubColumns();
        if (subcolumns == null || subcolumns.isEmpty())
            return EMPTY_COLUMNS;
        else
            return thriftifyColumns(subcolumns, reverseOrder);
    }
    if (cf.isSuper())
    {
        // commutative default validator marks a counter CF
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    else
    {
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
    }
}
/**
 * Thrift API: returns the columns matching the predicate for one row.
 * Delegates to the multiget path with a single-element key list.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Thrift API: returns the columns matching the predicate for each requested row key.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Shared implementation for get_slice/multiget_slice: validates inputs, builds
 * one read command per key (by-names or by-range depending on the predicate),
 * and executes them via getSlice().
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    List<ReadCommand> commands = new ArrayList<ReadCommand>();
    if (predicate.column_names != null)
    {
        // explicit list of column names
        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(metadata, key);
            commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
        }
    }
    else
    {
        // contiguous slice defined by start/finish/reversed/count
        SliceRange range = predicate.slice_range;
        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(metadata, key);
            commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
        }
    }
    return getSlice(commands, consistency_level);
}
/**
 * Shared implementation for get(): reads exactly one column (or super column)
 * addressed by column_path via a by-names read of a single name.
 *
 * @throws NotFoundException if the row or the addressed column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
    ThriftValidation.validateColumnPath(metadata, column_path);
    // if column is set we query inside the super column; otherwise the super column itself is the name
    QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
    List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
    ThriftValidation.validateKey(metadata, key);
    ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
    Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
    ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
    if (cf == null)
        throw new NotFoundException();
    List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
    if (tcolumns.isEmpty())
        throw new NotFoundException();
    // we asked for exactly one name, so at most one live column can come back
    assert tcolumns.size() == 1;
    return tcolumns.get(0);
}
/** Thrift API: returns the single column or super column addressed by column_path. */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    logger.debug("get");
    return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift API: counts the columns matching the predicate in one row.
 * Implemented by fetching the full slice and taking its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift API: counts the columns matching the predicate for each requested row
 * key. Implemented by fetching the slices and taking each list's size.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices =
        multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
    {
        counts.put(entry.getKey(), entry.getValue().size());
    }
    return counts;
}
/**
 * Shared implementation for insert(): validates against the CF metadata and
 * applies the single-column write as one RowMutation.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(metadata, column);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // surface marshalling problems to the client as an invalid request
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: write a single column. Delegates to internal_insert,
 * which performs access checks, validation and the mutation.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Shared implementation of batch_mutate: validates every mutation and builds,
 * per row key, at most one standard RowMutation and one counter RowMutation
 * (the latter wrapped in a CounterMutation), then applies them all at once.
 *
 * Improvement over the previous revision: column families already authorized
 * are tracked in a HashSet instead of a List, so the duplicate-authorization
 * check is O(1) instead of a linear scan per column family per key.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: check access only the first time a CF is seen
            if (!(cfamsSeen.contains(cfName)))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
                cfamsSeen.add(cfName);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                // Lazily create the per-key counter mutation on first counter CF
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift endpoint: apply a batch of mutations, grouped by row key and then
 * by column family. Delegates to internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Shared implementation of remove/remove_counter: validates the path and
 * issues a deletion mutation for it.
 *
 * @param isCommutativeOp true when deleting from a counter column family;
 *        the write is then validated for counters and wrapped in a
 *        CounterMutation before being applied
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnPathOrParent(metadata, column_path);
    if (isCommutativeOp)
        ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    // QueryPath(column_path) covers column, super column or whole-row deletes
    rm.delete(new QueryPath(column_path), timestamp);
    if (isCommutativeOp)
        doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
    else
        doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: delete the column(s) addressed by column_path using the
 * client-supplied timestamp. Delegates to internal_remove with
 * isCommutativeOp=false (standard, non-counter CFs only).
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Apply a batch of mutations through StorageProxy under the request
 * scheduler: queue the calling thread, perform the write, and always release
 * the scheduler slot afterwards. A StorageProxy timeout is translated into
 * the Thrift-level TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
    try
    {
        schedule();
        if (!mutations.isEmpty())
            StorageProxy.mutate(mutations, consistency_level);
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    finally
    {
        release();
    }
}
/**
 * Thrift endpoint: return the schema definition (KsDef plus CfDefs) for one
 * keyspace.
 *
 * @throws NotFoundException when no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();
    List<CfDef> cfDefs = new ArrayList<CfDef>(ksm.cfMetaData().size());
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef result = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    result.setStrategy_options(ksm.strategyOptions);
    return result;
}
/**
 * Thrift endpoint: fetch slices of columns for a contiguous range of rows.
 *
 * The range may be expressed either as a token range (start_key == null:
 * end-exclusive Range) or as a key range (Bounds, inclusive of both ends).
 * The read is executed through StorageProxy under the request scheduler.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // Token-range query: parse both tokens with the partitioner's factory
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // Key-range query: map keys to tokens and use inclusive Bounds
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            // Always free the scheduler slot, even if the read failed
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Convert internal Rows into Thrift KeySlices, honoring a reversed slice
 * range and, for super CFs, a super-column-scoped parent.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return slices;
}
/**
 * Thrift endpoint: secondary-index scan. Returns the rows matching the
 * index clause, with columns filtered by the slice predicate.
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("scan");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    // 'false' -> not a counter operation
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
    ThriftValidation.validateIndexClauses(metadata, index_clause);
    List<Row> rows;
    try
    {
        rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: return schema definitions for all known keyspaces.
 * Keyspaces whose metadata cannot be found are logged and skipped rather
 * than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return defs;
}
/**
 * Thrift endpoint: return the configured cluster name.
 */
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
/**
 * Thrift endpoint: return the Thrift API version constant.
 */
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Thrift endpoint: describe the token ring for one keyspace as a list of
 * TokenRanges (start token, end token, endpoint addresses).
 *
 * @throws InvalidRequestException for null, system or unknown keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range tokenRange = entry.getKey();
        ranges.add(new TokenRange(tf.toString(tokenRange.left), tf.toString(tokenRange.right), entry.getValue()));
    }
    return ranges;
}
/**
 * Thrift endpoint: return the fully-qualified class name of the configured
 * partitioner.
 */
public String describe_partitioner() throws TException
{
    return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: return the class name of the configured snitch. When the
 * snitch is wrapped in a DynamicEndpointSnitch, the underlying (wrapped)
 * snitch's class name is reported instead of the wrapper's.
 */
public String describe_snitch() throws TException
{
    // Fetch once and reuse; declared as Object to avoid naming the snitch interface here
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        return ((DynamicEndpointSnitch) snitch).subsnitch.getClass().getName();
    return snitch.getClass().getName();
}
/**
 * Thrift endpoint: split the token range (start_token, end_token] of the
 * given column family into sub-ranges of roughly keys_per_split keys and
 * return the split tokens as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range splitRange = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, splitRange, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        splits.add(tf.toString(token));
    return splits;
}
/**
 * Thrift endpoint: authenticate this connection's ClientState with the
 * supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services
 */
private void schedule()
{
    // May block the calling thread until the request scheduler admits it.
    // Callers must pair this with release() in a finally block.
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources
 */
private void release()
{
    // Counterpart of schedule(); frees the scheduler slot for the next request.
    requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Run apply+announce on the MIGRATION stage so schema changes are serialized,
    // then block until the migration completes. Generic types replace the raw
    // Future/Callable of the previous revision.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt flag before propagating; interruption here is
        // unexpected, so it is still surfaced as an AssertionError.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift endpoint: create a new column family in the current keyspace.
 * Synchronized so concurrent schema changes from this server are serialized.
 *
 * @return the new schema version UUID as a string, so clients can wait for
 *         cluster-wide agreement
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drop a column family from the current keyspace.
 * Synchronized so concurrent schema changes from this server are serialized.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: create a new keyspace together with any column families
 * included in the KsDef. Synchronized so concurrent schema changes from this
 * server are serialized.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        // Validate and convert every CF definition before building the keyspace metadata
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
        KSMetaData.backwardsCompatibleOptions(ks_def),
        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drop an entire keyspace. Synchronized so concurrent
 * schema changes from this server are serialized.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/** update an existing keyspace, but do not allow column family modifications.
 *
 * Synchronized so concurrent schema changes from this server are serialized.
 * The request is rejected if it carries any CfDefs, since CF changes must go
 * through system_update_column_family.
 *
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
        KSMetaData.backwardsCompatibleOptions(ks_def));
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: update an existing column family definition. The CF must
 * already exist; implicit defaults are filled in before the migration is
 * applied. Synchronized so concurrent schema changes from this server are
 * serialized.
 *
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // ideally, apply() would happen on the stage with the
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Report configuration problems to the client, preserving the cause chain
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Reject schema-modifying requests while live nodes disagree on the schema
 * version. Unreachable hosts are excluded before counting distinct versions.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (reachableVersions.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: blocking truncate of a column family in the current
 * keyspace. Both timeouts and I/O failures are reported to the client as
 * UnavailableException with the original exception as the cause.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        schedule();
        // Blocks until all replicas have acknowledged the truncation
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        // Always free the scheduler slot taken by schedule()
        release();
    }
}
/**
 * Thrift endpoint: bind this connection's ClientState to the given keyspace
 * after validating that it exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/**
 * Thrift endpoint: map each schema version in the cluster to the list of
 * hosts reporting it (including the UNREACHABLE pseudo-version).
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increment (or decrement) a counter column. Validates the
 * destination as a counter column family, then applies a CounterMutation.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("add");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    // 'true' -> the CF must be a counter (commutative) column family
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're adding
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
    }
    catch (MarshalException e)
    {
        // Bad client data, not a server error: report it as an invalid request.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: delete a counter column. Uses the server's wall clock as
 * the deletion timestamp (counters carry no client timestamps) and delegates
 * to internal_remove with isCommutativeOp=true.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: execute a CQL query, optionally GZIP-compressed.
 *
 * Bug fix: in the previous revision the {@code if (decompressor.needsInput())}
 * guard covered only the lenRead assignment, so {@code query.get(...)} and
 * {@code setInput(...)} ran on EVERY loop iteration — consuming bytes from the
 * request buffer and replacing the Inflater's input while it still had
 * unprocessed data, corrupting the decompressed query. The three statements
 * now execute together, only when the inflater has exhausted its input.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                while (true)
                {
                    if (decompressor.needsInput())
                    {
                        // Only consume more compressed bytes when the inflater
                        // has exhausted its current input
                        int lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
// Class logger; made final — it is assigned exactly once (SLF4J convention).
private static final Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, reused to avoid allocating on every empty read.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
// (each server thread gets its own ClientState — keyspace binding and
// authentication — created lazily on first access)
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
    @Override
    public ClientState initialValue()
    {
        return new ClientState();
    }
};
/*
 * RequestScheduler to perform the scheduling of incoming requests
 */
private final IRequestScheduler requestScheduler;
/**
 * Builds a server using the request scheduler from the node configuration.
 */
public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/**
 * Session state (keyspace, authentication) for the calling thread; see
 * {@link #clientState}.
 */
public ClientState state()
{
    return clientState.get();
}
/**
 * Execute a batch of read commands through StorageProxy under the request
 * scheduler and index the resulting rows by decorated key.
 *
 * @throws InvalidRequestException for ConsistencyLevel.ANY, which is
 *         write-only and cannot be applied to reads
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    if (consistency_level == ConsistencyLevel.ANY)
    {
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");
    }
    List<Row> rows;
    try
    {
        try
        {
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            release();
        }
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    Map<DecoratedKey, ColumnFamily> resultsByKey = new HashMap<DecoratedKey, ColumnFamily>();
    for (Row row : rows)
        resultsByKey.put(row.key, row.cf);
    return resultsByKey;
}
/**
 * Convert internal sub-columns into Thrift Columns, skipping tombstones and
 * carrying over the TTL for expiring columns. Returns the shared empty list
 * for null or empty input.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (!c.isMarkedForDelete())
        {
            Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                col.setTtl(((ExpiringColumn) c).getTimeToLive());
            result.add(col);
        }
    }
    return result;
}
/**
 * Convert internal counter sub-columns into Thrift CounterColumns (total
 * value per counter context), skipping tombstones. Returns the shared empty
 * list for null or empty input.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (!c.isMarkedForDelete())
        {
            assert c instanceof org.apache.cassandra.db.CounterColumn;
            result.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
    }
    return result;
}
/**
 * Convert internal columns into Thrift ColumnOrSuperColumns, handling both
 * counter and regular (optionally expiring) columns and skipping tombstones.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column col = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                col.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(col);
        }
        result.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Dispatch super-column conversion to the counter or regular variant
 * depending on whether the column family is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Convert internal super columns into Thrift SuperColumns, dropping any
 * whose sub-columns are all deleted/empty.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(sc.getSubColumns());
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Convert internal counter super columns into Thrift CounterSuperColumns,
 * dropping any whose sub-columns are all deleted/empty.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(sc.getSubColumns());
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Execute the given read commands and convert each resulting row's column
 * family into Thrift form, keyed by the raw row key.
 *
 * Improvement: the result map is presized to the number of commands, since
 * exactly one entry is produced per command.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>(commands.size());
    for (ReadCommand command: commands)
    {
        // Results are keyed by decorated key internally; re-decorate to look them up
        ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
        boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
        List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
        columnFamiliesMap.put(command.key, thriftifiedColumns);
    }
    return columnFamiliesMap;
}
/**
 * Convert one internal ColumnFamily into Thrift form. With subcolumnsOnly,
 * only the sub-columns of the (single) requested super column are returned;
 * otherwise super and standard CFs are converted via the matching helper.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // The query targeted one super column, so the CF holds exactly its columns
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = superColumn.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subcolumns, reverseOrder);
    }
    if (cf.isSuper())
    {
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Thrift endpoint: fetch a slice of columns from one row. Implemented as a
 * single-key multiget and returning that key's slice.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Thrift multiget_slice: reads a slice of columns for each of the given keys
 * after checking READ permission on the target column family.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
/**
 * Validates the request and builds one ReadCommand per key: by-name commands
 * when the predicate lists explicit column names, range commands otherwise.
 * Delegates the actual read to getSlice.
 *
 * @throws InvalidRequestException if the column family, parent, predicate or a key fails validation
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
// Explicit column names: read exactly those columns for each key.
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
// Slice range: read a contiguous (possibly reversed, count-limited) range per key.
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Reads a single column (or super column) at the given path.
 *
 * @return the single thriftified column
 * @throws NotFoundException if the row or the requested column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// When a subcolumn is requested, the super column becomes part of the query path;
// when only a super column is requested, the path has no super column component
// and the super column itself is the "name" being fetched.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// A by-name read of a single name can only yield one live column.
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/**
 * Thrift get: fetches a single column or super column by path.
 * Thin public wrapper around internal_get.
 */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift get_count: counts live columns for a key by performing the full
 * get_slice and returning the size of the result.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift multiget_count: for each key, returns the number of columns that a
 * multiget_slice with the same arguments would have returned. The slice is
 * materialized in full and only its size is reported.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    // Perform the full slice, then collapse each per-key result to its size.
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
        counts.put(slice.getKey(), slice.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert as one RowMutation.
 *
 * @throws InvalidRequestException on validation failure, a missing super
 *         column name for a super CF, or a value that fails marshalling
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// Surface marshalling problems as client errors rather than server faults.
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift insert: writes a single column. Thin public wrapper around internal_insert.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Applies a Thrift batch_mutate: for each row key, accumulates at most one
 * standard RowMutation and one counter RowMutation (counter CFs go through
 * CounterMutation and a different write path), then submits everything via
 * doInsert.
 *
 * @param mutation_map row key -> (column family name -> mutations for that CF)
 * @param consistency_level level used both to validate counter writes and for
 *        the final write
 * @throws InvalidRequestException on any validation failure
 * @throws UnavailableException if insufficient replicas are alive
 * @throws TimedOutException if the write times out
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // A Set gives O(1) membership checks; the previous List made the
    // auth-dedup check O(n) per column family name.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: check WRITE permission only the
            // first time each column family appears in the batch.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                // A Mutation may carry a deletion, an insertion, or both.
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift batch_mutate: applies a batch of mutations across keys and column
 * families. Thin public wrapper around internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path as a RowMutation.
 *
 * @param isCommutativeOp true for counter deletions, which are validated for
 *        commutative semantics and wrapped in a CounterMutation
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
// Counter deletes take the counter write path; everything else is a plain mutation.
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift remove: deletes a column, super column, or entire row at the given
 * path. Thin public wrapper around internal_remove (non-counter variant).
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Submits mutations through StorageProxy under the request scheduler.
 * The scheduler slot is always released, even on failure.
 *
 * @throws TimedOutException if the underlying write times out
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
// An empty batch (e.g. all mutations were empty) is a no-op.
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift describe_keyspace: returns the Thrift definition of a keyspace,
 * including all of its column family definitions.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift get_range_slices: reads a slice of columns for every row in a key or
 * token range. The range is interpreted as a token Range when start_key is
 * absent, otherwise as key Bounds.
 *
 * @throws TimedOutException if the range read times out
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// Token-addressed range: parse both endpoints with the partitioner's factory.
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// Key-addressed range: convert keys to tokens and use inclusive Bounds.
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into Thrift KeySlices, thriftifying each row's
 * column family with the super-column and reversal settings implied by the
 * original query's parent and predicate.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return slices;
}
/**
 * Thrift get_indexed_slices: scans rows matching a secondary-index clause and
 * returns the requested column slice for each matching row.
 *
 * @throws TimedOutException if the scan times out
 */
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift describe_keyspaces: returns definitions for all known keyspaces.
 * Keyspaces whose metadata cannot be found are logged and skipped rather than
 * failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
Set<String> keyspaces = DatabaseDescriptor.getTables();
List<KsDef> ksset = new ArrayList<KsDef>();
for (String ks : keyspaces)
{
try
{
ksset.add(describe_keyspace(ks));
}
catch (NotFoundException nfe)
{
// Best effort: a keyspace listed but without metadata is skipped.
logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
}
}
return ksset;
}
/** Thrift describe_cluster_name: returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift describe_version: returns the Thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift describe_ring: returns the token ranges of the ring for a keyspace,
 * each with the endpoints responsible for that range.
 *
 * @throws InvalidRequestException if the keyspace is null or a system keyspace
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
// Tokens are serialized to strings so clients need not know the partitioner type.
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Thrift describe_partitioner: returns the partitioner's fully-qualified class name. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift describe_snitch: returns the class name of the configured endpoint
 * snitch. If the snitch is a DynamicEndpointSnitch wrapper, the wrapped
 * (underlying) snitch's class name is reported instead.
 */
public String describe_snitch() throws TException
{
    // Fetch the snitch once instead of calling the descriptor three times,
    // so the instanceof check and the reported class refer to the same object.
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        snitch = ((DynamicEndpointSnitch) snitch).subsnitch;
    return snitch.getClass().getName();
}
/**
 * Thrift describe_splits: splits the given token range into sub-ranges of
 * roughly keys_per_split keys each, returning the split tokens as strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/**
 * Thrift login: authenticates this connection's thread-local ClientState with
 * the supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Queues on the configured request scheduler using this client's scheduling
 * value; must be paired with a release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); returns this thread's slot to the request scheduler.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // Run on the MIGRATION stage so schema changes are serialized on a single executor.
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // Block until the migration has been applied and announced.
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt flag before propagating so callers up the
        // stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Thrift system_add_column_family: creates a new column family via a schema
 * migration and returns the resulting schema version.
 *
 * @throws SchemaDisagreementException if live nodes disagree on schema
 * @throws InvalidRequestException wrapping configuration or IO failures
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Preserve the cause so the underlying failure remains diagnosable.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_drop_column_family: drops a column family from the current
 * keyspace via a schema migration and returns the resulting schema version.
 *
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_add_keyspace: creates a keyspace (and any contained column
 * families) via a schema migration and returns the resulting schema version.
 *
 * @throws InvalidRequestException if any contained CfDef names a different
 *         keyspace, or on validation/configuration/IO failure
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // Fixed message typo: the offending definition is a CfDef, not "CsDef".
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // Preserve the cause so the underlying failure remains diagnosable.
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift system_drop_keyspace: drops a keyspace via a schema migration and
 * returns the resulting schema version.
 *
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * Rejects requests whose KsDef carries any CfDef entries; only keyspace-level
 * properties (e.g. replication strategy/options) may be updated here.
 * @throws SchemaDisagreementException if live nodes disagree on schema */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_update_column_family: updates an existing column family's
 * definition via a schema migration and returns the resulting schema version.
 *
 * @throws InvalidRequestException if keyspace/name are missing or no existing
 *         definition matches
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// Fill in any fields the client omitted before converting for the migration.
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws unless all reachable nodes report the same schema version.
 *
 * @throws SchemaDisagreementException when more than one distinct schema
 *         version is seen among reachable hosts
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Thrift truncate: blocking truncate of a column family in the current
 * keyspace. Timeouts and IO failures surface as UnavailableException with the
 * original cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/**
 * Thrift set_keyspace: validates the keyspace name and binds it to this
 * connection's thread-local ClientState.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Thrift describe_schema_versions: returns schema version -> list of hosts
 * reporting that version, as seen by StorageProxy.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift add: increments a counter column. Validates the write for a counter
 * (commutative) column family and applies it through the CounterMutation path.
 *
 * @throws InvalidRequestException on validation failure, a missing super
 *         column name for a super CF, or a value that fails marshalling
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift remove_counter: deletes a counter column using the current server
 * time as the deletion timestamp (counter deletes are not client-timestamped).
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift execute_cql_query: decodes the (optionally zlib-compressed) query
 * bytes into a UTF-8 string and hands it to the CQL QueryProcessor.
 *
 * @param query the query bytes, raw UTF-8 (NONE) or zlib-compressed (GZIP)
 * @param compression which decoding to apply to the query bytes
 * @throws InvalidRequestException on malformed compression/encoding or an
 *         invalid CQL string
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // Only feed the inflater when it has consumed its current
                    // input. The previous unbraced form ran query.get/setInput
                    // on every iteration, advancing the buffer with a stale
                    // lenRead and clobbering unconsumed input.
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        // NOTE(review): message says "deflating" but this is an inflate
        // (decompression) failure; kept verbatim for client compatibility.
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
MergeMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
// thread local state containing session information
// Each Thrift connection thread lazily gets its own ClientState (keyspace,
// auth/login status, scheduling value).
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/**
 * Creates the Thrift server facade, capturing the request scheduler
 * configured in DatabaseDescriptor for use by schedule()/release().
 */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the calling thread's session state (keyspace, auth, scheduling). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes read commands through StorageProxy under the request scheduler and
 * maps each resulting row's decorated key to its column family.
 *
 * @throws InvalidRequestException if consistency level ANY is requested
 *         (ANY is write-only)
 * @throws TimedOutException if the read times out
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// Always give the scheduler slot back, even when the read fails.
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns into Thrift Columns, skipping any that are
 * marked for delete and carrying over the TTL of expiring columns.
 *
 * @param columns subcolumns of a super column; may be null or empty
 * @return thrift columns, or the shared immutable empty list
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        // Tombstones are invisible to clients.
        if (c.isMarkedForDelete())
            continue;
        Column thriftified = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftified.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(thriftified);
    }
    return result;
}
/**
 * Converts internal counter subcolumns into Thrift CounterColumns, skipping
 * deleted columns and resolving each counter's total via CounterContext.
 *
 * @param columns subcolumns of a counter super column; may be null or empty
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_COUNTER_SUBCOLUMNS;
}
ArrayList<CounterColumn> thriftColumns = new ArrayList<CounterColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
// Counter CF subcolumns must be counters; total() collapses the context to one value.
assert column instanceof org.apache.cassandra.db.CounterColumn;
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts internal columns into thrift {@code ColumnOrSuperColumn}s, dropping
 * deleted columns. Counter columns become counter_column entries (with the
 * context totalled); everything else becomes a regular column, with TTL carried
 * over for expiring columns.
 *
 * @param columns internal columns (must be non-null)
 * @param reverseOrder reverse the output, since ColumnFamily always stores
 *        columns in natural order
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn c : columns)
{
if (c.isMarkedForDelete())
continue;
ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
if (c instanceof org.apache.cassandra.db.CounterColumn)
{
cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
}
else
{
Column regular = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
if (c instanceof ExpiringColumn)
regular.setTtl(((ExpiringColumn) c).getTimeToLive());
cosc.setColumn(regular);
}
converted.add(cosc);
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(converted);
return converted;
}
/**
 * Dispatches supercolumn conversion to the counter or standard variant
 * depending on whether the column family is commutative.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
return isCounterCF
? thriftifyCounterSuperColumns(columns, reverseOrder)
: thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal supercolumns into thrift super_column wrappers.
 * Supercolumns whose subcolumns are all deleted are omitted entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn sc : columns)
{
List<Column> subs = thriftifySubColumns(sc.getSubColumns());
if (!subs.isEmpty())
converted.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subs)));
}
if (reverseOrder)
Collections.reverse(converted);
return converted;
}
/**
 * Converts internal counter supercolumns into thrift counter_super_column
 * wrappers; supercolumns with no live counter subcolumns are omitted.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn sc : columns)
{
List<CounterColumn> subs = thriftifyCounterSubColumns(sc.getSubColumns());
if (!subs.isEmpty())
converted.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subs)));
}
if (reverseOrder)
Collections.reverse(converted);
return converted;
}
/**
 * Reads the given commands and thriftifies each row's result, keyed by the
 * original (undecorated) row key from the command.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> rowsByKey = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> result = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command : commands)
{
ColumnFamily cf = rowsByKey.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reversed = command instanceof SliceFromReadCommand && ((SliceFromReadCommand) command).reversed;
result.put(command.key, thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reversed));
}
return result;
}
/**
 * Thriftifies one column family's contents. A null or empty cf yields
 * EMPTY_COLUMNS. With subcolumnsOnly set, only the children of the single
 * contained supercolumn are converted. Super CFs dispatch on whether the
 * default validator is commutative (counter CF).
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
IColumn parent = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> children = parent.getSubColumns();
return (children == null || children.isEmpty())
? EMPTY_COLUMNS
: thriftifyColumns(children, reverseOrder);
}
if (cf.isSuper())
{
// counter CFs have a commutative default validator
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, cf.metadata().getDefaultValidator().isCommutative());
}
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Thrift endpoint: returns the slice for a single key, delegating to the
 * multiget path with a singleton key list.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
Map<ByteBuffer, List<ColumnOrSuperColumn>> result = multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level);
return result.get(key);
}
/**
 * Thrift endpoint: slices several keys at once after a read-permission check.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
return multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Shared implementation for get(): reads exactly one column, subcolumn, or
 * supercolumn at the given path.
 *
 * @throws NotFoundException when the row is missing or the named target is
 *         absent/deleted
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when a concrete column is named, any super_column component is its parent;
// otherwise the supercolumn itself is the read target
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// exactly one name was requested, so at most one live result is possible
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Thrift endpoint: counts live (sub)columns by materializing the slice and
 * returning its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
List<ColumnOrSuperColumn> slice = get_slice(key, column_parent, predicate, consistency_level);
return slice.size();
}
/**
 * Thrift endpoint: per-key live column counts, computed from a multiget slice.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
{
counts.put(entry.getKey(), entry.getValue().size());
}
return counts;
}
/**
 * Shared implementation for insert(): validates and applies a single-column
 * write. Note the validation order: permission check first, then metadata,
 * key, parent, names, and finally column data.
 *
 * @throws InvalidRequestException on any validation failure, including a
 *         missing super column name when inserting into a super CF
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// 'false' rejects counter CFs -- counter writes go through add() instead
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// surface serialization problems as client errors
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: single-column insert; all validation and the actual write
 * happen in internal_insert.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations. For each row, standard-CF and
 * counter-CF mutations are accumulated into separate RowMutations because
 * counter writes take a different path (wrapped in a CounterMutation).
 *
 * FIX: cfamsSeen is now a HashSet (it was an ArrayList scanned with
 * contains()), making the duplicate-authorization check O(1) per column family
 * instead of O(n) per lookup.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Set<String> cfamsSeen = new HashSet<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations: Set.add returns true only on first sight
if (cfamsSeen.add(cfName))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/**
 * Thrift endpoint: batch mutation across rows/column families; all work
 * happens in internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Shared implementation for remove()/remove_counter(): deletes whatever
 * column_path names (column, supercolumn, or entire row) at the given
 * timestamp.
 *
 * @param isCommutativeOp true for counter CFs; the mutation is then wrapped
 *        in a CounterMutation and commutative-write validation is applied
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift endpoint: non-counter delete; delegates to internal_remove with
 * isCommutativeOp = false.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies the given mutations through StorageProxy after validating the
 * consistency level. The scheduler slot is always released (even when
 * schedule() itself or the mutate fails), and a cluster-side timeout is
 * translated to the thrift TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
finally
{
release();
}
}
/**
 * Builds a thrift KsDef (including all column family definitions and strategy
 * options) for the named keyspace.
 *
 * @throws NotFoundException when no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
{
cfDefs.add(CFMetaData.convertToThrift(cfm));
}
KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
def.setStrategy_options(ksm.strategyOptions);
return def;
}
/**
 * Thrift endpoint: fetches a contiguous range of rows and thriftifies each
 * row's slice.
 *
 * The range may be expressed either as tokens (start_key == null) or as keys;
 * token ranges use Range, key ranges use Bounds.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-based range
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-based range
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
// always release the scheduler slot
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
// unexpected local I/O failure: surface as unchecked
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal rows into thrift KeySlices; reversal is taken from the
 * predicate's slice_range, when one is present.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
for (Row row : rows)
{
List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
slices.add(new KeySlice(row.key.key, cols));
}
return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists definitions for every keyspace; keyspaces whose metadata cannot be
 * found are logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
List<KsDef> defs = new ArrayList<KsDef>();
for (String ks : DatabaseDescriptor.getTables())
{
try
{
defs.add(describe_keyspace(ks));
}
catch (NotFoundException nfe)
{
logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
}
}
return defs;
}
/**
 * Thrift endpoint: returns the configured cluster name.
 */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/**
 * Thrift endpoint: returns the thrift API version constant.
 */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Describes the token ring for a non-system keyspace: each entry maps a token
 * range (as strings) to the endpoints responsible for it.
 *
 * @throws InvalidRequestException for null or system/unknown keyspaces
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<TokenRange> ranges = new ArrayList<TokenRange>();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range r = entry.getKey();
ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
}
return ranges;
}
/**
 * Thrift endpoint: returns the fully-qualified class name of the partitioner.
 */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: returns the configured snitch class name. A dynamic snitch
 * wrapper is unwrapped so the underlying snitch class is reported.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Computes split tokens over the given token range for the current keyspace
 * and column family, targeting roughly keys_per_split keys per split, and
 * returns them in string form.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
Range target = new Range(tf.fromString(start_token), tf.fromString(end_token));
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, target, keys_per_split);
List<String> result = new ArrayList<String>(tokens.size());
for (Token t : tokens)
{
result.add(tf.toString(t));
}
return result;
}
/**
 * Thrift endpoint: authenticates this connection's client state with the
 * supplied credentials.
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Queues the thread with the request scheduler using the client state's
 * scheduling value; every call must be paired with release().
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * The counterpart of schedule(); typically invoked from a finally block so the
 * scheduler slot is returned even on failure.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
/**
 * Applies and announces a schema migration on the MIGRATION stage, blocking
 * until it completes.
 *
 * FIX: the InterruptedException handler now restores the thread's interrupt
 * status before failing, so pools/callers still observe the interrupt.
 */
private static void applyMigrationOnStage(final Migration m)
{
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
f.get();
}
catch (InterruptedException e)
{
// preserve interrupt status for callers before treating this as fatal
Thread.currentThread().interrupt();
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Thrift endpoint: adds a column family after validating its definition and
 * verifying cluster-wide schema agreement.
 *
 * @return the new schema version as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
catch (IOException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
}
/**
 * Thrift endpoint: drops a column family from the current keyspace after
 * verifying cluster-wide schema agreement.
 *
 * @return the new schema version as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
catch (IOException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
}
/**
 * Thrift endpoint: creates a keyspace (and any contained column families)
 * after validating each definition and verifying schema agreement.
 *
 * @return the new schema version as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
// NOTE(review): "CsDef" below looks like a typo for "CfDef"; left as-is
// since clients may match on the message text
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drops an entire keyspace after verifying schema agreement.
 *
 * @return the new schema version as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
catch (IOException e)
{
InvalidRequestException ire = new InvalidRequestException(e.getMessage());
ire.initCause(e);
throw ire;
}
}
/**
 * Thrift endpoint: updates an existing keyspace's replication settings, but
 * does not allow column family modifications (those go through
 * system_update_column_family).
 *
 * @return the new schema version as a string
 * @throws InvalidRequestException if the KsDef carries any CF definitions
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: updates an existing column family definition after
 * verifying it exists and that the cluster agrees on the schema.
 *
 * @return the new schema version as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// fill in implicit defaults here, before converting to Avro and submitting
// the migration to the stage (see CFMetaData.applyImplicitDefaults)
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Fails with SchemaDisagreementException when reachable nodes report more than
 * one schema version; unreachable hosts are excluded from the comparison.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> reachableVersions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (reachableVersions.size() > 1)
throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: removes all data from a column family, blocking until the
 * truncation completes. Timeouts and I/O failures both surface as
 * UnavailableException with the original cause attached.
 */
// NOTE(review): a stray "@see CFMetaData.applyImplicitDefaults()" comment sat
// here previously; it appears unrelated to truncation (likely belongs with the
// CF-update path) and has been replaced by this note.
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
// always return the scheduler slot
release();
}
}
/**
 * Thrift endpoint: binds this connection to the given keyspace after
 * verifying that it exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/**
 * Thrift endpoint: reports each schema version seen in the cluster and the
 * hosts holding it.
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increments a counter column. The write is wrapped in a
 * CounterMutation and validated for commutative (counter) semantics.
 *
 * @throws InvalidRequestException on validation failure, including a missing
 *         super column name when adding into a super CF
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
// 'true' requires a counter CF here
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
// surface serialization problems as client errors
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: deletes a counter. The thrift API carries no client
 * timestamp for counter deletes, so the server's current time is used;
 * delegates to internal_remove with isCommutativeOp = true.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query received over thrift, optionally GZIP-compressed.
 * The query bytes are decompressed/decoded to a UTF-8 string and handed to
 * the CQL QueryProcessor under the caller's session state.
 *
 * @throws InvalidRequestException on malformed compression, encoding, or CQL
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // BUGFIX: only pull more bytes from the query buffer when the
                    // inflater has exhausted its current input. Previously the
                    // get/setInput calls ran on every iteration, over-consuming
                    // the ByteBuffer and corrupting/underflowing the input.
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // Preserve the ANTLR parse error as the cause for easier debugging.
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
// Per-class logger and shared empty-result singletons (avoid re-allocation).
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();

// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
    @Override
    public ClientState initialValue()
    {
        return new ClientState();
    }
};

/*
 * RequestScheduler to perform the scheduling of incoming requests
 */
private final IRequestScheduler requestScheduler;

public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}

/** Returns the calling thread's session state (keyspace, auth, scheduling). */
public ClientState state()
{
    return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy under the request
 * scheduler and returns the results keyed by decorated row key.
 *
 * @throws InvalidRequestException if CL.ANY is requested (writes-only level)
 * @throws TimedOutException if replicas did not answer in time
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
    if (consistency_level == ConsistencyLevel.ANY)
    {
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");
    }
    List<Row> rows;
    try
    {
        try
        {
            // Acquire a scheduler slot; release() is guaranteed by the finally.
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            release();
        }
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        // Local read I/O failure is a server-side fault, not a client error.
        throw new RuntimeException(e);
    }
    for (Row row: rows)
    {
        columnFamilyKeyMap.put(row.key, row.cf);
    }
    return columnFamilyKeyMap;
}
/**
 * Converts internal sub-columns into thrift {@code Column} structs, skipping
 * tombstones and carrying TTLs over for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        // Deleted columns never appear in thrift results.
        if (c.isMarkedForDelete())
            continue;

        Column converted = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            converted.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(converted);
    }
    return result;
}
/**
 * Converts internal counter sub-columns into thrift {@code CounterColumn}
 * structs (resolving each counter context to its total), skipping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        // Only counter columns can live in a counter CF.
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return result;
}
/**
 * Converts internal columns into thrift {@code ColumnOrSuperColumn} structs,
 * choosing the counter or regular representation per column and optionally
 * reversing the output order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            CounterColumn counter = new CounterColumn(c.name(), CounterContext.instance().total(c.value()));
            result.add(new ColumnOrSuperColumn().setCounter_column(counter));
        }
        else
        {
            Column regular = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                regular.setTtl(((ExpiringColumn) c).getTimeToLive());
            result.add(new ColumnOrSuperColumn().setColumn(regular));
        }
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/** Dispatches super-column conversion to the counter or regular variant. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns into thrift {@code SuperColumn} structs;
 * super columns whose live sub-columns are all deleted are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter super columns into thrift
 * {@code CounterSuperColumn} structs, dropping empty ones.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(sc.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subcolumns)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and converts each resulting ColumnFamily into
 * thrift structs, keyed by the original (undecorated) row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand command: commands)
    {
        // readColumnFamily keys results by DecoratedKey, so re-decorate to look up.
        ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
        boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
        List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
        columnFamiliesMap.put(command.key, thriftifiedColumns);
    }
    return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result into thrift structs, choosing between
 * sub-column, super-column and plain-column representations.
 *
 * @param subcolumnsOnly when true, only the sub-columns of the single
 *        selected super column are returned
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // The query targeted one super column; unwrap it and thriftify its children.
        IColumn column = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = column.getSubColumns();
        if (subcolumns == null || subcolumns.isEmpty())
            return EMPTY_COLUMNS;
        else
            return thriftifyColumns(subcolumns, reverseOrder);
    }
    if (cf.isSuper())
    {
        // Counter super CFs need the CounterSuperColumn representation.
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    else
    {
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
    }
}
/**
 * Returns a slice of columns for a single key; implemented as a one-key
 * multiget_slice.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns slices for multiple keys in one round trip; auth check here,
 * validation and command construction in multigetSliceInternal.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Core single-column read shared by the public get(): validates, issues a
 * by-name read for exactly one (super)column and unwraps the result.
 *
 * @throws NotFoundException if the row or column does not exist
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
    ThriftValidation.validateColumnPath(metadata, column_path);
    // If only a super column is addressed, query it as the name; otherwise
    // query the column name under the (possibly null) super column.
    QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
    List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
    ThriftValidation.validateKey(metadata, key);
    ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
    Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
    ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
    if (cf == null)
        throw new NotFoundException();
    List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
    if (tcolumns.isEmpty())
        throw new NotFoundException();
    // We asked for exactly one name, so at most one live result can come back.
    assert tcolumns.size() == 1;
    return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 * Note: materializes the full slice server-side to count it.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Counts, per key, the columns a multiget_slice with the same arguments
 * would return. The slices are materialized server-side and then sized.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
        counts.put(slice.getKey(), slice.getValue().size());
    return counts;
}
/**
 * Core single-column write shared by insert(): validates the request and
 * applies a RowMutation at the requested consistency level.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    // 'false' rejects counter CFs: counters must go through add(), not insert().
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(metadata, column);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // Marshalling errors are the client's fault; report them as such.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift insert entry point; all work happens in internal_insert. */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Core implementation of batch_mutate: validates every mutation, builds at
 * most one standard RowMutation and one counter RowMutation per row key, and
 * applies them all at the requested consistency level.
 *
 * Improvement: cfamsSeen was an ArrayList probed with O(n) contains() for
 * every column family of every row; a HashSet gives O(1) membership checks
 * with identical behavior.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Column families whose WRITE permission has already been checked.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/** Thrift batch_mutate entry point; all work happens in internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Core delete shared by remove()/remove_counter(): issues a tombstone for the
 * addressed path at the given timestamp.
 *
 * @param isCommutativeOp true for counter deletes, which are wrapped in a
 *        CounterMutation and validated against counter-safe consistency levels
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnPathOrParent(metadata, column_path);
    if (isCommutativeOp)
        ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    rm.delete(new QueryPath(column_path), timestamp);
    if (isCommutativeOp)
        doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
    else
        doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift remove entry point for non-counter data (client supplies timestamp). */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies the given mutations through StorageProxy under the request
 * scheduler, translating replica timeouts into thrift TimedOutException.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
    // Validate CL against the keyspace's replication before doing any work.
    ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
    try
    {
        schedule();
        try
        {
            StorageProxy.mutate(mutations, consistency_level);
        }
        catch (TimeoutException e)
        {
            logger.debug("... timed out");
            throw new TimedOutException();
        }
    }
    finally
    {
        // Always release the scheduler slot, even on failure.
        release();
    }
}
/**
 * Returns the thrift definition (KsDef) of a keyspace, including all its
 * column family definitions and strategy options.
 *
 * @throws NotFoundException if the keyspace is not defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();
    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    ksdef.setStrategy_options(ksm.strategyOptions);
    return ksdef;
}
/**
 * Returns slices for a contiguous range of rows, addressed either by token
 * range or by key bounds, converted to thrift KeySlice structs.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // Token-addressed range: (left, right] semantics via Range.
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // Key-addressed range: inclusive bounds on both ends.
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlice structs, honoring a reversed
 * slice predicate when converting each row's columns.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;

    List<KeySlice> keySlices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        keySlices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return keySlices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns the definitions of all keyspaces; keyspaces whose metadata cannot
 * be found are logged and skipped rather than failing the whole call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    Set<String> keyspaces = DatabaseDescriptor.getTables();
    List<KsDef> ksset = new ArrayList<KsDef>();
    for (String ks : keyspaces)
    {
        try
        {
            ksset.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            // Best-effort: a racing drop can remove a keyspace mid-iteration.
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return ksset;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
/** Returns the thrift API version string. */
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: one TokenRange per range with the
 * endpoints replicating it. Only valid for non-system keyspaces.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range range = entry.getKey();
        List<String> endpoints = entry.getValue();
        // Tokens are serialized to strings for the thrift interface.
        ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
    }
    return ranges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    return StorageService.getPartitioner().getClass().getName();
}
/**
 * Returns the class name of the configured snitch; for a dynamic snitch the
 * wrapped (underlying) snitch class is reported instead.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
        return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Splits the given token range of a column family into sub-ranges of roughly
 * keys_per_split keys each, returning the boundary tokens as strings
 * (used by Hadoop input splits).
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
    {
        splits.add(tf.toString(token));
    }
    return splits;
}
/** Authenticates the client session with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * May block until the request scheduler grants a slot; must be paired with
 * release() in a finally block.
 */
private void schedule()
{
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); returns the slot to the request scheduler.
 */
private void release()
{
    requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
/**
 * Applies and announces a schema migration synchronously on the MIGRATION
 * stage, blocking the caller until it completes.
 *
 * Improvements: parameterized the raw Callable/Future types, and the thread's
 * interrupt status is restored before the (behavior-preserving) AssertionError
 * is thrown, per InterruptedException handling convention.
 */
private static void applyMigrationOnStage(final Migration m)
{
    Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // Block until the migration has been applied and announced.
        f.get();
    }
    catch (InterruptedException e)
    {
        // Restore the interrupt flag for any caller further up the stack.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // Unwrapped by callers into InvalidRequestException where appropriate.
        throw new RuntimeException(e);
    }
}
/**
 * Adds a new column family via a schema migration and returns the resulting
 * schema version. Synchronized so concurrent schema changes serialize locally.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    // Refuse schema changes while the cluster disagrees on the current schema.
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops a column family from the current keyspace via a schema migration and
 * returns the resulting schema version.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Creates a new keyspace (and any bundled column families) via a schema
 * migration and returns the resulting schema version.
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace via a cluster-wide migration.
 *
 * @param keyspace name of the keyspace to drop
 * @return the new schema version (definitions UUID) as a string
 * @throws InvalidRequestException on unknown keyspace or failed migration
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    // refuse schema changes while the cluster disagrees on schema
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Updates an existing keyspace's properties via a cluster-wide migration.
 * Column family modifications are explicitly rejected; use the CF-specific
 * calls for those.
 *
 * @param ks_def updated keyspace definition; must not carry CF definitions
 * @return the new schema version (definitions UUID) as a string
 * @throws InvalidRequestException if CF definitions are present or the migration fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    // keyspace updates must not smuggle in CF changes
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def));
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Updates an existing column family's definition via a cluster-wide migration.
 *
 * @param cf_def updated definition; keyspace and name must identify an existing CF
 * @return the new schema version (definitions UUID) as a string
 * @throws InvalidRequestException on missing CF or failed migration
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    // the CF must already exist; updates cannot create one
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // fill in unset fields with their implicit defaults before converting,
        // so the update does not accidentally clear them
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Throws SchemaDisagreementException when live nodes report more than one
 * distinct schema version. Unreachable hosts are excluded: they cannot vote.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    // more than one distinct version among reachable hosts => disagreement
    if (reachableVersions.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Truncates (removes all data from) a column family in the session keyspace,
 * blocking until all replicas have acknowledged.
 *
 * @param cfname column family to truncate
 * @throws UnavailableException if the truncate times out or fails with I/O errors
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        // gate the operation through the request scheduler
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        // thrift's UnavailableException has no cause-taking constructor
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/**
 * Binds this connection's session to the given keyspace after verifying it exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/**
 * Returns the cluster's schema versions, mapping each version string to the
 * list of endpoints reporting it (including the UNREACHABLE pseudo-version).
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// counter methods

/**
 * Increments a counter column by {@code column.value} at the given consistency
 * level. The target CF must be counter-valued (commutative).
 *
 * @param key            row key
 * @param column_parent  CF (and super column, mandatory for super CFs)
 * @param column         counter name and increment delta
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("add");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
    ThriftValidation.validateKey(metadata, key);
    // counters restrict which consistency levels are valid for writes
    ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're adding
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
    }
    catch (MarshalException e)
    {
        throw new InvalidRequestException(e.getMessage());
    }
    // wrap in a CounterMutation so the counter-specific write path is used
    doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Removes a counter at the given path, using the current server time as the
 * deletion timestamp and the commutative (counter) delete path.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query supplied as a (possibly GZIP-deflate-compressed) UTF-8
 * byte buffer and returns its result.
 *
 * @param query       query bytes; inflated with java.util.zip.Inflater when GZIP
 * @param compression GZIP or NONE
 * @throws InvalidRequestException on undecodable input or malformed CQL
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // NOTE(review): only the lenRead assignment is guarded by
                    // needsInput(); the get/setInput calls below run every
                    // iteration. The indentation suggests they were meant to be
                    // inside the if — confirm intent before restructuring.
                    if (decompressor.needsInput())
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    int lenWrite = 0;
                    // drain all currently-inflatable output into byteArray
                    while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // ANTLR parse failure => client sent bad CQL
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
}
// KeepBothMethods (merge-resolution marker; not Java code)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);

// Shared immutable empty results, returned instead of null/new lists.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();

// thread local state containing session information (keyspace, auth, ...)
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
    @Override
    public ClientState initialValue()
    {
        return new ClientState();
    }
};

/*
 * RequestScheduler to perform the scheduling of incoming requests
 */
private final IRequestScheduler requestScheduler;

// Picks up the scheduler configured in DatabaseDescriptor.
public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/**
 * Returns the per-connection (thread-local) client session state.
 */
public ClientState state()
{
    return clientState.get();
}
/**
 * Executes the given read commands at the requested consistency level and
 * returns the resulting rows keyed by decorated key.
 *
 * @throws InvalidRequestException for ConsistencyLevel.ANY (write-only level)
 * @throws TimedOutException       if the read times out
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
    if (consistency_level == ConsistencyLevel.ANY)
    {
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");
    }
    List<Row> rows;
    try
    {
        try
        {
            // gate the read through the request scheduler
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            release();
        }
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        // unexpected storage failure; not a client error
        throw new RuntimeException(e);
    }
    for (Row row: rows)
    {
        columnFamilyKeyMap.put(row.key, row.cf);
    }
    return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to thrift Columns, dropping tombstones and
 * propagating TTLs from expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn subcolumn : columns)
    {
        // deleted columns are never returned to clients
        if (subcolumn.isMarkedForDelete())
            continue;

        Column thriftColumn = new Column(subcolumn.name()).setValue(subcolumn.value()).setTimestamp(subcolumn.timestamp());
        if (subcolumn instanceof ExpiringColumn)
            thriftColumn.setTtl(((ExpiringColumn) subcolumn).getTimeToLive());
        result.add(thriftColumn);
    }
    return result;
}
/**
 * Converts internal counter subcolumns to thrift CounterColumns (merged
 * totals), dropping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn subcolumn : columns)
    {
        if (subcolumn.isMarkedForDelete())
            continue;

        // only counter columns can appear inside a counter CF
        assert subcolumn instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(subcolumn.name(), CounterContext.instance().total(subcolumn.value())));
    }
    return result;
}
/**
 * Converts internal columns to thrift ColumnOrSuperColumn wrappers, dropping
 * tombstones; counters become CounterColumns carrying their merged total.
 * Reverses the result when the client asked for a reversed slice, since the
 * internal ColumnFamily always sorts in comparator ("natural") order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        // tombstones are never returned to clients
        if (column.isMarkedForDelete())
            continue;

        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (column instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(column.name(), CounterContext.instance().total(column.value())));
        }
        else
        {
            Column thriftColumn = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
            if (column instanceof ExpiringColumn)
                thriftColumn.setTtl(((ExpiringColumn) column).getTimeToLive());
            cosc.setColumn(thriftColumn);
        }
        result.add(cosc);
    }

    // TODO this reversal is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Dispatches super-column conversion: counter CFs produce CounterSuperColumn
 * thrift objects, regular CFs produce SuperColumn ones.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF ? thriftifyCounterSuperColumns(columns, reverseOrder)
                       : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to thrift SuperColumns. Super columns whose
 * live subcolumns are all deleted are omitted entirely. Reverses the result
 * for reversed slices.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(column.name(), subcolumns)));
    }

    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter super columns to thrift CounterSuperColumns,
 * omitting those whose subcolumns are all deleted. Reverses the result for
 * reversed slices.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
        if (!subcolumns.isEmpty())
            result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(column.name(), subcolumns)));
    }

    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and thriftifies each command's row into a
 * per-key list of ColumnOrSuperColumn.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> rowsByKey = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand command : commands)
    {
        ColumnFamily cf = rowsByKey.get(StorageService.getPartitioner().decorateKey(command.key));
        // reversed slices must be un-reversed during thriftification
        boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand) command).reversed;
        result.put(command.key, thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder));
    }
    return result;
}
/**
 * Converts a read ColumnFamily into the thrift representation appropriate for
 * the query: subcolumns of a single super column, super columns (counter or
 * regular), or plain columns.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    // nothing (or an empty row) read: return the shared empty list
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        // the query named a super column, so at most one top-level column is present
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subcolumns = superColumn.getSubColumns();
        return (subcolumns == null || subcolumns.isEmpty())
               ? EMPTY_COLUMNS
               : thriftifyColumns(subcolumns, reverseOrder);
    }

    if (cf.isSuper())
    {
        // counter-valued super CFs need CounterSuperColumn thrift objects
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }

    return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/**
 * Returns a slice of columns for a single key, implemented as a one-key
 * multiget_slice.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns a slice of columns for each of the given keys.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Reads a single column or super column identified by {@code column_path}.
 *
 * @throws NotFoundException if the row or the named (super)column is absent
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
    ThriftValidation.validateColumnPath(metadata, column_path);
    // when a column is named, the super column (if any) becomes part of the path;
    // when only a super column is named, query it as a top-level name instead
    QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
    List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
    ThriftValidation.validateKey(metadata, key);
    ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
    Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
    ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
    if (cf == null)
        throw new NotFoundException();
    List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
    if (tcolumns.isEmpty())
        throw new NotFoundException();
    // a by-name read of one name yields at most one result
    assert tcolumns.size() == 1;
    return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns a get_slice with the same arguments would return.
 * Note: materializes the full slice to count it.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Counts, per key, the columns a multiget_slice with the same arguments would
 * return. Note: materializes the slices to count them.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 *
 * @throws InvalidRequestException on validation failure or unmarshalable value
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    // 'false' => counter CFs are rejected; plain inserts may not target them
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(metadata, column);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a single-column insert; delegates to internal_insert.
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations grouped by row key and column
 * family. Per row key, standard-CF and counter-CF mutations are collected into
 * separate RowMutations because counters take a different write path (wrapped
 * in a CounterMutation).
 *
 * @param mutation_map key -> (CF name -> mutations)
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    List<String> cfamsSeen = new ArrayList<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations
            if (!(cfamsSeen.contains(cfName)))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
                cfamsSeen.add(cfName);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                // counter CF: lazily create the shared per-key counter mutation
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                // a Mutation may carry a deletion, an insertion, or both
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/**
 * Thrift entry point for batched mutations; delegates to internal_batch_mutate.
 */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path.
 *
 * @param isCommutativeOp true for counter deletes: validation requires a
 *                        counter CF and the mutation is wrapped in a
 *                        CounterMutation
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnPathOrParent(metadata, column_path);
    if (isCommutativeOp)
        ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    rm.delete(new QueryPath(column_path), timestamp);
    if (isCommutativeOp)
        doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
    else
        doInsert(consistency_level, Arrays.asList(rm));
}
/**
 * Thrift entry point for a non-counter deletion; delegates to internal_remove.
 */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies the given mutations at the requested consistency level, gating the
 * work through the request scheduler and always releasing the scheduler slot.
 *
 * Fix: the file carried two doInsert overloads — (ConsistencyLevel,
 * List&lt;RowMutation&gt;) and (ConsistencyLevel, List&lt;? extends IMutation&gt;) —
 * which have the same erasure and therefore cannot coexist (JLS name clash);
 * this is the "keep both methods" merge artifact. They are merged here into
 * the general wildcard form, which accepts every existing call site
 * (List&lt;RowMutation&gt;, List&lt;CounterMutation&gt;, List&lt;IMutation&gt;).
 *
 * @throws TimedOutException if the write times out
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
    try
    {
        schedule();
        try
        {
            // nothing to do for an empty mutation list
            if (!mutations.isEmpty())
                StorageProxy.mutate(mutations, consistency_level);
        }
        catch (TimeoutException e)
        {
            logger.debug("... timed out");
            throw new TimedOutException();
        }
    }
    finally
    {
        release();
    }
}
/**
 * Returns the thrift definition of a keyspace, including all of its column
 * families.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);

    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    // convert each CF definition to its thrift representation
    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef result = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    result.setStrategy_options(ksm.strategyOptions);
    return result;
}
/**
 * Returns slices for a contiguous range of keys or tokens.
 *
 * @param range either a key range (start_key/end_key, inclusive Bounds) or a
 *              token range (start_token/end_token, half-open Range)
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // token range: half-open (left, right]
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // key range: inclusive bounds on both ends
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            // gate the scan through the request scheduler
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into Thrift KeySlices, reversing column order when
 * the predicate's slice range asked for reversed results.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Returns the definitions of every keyspace known to this node, skipping
 * (and logging) any keyspace whose metadata cannot be found.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String name : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(name));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + name + "'. Continuing... ");
        }
    }
    return defs;
}
/** Returns the cluster name from the static node configuration. */
public String describe_cluster_name() throws TException
{
    String name = DatabaseDescriptor.getClusterName();
    return name;
}
/** Returns the Thrift API version string this server implements. */
public String describe_version() throws TException
{
    String version = Constants.VERSION;
    return version;
}
/**
 * Describes the token ring for the given keyspace: one TokenRange per range,
 * listing the endpoints responsible for it.
 *
 * @throws InvalidRequestException for null or non-user keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    List<TokenRange> result = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range r = entry.getKey();
        result.add(new TokenRange(factory.toString(r.left), factory.toString(r.right), entry.getValue()));
    }
    return result;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    IPartitioner partitioner = StorageService.getPartitioner();
    return partitioner.getClass().getName();
}
/**
 * Returns the class name of the configured endpoint snitch; when a dynamic
 * snitch wraps another, the wrapped snitch's class name is reported.
 */
public String describe_snitch() throws TException
{
    if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
    {
        DynamicEndpointSnitch dynamic = (DynamicEndpointSnitch) DatabaseDescriptor.getEndpointSnitch();
        return dynamic.subsnitch.getClass().getName();
    }
    return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Computes split tokens for the given CF over (start_token, end_token] such
 * that each split holds roughly keys_per_split keys; returns the tokens as
 * strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(factory.fromString(start_token), factory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> result = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        result.add(factory.toString(t));
    return result;
}
/**
 * Authenticates this connection's session with the supplied credentials.
 *
 * @throws AuthenticationException if the credentials are rejected
 * @throws AuthorizationException  if the authenticated user is not permitted
 */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks in the request scheduler until this request may proceed;
 * must be paired with release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart to schedule(); returns this request's slot to the scheduler.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
// Applies and announces the migration synchronously: submits to the MIGRATION
// stage and blocks on the Future so schema changes are serialized.
private static void applyMigrationOnStage(final Migration m)
{
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
f.get();
}
catch (InterruptedException e)
{
// NOTE(review): interrupt status is not restored before converting to
// AssertionError — TODO confirm this matches the project's convention.
throw new AssertionError(e);
}
catch (ExecutionException e)
{
// Unwrapping is left to callers; migration failures surface as RuntimeException.
throw new RuntimeException(e);
}
}
/**
 * Adds a new column family to the current keyspace.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException      on validation or configuration errors
 * @throws SchemaDisagreementException  if live nodes disagree on schema
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// Surface configuration problems to the client, preserving the cause.
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Drops a column family from the current keyspace.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException      on validation or configuration errors
 * @throws SchemaDisagreementException  if live nodes disagree on schema
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Creates a new keyspace together with its column families.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * Fix: the error message for a CF whose keyspace does not match said
 * "CsDef" instead of "CfDef".
 *
 * @throws InvalidRequestException      on validation or configuration errors
 * @throws SchemaDisagreementException  if live nodes disagree on schema
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        // Validate and convert every CF definition before building the keyspace metadata.
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace and all of its column families.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException      on validation or configuration errors
 * @throws SchemaDisagreementException  if live nodes disagree on schema
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Update an existing keyspace, but do not allow column family modifications.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException     if the KsDef carries CF definitions,
 *                                     or on validation/configuration errors
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Updates an existing column family definition in place.
 * Requires cluster-wide schema agreement; returns the new schema version.
 *
 * @throws InvalidRequestException     if keyspace/name are unset, the CF does
 *                                     not exist, or validation fails
 * @throws SchemaDisagreementException if live nodes disagree on schema
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// implicit defaults already filled in; they are applied here first.
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Verifies that all reachable nodes agree on a single schema version.
 *
 * @throws SchemaDisagreementException if more than one version is reported
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
/**
 * Removes all data from the named column family in the current keyspace,
 * blocking until all live replicas have truncated.
 *
 * @throws UnavailableException if the truncate times out or fails with I/O
 *         errors (the cause is attached)
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
// Queue with the request scheduler for the duration of the operation.
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/**
 * Binds this connection's session to the given keyspace after verifying
 * that the keyspace exists.
 */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    // Reject unknown keyspaces before mutating session state.
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/**
 * Reports, for each schema version present in the cluster, the list of
 * hosts at that version (including the UNREACHABLE pseudo-version).
 */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Increments (or decrements) a counter column by column.value.
 *
 * @throws InvalidRequestException if validation fails, the super column name
 *         is missing for a super CF, or the value cannot be marshalled
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
// Third argument `true` requires the CF to be a counter (commutative) CF.
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
// Counter mutations take a distinct write path from standard mutations.
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Removes a counter column; delegates to internal_remove with the current
 * time as the timestamp and isCommutativeOp=true.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Executes a CQL query supplied as a (possibly GZIP/deflate-compressed)
 * byte buffer and returns its result.
 *
 * @param query        UTF-8 query text, raw or deflate-compressed
 * @param compression  GZIP (deflate-decompressed here via Inflater) or NONE
 * @throws InvalidRequestException on decompression, encoding, or parse errors
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
// Feed the inflater 1KB at a time until the stream is exhausted.
while (true)
{
// NOTE(review): lenRead is only recomputed when the inflater asks
// for input; query.get below always runs — TODO confirm intended.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
// Hand the decoded text to the CQL processor under this session's state.
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
// (keyspace binding, authentication) — one ClientState per connection thread.
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates a server using the request scheduler from the node configuration. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-thread session state for the current connection. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy and maps each
 * returned row's key to its column family.
 *
 * @throws InvalidRequestException if ConsistencyLevel.ANY is requested
 *         (ANY is write-only)
 * @throws TimedOutException if replicas did not answer within the timeout
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
// Queue with the request scheduler for the duration of the read.
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, skipping tombstones and
 * carrying over TTLs from expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(thriftCol);
    }
    return result;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns, skipping
 * tombstones; each column's contexts are totalled into a single value.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;
        assert c instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return result;
}
/**
 * Converts internal columns (standard or counter) to Thrift
 * ColumnOrSuperColumn wrappers, skipping tombstones, then reverses the list
 * when the caller asked for descending order.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
if (column instanceof org.apache.cassandra.db.CounterColumn)
{
// Counter: total the contexts into one value.
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(new ColumnOrSuperColumn().setCounter_column(thrift_column));
}
else
{
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/**
 * Dispatches supercolumn conversion to the counter or standard variant
 * depending on whether the CF is a counter CF.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal supercolumns to Thrift SuperColumns, dropping any whose
 * live subcolumns are all deleted; reverses order when requested.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subs = thriftifySubColumns(sc.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter supercolumns to Thrift CounterSuperColumns,
 * dropping any with no live subcolumns; reverses order when requested.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subs = thriftifyCounterSubColumns(sc.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and converts each result into Thrift
 * ColumnOrSuperColumn lists, keyed by the command's row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
// Results are keyed by decorated key internally; look up with the same decoration.
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a ColumnFamily result to Thrift form: subcolumns of the first
 * (super)column when subcolumnsOnly is set, supercolumns for super CFs,
 * plain columns otherwise. Empty/null input yields the shared empty list.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// Query addressed a single supercolumn: unwrap its subcolumns.
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/**
 * Returns the columns of one row selected by the predicate; a single-key
 * wrapper around multigetSliceInternal.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/**
 * Returns, for each requested key, the columns selected by the predicate;
 * delegates to multigetSliceInternal after the access check.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Reads exactly one column (or supercolumn) addressed by column_path.
 *
 * @throws NotFoundException if the row or the addressed column is absent
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// When only a supercolumn is addressed, query it directly; otherwise
// query the column within its (possibly null) supercolumn.
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
/**
 * Counts the columns of one row matching the predicate by fetching the
 * slice and returning its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Counts, for each requested key, the columns matching the predicate by
 * fetching the slices and returning their sizes.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column write to a non-counter CF.
 *
 * @throws InvalidRequestException if validation fails, the super column name
 *         is missing for a super CF, or the value cannot be marshalled
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// Third argument `false` rejects counter CFs: counters use add(), not insert().
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Shared implementation for batch_mutate: for each key, groups the per-CF mutations
 * into at most one standard RowMutation and one counter RowMutation (the latter is
 * wrapped in a CounterMutation), then applies them all at the given consistency level.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
List<String> cfamsSeen = new ArrayList<String>();
List<IMutation> rowMutations = new ArrayList<IMutation>();
String keyspace = state().getKeyspace();
for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
{
ByteBuffer key = mutationEntry.getKey();
// We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
// CounterMutation) because it doesn't follow the same code path
RowMutation rmStandard = null;
RowMutation rmCounter = null;
Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
{
String cfName = columnFamilyMutations.getKey();
// Avoid unneeded authorizations
if (!(cfamsSeen.contains(cfName)))
{
state().hasColumnFamilyAccess(cfName, Permission.WRITE);
cfamsSeen.add(cfName);
}
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
ThriftValidation.validateKey(metadata, key);
RowMutation rm;
// commutative (counter) CFs route to the counter mutation; created lazily per key
if (metadata.getDefaultValidator().isCommutative())
{
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
rm = rmCounter;
}
else
{
rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
rm = rmStandard;
}
for (Mutation mutation : columnFamilyMutations.getValue())
{
ThriftValidation.validateMutation(metadata, mutation);
if (mutation.deletion != null)
{
rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
}
if (mutation.column_or_supercolumn != null)
{
rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
}
}
}
// only submit non-empty mutations
if (rmStandard != null && !rmStandard.isEmpty())
rowMutations.add(rmStandard);
if (rmCounter != null && !rmCounter.isEmpty())
rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
}
doInsert(consistency_level, rowMutations);
}
/** Thrift batch_mutate endpoint; see internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Shared implementation for remove/remove_counter: applies a deletion at the given
 * timestamp. When isCommutativeOp is true the deletion is wrapped in a CounterMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift remove endpoint (non-counter); see internal_remove. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
/**
 * Applies RowMutations through StorageProxy under the request scheduler, translating
 * TimeoutException to the Thrift TimedOutException. This overload also validates the
 * consistency level before mutating.
 */
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
// always release the scheduler slot acquired by schedule()
release();
}
}
/**
 * Variant for generic IMutations (including CounterMutations). Skips empty mutation
 * lists. NOTE(review): unlike the overload above, this one does not validate the
 * consistency level — callers are expected to have validated it already; confirm.
 */
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift describe_keyspace: converts the locally-known keyspace definition (and its
 * column families) to Thrift KsDef form. Throws NotFoundException for unknown keyspaces.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift get_range_slices: reads a contiguous range of rows, addressed either by
 * tokens (start_key == null, exclusive-left Range) or by keys (inclusive Bounds).
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed request
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed request
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/** Converts internal Rows to Thrift KeySlices, honoring a reversed slice predicate. */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
List<KeySlice> keySlices = new ArrayList<KeySlice>(rows.size());
boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
for (Row row : rows)
{
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
keySlices.add(new KeySlice(row.key.key, thriftifiedColumns));
}
return keySlices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift describe_keyspaces: returns definitions for every locally-known keyspace,
 * skipping (with a log message) any whose metadata lookup fails.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
Set<String> keyspaces = DatabaseDescriptor.getTables();
List<KsDef> ksset = new ArrayList<KsDef>();
for (String ks : keyspaces)
{
try
{
ksset.add(describe_keyspace(ks));
}
catch (NotFoundException nfe)
{
// best-effort: a keyspace missing its metadata is skipped, not fatal
logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
}
}
return ksset;
}
/** Returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Returns the Thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Returns the token ranges and their replica endpoints for a non-system keyspace.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Returns the fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/** Returns the snitch class name, unwrapping a DynamicEndpointSnitch if present. */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Returns split tokens dividing [start_token, end_token] into chunks of roughly
 * keys_per_split keys for the given column family.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
// TODO: add keyspace authorization call post CASSANDRA-1425
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, new Range(tf.fromString(start_token), tf.fromString(end_token)), keys_per_split);
List<String> splits = new ArrayList<String>(tokens.size());
for (Token token : tokens)
{
splits.add(tf.toString(token));
}
return splits;
}
/** Authenticates this connection's thread-local ClientState with the given credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources
 * (must be paired with a prior schedule(); callers use try/finally)
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
// Blocks the calling thread until the migration has been applied and announced.
private static void applyMigrationOnStage(final Migration m)
{
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// wait for the migration to finish; unwrap its failure below
f.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
/**
 * Thrift system_add_column_family: validates the CfDef, requires schema agreement,
 * applies the AddColumnFamily migration, and returns the new schema version string.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_drop_column_family: drops a CF from the current keyspace after
 * schema-agreement check; returns the new schema version string.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_add_keyspace: validates the KsDef and its embedded CfDefs, requires
 * schema agreement, applies the AddKeyspace migration, and returns the new schema
 * version string.
 *
 * @throws InvalidRequestException when a CfDef names a different keyspace, or
 *         validation/migration fails (original cause attached via initCause)
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // fixed typo in client-facing message: "CsDef" -> "CfDef"
            throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift system_drop_keyspace: drops the keyspace after a schema-agreement check and
 * returns the new schema version string.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** update an existing keyspace, but do not allow column family modifications.
 * Returns the new schema version string on success. */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
// CF changes must go through system_update_column_family instead
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift system_update_column_family: updates an existing CF definition after
 * schema-agreement check; returns the new schema version string.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Throws SchemaDisagreementException when more than one schema version is live among
 * reachable hosts.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// @see CFMetaData.applyImplicitDefaults().
// NOTE(review): the @see above looks misplaced (it relates to
// system_update_column_family, not truncate) — confirm and relocate.
/**
 * Thrift truncate: blocking truncation of a column family; timeouts and I/O failures
 * are reported to the client as UnavailableException with the cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Thrift set_keyspace: binds this connection's ClientState to a validated keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift describe_schema_versions: schema version -> hosts reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift add: increments a counter column by column.value via a CounterMutation.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift remove_counter: deletes a counter, using the server's current time as the
 * deletion timestamp (counter deletes are server-timestamped).
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift execute_cql_query: decompresses the query bytes (GZIP via Inflater, or NONE
 * as UTF-8) and hands the string to the CQL QueryProcessor.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// NOTE(review): when needsInput() is false, the stale lenRead is reused for
// query.get/setInput below — looks benign only because inflate consumes the
// buffer first; confirm against Inflater semantics.
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared empty-result singletons, returned to avoid allocating on every empty read
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Builds the server with the scheduler configured in DatabaseDescriptor. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns the per-thread session state (keyspace, credentials, scheduling value). */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy under the request scheduler
 * and maps each resulting row's key to its column family (which may be null).
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to Thrift Columns, skipping tombstones and carrying
 * TTLs for expiring columns. Returns a shared empty list for no input.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_SUBCOLUMNS;
}
ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts internal counter subcolumns to Thrift CounterColumns, resolving each
 * counter context to its total and skipping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
if (columns == null || columns.isEmpty())
{
return EMPTY_COUNTER_SUBCOLUMNS;
}
ArrayList<CounterColumn> thriftColumns = new ArrayList<CounterColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
assert column instanceof org.apache.cassandra.db.CounterColumn;
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(thrift_column);
}
return thriftColumns;
}
/**
 * Converts top-level internal columns to Thrift ColumnOrSuperColumns (counter or
 * regular), skipping tombstones and reversing at the end when requested.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
if (column.isMarkedForDelete())
{
continue;
}
if (column instanceof org.apache.cassandra.db.CounterColumn)
{
CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
thriftColumns.add(new ColumnOrSuperColumn().setCounter_column(thrift_column));
}
else
{
Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
if (column instanceof ExpiringColumn)
{
thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
}
thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
}
}
// we have to do the reversing here, since internally we pass results around in ColumnFamily
// objects, which always sort their columns in the "natural" order
// TODO this is inconvenient for direct users of StorageProxy
if (reverseOrder)
Collections.reverse(thriftColumns);
return thriftColumns;
}
/** Dispatches super-column conversion to the counter or regular variant. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
if (isCounterCF)
return thriftifyCounterSuperColumns(columns, reverseOrder);
else
return thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to Thrift SuperColumns, dropping any whose
 * subcolumns are all deleted; reverses at the end when requested.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/** Counter-CF variant of thriftifySuperColumns, producing CounterSuperColumns. */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
for (IColumn column : columns)
{
List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
if (subcolumns.isEmpty())
{
continue;
}
CounterSuperColumn superColumn = new CounterSuperColumn(column.name(), subcolumns);
thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(superColumn));
}
if (reverseOrder)
Collections.reverse(thriftSuperColumns);
return thriftSuperColumns;
}
/**
 * Executes the read commands and converts each command's resulting CF to Thrift
 * columns, keyed by the command's (raw) key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts one ColumnFamily to Thrift form: subcolumns of the first column when a
 * super column was addressed, super columns for super CFs, plain columns otherwise.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
// query addressed a single super column: return its subcolumns only
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
// Thrift endpoint: slice multiple rows in one round trip.
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
// Fetches a single column or super column by exact path.
// Throws NotFoundException when the row or the addressed (super)column does not exist.
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
// when no subcolumn is given, the super column itself is the read target, so the
// query path carries no super column name and the name list holds the super column
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
// we asked for exactly one name, so at most one column can come back
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
// Thrift endpoint: count the columns matching a predicate in one row.
// Implemented by materializing the slice and taking its size.
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift endpoint: per-row column counts for a set of keys, computed by
 * materializing the slices and taking each list's size.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> slice : slices.entrySet())
        counts.put(slice.getKey(), slice.getValue().size());
    return counts;
}
// Validates and applies a single-column insert as one RowMutation.
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
// 'false' rejects counter CFs: counters must go through add(), not insert()
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
// surface bad column values as a client error rather than a server failure
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift endpoint: insert a single column. Delegates to internal_insert.
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations, grouped per row key.
 * Standard and counter mutations for the same key are collected into separate
 * RowMutations because counter writes take a different code path (CounterMutation).
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Set gives O(1) membership checks; the original List made each lookup O(n)
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: check each column family only once
            if (!(cfamsSeen.contains(cfName)))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
                cfamsSeen.add(cfName);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        // only ship mutations that actually accumulated operations
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
// Thrift endpoint: apply a batch of mutations. Delegates to internal_batch_mutate.
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
// Validates and applies a deletion at the given path and timestamp.
// isCommutativeOp selects the counter code path (CounterMutation wrapping).
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift endpoint: delete a column, super column, or row. Not valid for counters (see remove_counter).
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
=======
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
>>>>>>> YOURS
// Thrift endpoint: return the schema definition of one keyspace, converting
// each CFMetaData to its Thrift CfDef form.
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
// Thrift endpoint: slice a contiguous range of rows, addressed either by
// tokens (start_key == null) or by raw keys.
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range: a Range is start-exclusive / end-inclusive
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range: Bounds includes both endpoints
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts range-query rows into Thrift KeySlice objects, honoring the
 * reversal flag carried by the slice predicate (if any).
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    // only a slice_range predicate can request reversed ordering
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean subcolumnsOnly = column_parent.super_column != null;

    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, subcolumnsOnly, reversed)));
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: return schema definitions for every known keyspace.
 * Keyspaces whose metadata cannot be found are logged and skipped.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> definitions = new ArrayList<KsDef>();
    for (String table : DatabaseDescriptor.getTables())
    {
        try
        {
            definitions.add(describe_keyspace(table));
        }
        catch (NotFoundException nfe)
        {
            // a keyspace vanishing between listing and lookup is not fatal
            logger.info("Failed to find metadata for keyspace '" + table + "'. Continuing... ");
        }
    }
    return definitions;
}
// Thrift endpoint: the configured cluster name.
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
// Thrift endpoint: the Thrift API version constant.
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift endpoint: describe the token ring for a keyspace as a list of
 * (start token, end token, endpoints) triples.
 *
 * @throws InvalidRequestException for null, unknown, or system keyspaces
 */
public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory();
    Map<Range, List<String>> rangeToEndpoints = StorageService.instance.getRangeToEndpointMap(keyspace);
    List<TokenRange> result = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : rangeToEndpoints.entrySet())
    {
        Range range = entry.getKey();
        result.add(new TokenRange(tokenFactory.toString(range.left), tokenFactory.toString(range.right), entry.getValue()));
    }
    return result;
}
// Thrift endpoint: fully-qualified class name of the configured partitioner.
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: fully-qualified class name of the configured snitch.
 * A DynamicEndpointSnitch is unwrapped so the underlying snitch is reported.
 */
public String describe_snitch() throws TException
{
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        snitch = ((DynamicEndpointSnitch) snitch).subsnitch;
    return snitch.getClass().getName();
}
/**
 * Thrift endpoint: compute split tokens for a token range so clients
 * (e.g. Hadoop input formats) can partition the range into roughly
 * keys_per_split-sized chunks. Tokens are returned in string form.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);

    List<String> result = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        result.add(tf.toString(token));
    return result;
}
// Thrift endpoint: authenticate this connection's ClientState with the supplied credentials.
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Blocks in the request scheduler until a slot is available; must be
 * paired with a release() in a finally block.
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources.
 * Counterpart of schedule(); returns this request's slot to the scheduler.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
// Blocks until the migration has been applied and announced on the MIGRATION stage.
private static void applyMigrationOnStage(final Migration m)
{
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
// wait for the migration to complete; ExecutionException wraps the real cause
f.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
}
// Thrift endpoint: create a new column family. Synchronized so concurrent schema
// changes on this server are serialized. Returns the new schema version UUID string.
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
// rewrap with cause preserved so the client sees a request error
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Thrift endpoint: drop a column family from the current keyspace.
// Returns the new schema version UUID string.
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Thrift endpoint: create a keyspace together with its column families.
// Returns the new schema version UUID string.
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
// generate a meaningful error if the user setup keyspace and/or column definition incorrectly
for (CfDef cf : ks_def.cf_defs)
{
if (!cf.getKeyspace().equals(ks_def.getName()))
{
throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
}
}
try
{
Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
for (CfDef cfDef : ks_def.cf_defs)
{
ThriftValidation.validateCfDef(cfDef);
cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
}
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def),
cfDefs.toArray(new CFMetaData[cfDefs.size()]));
applyMigrationOnStage(new AddKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Thrift endpoint: drop an entire keyspace. Returns the new schema version UUID string.
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: update an existing keyspace, but do not allow column family modifications.
 * Returns the new schema version UUID string.
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Thrift endpoint: update an existing column family definition.
// Returns the new schema version UUID string.
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the migration itself;
// fill unspecified fields with defaults first (see CFMetaData.applyImplicitDefaults)
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
// Refuses schema changes while live nodes disagree on the schema version.
private void validateSchemaAgreement() throws SchemaDisagreementException
{
// unreachable hosts don't count towards disagreement
Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
if (versions.size() > 1)
throw new SchemaDisagreementException();
}
// Thrift endpoint: truncate (delete all data in) a column family across the cluster.
// Blocks until all live replicas acknowledge; timeouts and I/O failures surface
// as UnavailableException with the cause attached.
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
// Thrift endpoint: bind this connection to a keyspace after validating it exists.
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
// Thrift endpoint: map of schema version -> endpoints reporting that version.
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
// Thrift endpoint: increment/decrement a counter column. Only valid on counter CFs
// (the 'true' flag to validateColumnFamily enforces this).
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
// counter writes are wrapped in a CounterMutation for the counter-specific write path
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
// Thrift endpoint: delete a counter column. Counter deletes use the server's current
// time as the timestamp (client timestamps are meaningless for commutative columns).
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
// Thrift endpoint: execute a CQL query delivered either as raw UTF-8 bytes (NONE)
// or deflate-compressed bytes (GZIP).
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
// NOTE(review): despite the name, this path uses raw Inflater (zlib/deflate),
// not the gzip container format — clients must compress accordingly.
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
while (true)
{
// feed up to 1024 bytes at a time from the ByteBuffer into the inflater
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure: report as a malformed query, preserving the cause
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface {
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// shared empty results, returned instead of null to keep Thrift serialization happy
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information (keyspace, auth, scheduling value);
// Thrift serves each connection on its own thread, so this is per-connection state
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
// Picks up the request scheduler chosen in the node's configuration.
public CassandraServer()
{
    requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
// Returns the calling thread's (i.e. this connection's) session state.
public ClientState state()
{
    return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy, under the request
 * scheduler, and maps each returned row's key to its column family.
 *
 * @throws InvalidRequestException if consistency level is ANY (not valid for reads)
 * @throws TimedOutException if replicas did not answer in time
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    if (consistency_level == ConsistencyLevel.ANY)
        throw new InvalidRequestException("Consistency level any may not be applied to read operations");

    List<Row> rows;
    try
    {
        try
        {
            schedule();
            rows = StorageProxy.read(commands, consistency_level);
        }
        finally
        {
            release();
        }
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }

    Map<DecoratedKey, ColumnFamily> keyToCf = new HashMap<DecoratedKey, ColumnFamily>();
    for (Row r : rows)
        keyToCf.put(r.key, r.cf);
    return keyToCf;
}
/**
 * Converts live internal subcolumns to their Thrift representation, skipping
 * columns marked for delete and carrying the TTL over for expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;

    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        Column converted = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
        if (c instanceof ExpiringColumn)
            converted.setTtl(((ExpiringColumn) c).getTimeToLive());
        result.add(converted);
    }
    return result;
}
/**
 * Converts live internal counter subcolumns to Thrift CounterColumns,
 * resolving each counter's total from its serialized context.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;

    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        assert c instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
    }
    return result;
}
/**
 * Converts live internal columns to Thrift ColumnOrSuperColumn objects,
 * handling both counter columns and regular (possibly expiring) columns.
 *
 * Reversal happens here because internally results travel in ColumnFamily
 * objects, which always sort their columns in the "natural" order.
 * TODO this is inconvenient for direct users of StorageProxy
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        if (c.isMarkedForDelete())
            continue;

        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (c instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(c.name(), CounterContext.instance().total(c.value())));
        }
        else
        {
            Column converted = new Column(c.name()).setValue(c.value()).setTimestamp(c.timestamp());
            if (c instanceof ExpiringColumn)
                converted.setTtl(((ExpiringColumn) c).getTimeToLive());
            cosc.setColumn(converted);
        }
        result.add(cosc);
    }

    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
// Dispatches to the counter- or standard-supercolumn conversion depending on the CF type.
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
// Converts super columns whose children are regular columns; supers left with no
// live subcolumns are dropped from the result entirely.
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        List<Column> subs = thriftifySubColumns(c.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(c.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
// Counter-CF variant of the super column conversion; empty supers are dropped.
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn c : columns)
    {
        List<CounterColumn> subs = thriftifyCounterSubColumns(c.getSubColumns());
        if (subs.isEmpty())
            continue;
        result.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(c.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and converts each row's column family into
 * Thrift columns, keyed by the command's row key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Map<DecoratedKey, ColumnFamily> cfByKey = readColumnFamily(commands, consistency_level);
    Map<ByteBuffer, List<ColumnOrSuperColumn>> result = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    for (ReadCommand cmd : commands)
    {
        ColumnFamily cf = cfByKey.get(StorageService.getPartitioner().decorateKey(cmd.key));
        // Reversed slices come back in comparator order and must be flipped for the client.
        boolean reversed = cmd instanceof SliceFromReadCommand && ((SliceFromReadCommand) cmd).reversed;
        result.put(cmd.key, thriftifyColumnFamily(cf, cmd.queryPath.superColumnName != null, reversed));
    }
    return result;
}
/**
 * Converts an internal ColumnFamily into Thrift ColumnOrSuperColumn objects.
 * With subcolumnsOnly, the single super column in the result is unwrapped and
 * only its subcolumns are converted; otherwise conversion depends on whether
 * the CF is a super (counter or standard) or a regular CF.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;

    if (subcolumnsOnly)
    {
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subs = superColumn.getSubColumns();
        return (subs == null || subs.isEmpty()) ? EMPTY_COLUMNS : thriftifyColumns(subs, reverseOrder);
    }

    if (!cf.isSuper())
        return thriftifyColumns(cf.getSortedColumns(), reverseOrder);

    // A commutative default validator marks a counter CF.
    boolean counterCF = cf.metadata().getDefaultValidator().isCommutative();
    return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, counterCF);
}
// Thrift entry point: single-key slice, implemented as a one-key multiget.
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
// Thrift entry point: authorizes the CF read then delegates to multigetSliceInternal.
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_slice");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
/**
 * Core of get(): reads a single named column (or super column) at the given path.
 *
 * @throws NotFoundException if the row is missing or the named column is absent/deleted
 */
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
    ThriftValidation.validateColumnPath(metadata, column_path);
    // If a concrete column is named, any super_column is part of the path;
    // otherwise the super_column itself is the name being fetched.
    QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
    List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
    ThriftValidation.validateKey(metadata, key);
    ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
    Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
    ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
    if (cf == null)
        throw new NotFoundException();
    List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
    if (tcolumns.isEmpty())
        throw new NotFoundException();
    // A by-name read of one name yields at most one converted column.
    assert tcolumns.size() == 1;
    return tcolumns.get(0);
}
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
<<<<<<< MINE
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
=======
return internal_get(key, column_path, consistency_level);
>>>>>>> YOURS
}
// Thrift entry point: counts matching columns by fetching the slice and sizing it.
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Returns, for each requested key, the number of columns matching the
 * predicate, implemented as a multiget_slice followed by per-row counting.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();

    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Core of insert(): validates the column against the CF metadata and applies
 * a single-column RowMutation at the requested consistency level.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're inserting
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(metadata, column);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    try
    {
        rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    }
    catch (MarshalException e)
    {
        // A value that fails type validation is a client error, not a server fault.
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift entry point; delegates to internal_insert.
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("insert");
    internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Core of batch_mutate(): validates and applies a batch of mutations grouped
 * by row key and column family. Counter and standard mutations for the same
 * key are accumulated into separate RowMutations because counters are applied
 * through a different write path (CounterMutation).
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // HashSet gives O(1) membership checks; the previous List.contains made the
    // authorization dedup O(n) per column-family entry in large batches.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations: check each CF at most once per batch.
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        // Only ship mutations that actually accumulated changes.
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
// Thrift entry point; delegates to internal_batch_mutate.
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("batch_mutate");
    internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Core of remove(): deletes everything under the given path at the given
 * timestamp. Counter deletes (isCommutativeOp) are wrapped in a
 * CounterMutation and validated for a counter-compatible consistency level.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnPathOrParent(metadata, column_path);
    if (isCommutativeOp)
        ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    RowMutation rm = new RowMutation(state().getKeyspace(), key);
    rm.delete(new QueryPath(column_path), timestamp);
    if (isCommutativeOp)
        doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
    else
        doInsert(consistency_level, Arrays.asList(rm));
}
// Thrift entry point; delegates to internal_remove (non-counter variant).
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("remove");
    internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
{
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
=======
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
logger.debug("... timed out");
throw new TimedOutException();
}
}
finally
{
release();
}
}
>>>>>>> YOURS
/**
 * Returns the Thrift definition of the given keyspace, including the
 * definitions of all its column families and its strategy options.
 *
 * @throws NotFoundException if no such keyspace is defined
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();

    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));

    KsDef def = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    def.setStrategy_options(ksm.strategyOptions);
    return def;
}
/**
 * Pages rows over a contiguous key (or token) range, returning for each row a
 * slice of columns matching the predicate. Runs under the request scheduler.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // Token-specified range: parse the token strings into a Range.
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // Key-specified range: convert the keys to tokens and use Bounds.
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
// Converts internal rows into Thrift KeySlices, honoring a reversed slice_range.
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
    {
        List<ColumnOrSuperColumn> cols = thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed);
        slices.add(new KeySlice(row.key.key, cols));
    }
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Lists the Thrift definitions of every known keyspace. A keyspace whose
 * metadata cannot be found is logged and skipped rather than failing the call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
// Reports the configured cluster name.
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
// Reports the Thrift API version constant.
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Describes the token ring for a keyspace: one TokenRange per owned range,
 * listing the endpoints responsible for it.
 *
 * @throws InvalidRequestException for null or non-user keyspaces
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);

    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> ranges = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        ranges.add(new TokenRange(tf.toString(r.left), tf.toString(r.right), entry.getValue()));
    }
    return ranges;
}
// Reports the fully-qualified class name of the configured partitioner.
public String describe_partitioner() throws TException
{
    return StorageService.getPartitioner().getClass().getName();
}
// Reports the snitch class name; a dynamic snitch reports its wrapped subsnitch instead.
public String describe_snitch() throws TException
{
    if (!(DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch))
        return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
    return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
}
/**
 * Computes split tokens for the given CF between start_token and end_token,
 * aiming for roughly keys_per_split keys per split, returned in string form.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);

    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        splits.add(tf.toString(t));
    return splits;
}
// Authenticates this connection's session with the supplied credentials.
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Must be paired with release() in a finally block.
 */
private void schedule()
{
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources; the counterpart of schedule().
 */
private void release()
{
    requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    // Block until the migration has been applied and announced.
    try
    {
        f.get();
    }
    catch (InterruptedException e)
    {
        // Waiting on the migration stage is not expected to be interrupted.
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
}
/**
 * Adds a new column family via a cluster-wide schema migration and returns the
 * new schema version. Configuration and I/O failures are reported to the
 * client as InvalidRequestException with the cause attached.
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops a column family from the current keyspace via a cluster-wide schema
 * migration and returns the new schema version.
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Creates a new keyspace (with any embedded CF definitions) via a
 * cluster-wide schema migration and returns the new schema version.
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        // Validate and convert all CF definitions before building the keyspace metadata.
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Drops an entire keyspace via a cluster-wide schema migration and returns
 * the new schema version.
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/** update an existing keyspace, but do not allow column family modifications.
 * Returns the new schema version on success. */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    // CF changes must go through system_update_column_family instead.
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def));
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Updates an existing column family definition via a cluster-wide schema
 * migration and returns the new schema version.
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // ideally, apply() would happen on the stage with the
        // (fill in unspecified fields before converting the definition)
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
// Throws if live nodes currently report more than one schema version.
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> versions = Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                                                         Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (versions.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Blocks until the column family has been truncated on the cluster.
 * Timeouts and I/O failures are surfaced as UnavailableException with the
 * original failure attached as the cause.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/** Thrift endpoint: binds the per-connection session to the given keyspace. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift endpoint: maps each schema version to the endpoints reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increments a counter column. The write is wrapped in a
 * CounterMutation because counters take a different replication path than
 * regular mutations.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: deletes a counter column. Delegates to internal_remove with
 * the commutative flag set and the coordinator's current time as the timestamp.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: decompresses (if requested) and executes a CQL query.
 *
 * @param query       UTF-8 query text, optionally zlib-compressed
 * @param compression GZIP (zlib/Inflater stream) or NONE
 * @throws InvalidRequestException for malformed compression, encoding or CQL
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    // BUGFIX: only refill the inflater when it has consumed its
                    // current input. The previous code guarded just the lenRead
                    // assignment (misleading indentation) and re-read/reset the
                    // input every iteration, clobbering pending compressed data
                    // for payloads larger than one 1024-byte buffer.
                    if (decompressor.needsInput())
                    {
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                        query.get(inBuffer, 0, lenRead);
                        decompressor.setInput(inBuffer, 0, lenRead);
                    }
                    int lenWrite = 0;
                    while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
}
Unstructured
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of null when a read finds nothing.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Builds the server with the cluster-configured request scheduler. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns this thread's (i.e. this connection's) session state. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy under the request
 * scheduler, returning the resulting rows keyed by decorated key.
 * Rejects CL.ANY (write-only consistency level); timeouts map to
 * TimedOutException, IO errors to RuntimeException.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
// always release the scheduler slot, even on timeout/IO failure
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns into thrift {@code Column}s, dropping
 * tombstoned columns and carrying TTLs over from expiring columns.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    List<Column> converted = new ArrayList<Column>(columns.size());
    for (IColumn col : columns)
    {
        if (!col.isMarkedForDelete())
        {
            Column thriftCol = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
            if (col instanceof ExpiringColumn)
                thriftCol.setTtl(((ExpiringColumn) col).getTimeToLive());
            converted.add(thriftCol);
        }
    }
    return converted;
}
/**
 * Converts internal counter subcolumns into thrift {@code CounterColumn}s,
 * resolving each counter context into its total value and skipping tombstones.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    List<CounterColumn> converted = new ArrayList<CounterColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue;
        // counter CF reads should only ever surface internal CounterColumns
        assert col instanceof org.apache.cassandra.db.CounterColumn;
        converted.add(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
    }
    return converted;
}
/**
 * Converts internal columns into thrift {@code ColumnOrSuperColumn}s,
 * dispatching counter vs. regular columns and dropping tombstones.
 *
 * @param reverseOrder reverse the result for reversed slice queries, since
 *        ColumnFamily always hands back columns in comparator ("natural") order
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (col instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
        }
        else
        {
            Column thriftCol = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
            if (col instanceof ExpiringColumn)
                thriftCol.setTtl(((ExpiringColumn) col).getTimeToLive());
            cosc.setColumn(thriftCol);
        }
        converted.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/** Dispatches super-column conversion: counter CFs need a different thrift shape. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns into thrift {@code SuperColumn} wrappers,
 * omitting super columns whose subcolumns are all deleted.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<Column> subs = thriftifySubColumns(sc.getSubColumns());
        // a super column with no live subcolumns is effectively deleted; skip it
        if (!subs.isEmpty())
            converted.add(new ColumnOrSuperColumn().setSuper_column(new SuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Counter-CF variant of super column conversion: wraps live counter
 * subcolumns in thrift {@code CounterSuperColumn}s.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    List<ColumnOrSuperColumn> converted = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn sc : columns)
    {
        List<CounterColumn> subs = thriftifyCounterSubColumns(sc.getSubColumns());
        // skip super columns whose subcolumns are all tombstoned
        if (!subs.isEmpty())
            converted.add(new ColumnOrSuperColumn().setCounter_super_column(new CounterSuperColumn(sc.name(), subs)));
    }
    if (reverseOrder)
        Collections.reverse(converted);
    return converted;
}
/**
 * Runs the given slice commands and thriftifies each row's column family,
 * keyed by the raw (un-decorated) command key.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a read result into thrift columns, choosing the conversion
 * shape based on whether the query targeted a super column's interior,
 * a super CF, or a standard CF.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
    // nothing read (or everything removed at a higher level)
    if (cf == null || cf.getColumnsMap().size() == 0)
        return EMPTY_COLUMNS;
    if (subcolumnsOnly)
    {
        // query path named a super column: unwrap the single one that was read
        IColumn superColumn = cf.getColumnsMap().values().iterator().next();
        Collection<IColumn> subs = superColumn.getSubColumns();
        return (subs == null || subs.isEmpty())
             ? EMPTY_COLUMNS
             : thriftifyColumns(subs, reverseOrder);
    }
    if (cf.isSuper())
    {
        boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
        return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
    }
    return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
/** Thrift endpoint: single-key slice, implemented as a one-element multiget. */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/** Thrift endpoint: multi-key slice; auth check here, validation in the internal helper. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
>>>>>>> YOURS
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/** Thrift endpoint: single-column read; thin wrapper over internal_get. */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift endpoint: counts matching columns by materializing the full slice
 * and taking its size (no server-side count pushdown here).
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift endpoint: per-key column counts, computed by fetching the full
 * slices and sizing each result list.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> slices = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : slices.entrySet())
        counts.put(entry.getKey(), entry.getValue().size());
    return counts;
}
/**
 * Validates and applies a single-column write as a RowMutation.
 * MarshalException from comparator validation maps to InvalidRequestException.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: single-column write; thin wrapper over internal_insert. */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates a batch of mutations and applies them in one doInsert call.
 * Per key, counter-CF mutations are split into a separate RowMutation
 * (wrapped in a CounterMutation) because counters take a different
 * replication code path than standard writes.
 *
 * Improvement: column-family names already authorized are tracked in a
 * HashSet instead of a List, making the membership check O(1) instead of
 * O(n) per CF entry. Behavior is unchanged.
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations
            if (cfamsSeen.add(cfName))
            {
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            }
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/** Thrift endpoint: batch write; thin wrapper over internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at the given path/timestamp.
 *
 * @param isCommutativeOp true for counter deletes; enforces commutative CF
 *        validation and routes the mutation through CounterMutation
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: non-counter delete; thin wrapper over internal_remove. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
=======
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
>>>>>>> YOURS
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift endpoint: returns the definition of one keyspace, including all of
 * its column family definitions.
 *
 * @throws NotFoundException if the keyspace does not exist
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift endpoint: sequential range scan over keys. The range may be given
 * as tokens (start_key == null) or as raw keys; token ranges use Range,
 * key ranges use Bounds (inclusive of both ends).
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
logger.debug("range_slice");
String keyspace = state().getKeyspace();
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(range);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
List<Row> rows;
try
{
IPartitioner p = StorageService.getPartitioner();
AbstractBounds bounds;
if (range.start_key == null)
{
// token-addressed range
Token.TokenFactory tokenFactory = p.getTokenFactory();
Token left = tokenFactory.fromString(range.start_token);
Token right = tokenFactory.fromString(range.end_token);
bounds = new Range(left, right);
}
else
{
// key-addressed range
bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
}
try
{
schedule();
rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
}
finally
{
release();
}
assert rows != null;
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Wraps each scanned row's converted columns in a {@code KeySlice},
 * honoring the predicate's reversed flag.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, column_parent.super_column != null, reversed)));
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: returns definitions for all known keyspaces, skipping any
 * whose metadata vanished between listing and lookup.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            // raced with a keyspace drop; best effort, keep going
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
/** Thrift endpoint: returns the configured cluster name. */
public String describe_cluster_name() throws TException
{
return DatabaseDescriptor.getClusterName();
}
/** Thrift endpoint: returns the Thrift API version constant. */
public String describe_version() throws TException
{
return Constants.VERSION;
}
/**
 * Thrift endpoint: returns the token ring for a non-system keyspace as
 * (start_token, end_token, endpoints) triples.
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
/** Thrift endpoint: returns the fully-qualified partitioner class name. */
public String describe_partitioner() throws TException
{
return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: returns the snitch class name; for a dynamic snitch,
 * reports the wrapped (underlying) snitch instead of the wrapper.
 */
public String describe_snitch() throws TException
{
if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch)
return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName();
return DatabaseDescriptor.getEndpointSnitch().getClass().getName();
}
/**
 * Thrift endpoint: computes split tokens inside (start_token, end_token]
 * sized to roughly keys_per_split keys each, returned as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
    Range range = new Range(tf.fromString(start_token), tf.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, range, keys_per_split);
    List<String> splits = new ArrayList<String>(tokens.size());
    for (Token t : tokens)
        splits.add(tf.toString(t));
    return splits;
}
/** Thrift endpoint: authenticates this connection's session with the given credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
state().login(auth_request.getCredentials());
}
/**
 * Schedule the current thread for access to the required services.
 * Must be paired with {@link #release()} (callers use try/finally).
 */
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Release count for the used up resources; counterpart of {@link #schedule()}.
 */
private void release()
{
requestScheduler.release();
}
// helper method to apply migration on the migration stage. typical migration failures will throw an
// InvalidRequestException. atypical failures will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
// run apply+announce on the MIGRATION stage and block until done
Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
{
public Object call() throws Exception
{
m.apply();
m.announce();
return null;
}
});
try
{
f.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
// NOTE(review): the migration's real failure is e.getCause(); callers see it wrapped
throw new RuntimeException(e);
}
}
/**
 * Thrift endpoint: adds a new column family via a schema migration.
 * Requires WRITE access to the CF list and cluster-wide schema agreement.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("add_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
return DatabaseDescriptor.getDefsVersion().toString();
}
// Configuration and I/O problems are reported to the client as InvalidRequestException,
// with the original exception preserved as the cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: drops a column family from the current keyspace via a schema migration.
 * Requires WRITE access to the CF list and cluster-wide schema agreement.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
return DatabaseDescriptor.getDefsVersion().toString();
}
// Translate internal failures into a client-visible InvalidRequestException, keeping the cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: creates a new keyspace (and any contained column families)
 * via a schema migration. Requires WRITE access to the keyspace list and
 * cluster-wide schema agreement.
 * @return the new schema version UUID as a string
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            // Fixed typo in the client-facing message: "CsDef" -> "CfDef".
            throw new InvalidRequestException("CfDef (" + cf.getName() + ") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    // Translate internal failures into a client-visible InvalidRequestException, keeping the cause.
    catch (ConfigurationException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drops an entire keyspace via a schema migration.
 * Requires WRITE access to the keyspace list and cluster-wide schema agreement.
 * @return the new schema version UUID as a string
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("drop_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
validateSchemaAgreement();
try
{
applyMigrationOnStage(new DropKeyspace(keyspace));
return DatabaseDescriptor.getDefsVersion().toString();
}
// Translate internal failures into a client-visible InvalidRequestException, keeping the cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/** Update an existing keyspace's replication settings, but do not allow column
 * family modifications (the request must carry no CfDefs).
 * @return the new schema version UUID as a string
 * @throws SchemaDisagreementException if live nodes report more than one schema version */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_keyspace");
state().hasKeyspaceListAccess(Permission.WRITE);
ThriftValidation.validateTable(ks_def.name);
if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
validateSchemaAgreement();
try
{
ThriftValidation.validateKsDef(ks_def);
KSMetaData ksm = new KSMetaData(ks_def.name,
AbstractReplicationStrategy.getClass(ks_def.strategy_class),
KSMetaData.backwardsCompatibleOptions(ks_def));
applyMigrationOnStage(new UpdateKeyspace(ksm));
return DatabaseDescriptor.getDefsVersion().toString();
}
// Translate internal failures into a client-visible InvalidRequestException, keeping the cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Thrift endpoint: updates an existing column family's definition via a schema migration.
 * The CF identified by (keyspace, name) must already exist.
 * @return the new schema version UUID as a string
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
logger.debug("update_column_family");
state().hasColumnFamilyListAccess(Permission.WRITE);
ThriftValidation.validateCfDef(cf_def);
if (cf_def.keyspace == null || cf_def.name == null)
throw new InvalidRequestException("Keyspace and CF name must be set.");
CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
if (oldCfm == null)
throw new InvalidRequestException("Could not find column family definition to modify.");
validateSchemaAgreement();
try
{
// ideally, apply() would happen on the stage with the
// Fill in any fields the client omitted before converting and applying the update.
CFMetaData.applyImplicitDefaults(cf_def);
UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
applyMigrationOnStage(update);
return DatabaseDescriptor.getDefsVersion().toString();
}
// Translate internal failures into a client-visible InvalidRequestException, keeping the cause.
catch (ConfigurationException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
catch (IOException e)
{
InvalidRequestException ex = new InvalidRequestException(e.getMessage());
ex.initCause(e);
throw ex;
}
}
/**
 * Verifies that all reachable nodes agree on the schema version.
 * @throws SchemaDisagreementException if live nodes report more than one version
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    Map<String, List<String>> reported = StorageProxy.describeSchemaVersions();
    // unreachable hosts don't count towards disagreement
    Map<String, List<String>> live = Maps.filterKeys(reported,
                                                     Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (live.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: truncates (removes all data from) a column family in the
 * current keyspace, blocking until all replicas acknowledge.
 * Timeouts and I/O failures surface as UnavailableException with the cause attached.
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
logger.debug("truncating {} in {}", cfname, state().getKeyspace());
state().hasColumnFamilyAccess(cfname, Permission.WRITE);
try
{
schedule();
StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
}
catch (TimeoutException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
catch (IOException e)
{
throw (UnavailableException) new UnavailableException().initCause(e);
}
finally
{
release();
}
}
/** Thrift endpoint: validates the keyspace exists, then binds it to this connection's session state. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
ThriftValidation.validateTable(keyspace);
state().setKeyspace(keyspace);
}
/** Thrift endpoint: returns a map of schema version -> list of endpoints reporting that version. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
logger.debug("checking schema agreement");
return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: increments a counter column by column.value.
 * The column family must be a counter CF; for super CFs, the super column name
 * is mandatory. The write is wrapped in a CounterMutation before being applied.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("add");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
String keyspace = state().getKeyspace();
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're adding
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
RowMutation rm = new RowMutation(keyspace, key);
try
{
rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: removes a counter column. Delegates to internal_remove with
 * isCommutativeOp=true and the server's current time as the delete timestamp.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("remove_counter");
internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: decompresses (if requested) and executes a CQL query string.
 * Supports GZIP (zlib/Inflater) and NONE compression; the decoded string is
 * assumed to be UTF-8. Malformed CQL surfaces as InvalidRequestException.
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
String queryString = null;
// Decompress the query string.
try
{
switch (compression)
{
case GZIP:
ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
Inflater decompressor = new Inflater();
int lenRead = 0;
// Feed the compressed buffer to the Inflater in 1024-byte chunks, draining
// the output after each feed until the stream is finished.
// NOTE(review): input is refilled on every iteration, not only when
// needsInput() is true; this appears safe because the inner inflate loop
// drains until needsInput() or finished() — confirm before restructuring.
while (true)
{
if (decompressor.needsInput())
lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
query.get(inBuffer, 0, lenRead);
decompressor.setInput(inBuffer, 0, lenRead);
int lenWrite = 0;
while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
byteArray.write(outBuffer, 0, lenWrite);
if (decompressor.finished())
break;
}
decompressor.end();
queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
break;
case NONE:
try
{
queryString = ByteBufferUtil.string(query);
}
catch (CharacterCodingException ex)
{
throw new InvalidRequestException(ex.getMessage());
}
break;
}
}
// NOTE(review): this branch actually fires while *inflating*; the message says "deflating".
catch (DataFormatException e)
{
throw new InvalidRequestException("Error deflating query string.");
}
catch (UnsupportedEncodingException e)
{
throw new InvalidRequestException("Unknown query string encoding.");
}
try
{
return QueryProcessor.process(queryString, state());
}
catch (RecognitionException e)
{
// ANTLR parse failure: report as a malformed query, preserving the cause.
InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
ire.initCause(e);
throw ire;
}
}
// main method moved to CassandraDaemon
}/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.thrift;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.antlr.runtime.RecognitionException;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.locator.*;
import org.apache.cassandra.scheduler.IRequestScheduler;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
public class CassandraServer implements Cassandra.Iface
{
private static Logger logger = LoggerFactory.getLogger(CassandraServer.class);
// Shared immutable empty results, returned instead of allocating new lists.
private final static List<ColumnOrSuperColumn> EMPTY_COLUMNS = Collections.emptyList();
private final static List<Column> EMPTY_SUBCOLUMNS = Collections.emptyList();
private final static List<CounterColumn> EMPTY_COUNTER_SUBCOLUMNS = Collections.emptyList();
// thread local state containing session information
public final ThreadLocal<ClientState> clientState = new ThreadLocal<ClientState>()
{
@Override
public ClientState initialValue()
{
return new ClientState();
}
};
/*
* RequestScheduler to perform the scheduling of incoming requests
*/
private final IRequestScheduler requestScheduler;
/** Creates the server, picking up the request scheduler from the node configuration. */
public CassandraServer()
{
requestScheduler = DatabaseDescriptor.getRequestScheduler();
}
/** Returns this thread's (i.e. this client connection's) session state. */
public ClientState state()
{
return clientState.get();
}
/**
 * Executes the given read commands through StorageProxy and maps each resulting
 * row's key to its column family. ConsistencyLevel.ANY is rejected for reads.
 * Scheduling is bracketed around the read; timeouts become TimedOutException.
 */
protected Map<DecoratedKey, ColumnFamily> readColumnFamily(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
// TODO - Support multiple column families per row, right now row only contains 1 column family
Map<DecoratedKey, ColumnFamily> columnFamilyKeyMap = new HashMap<DecoratedKey, ColumnFamily>();
if (consistency_level == ConsistencyLevel.ANY)
{
throw new InvalidRequestException("Consistency level any may not be applied to read operations");
}
List<Row> rows;
try
{
try
{
schedule();
rows = StorageProxy.read(commands, consistency_level);
}
finally
{
release();
}
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
for (Row row: rows)
{
columnFamilyKeyMap.put(row.key, row.cf);
}
return columnFamilyKeyMap;
}
/**
 * Converts internal subcolumns to thrift Columns, skipping tombstones and
 * carrying over the TTL for expiring columns. Returns a shared empty list
 * when there is nothing to convert.
 */
public List<Column> thriftifySubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUBCOLUMNS;
    ArrayList<Column> result = new ArrayList<Column>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue;
        Column thriftCol = new Column(col.name()).setValue(col.value()).setTimestamp(col.timestamp());
        if (col instanceof ExpiringColumn)
            thriftCol.setTtl(((ExpiringColumn) col).getTimeToLive());
        result.add(thriftCol);
    }
    return result;
}
/**
 * Converts internal counter subcolumns to thrift CounterColumns, skipping
 * tombstones. The counter context is collapsed to its total value.
 */
public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_COUNTER_SUBCOLUMNS;
    ArrayList<CounterColumn> result = new ArrayList<CounterColumn>(columns.size());
    for (IColumn col : columns)
    {
        if (col.isMarkedForDelete())
            continue;
        assert col instanceof org.apache.cassandra.db.CounterColumn;
        result.add(new CounterColumn(col.name(), CounterContext.instance().total(col.value())));
    }
    return result;
}
/**
 * Converts internal columns to thrift ColumnOrSuperColumn wrappers, skipping
 * tombstones. Counter columns are collapsed to their total; expiring columns
 * carry their TTL. Optionally reverses the result for descending queries.
 */
public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        if (column.isMarkedForDelete())
            continue;
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        if (column instanceof org.apache.cassandra.db.CounterColumn)
        {
            cosc.setCounter_column(new CounterColumn(column.name(), CounterContext.instance().total(column.value())));
        }
        else
        {
            Column thriftCol = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
            if (column instanceof ExpiringColumn)
                thriftCol.setTtl(((ExpiringColumn) column).getTimeToLive());
            cosc.setColumn(thriftCol);
        }
        result.add(cosc);
    }
    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    // TODO this is inconvenient for direct users of StorageProxy
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/** Dispatches to the counter or regular super-column conversion depending on the CF type. */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
{
    return isCounterCF
         ? thriftifyCounterSuperColumns(columns, reverseOrder)
         : thriftifySuperColumns(columns, reverseOrder);
}
/**
 * Converts internal super columns to thrift SuperColumn wrappers. Super columns
 * whose subcolumns are all deleted produce nothing and are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
        if (!subcolumns.isEmpty())
        {
            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
            cosc.setSuper_column(new SuperColumn(column.name(), subcolumns));
            result.add(cosc);
        }
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Converts internal counter super columns to thrift CounterSuperColumn wrappers.
 * Super columns with no live subcolumns are dropped entirely.
 */
private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    ArrayList<ColumnOrSuperColumn> result = new ArrayList<ColumnOrSuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
        if (!subcolumns.isEmpty())
        {
            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
            cosc.setCounter_super_column(new CounterSuperColumn(column.name(), subcolumns));
            result.add(cosc);
        }
    }
    if (reverseOrder)
        Collections.reverse(result);
    return result;
}
/**
 * Runs the given read commands and converts each row's column family into
 * thrift ColumnOrSuperColumn lists, keyed by the (undecorated) row key.
 * Reversal is applied when the command was a reversed slice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
for (ReadCommand command: commands)
{
ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).reversed;
List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
columnFamiliesMap.put(command.key, thriftifiedColumns);
}
return columnFamiliesMap;
}
/**
 * Converts a column family result to thrift structures.
 * With subcolumnsOnly (a query inside one super column), only the first column's
 * subcolumns are converted; otherwise super and standard CFs are dispatched to
 * their respective converters. Null/empty input yields a shared empty list.
 */
private List<ColumnOrSuperColumn> thriftifyColumnFamily(ColumnFamily cf, boolean subcolumnsOnly, boolean reverseOrder)
{
if (cf == null || cf.getColumnsMap().size() == 0)
return EMPTY_COLUMNS;
if (subcolumnsOnly)
{
IColumn column = cf.getColumnsMap().values().iterator().next();
Collection<IColumn> subcolumns = column.getSubColumns();
if (subcolumns == null || subcolumns.isEmpty())
return EMPTY_COLUMNS;
else
return thriftifyColumns(subcolumns, reverseOrder);
}
if (cf.isSuper())
{
boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
}
else
{
return thriftifyColumns(cf.getSortedColumns(), reverseOrder);
}
}
/**
 * Thrift endpoint: returns a slice of columns for a single key.
 * Implemented as a single-key multiget_slice.
 */
public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), Collections.singletonList(key), column_parent, predicate, consistency_level).get(key);
}
/** Thrift endpoint: returns a slice of columns for each of the given keys. */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_slice");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return multigetSliceInternal(state().getKeyspace(), keys, column_parent, predicate, consistency_level);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, predicate);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
>>>>>>> YOURS
List<ReadCommand> commands = new ArrayList<ReadCommand>();
if (predicate.column_names != null)
{
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
}
}
else
{
SliceRange range = predicate.slice_range;
for (ByteBuffer key: keys)
{
ThriftValidation.validateKey(metadata, key);
commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
}
}
return getSlice(commands, consistency_level);
}
private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnPath(keyspace, column_path);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family);
ThriftValidation.validateColumnPath(metadata, column_path);
>>>>>>> YOURS
QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
ThriftValidation.validateKey(metadata, key);
ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistency_level);
ColumnFamily cf = cfamilies.get(StorageService.getPartitioner().decorateKey(command.key));
if (cf == null)
throw new NotFoundException();
List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
if (tcolumns.isEmpty())
throw new NotFoundException();
assert tcolumns.size() == 1;
return tcolumns.get(0);
}
/** Thrift endpoint: returns a single column or super column. Delegates to internal_get. */
public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException
{
logger.debug("get");
return internal_get(key, column_path, consistency_level);
}
/**
 * Thrift endpoint: counts the columns matching the predicate for one key.
 * Implemented by materializing the slice and taking its size.
 */
public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("get_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
return get_slice(key, column_parent, predicate, consistency_level).size();
}
/**
 * Thrift endpoint: per-key column counts for the given predicate.
 * Implemented by materializing the slices via multigetSliceInternal and taking sizes.
 */
public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("multiget_count");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
Map<ByteBuffer, Integer> counts = new HashMap<ByteBuffer, Integer>();
Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
counts.put(cf.getKey(), cf.getValue().size());
}
return counts;
}
/**
 * Validates and applies a single-column insert as a RowMutation.
 * For super CFs the super column name is mandatory. Marshalling failures
 * (value does not match the comparator/validator) become InvalidRequestException.
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnParent(metadata, column_parent);
// SuperColumn field is usually optional, but not when we're inserting
if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
{
throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
}
ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
ThriftValidation.validateColumnData(metadata, column);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
try
{
rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
}
catch (MarshalException e)
{
throw new InvalidRequestException(e.getMessage());
}
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: inserts/updates a single column. Delegates to internal_insert. */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("insert");
internal_insert(key, column_parent, column, consistency_level);
}
/**
 * Validates and applies a batch of mutations, keyed by row key then column family.
 * Standard and counter mutations for a row are accumulated into separate
 * RowMutations because counters take a different write path (CounterMutation).
 */
private void internal_batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    // Column families already authorized for write access. A HashSet gives O(1)
    // membership checks where the original List.contains scan was O(n) per CF.
    Set<String> cfamsSeen = new HashSet<String>();
    List<IMutation> rowMutations = new ArrayList<IMutation>();
    String keyspace = state().getKeyspace();
    for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> mutationEntry: mutation_map.entrySet())
    {
        ByteBuffer key = mutationEntry.getKey();
        // We need to separate row mutation for standard cf and counter cf (that will be encapsulated in a
        // CounterMutation) because it doesn't follow the same code path
        RowMutation rmStandard = null;
        RowMutation rmCounter = null;
        Map<String, List<Mutation>> columnFamilyToMutations = mutationEntry.getValue();
        for (Map.Entry<String, List<Mutation>> columnFamilyMutations : columnFamilyToMutations.entrySet())
        {
            String cfName = columnFamilyMutations.getKey();
            // Avoid unneeded authorizations
            if (cfamsSeen.add(cfName))
                state().hasColumnFamilyAccess(cfName, Permission.WRITE);
            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName);
            ThriftValidation.validateKey(metadata, key);
            RowMutation rm;
            if (metadata.getDefaultValidator().isCommutative())
            {
                ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
                rmCounter = rmCounter == null ? new RowMutation(keyspace, key) : rmCounter;
                rm = rmCounter;
            }
            else
            {
                rmStandard = rmStandard == null ? new RowMutation(keyspace, key) : rmStandard;
                rm = rmStandard;
            }
            for (Mutation mutation : columnFamilyMutations.getValue())
            {
                ThriftValidation.validateMutation(metadata, mutation);
                if (mutation.deletion != null)
                {
                    rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
                }
                if (mutation.column_or_supercolumn != null)
                {
                    rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
                }
            }
        }
        if (rmStandard != null && !rmStandard.isEmpty())
            rowMutations.add(rmStandard);
        if (rmCounter != null && !rmCounter.isEmpty())
            rowMutations.add(new org.apache.cassandra.db.CounterMutation(rmCounter, consistency_level));
    }
    doInsert(consistency_level, rowMutations);
}
/** Thrift endpoint: applies a batch of mutations. Delegates to internal_batch_mutate. */
public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("batch_mutate");
internal_batch_mutate(mutation_map, consistency_level);
}
/**
 * Validates and applies a deletion at column_path with the given timestamp.
 * When isCommutativeOp is true (counter CFs) the write is additionally checked
 * for a commutative-compatible consistency level and wrapped in a CounterMutation.
 */
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws InvalidRequestException, UnavailableException, TimedOutException
{
state().hasColumnFamilyAccess(column_path.column_family, Permission.WRITE);
CFMetaData metadata = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_path.column_family, isCommutativeOp);
ThriftValidation.validateKey(metadata, key);
ThriftValidation.validateColumnPathOrParent(metadata, column_path);
if (isCommutativeOp)
ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
RowMutation rm = new RowMutation(state().getKeyspace(), key);
rm.delete(new QueryPath(column_path), timestamp);
if (isCommutativeOp)
doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
else
doInsert(consistency_level, Arrays.asList(rm));
}
/** Thrift endpoint: removes a (non-counter) column or row slice. Delegates to internal_remove. */
public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
logger.debug("remove");
internal_remove(key, column_path, timestamp, consistency_level, false);
}
<<<<<<< MINE
private void doInsert(ConsistencyLevel consistency_level, List<RowMutation> mutations) throws UnavailableException, TimedOutException, InvalidRequestException
=======
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations) throws UnavailableException, TimedOutException
>>>>>>> YOURS
{
ThriftValidation.validateConsistencyLevel(state().getKeyspace(), consistency_level);
try
{
schedule();
try
{
if (!mutations.isEmpty())
StorageProxy.mutate(mutations, consistency_level);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
}
finally
{
release();
}
}
/**
 * Thrift endpoint: returns the definition (strategy, options, CF definitions)
 * of the named keyspace, or NotFoundException if it does not exist.
 */
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException
{
state().hasKeyspaceListAccess(Permission.READ);
KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
if (ksm == null)
throw new NotFoundException();
List<CfDef> cfDefs = new ArrayList<CfDef>();
for (CFMetaData cfm : ksm.cfMetaData().values())
cfDefs.add(CFMetaData.convertToThrift(cfm));
KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
ksdef.setStrategy_options(ksm.strategyOptions);
return ksdef;
}
/**
 * Thrift endpoint: fetch a contiguous range of rows and slice each one with the
 * given predicate.
 *
 * The range may be addressed either by tokens (start_key == null) or by keys;
 * the two cases produce different bounds types below.
 */
public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TException, TimedOutException
{
    logger.debug("range_slice");
    String keyspace = state().getKeyspace();
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);
    ThriftValidation.validateKeyRange(range);
    ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
    List<Row> rows;
    try
    {
        IPartitioner p = StorageService.getPartitioner();
        AbstractBounds bounds;
        if (range.start_key == null)
        {
            // token-addressed range: parse both endpoints with the partitioner's factory
            Token.TokenFactory tokenFactory = p.getTokenFactory();
            Token left = tokenFactory.fromString(range.start_token);
            Token right = tokenFactory.fromString(range.end_token);
            bounds = new Range(left, right);
        }
        else
        {
            // key-addressed range: map the keys to tokens
            bounds = new Bounds(p.getToken(range.start_key), p.getToken(range.end_key));
        }
        try
        {
            // hold a scheduler slot only for the duration of the actual read
            schedule();
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, predicate, bounds, range.count), consistency_level);
        }
        finally
        {
            release();
        }
        assert rows != null;
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return thriftifyKeySlices(rows, column_parent, predicate);
}
/**
 * Converts internal Rows into thrift KeySlices, honoring the predicate's
 * slice-range reversal flag when thriftifying each row's columns.
 */
private List<KeySlice> thriftifyKeySlices(List<Row> rows, ColumnParent column_parent, SlicePredicate predicate)
{
    boolean reversed = predicate.slice_range != null && predicate.slice_range.reversed;
    boolean isSuper = column_parent.super_column != null;
    List<KeySlice> slices = new ArrayList<KeySlice>(rows.size());
    for (Row row : rows)
        slices.add(new KeySlice(row.key.key, thriftifyColumnFamily(row.cf, isSuper, reversed)));
    return slices;
}
public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
logger.debug("scan");
state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
String keyspace = state().getKeyspace();
<<<<<<< MINE
ThriftValidation.validateColumnParent(keyspace, column_parent);
ThriftValidation.validatePredicate(keyspace, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(keyspace, column_parent.column_family, index_clause);
ThriftValidation.validateConsistencyLevel(keyspace, consistency_level);
=======
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
>>>>>>> YOURS
List<Row> rows;
try
{
rows = StorageProxy.scan(keyspace, column_parent.column_family, index_clause, column_predicate, consistency_level);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
catch (TimeoutException e)
{
throw new TimedOutException();
}
return thriftifyKeySlices(rows, column_parent, column_predicate);
}
/**
 * Thrift endpoint: return definitions for all keyspaces. A keyspace whose
 * metadata cannot be found is logged and skipped rather than failing the call.
 */
public List<KsDef> describe_keyspaces() throws TException, InvalidRequestException
{
    state().hasKeyspaceListAccess(Permission.READ);
    List<KsDef> defs = new ArrayList<KsDef>();
    for (String ks : DatabaseDescriptor.getTables())
    {
        try
        {
            defs.add(describe_keyspace(ks));
        }
        catch (NotFoundException nfe)
        {
            logger.info("Failed to find metadata for keyspace '" + ks + "'. Continuing... ");
        }
    }
    return defs;
}
/** Thrift endpoint: the configured cluster name. */
public String describe_cluster_name() throws TException
{
    return DatabaseDescriptor.getClusterName();
}
/** Thrift endpoint: the Thrift API version string. */
public String describe_version() throws TException
{
    return Constants.VERSION;
}
/**
 * Thrift endpoint: describe the token ring for a keyspace as a list of
 * (start token, end token, endpoints) entries.
 *
 * @throws InvalidRequestException for null or non-user keyspaces
 */
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
    if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
        throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    List<TokenRange> result = new ArrayList<TokenRange>();
    for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
    {
        Range r = entry.getKey();
        result.add(new TokenRange(factory.toString(r.left), factory.toString(r.right), entry.getValue()));
    }
    return result;
}
/** Thrift endpoint: fully-qualified class name of the configured partitioner. */
public String describe_partitioner() throws TException
{
    return StorageService.getPartitioner().getClass().getName();
}
/**
 * Thrift endpoint: class name of the configured snitch. When the dynamic
 * snitch wrapper is in use, report the wrapped (sub)snitch's class instead.
 */
public String describe_snitch() throws TException
{
    Object snitch = DatabaseDescriptor.getEndpointSnitch();
    if (snitch instanceof DynamicEndpointSnitch)
        return ((DynamicEndpointSnitch) snitch).subsnitch.getClass().getName();
    return snitch.getClass().getName();
}
/**
 * Thrift endpoint: compute split points inside (start_token, end_token] for a
 * column family, aiming at roughly keys_per_split keys per split, and return
 * them as token strings.
 */
public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split)
throws TException, InvalidRequestException
{
    // TODO: add keyspace authorization call post CASSANDRA-1425
    Token.TokenFactory factory = StorageService.getPartitioner().getTokenFactory();
    Range span = new Range(factory.fromString(start_token), factory.fromString(end_token));
    List<Token> tokens = StorageService.instance.getSplits(state().getKeyspace(), cfName, span, keys_per_split);
    List<String> result = new ArrayList<String>(tokens.size());
    for (Token token : tokens)
        result.add(factory.toString(token));
    return result;
}
/** Thrift endpoint: authenticate this connection with the supplied credentials. */
public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException
{
    state().login(auth_request.getCredentials());
}
/**
 * Queue the current thread on the request scheduler until it is granted
 * access to the required services; must be paired with release().
 */
private void schedule()
{
    requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue());
}
/**
 * Return the scheduler slot taken by schedule(); call from a finally block.
 */
private void release()
{
    requestScheduler.release();
}
// Helper to apply a schema migration on the MIGRATION stage and block for completion.
// Typical migration failures will throw an InvalidRequestException; atypical failures
// will throw a RuntimeException.
private static void applyMigrationOnStage(final Migration m)
{
    // apply + announce run serially on the migration stage so schema changes are ordered
    Future f = StageManager.getStage(Stage.MIGRATION).submit(new Callable()
    {
        public Object call() throws Exception
        {
            m.apply();
            m.announce();
            return null;
        }
    });
    try
    {
        // block until the migration has been applied and announced
        f.get();
    }
    catch (InterruptedException e)
    {
        // not expected to be interrupted while waiting on the migration stage
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        // failures inside the Callable surface here; rethrow unchecked
        throw new RuntimeException(e);
    }
}
/**
 * Thrift endpoint: add a new column family via a schema migration.
 *
 * @return the resulting schema version id as a string
 * @throws InvalidRequestException on validation or migration failure (cause preserved)
 */
public synchronized String system_add_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new AddColumnFamily(CFMetaData.convertToCFMetaData(cf_def)));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drop a column family from the current keyspace via a schema migration.
 *
 * @return the resulting schema version id as a string
 * @throws InvalidRequestException on migration failure (cause preserved)
 */
public synchronized String system_drop_column_family(String column_family)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropColumnFamily(state().getKeyspace(), column_family));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: create a keyspace, together with any column families defined in it,
 * via a schema migration.
 *
 * @return the resulting schema version id as a string
 * @throws InvalidRequestException if any CF names a different keyspace, or on
 *         validation/migration failure (cause preserved)
 */
public synchronized String system_add_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("add_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    // generate a meaningful error if the user setup keyspace and/or column definition incorrectly
    for (CfDef cf : ks_def.cf_defs)
    {
        if (!cf.getKeyspace().equals(ks_def.getName()))
        {
            throw new InvalidRequestException("CsDef (" + cf.getName() +") had a keyspace definition that did not match KsDef");
        }
    }
    try
    {
        // validate and convert every CF definition before building the keyspace metadata
        Collection<CFMetaData> cfDefs = new ArrayList<CFMetaData>(ks_def.cf_defs.size());
        for (CfDef cfDef : ks_def.cf_defs)
        {
            ThriftValidation.validateCfDef(cfDef);
            cfDefs.add(CFMetaData.convertToCFMetaData(cfDef));
        }
        ThriftValidation.validateKsDef(ks_def);
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def),
                                        cfDefs.toArray(new CFMetaData[cfDefs.size()]));
        applyMigrationOnStage(new AddKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: drop an entire keyspace via a schema migration.
 *
 * @return the resulting schema version id as a string
 * @throws InvalidRequestException on migration failure (cause preserved)
 */
public synchronized String system_drop_keyspace(String keyspace)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("drop_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    validateSchemaAgreement();
    try
    {
        applyMigrationOnStage(new DropKeyspace(keyspace));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: update an existing keyspace's strategy class/options via a schema
 * migration. Column family modifications are explicitly rejected on this path.
 *
 * @return the resulting schema version id as a string
 * @throws SchemaDisagreementException if cluster schema versions disagree
 */
public synchronized String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");
    state().hasKeyspaceListAccess(Permission.WRITE);
    ThriftValidation.validateTable(ks_def.name);
    if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
        throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");
    validateSchemaAgreement();
    try
    {
        ThriftValidation.validateKsDef(ks_def);
        // note: no CF metadata is passed — only the keyspace-level settings are updated
        KSMetaData ksm = new KSMetaData(ks_def.name,
                                        AbstractReplicationStrategy.getClass(ks_def.strategy_class),
                                        KSMetaData.backwardsCompatibleOptions(ks_def));
        applyMigrationOnStage(new UpdateKeyspace(ksm));
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Thrift endpoint: modify an existing column family definition via a schema migration.
 *
 * @return the resulting schema version id as a string
 * @throws InvalidRequestException if keyspace/CF name is unset, the CF does not exist,
 *         or the migration fails (cause preserved)
 */
public synchronized String system_update_column_family(CfDef cf_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_column_family");
    state().hasColumnFamilyListAccess(Permission.WRITE);
    ThriftValidation.validateCfDef(cf_def);
    if (cf_def.keyspace == null || cf_def.name == null)
        throw new InvalidRequestException("Keyspace and CF name must be set.");
    CFMetaData oldCfm = DatabaseDescriptor.getCFMetaData(CFMetaData.getId(cf_def.keyspace, cf_def.name));
    if (oldCfm == null)
        throw new InvalidRequestException("Could not find column family definition to modify.");
    validateSchemaAgreement();
    try
    {
        // ideally, apply() would happen on the stage with the migration (original comment truncated)
        // fill in any fields the client left unset before converting
        CFMetaData.applyImplicitDefaults(cf_def);
        UpdateColumnFamily update = new UpdateColumnFamily(CFMetaData.convertToAvro(cf_def));
        applyMigrationOnStage(update);
        return DatabaseDescriptor.getDefsVersion().toString();
    }
    catch (ConfigurationException e)
    {
        // convert to a client-visible error, keeping the original as the cause
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
    catch (IOException e)
    {
        InvalidRequestException ex = new InvalidRequestException(e.getMessage());
        ex.initCause(e);
        throw ex;
    }
}
/**
 * Fails with SchemaDisagreementException when reachable nodes report more than
 * one schema version; unreachable hosts don't count towards disagreement.
 */
private void validateSchemaAgreement() throws SchemaDisagreementException
{
    Map<String, List<String>> reachableVersions =
        Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                        Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    if (reachableVersions.size() > 1)
        throw new SchemaDisagreementException();
}
/**
 * Thrift endpoint: remove all data from the named column family in the current
 * keyspace, blocking until the truncate completes.
 *
 * @throws UnavailableException on timeout or I/O failure (cause preserved)
 */
public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException
{
    logger.debug("truncating {} in {}", cfname, state().getKeyspace());
    state().hasColumnFamilyAccess(cfname, Permission.WRITE);
    try
    {
        schedule();
        StorageProxy.truncateBlocking(state().getKeyspace(), cfname);
    }
    catch (TimeoutException e)
    {
        // both timeout and I/O failure are surfaced to the client as unavailability
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    catch (IOException e)
    {
        throw (UnavailableException) new UnavailableException().initCause(e);
    }
    finally
    {
        release();
    }
}
/** Thrift endpoint: validate the keyspace name and bind it to this connection's state. */
public void set_keyspace(String keyspace) throws InvalidRequestException, TException
{
    ThriftValidation.validateTable(keyspace);
    state().setKeyspace(keyspace);
}
/** Thrift endpoint: map of schema version to the hosts reporting it. */
public Map<String, List<String>> describe_schema_versions() throws TException, InvalidRequestException
{
    logger.debug("checking schema agreement");
    return StorageProxy.describeSchemaVersions();
}
// counter methods
/**
 * Thrift endpoint: apply a counter increment to the addressed counter column.
 */
public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("add");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    // third argument is isCommutativeOp (see internal_remove): this CF must be a counter CF
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're adding
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null)
    {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    RowMutation rm = new RowMutation(keyspace, key);
    try
    {
        rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
    }
    catch (MarshalException e)
    {
        throw new InvalidRequestException(e.getMessage());
    }
    // counter writes go through CounterMutation, which carries its own consistency level
    doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
}
/**
 * Thrift endpoint: delete a counter column. Uses the current wall-clock time as the
 * deletion timestamp and delegates to internal_remove with isCommutativeOp = true.
 */
public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException, TException
{
    logger.debug("remove_counter");
    internal_remove(key, path, System.currentTimeMillis(), consistency_level, true);
}
/**
 * Thrift endpoint: decompress (if requested) and execute a CQL query string.
 *
 * @param query       UTF-8 query bytes, optionally deflate-compressed (Compression.GZIP)
 * @param compression how {@code query} is encoded
 * @throws InvalidRequestException on bad compression data, bad encoding, or a
 *         malformed CQL string
 */
public CqlResult execute_cql_query(ByteBuffer query, Compression compression)
throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
{
    String queryString = null;
    // Decompress the query string.
    try
    {
        switch (compression)
        {
            case GZIP:
                ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
                byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];
                Inflater decompressor = new Inflater();
                int lenRead = 0;
                while (true)
                {
                    if (decompressor.needsInput())
                        lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                    // NOTE(review): this get/setInput pair runs even when needsInput() was
                    // false, re-consuming lenRead bytes from the buffer — presumably the
                    // inner inflate loop always drains the input first; confirm.
                    query.get(inBuffer, 0, lenRead);
                    decompressor.setInput(inBuffer, 0, lenRead);
                    int lenWrite = 0;
                    // drain all currently-available output before reading more input
                    while ((lenWrite = decompressor.inflate(outBuffer)) !=0)
                        byteArray.write(outBuffer, 0, lenWrite);
                    if (decompressor.finished())
                        break;
                }
                decompressor.end();
                queryString = new String(byteArray.toByteArray(), 0, byteArray.size(), "UTF-8");
                break;
            case NONE:
                try
                {
                    queryString = ByteBufferUtil.string(query);
                }
                catch (CharacterCodingException ex)
                {
                    throw new InvalidRequestException(ex.getMessage());
                }
                break;
        }
    }
    catch (DataFormatException e)
    {
        throw new InvalidRequestException("Error deflating query string.");
    }
    catch (UnsupportedEncodingException e)
    {
        throw new InvalidRequestException("Unknown query string encoding.");
    }
    try
    {
        return QueryProcessor.process(queryString, state());
    }
    catch (RecognitionException e)
    {
        // parser-level failure -> client-visible error, keeping the original as the cause
        InvalidRequestException ire = new InvalidRequestException("Invalid or malformed CQL query string");
        ire.initCause(e);
        throw ire;
    }
}
// main method moved to CassandraDaemon
}
Diff Result
No diff
Case 10 - java_cassandra.rev_48438_39dd8..StreamInSession.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.IOException;
import java.net.InetAddress;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.utils.Pair;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession
{
    private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);

    // Registry of live incoming sessions, keyed by (source host, session id).
    private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();

    // Files still expected from the source; removed one by one by finished().
    private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
    // (source host, session id) pair identifying this session.
    private final Pair<InetAddress, Long> context;
    // Invoked once when the whole session completes; may be null.
    private final Runnable callback;
    private String table;
    // SSTables received so far; integrated into their CFS in closeIfFinished().
    private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
    // File currently being streamed in, if any.
    private PendingFile current;

    private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
    {
        this.context = context;
        this.callback = callback;
    }

    /** Creates and registers a new session for streaming from {@code host}; the session id is System.nanoTime(). */
    public static StreamInSession create(InetAddress host, Runnable callback)
    {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, System.nanoTime());
        StreamInSession session = new StreamInSession(context, callback);
        sessions.put(context, session);
        return session;
    }

    /** Returns the session for (host, sessionId), creating and registering one (with no callback) if absent. */
    public static StreamInSession get(InetAddress host, long sessionId)
    {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
        StreamInSession session = sessions.get(context);
        if (session == null)
        {
            StreamInSession possibleNew = new StreamInSession(context, null);
            // putIfAbsent resolves the race between two threads creating the same session
            if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
            {
                session = possibleNew;
            }
        }
        return session;
    }

    public void setCurrentFile(PendingFile file)
    {
        this.current = file;
    }

    public void setTable(String table)
    {
        this.table = table;
    }

    /** Queues the given files as expected incoming transfers for this session. */
    public void addFiles(Collection<PendingFile> files)
    {
        for (PendingFile file : files)
        {
            if(logger.isDebugEnabled())
                logger.debug("Adding file {} to Stream Request queue", file.getFilename());
            this.files.add(file);
        }
    }

    /** Records a completed file transfer and acks the source so it can delete its copy. */
    public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
    {
        if (logger.isDebugEnabled())
            logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
        assert reader != null;
        readers.add(reader);
        files.remove(remoteFile);
        if (remoteFile.equals(current))
            current = null;
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
        // send a StreamStatus message telling the source node it can delete this file
        MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
    }

    /** Asks the source to resend a file whose transfer failed. */
    public void retry(PendingFile remoteFile) throws IOException
    {
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
        logger.info("Streaming of file {} from {} failed: requesting a retry.", remoteFile, this);
        MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
    }

    /**
     * If no more files are pending: adds the received sstables to their column family
     * stores, triggers secondary index builds, acks SESSION_FINISHED to the source, runs
     * the completion callback, and unregisters the session.
     */
    public void closeIfFinished() throws IOException
    {
        if (files.isEmpty())
        {
            HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            try
            {
                for (SSTableReader sstable : readers)
                {
                    assert sstable.getTableName().equals(table);
                    // Acquire the reference (for secondary index building) before submitting the index build,
                    // so it can't get compacted out of existence in between
                    if (!sstable.acquireReference())
                        throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transfered");
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    cfs.addSSTable(sstable);
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                // build secondary indexes
                for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
                {
                    if (entry.getKey() != null)
                        entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
                }
            }
            finally
            {
                // release the references taken above even if something failed mid-loop
                for (List<SSTableReader> referenced : cfstores.values())
                    SSTableReader.releaseReferences(referenced);
            }
            // send reply to source that we're done
            StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
            logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
            MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
            if (callback != null)
                callback.run();
            sessions.remove(context);
        }
    }

    public long getSessionId()
    {
        return context.right;
    }

    public InetAddress getHost()
    {
        return context.left;
    }

    /** query method to determine which hosts are streaming to this node. */
    public static Set<InetAddress> getSources()
    {
        HashSet<InetAddress> set = new HashSet<InetAddress>();
        for (StreamInSession session : sessions.values())
        {
            set.add(session.getHost());
        }
        return set;
    }

    /** query the status of incoming files. */
    public static Set<PendingFile> getIncomingFiles(InetAddress host)
    {
        Set<PendingFile> set = new HashSet<PendingFile>();
        for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
        {
            if (entry.getKey().left.equals(host))
            {
                StreamInSession session = entry.getValue();
                if (session.current != null)
                    set.add(session.current);
                set.addAll(session.files);
            }
        }
        return set;
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.IOException;
import java.net.InetAddress;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.utils.Pair;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession
{
    private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);

    // Registry of live incoming sessions, keyed by (source host, session id).
    private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();

    // Files still expected from the source; removed one by one by finished().
    private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
    // (source host, session id) pair identifying this session.
    private final Pair<InetAddress, Long> context;
    // Invoked once when the whole session completes; may be null.
    private final Runnable callback;
    private String table;
    // SSTables received so far; integrated into their CFS in closeIfFinished().
    private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
    // File currently being streamed in, if any.
    private PendingFile current;

    private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
    {
        this.context = context;
        this.callback = callback;
    }

    /** Creates and registers a new session for streaming from {@code host}; the session id is System.nanoTime(). */
    public static StreamInSession create(InetAddress host, Runnable callback)
    {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, System.nanoTime());
        StreamInSession session = new StreamInSession(context, callback);
        sessions.put(context, session);
        return session;
    }

    /** Returns the session for (host, sessionId), creating and registering one (with no callback) if absent. */
    public static StreamInSession get(InetAddress host, long sessionId)
    {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
        StreamInSession session = sessions.get(context);
        if (session == null)
        {
            StreamInSession possibleNew = new StreamInSession(context, null);
            // putIfAbsent resolves the race between two threads creating the same session
            if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
            {
                session = possibleNew;
            }
        }
        return session;
    }

    public void setCurrentFile(PendingFile file)
    {
        this.current = file;
    }

    public void setTable(String table)
    {
        this.table = table;
    }

    /** Queues the given files as expected incoming transfers for this session. */
    public void addFiles(Collection<PendingFile> files)
    {
        for (PendingFile file : files)
        {
            if(logger.isDebugEnabled())
                logger.debug("Adding file {} to Stream Request queue", file.getFilename());
            this.files.add(file);
        }
    }

    /** Records a completed file transfer and acks the source so it can delete its copy. */
    public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
    {
        if (logger.isDebugEnabled())
            logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
        assert reader != null;
        readers.add(reader);
        files.remove(remoteFile);
        if (remoteFile.equals(current))
            current = null;
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
        // send a StreamStatus message telling the source node it can delete this file
        MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
    }

    /** Asks the source to resend a file whose transfer failed. */
    public void retry(PendingFile remoteFile) throws IOException
    {
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
        logger.info("Streaming of file {} from {} failed: requesting a retry.", remoteFile, this);
        MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
    }

    /**
     * If no more files are pending: adds the received sstables to their column family
     * stores, triggers secondary index builds, acks SESSION_FINISHED to the source, runs
     * the completion callback, and unregisters the session.
     */
    public void closeIfFinished() throws IOException
    {
        if (files.isEmpty())
        {
            HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            try
            {
                for (SSTableReader sstable : readers)
                {
                    assert sstable.getTableName().equals(table);
                    // Acquire the reference (for secondary index building) before submitting the index build,
                    // so it can't get compacted out of existence in between
                    if (!sstable.acquireReference())
                        throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transfered");
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    cfs.addSSTable(sstable);
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                // build secondary indexes
                for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
                {
                    if (entry.getKey() != null)
                        entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
                }
            }
            finally
            {
                // release the references taken above even if something failed mid-loop
                for (List<SSTableReader> referenced : cfstores.values())
                    SSTableReader.releaseReferences(referenced);
            }
            // send reply to source that we're done
            StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
            logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
            MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
            if (callback != null)
                callback.run();
            sessions.remove(context);
        }
    }

    public long getSessionId()
    {
        return context.right;
    }

    public InetAddress getHost()
    {
        return context.left;
    }

    /** query method to determine which hosts are streaming to this node. */
    public static Set<InetAddress> getSources()
    {
        HashSet<InetAddress> set = new HashSet<InetAddress>();
        for (StreamInSession session : sessions.values())
        {
            set.add(session.getHost());
        }
        return set;
    }

    /** query the status of incoming files. */
    public static Set<PendingFile> getIncomingFiles(InetAddress host)
    {
        Set<PendingFile> set = new HashSet<PendingFile>();
        for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
        {
            if (entry.getKey().left.equals(host))
            {
                StreamInSession session = entry.getValue();
                if (session.current != null)
                    set.add(session.current);
                set.addAll(session.files);
            }
        }
        return set;
    }
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.IOException;
import java.net.InetAddress;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.utils.Pair;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
// Registry of all live incoming sessions, keyed by (source host, session id).
// NonBlockingHashMap gives lock-free concurrent access from stream handler threads.
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
// Files still expected from the remote side; the session is complete when this is empty.
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
// Immutable identity of this session: (remote host, session id).
private final Pair<InetAddress, Long> context;
// Invoked once when the whole session finishes successfully; may be null.
private final Runnable callback;
// Keyspace name all incoming sstables must belong to (asserted in closeIfFinished).
private String table;
// Readers for sstables fully received so far; consumed when the session closes.
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
// The file currently being streamed, or null between files.
private PendingFile current;
// Local counter combined with a source flag to build globally-distinct session ids.
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
 * Private: instances are created via {@link #create} or lazily via {@link #get}.
 *
 * @param context  (host, sessionId) pair identifying this session
 * @param callback run on successful session completion; may be null
 */
private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
{
this.context = context;
this.callback = callback;
}
/**
 * The next session id is a combination of a local integer counter and a flag used to avoid collisions
 * between session id's generated on different machines. Nodes may have StreamOutSessions with the
 * following contexts:
 *
 * <1.1.1.1, (stream_in_flag, 6)>
 * <1.1.1.1, (stream_out_flag, 6)>
 *
 * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
 * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
 * created by this node.
 *
 * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
 *
 * @return next StreamInSession sessionId
 */
private static long nextSessionId()
{
// High 32 bits: direction flag; low 32 bits: monotonically increasing local counter.
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
/**
 * Creates and registers a new session for streaming from {@code host}.
 *
 * @param host     the remote node we will receive files from
 * @param callback run when the session completes successfully; may be null
 * @return the newly registered session
 */
public static StreamInSession create(InetAddress host, Runnable callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
/**
 * Returns the session for (host, sessionId), creating and registering one on first use.
 * Thread-safe: concurrent callers race via putIfAbsent and all observe the same instance.
 * A lazily-created session has no completion callback.
 */
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
{
session = possibleNew;
}
}
return session;
}
/** Records the file currently being received (null when idle between files). */
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
/** Sets the keyspace name this session's sstables are expected to belong to. */
public void setTable(String table)
{
this.table = table;
}
/**
 * Queues the given files as expected incoming transfers for this session.
 *
 * @param files descriptors of the files the source intends to send
 */
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
/**
 * Marks one file as fully received and acks it back to the source so the
 * source can delete its copy.
 *
 * @param remoteFile the pending file that just completed
 * @param reader     reader over the newly written local sstable (must not be null)
 * @throws IOException if sending the ack message fails
 */
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
/**
 * Asks the source to re-send a file whose transfer failed.
 * Note: this variant retries unconditionally (no retry cap is enforced here).
 *
 * @param remoteFile the file to be re-sent
 * @throws IOException if sending the retry request fails
 */
public void retry(PendingFile remoteFile) throws IOException
{
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} from {} failed: requesting a retry.", remoteFile, this);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
/**
 * If every expected file has arrived: publishes the received sstables to their
 * column family stores, triggers secondary index builds, notifies the source the
 * session is done, runs the completion callback, and deregisters the session.
 * No-op while files are still pending.
 *
 * @throws IOException if sending the session-finished message fails
 */
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
// Group received sstables by their column family store so indexes can be built per-CFS.
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transfered");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
cfs.addSSTable(sstable);
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
finally
{
// Always release the references acquired above, even if the index build throws.
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
if (callback != null)
callback.run();
sessions.remove(context);
}
}
/** @return the direction flag packed into the high 32 bits of the session id. */
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
/** @return this session's id (flag + counter, see {@link #nextSessionId}). */
public long getSessionId()
{
return context.right;
}
/** @return the remote node this session receives from. */
public InetAddress getHost()
{
return context.left;
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
// Union of in-flight and queued files across every session from the given host.
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.IOException;
import java.net.InetAddress;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.utils.Pair;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
// Registry of all live incoming sessions, keyed by (source host, session id).
// NonBlockingHashMap gives lock-free concurrent access from stream handler threads.
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
// Files still expected from the remote side; the session is complete when this is empty.
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
// Immutable identity of this session: (remote host, session id).
private final Pair<InetAddress, Long> context;
// Invoked once when the whole session finishes successfully; may be null.
private final Runnable callback;
// Keyspace name all incoming sstables must belong to (asserted in closeIfFinished).
private String table;
// Readers for sstables fully received so far; consumed when the session closes.
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
// The file currently being streamed, or null between files.
private PendingFile current;
// Local counter combined with a source flag to build globally-distinct session ids.
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
 * Private: instances are created via {@link #create} or lazily via {@link #get}.
 *
 * @param context  (host, sessionId) pair identifying this session
 * @param callback run on successful session completion; may be null
 */
private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
{
this.context = context;
this.callback = callback;
}
/**
 * The next session id is a combination of a local integer counter and a flag used to avoid collisions
 * between session id's generated on different machines. Nodes may have StreamOutSessions with the
 * following contexts:
 *
 * <1.1.1.1, (stream_in_flag, 6)>
 * <1.1.1.1, (stream_out_flag, 6)>
 *
 * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
 * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
 * created by this node.
 *
 * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
 *
 * @return next StreamInSession sessionId
 */
private static long nextSessionId()
{
// High 32 bits: direction flag; low 32 bits: monotonically increasing local counter.
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
/**
 * Creates and registers a new session for streaming from {@code host}.
 *
 * @param host     the remote node we will receive files from
 * @param callback run when the session completes successfully; may be null
 * @return the newly registered session
 */
public static StreamInSession create(InetAddress host, Runnable callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
/**
 * Returns the session for (host, sessionId), creating and registering one on first use.
 * Thread-safe: concurrent callers race via putIfAbsent and all observe the same instance.
 * A lazily-created session has no completion callback.
 */
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
{
session = possibleNew;
}
}
return session;
}
/** Records the file currently being received (null when idle between files). */
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
/** Sets the keyspace name this session's sstables are expected to belong to. */
public void setTable(String table)
{
this.table = table;
}
/**
 * Queues the given files as expected incoming transfers for this session.
 *
 * @param files descriptors of the files the source intends to send
 */
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
/**
 * Marks one file as fully received and acks it back to the source so the
 * source can delete its copy.
 *
 * @param remoteFile the pending file that just completed
 * @param reader     reader over the newly written local sstable (must not be null)
 * @throws IOException if sending the ack message fails
 */
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
/**
 * Asks the source to re-send a file whose transfer failed.
 * Note: this variant retries unconditionally (no retry cap is enforced here).
 *
 * @param remoteFile the file to be re-sent
 * @throws IOException if sending the retry request fails
 */
public void retry(PendingFile remoteFile) throws IOException
{
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} from {} failed: requesting a retry.", remoteFile, this);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
/**
 * If every expected file has arrived: publishes the received sstables to their
 * column family stores, triggers secondary index builds, notifies the source the
 * session is done, runs the completion callback, and deregisters the session.
 * No-op while files are still pending.
 *
 * @throws IOException if sending the session-finished message fails
 */
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
// Group received sstables by their column family store so indexes can be built per-CFS.
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transfered");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
cfs.addSSTable(sstable);
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
finally
{
// Always release the references acquired above, even if the index build throws.
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
if (callback != null)
callback.run();
sessions.remove(context);
}
}
/** @return the direction flag packed into the high 32 bits of the session id. */
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
/** @return this session's id (flag + counter, see {@link #nextSessionId}). */
public long getSessionId()
{
return context.right;
}
/** @return the remote node this session receives from. */
public InetAddress getHost()
{
return context.left;
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
// Union of in-flight and queued files across every session from the given host.
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
Right
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
// Registry of all live incoming sessions, keyed by (source host, session id).
// NonBlockingHashMap gives lock-free concurrent access from stream handler threads.
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
// Files still expected from the remote side; the session is complete when this is empty.
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
// Readers for sstables fully received so far; consumed when the session closes.
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
// The file currently being streamed, or null between files.
private PendingFile current;
// Connection back to the source, used for acks/retries; may be null if never set.
private Socket socket;
// Count of retry requests issued so far; volatile because it is read/written
// from stream handler threads without a lock.
private volatile int retries;
// Local counter combined with a source flag to build globally-distinct session ids.
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
 * The next session id is a combination of a local integer counter and a flag used to avoid collisions
 * between session id's generated on different machines. Nodes may have StreamOutSessions with the
 * following contexts:
 *
 * <1.1.1.1, (stream_in_flag, 6)>
 * <1.1.1.1, (stream_out_flag, 6)>
 *
 * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
 * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
 * created by this node.
 *
 * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
 *
 * @return next StreamInSession sessionId
 */
private static long nextSessionId()
{
// High 32 bits: direction flag; low 32 bits: monotonically increasing local counter.
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
/**
 * Private: instances are created via {@link #create} or lazily via {@link #get}.
 * The table name is unknown at construction (null) and set later via setTable.
 */
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
{
super(null, context, callback);
}
/**
 * Creates and registers a new session for streaming from {@code host}.
 *
 * @param host     the remote node we will receive files from
 * @param callback notified when the session completes or fails; may be null
 * @return the newly registered session
 */
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
/**
 * Returns the session for (host, sessionId), creating and registering one on first use.
 * Thread-safe: concurrent callers race via putIfAbsent and all observe the same instance.
 * A lazily-created session has no callback.
 */
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
/** Records the file currently being received (null when idle between files). */
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
/** Sets the keyspace name this session's sstables are expected to belong to. */
public void setTable(String table)
{
this.table = table;
}
/** Sets the socket used to send acks/retries back to the source. */
public void setSocket(Socket socket)
{
this.socket = socket;
}
/**
 * Queues the given files as expected incoming transfers for this session.
 *
 * @param files descriptors of the files the source intends to send
 */
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
/**
 * Marks one file as fully received and acks it back to the source over the
 * session socket so the source can delete its copy.
 *
 * @param remoteFile the pending file that just completed
 * @param reader     reader over the newly written local sstable (must not be null)
 * @throws IOException if writing the ack to the socket fails
 */
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
/**
 * Asks the source to re-send a file whose transfer failed. Gives up and closes
 * the session (as failed) once the configured retry limit is exceeded, or if
 * the retry request itself cannot be sent.
 *
 * @param remoteFile the file to be re-sent
 * @throws IOException declared for interface compatibility; send failures are
 *                     caught internally and cause the session to close
 */
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
/**
 * Writes a message back to the source over this session's socket.
 * NOTE(review): assumes setSocket has been called — throws NPE otherwise; confirm callers.
 *
 * @throws IOException if the socket write fails
 */
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
/**
 * If every expected file has arrived: publishes the received sstables to their
 * column family stores (batched per-CFS), triggers secondary index builds,
 * notifies the source over the socket that the session is done, closes the
 * socket, and closes the session as successful. No-op while files are pending.
 *
 * @throws IOException if writing the session-finished reply or closing the socket fails
 */
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
// Group received sstables by their column family store so adds and index
// builds can be done once per CFS.
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
// Always release the references acquired above, even if the index build throws.
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
// Close the socket even if the final reply could not be written.
if (socket != null)
socket.close();
}
close(true);
}
}
/**
 * Session teardown hook (called via close): deregisters the session and, on
 * failure, notifies the source (if still alive) that the session failed.
 *
 * @param success whether the session completed successfully
 */
protected void closeInternal(boolean success)
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
// Union of in-flight and queued files across every session from the given host.
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
// Registry of all live incoming sessions, keyed by (source host, session id).
// NonBlockingHashMap gives lock-free concurrent access from stream handler threads.
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
// Files still expected from the remote side; the session is complete when this is empty.
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
// Readers for sstables fully received so far; consumed when the session closes.
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
// The file currently being streamed, or null between files.
private PendingFile current;
// Connection back to the source, used for acks/retries; may be null if never set.
private Socket socket;
// Count of retry requests issued so far; volatile because it is read/written
// from stream handler threads without a lock.
private volatile int retries;
// Local counter combined with a source flag to build globally-distinct session ids.
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
 * The next session id is a combination of a local integer counter and a flag used to avoid collisions
 * between session id's generated on different machines. Nodes may have StreamOutSessions with the
 * following contexts:
 *
 * <1.1.1.1, (stream_in_flag, 6)>
 * <1.1.1.1, (stream_out_flag, 6)>
 *
 * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
 * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
 * created by this node.
 *
 * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
 *
 * @return next StreamInSession sessionId
 */
private static long nextSessionId()
{
// High 32 bits: direction flag; low 32 bits: monotonically increasing local counter.
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
/**
 * Private: instances are created via {@link #create} or lazily via {@link #get}.
 * The table name is unknown at construction (null) and set later via setTable.
 */
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
{
super(null, context, callback);
}
/**
 * Creates and registers a new session for streaming from {@code host}.
 *
 * @param host     the remote node we will receive files from
 * @param callback notified when the session completes or fails; may be null
 * @return the newly registered session
 */
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
/**
 * Returns the session for (host, sessionId), creating and registering one on first use.
 * Thread-safe: concurrent callers race via putIfAbsent and all observe the same instance.
 * A lazily-created session has no callback.
 */
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
/** Records the file currently being received (null when idle between files). */
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
/** Sets the keyspace name this session's sstables are expected to belong to. */
public void setTable(String table)
{
this.table = table;
}
/** Sets the socket used to send acks/retries back to the source. */
public void setSocket(Socket socket)
{
this.socket = socket;
}
/**
 * Queues the given files as expected incoming transfers for this session.
 *
 * @param files descriptors of the files the source intends to send
 */
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
/**
 * Marks one file as fully received and acks it back to the source over the
 * session socket so the source can delete its copy.
 *
 * @param remoteFile the pending file that just completed
 * @param reader     reader over the newly written local sstable (must not be null)
 * @throws IOException if writing the ack to the socket fails
 */
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
/**
 * Asks the source to re-send a file whose transfer failed. Gives up and closes
 * the session (as failed) once the configured retry limit is exceeded, or if
 * the retry request itself cannot be sent.
 *
 * @param remoteFile the file to be re-sent
 * @throws IOException declared for interface compatibility; send failures are
 *                     caught internally and cause the session to close
 */
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
/**
 * Writes a message back to the source over this session's socket.
 * NOTE(review): assumes setSocket has been called — throws NPE otherwise; confirm callers.
 *
 * @throws IOException if the socket write fails
 */
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
/**
 * If every expected file has arrived: publishes the received sstables to their
 * column family stores (batched per-CFS), triggers secondary index builds,
 * notifies the source over the socket that the session is done, closes the
 * socket, and closes the session as successful. No-op while files are pending.
 *
 * @throws IOException if writing the session-finished reply or closing the socket fails
 */
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
// Group received sstables by their column family store so adds and index
// builds can be done once per CFS.
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
// Always release the references acquired above, even if the index build throws.
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
// Close the socket even if the final reply could not be written.
if (socket != null)
socket.close();
}
close(true);
}
}
/**
 * Session teardown hook (called via close): deregisters the session and, on
 * failure, notifies the source (if still alive) that the session failed.
 *
 * @param success whether the session completed successfully
 */
protected void closeInternal(boolean success)
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
// Union of in-flight and queued files across every session from the given host.
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
MergeMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/**
 * Tracks one incoming stream session, keyed by its (host, sessionId) context.
 * Each context gets its own StreamInSession. So there may be >1 Session per host.
 */
public class StreamInSession extends AbstractStreamSession {
    private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
    // All live incoming sessions, keyed by (source host, session id).
    private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
    // Files still expected from the source; removed as each transfer finishes.
    private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
    // SSTables received so far; handed to their column family stores when the session completes.
    private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
    // The file currently being received, if any.
    private PendingFile current;
    // Socket used to send acks and status replies back to the source node.
    private Socket socket;
    // Number of per-file retry requests issued so far in this session.
    private volatile int retries;
    private static final AtomicInteger sessionIdCounter = new AtomicInteger(0);
    /**
     * The next session id is a combination of a local integer counter and a flag used to avoid collisions
     * between session id's generated on different machines. Nodes can may have StreamOutSessions with the
     * following contexts:
     *
     * <1.1.1.1, (stream_in_flag, 6)>
     * <1.1.1.1, (stream_out_flag, 6)>
     *
     * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
     * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
     * created by this node.
     *
     * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
     *
     * @return next StreamInSession sessionId
     */
    private static long nextSessionId() {
        // Source flag in the high 32 bits, locally-unique counter in the low 32 bits.
        return (((long) StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
    }
    // Private: instances are registered via create() or get().
    private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback) {
        // NOTE(review): the null is presumably the table name, assigned later via setTable() -- confirm.
        super(null, context, callback);
    }
    /** Creates a session for streaming from {@code host} with a fresh session id, and registers it. */
    public static StreamInSession create(InetAddress host, IStreamCallback callback) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
        StreamInSession session = new StreamInSession(context, callback);
        sessions.put(context, session);
        return session;
    }
    /** Returns the session for (host, sessionId), creating and registering one (without callback) if absent. */
    public static StreamInSession get(InetAddress host, long sessionId) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
        StreamInSession session = sessions.get(context);
        if (session == null) {
            StreamInSession possibleNew = new StreamInSession(context, null);
            // putIfAbsent resolves the race when two threads register the same context concurrently.
            if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
                session = possibleNew;
        }
        return session;
    }
    /** Records the file currently being received (null when none). */
    public void setCurrentFile(PendingFile file) {
        this.current = file;
    }
    public void setTable(String table) {
        this.table = table;
    }
    public void setSocket(Socket socket) {
        this.socket = socket;
    }
    /** Queues the given pending files as expected incoming transfers. */
    public void addFiles(Collection<PendingFile> files) {
        for (PendingFile file : files) {
            if (logger.isDebugEnabled())
                logger.debug("Adding file {} to Stream Request queue", file.getFilename());
            this.files.add(file);
        }
    }
    /**
     * Marks {@code remoteFile} as fully received and acks the source so it may delete
     * its copy. The reader is retained until the whole session completes (closeIfFinished()).
     */
    public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException {
        if (logger.isDebugEnabled())
            logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] { remoteFile, getHost(), this });
        assert reader != null;
        readers.add(reader);
        files.remove(remoteFile);
        if (remoteFile.equals(current))
            current = null;
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
        // send a StreamStatus message telling the source node it can delete this file
        sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        logger.debug("ack {} sent for {}", reply, remoteFile);
    }
    /**
     * Asks the source to resend {@code remoteFile}; gives up and closes the session
     * once the configured retry limit is exceeded.
     */
    public void retry(PendingFile remoteFile) throws IOException {
        retries++;
        if (retries > DatabaseDescriptor.getMaxStreamingRetries()) {
            logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current), new IllegalStateException("Too many retries for " + remoteFile));
            close(false);
            return;
        }
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
        logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
        try {
            sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        } catch (IOException e) {
            // If we cannot even request a retry, the session is unrecoverable.
            logger.error("Sending retry message failed, closing session.", e);
            close(false);
        }
    }
    /** Writes {@code message} on this session's socket, tagged with the session id. */
    public void sendMessage(Message message) throws IOException {
        OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
    }
    /**
     * If every expected file has arrived: registers the received sstables with their
     * column family stores (building secondary indexes), notifies the source that the
     * session finished, closes the socket, and closes this session successfully.
     */
    public void closeIfFinished() throws IOException {
        if (files.isEmpty()) {
            HashMap<ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            try {
                for (SSTableReader sstable : readers) {
                    assert sstable.getTableName().equals(table);
                    // so it can't get compacted out of existence in between
                    if (!sstable.acquireReference())
                        throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                // add sstables and build secondary indexes
                for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet()) {
                    if (entry.getKey() != null) {
                        entry.getKey().addSSTables(entry.getValue());
                        entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
                    }
                }
            } finally {
                // Release the references acquired above even if registration or index building failed.
                for (List<SSTableReader> referenced : cfstores.values()) SSTableReader.releaseReferences(referenced);
            }
            // send reply to source that we're done
            StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
            logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
            try {
                if (socket != null)
                    OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
                else
                    logger.debug("No socket to reply to {} with!", getHost());
            } finally {
                if (socket != null)
                    socket.close();
            }
            close(true);
        }
    }
    /** @return the creator flag stored in the high 32 bits of the session id. */
    public int getSourceFlag() {
        return (int) (context.right >> 32);
    }
    /** Unregisters this session; on failure, notifies the source node if it is still alive. */
    protected void closeInternal(boolean success) {
        sessions.remove(context);
        if (!success && FailureDetector.instance.isAlive(getHost())) {
            try {
                StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
                MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
            } catch (IOException ex) {
                logger.error("Error sending streaming session failure notification to " + getHost(), ex);
            }
        }
    }
    /** query method to determine which hosts are streaming to this node. */
    public static Set<InetAddress> getSources() {
        HashSet<InetAddress> set = new HashSet<InetAddress>();
        for (StreamInSession session : sessions.values()) {
            set.add(session.getHost());
        }
        return set;
    }
    /** query the status of incoming files. */
    public static Set<PendingFile> getIncomingFiles(InetAddress host) {
        Set<PendingFile> set = new HashSet<PendingFile>();
        for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet()) {
            if (entry.getKey().left.equals(host)) {
                StreamInSession session = entry.getValue();
                if (session.current != null)
                    set.add(session.current);
                set.addAll(session.files);
            }
        }
        return set;
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/**
 * Tracks one incoming stream session, keyed by its (host, sessionId) context.
 * Each context gets its own StreamInSession. So there may be >1 Session per host.
 *
 * NOTE(review): this class definition is duplicated elsewhere in this file
 * (merge-tool residue); the copies should be collapsed into one.
 */
public class StreamInSession extends AbstractStreamSession {
    private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
    // All live incoming sessions, keyed by (source host, session id).
    private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
    // Files still expected from the source; removed as each transfer finishes.
    private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
    // SSTables received so far; handed to their column family stores when the session completes.
    private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
    // The file currently being received, if any.
    private PendingFile current;
    // Socket used to send acks and status replies back to the source node.
    private Socket socket;
    // Number of per-file retry requests issued so far in this session.
    private volatile int retries;
    private static final AtomicInteger sessionIdCounter = new AtomicInteger(0);
    /**
     * The next session id is a combination of a local integer counter and a flag used to avoid collisions
     * between session id's generated on different machines. Nodes can may have StreamOutSessions with the
     * following contexts:
     *
     * <1.1.1.1, (stream_in_flag, 6)>
     * <1.1.1.1, (stream_out_flag, 6)>
     *
     * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
     * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
     * created by this node.
     *
     * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
     *
     * @return next StreamInSession sessionId
     */
    private static long nextSessionId() {
        // Source flag in the high 32 bits, locally-unique counter in the low 32 bits.
        return (((long) StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
    }
    // Private: instances are registered via create() or get().
    private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback) {
        // NOTE(review): the null is presumably the table name, assigned later via setTable() -- confirm.
        super(null, context, callback);
    }
    /** Creates a session for streaming from {@code host} with a fresh session id, and registers it. */
    public static StreamInSession create(InetAddress host, IStreamCallback callback) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
        StreamInSession session = new StreamInSession(context, callback);
        sessions.put(context, session);
        return session;
    }
    /** Returns the session for (host, sessionId), creating and registering one (without callback) if absent. */
    public static StreamInSession get(InetAddress host, long sessionId) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
        StreamInSession session = sessions.get(context);
        if (session == null) {
            StreamInSession possibleNew = new StreamInSession(context, null);
            // putIfAbsent resolves the race when two threads register the same context concurrently.
            if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
                session = possibleNew;
        }
        return session;
    }
    /** Records the file currently being received (null when none). */
    public void setCurrentFile(PendingFile file) {
        this.current = file;
    }
    public void setTable(String table) {
        this.table = table;
    }
    public void setSocket(Socket socket) {
        this.socket = socket;
    }
    /** Queues the given pending files as expected incoming transfers. */
    public void addFiles(Collection<PendingFile> files) {
        for (PendingFile file : files) {
            if (logger.isDebugEnabled())
                logger.debug("Adding file {} to Stream Request queue", file.getFilename());
            this.files.add(file);
        }
    }
    /**
     * Marks {@code remoteFile} as fully received and acks the source so it may delete
     * its copy. The reader is retained until the whole session completes (closeIfFinished()).
     */
    public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException {
        if (logger.isDebugEnabled())
            logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] { remoteFile, getHost(), this });
        assert reader != null;
        readers.add(reader);
        files.remove(remoteFile);
        if (remoteFile.equals(current))
            current = null;
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
        // send a StreamStatus message telling the source node it can delete this file
        sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        logger.debug("ack {} sent for {}", reply, remoteFile);
    }
    /**
     * Asks the source to resend {@code remoteFile}; gives up and closes the session
     * once the configured retry limit is exceeded.
     */
    public void retry(PendingFile remoteFile) throws IOException {
        retries++;
        if (retries > DatabaseDescriptor.getMaxStreamingRetries()) {
            logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current), new IllegalStateException("Too many retries for " + remoteFile));
            close(false);
            return;
        }
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
        logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
        try {
            sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        } catch (IOException e) {
            // If we cannot even request a retry, the session is unrecoverable.
            logger.error("Sending retry message failed, closing session.", e);
            close(false);
        }
    }
    /** Writes {@code message} on this session's socket, tagged with the session id. */
    public void sendMessage(Message message) throws IOException {
        OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
    }
    /**
     * If every expected file has arrived: registers the received sstables with their
     * column family stores (building secondary indexes), notifies the source that the
     * session finished, closes the socket, and closes this session successfully.
     */
    public void closeIfFinished() throws IOException {
        if (files.isEmpty()) {
            HashMap<ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            try {
                for (SSTableReader sstable : readers) {
                    assert sstable.getTableName().equals(table);
                    // so it can't get compacted out of existence in between
                    if (!sstable.acquireReference())
                        throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                // add sstables and build secondary indexes
                for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet()) {
                    if (entry.getKey() != null) {
                        entry.getKey().addSSTables(entry.getValue());
                        entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
                    }
                }
            } finally {
                // Release the references acquired above even if registration or index building failed.
                for (List<SSTableReader> referenced : cfstores.values()) SSTableReader.releaseReferences(referenced);
            }
            // send reply to source that we're done
            StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
            logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
            try {
                if (socket != null)
                    OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
                else
                    logger.debug("No socket to reply to {} with!", getHost());
            } finally {
                if (socket != null)
                    socket.close();
            }
            close(true);
        }
    }
    /** @return the creator flag stored in the high 32 bits of the session id. */
    public int getSourceFlag() {
        return (int) (context.right >> 32);
    }
    /** Unregisters this session; on failure, notifies the source node if it is still alive. */
    protected void closeInternal(boolean success) {
        sessions.remove(context);
        if (!success && FailureDetector.instance.isAlive(getHost())) {
            try {
                StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
                MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
            } catch (IOException ex) {
                logger.error("Error sending streaming session failure notification to " + getHost(), ex);
            }
        }
    }
    /** query method to determine which hosts are streaming to this node. */
    public static Set<InetAddress> getSources() {
        HashSet<InetAddress> set = new HashSet<InetAddress>();
        for (StreamInSession session : sessions.values()) {
            set.add(session.getHost());
        }
        return set;
    }
    /** query the status of incoming files. */
    public static Set<PendingFile> getIncomingFiles(InetAddress host) {
        Set<PendingFile> set = new HashSet<PendingFile>();
        for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet()) {
            if (entry.getKey().left.equals(host)) {
                StreamInSession session = entry.getValue();
                if (session.current != null)
                    set.add(session.current);
                set.addAll(session.files);
            }
        }
        return set;
    }
}
// KeepBothMethods -- merge-tool marker; the copy below kept BOTH create() overloads, one of which does not compile (see note in the class).
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/**
 * Tracks one incoming stream session, keyed by its (host, sessionId) context.
 * Each context gets its own StreamInSession. So there may be >1 Session per host.
 */
public class StreamInSession extends AbstractStreamSession {
    private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
    // All live incoming sessions, keyed by (source host, session id).
    private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
    // Files still expected from the source; removed as each transfer finishes.
    private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
    // SSTables received so far; handed to their column family stores when the session completes.
    private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
    // The file currently being received, if any.
    private PendingFile current;
    // Socket used to send acks and status replies back to the source node.
    private Socket socket;
    // Number of per-file retry requests issued so far in this session.
    private volatile int retries;
    private static final AtomicInteger sessionIdCounter = new AtomicInteger(0);
    /**
     * The next session id is a combination of a local integer counter and a flag used to avoid collisions
     * between session id's generated on different machines. Nodes can may have StreamOutSessions with the
     * following contexts:
     *
     * <1.1.1.1, (stream_in_flag, 6)>
     * <1.1.1.1, (stream_out_flag, 6)>
     *
     * The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
     * the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
     * created by this node.
     *
     * Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
     *
     * @return next StreamInSession sessionId
     */
    private static long nextSessionId() {
        // Source flag in the high 32 bits, locally-unique counter in the low 32 bits.
        return (((long) StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
    }
    // NOTE(review): a merge-residue overload create(InetAddress, Runnable) was removed here.
    // It passed its Runnable argument to the (Pair, IStreamCallback) constructor, which does
    // not compile; use create(InetAddress, IStreamCallback) instead.
    // Private: instances are registered via create() or get().
    private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback) {
        // NOTE(review): the null is presumably the table name, assigned later via setTable() -- confirm.
        super(null, context, callback);
    }
    /** Creates a session for streaming from {@code host} with a fresh session id, and registers it. */
    public static StreamInSession create(InetAddress host, IStreamCallback callback) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
        StreamInSession session = new StreamInSession(context, callback);
        sessions.put(context, session);
        return session;
    }
    /** Returns the session for (host, sessionId), creating and registering one (without callback) if absent. */
    public static StreamInSession get(InetAddress host, long sessionId) {
        Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
        StreamInSession session = sessions.get(context);
        if (session == null) {
            StreamInSession possibleNew = new StreamInSession(context, null);
            // putIfAbsent resolves the race when two threads register the same context concurrently.
            if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
                session = possibleNew;
        }
        return session;
    }
    /** Records the file currently being received (null when none). */
    public void setCurrentFile(PendingFile file) {
        this.current = file;
    }
    public void setTable(String table) {
        this.table = table;
    }
    public void setSocket(Socket socket) {
        this.socket = socket;
    }
    /** Queues the given pending files as expected incoming transfers. */
    public void addFiles(Collection<PendingFile> files) {
        for (PendingFile file : files) {
            if (logger.isDebugEnabled())
                logger.debug("Adding file {} to Stream Request queue", file.getFilename());
            this.files.add(file);
        }
    }
    /**
     * Marks {@code remoteFile} as fully received and acks the source so it may delete
     * its copy. The reader is retained until the whole session completes (closeIfFinished()).
     */
    public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException {
        if (logger.isDebugEnabled())
            logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] { remoteFile, getHost(), this });
        assert reader != null;
        readers.add(reader);
        files.remove(remoteFile);
        if (remoteFile.equals(current))
            current = null;
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
        // send a StreamStatus message telling the source node it can delete this file
        sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        logger.debug("ack {} sent for {}", reply, remoteFile);
    }
    /**
     * Asks the source to resend {@code remoteFile}; gives up and closes the session
     * once the configured retry limit is exceeded.
     */
    public void retry(PendingFile remoteFile) throws IOException {
        retries++;
        if (retries > DatabaseDescriptor.getMaxStreamingRetries()) {
            logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current), new IllegalStateException("Too many retries for " + remoteFile));
            close(false);
            return;
        }
        StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
        logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
        try {
            sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
        } catch (IOException e) {
            // If we cannot even request a retry, the session is unrecoverable.
            logger.error("Sending retry message failed, closing session.", e);
            close(false);
        }
    }
    /** Writes {@code message} on this session's socket, tagged with the session id. */
    public void sendMessage(Message message) throws IOException {
        OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
    }
    /**
     * If every expected file has arrived: registers the received sstables with their
     * column family stores (building secondary indexes), notifies the source that the
     * session finished, closes the socket, and closes this session successfully.
     */
    public void closeIfFinished() throws IOException {
        if (files.isEmpty()) {
            HashMap<ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            try {
                for (SSTableReader sstable : readers) {
                    assert sstable.getTableName().equals(table);
                    // so it can't get compacted out of existence in between
                    if (!sstable.acquireReference())
                        throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                // add sstables and build secondary indexes
                for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet()) {
                    if (entry.getKey() != null) {
                        entry.getKey().addSSTables(entry.getValue());
                        entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
                    }
                }
            } finally {
                // Release the references acquired above even if registration or index building failed.
                for (List<SSTableReader> referenced : cfstores.values()) SSTableReader.releaseReferences(referenced);
            }
            // send reply to source that we're done
            StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
            logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
            try {
                if (socket != null)
                    OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
                else
                    logger.debug("No socket to reply to {} with!", getHost());
            } finally {
                if (socket != null)
                    socket.close();
            }
            close(true);
        }
    }
    /** @return the creator flag stored in the high 32 bits of the session id. */
    public int getSourceFlag() {
        return (int) (context.right >> 32);
    }
    /** Unregisters this session; on failure, notifies the source node if it is still alive. */
    protected void closeInternal(boolean success) {
        sessions.remove(context);
        if (!success && FailureDetector.instance.isAlive(getHost())) {
            try {
                StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
                MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
            } catch (IOException ex) {
                logger.error("Error sending streaming session failure notification to " + getHost(), ex);
            }
        }
    }
    /** query method to determine which hosts are streaming to this node. */
    public static Set<InetAddress> getSources() {
        HashSet<InetAddress> set = new HashSet<InetAddress>();
        for (StreamInSession session : sessions.values()) {
            set.add(session.getHost());
        }
        return set;
    }
    /** query the status of incoming files. */
    public static Set<PendingFile> getIncomingFiles(InetAddress host) {
        Set<PendingFile> set = new HashSet<PendingFile>();
        for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet()) {
            if (entry.getKey().left.equals(host)) {
                StreamInSession session = entry.getValue();
                if (session.current != null)
                    set.add(session.current);
                set.addAll(session.files);
            }
        }
        return set;
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession {
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
// All live incoming sessions, keyed by (source host, session id).
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
// Files still expected from the source; removed as each transfer finishes.
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
// SSTables received so far; handed to their column family stores when the session completes.
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
// The file currently being received, if any.
private PendingFile current;
// Socket used to send acks and status replies back to the source node.
private Socket socket;
// Number of per-file retry requests issued so far in this session.
private volatile int retries;
private static final AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId() {
    // Source flag occupies the high 32 bits; the local counter the low 32.
    long flagBits = ((long) StreamHeader.STREAM_IN_SOURCE_FLAG) << 32;
    return flagBits + sessionIdCounter.incrementAndGet();
}
// NOTE(review): removed merge-residue overload create(InetAddress, Runnable).
// It passed its Runnable argument to the (Pair, IStreamCallback) constructor and
// therefore could not compile; use create(InetAddress, IStreamCallback) instead.
// Private: instances are registered via create() or get().
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback) {
    // NOTE(review): the null is presumably the table name, assigned later via setTable() -- confirm.
    super(null, context, callback);
}
/** Creates a session for streaming from {@code host} with a fresh session id, and registers it. */
public static StreamInSession create(InetAddress host, IStreamCallback callback) {
    Pair<InetAddress, Long> sessionContext = new Pair<InetAddress, Long>(host, nextSessionId());
    StreamInSession newSession = new StreamInSession(sessionContext, callback);
    sessions.put(sessionContext, newSession);
    return newSession;
}
/** Returns the session registered for (host, sessionId), creating and registering one if absent. */
public static StreamInSession get(InetAddress host, long sessionId) {
    Pair<InetAddress, Long> key = new Pair<InetAddress, Long>(host, sessionId);
    StreamInSession existing = sessions.get(key);
    if (existing != null)
        return existing;
    // Race two concurrent creators through putIfAbsent; exactly one wins.
    StreamInSession candidate = new StreamInSession(key, null);
    StreamInSession raced = sessions.putIfAbsent(key, candidate);
    return raced == null ? candidate : raced;
}
/** Records the file currently being received (null when none). */
public void setCurrentFile(PendingFile file) {
    current = file;
}
/** Sets the keyspace (table) this session is streaming into. */
public void setTable(String table) {
    this.table = table;
}
/** Sets the socket used to send acks and replies back to the source node. */
public void setSocket(Socket socket) {
    this.socket = socket;
}
/** Queues the given pending files as expected incoming transfers. */
public void addFiles(Collection<PendingFile> files) {
    for (PendingFile pending : files) {
        if (logger.isDebugEnabled())
            logger.debug("Adding file {} to Stream Request queue", pending.getFilename());
        this.files.add(pending);
    }
}
/**
 * Marks {@code remoteFile} as fully received and acks the source so it may delete
 * its copy. The reader is retained until the whole session completes (closeIfFinished()).
 */
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException {
    if (logger.isDebugEnabled())
        logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] { remoteFile, getHost(), this });
    assert reader != null;
    readers.add(reader);
    files.remove(remoteFile);
    if (remoteFile.equals(current))
        current = null;
    StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
    // send a StreamStatus message telling the source node it can delete this file
    sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
    logger.debug("ack {} sent for {}", reply, remoteFile);
}
/**
 * Asks the source to resend {@code remoteFile}; gives up and closes the session
 * (unsuccessfully) once the configured retry limit is exceeded.
 */
public void retry(PendingFile remoteFile) throws IOException {
    retries++;
    if (retries > DatabaseDescriptor.getMaxStreamingRetries()) {
        logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current), new IllegalStateException("Too many retries for " + remoteFile));
        close(false);
        return;
    }
    StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
    logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
    try {
        sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
    } catch (IOException e) {
        // If we cannot even request a retry, the session is unrecoverable.
        logger.error("Sending retry message failed, closing session.", e);
        close(false);
    }
}
/**
 * Writes a message to the source node over this session's socket, tagged with the
 * session id. Requires setSocket() to have been called first (NPE otherwise).
 *
 * @throws IOException if obtaining the output stream or writing fails
 */
public void sendMessage(Message message) throws IOException {
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
/**
 * If no files remain pending, activates all received sstables (adding them to their
 * column family stores and triggering secondary index builds), tells the source the
 * session is finished, closes the socket, and closes the session successfully.
 * A no-op while files are still pending.
 *
 * @throws IOException if sending the SESSION_FINISHED reply or closing the socket fails
 */
public void closeIfFinished() throws IOException {
if (files.isEmpty()) {
// group received readers by their destination column family store
HashMap<ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try {
for (SSTableReader sstable : readers) {
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet()) {
if (entry.getKey() != null) {
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
} finally {
// release the references taken above even if activation failed partway
for (List<SSTableReader> referenced : cfstores.values()) SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try {
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
} finally {
// close the socket whether or not the final reply went out
if (socket != null)
socket.close();
}
close(true);
}
}
/** @return the source flag packed into the high 32 bits of this session's id (context.right). */
public int getSourceFlag() {
return (int) (context.right >> 32);
}
/**
 * Removes this session from the registry; on unsuccessful close, additionally notifies
 * the source node (if the failure detector still considers it alive) that the session
 * failed so it can clean up its side.
 *
 * @param success whether the session completed normally
 */
protected void closeInternal(boolean success) {
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost())) {
try {
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
} catch (IOException ex) {
// best-effort notification; the session is torn down regardless
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** Query method to determine which hosts are currently streaming to this node. */
public static Set<InetAddress> getSources()
{
    HashSet<InetAddress> hosts = new HashSet<InetAddress>();
    for (StreamInSession activeSession : sessions.values())
        hosts.add(activeSession.getHost());
    return hosts;
}
/** Query the set of files still incoming (or in progress) from the given host. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
    Set<PendingFile> pending = new HashSet<PendingFile>();
    for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
    {
        if (!entry.getKey().left.equals(host))
            continue;
        StreamInSession match = entry.getValue();
        // include the file currently being received, if any, plus everything still queued
        if (match.current != null)
            pending.add(match.current);
        pending.addAll(match.files);
    }
    return pending;
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession {
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
private PendingFile current;
private Socket socket;
private volatile int retries;
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
<<<<<<< MINE
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
=======
public static StreamInSession create(InetAddress host, Runnable callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
>>>>>>> YOURS
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
{
super(null, context, callback);
}
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
public void setTable(String table)
{
this.table = table;
}
public void setSocket(Socket socket)
{
this.socket = socket;
}
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
if (socket != null)
socket.close();
}
close(true);
}
}
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
protected void closeInternal(boolean success)
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession {
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
private PendingFile current;
private Socket socket;
private volatile int retries;
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
<<<<<<< MINE
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
=======
public static StreamInSession create(InetAddress host, Runnable callback)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
>>>>>>> YOURS
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
{
super(null, context, callback);
}
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
public void setTable(String table)
{
this.table = table;
}
public void setSocket(Socket socket)
{
this.socket = socket;
}
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
if (socket != null)
socket.close();
}
close(true);
}
}
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
protected void closeInternal(boolean success)
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
Unstructured
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
private PendingFile current;
private Socket socket;
private volatile int retries;
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
<<<<<<< MINE
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
=======
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
>>>>>>> YOURS
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
<<<<<<< MINE
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
public static StreamInSession create(InetAddress host, Runnable callback)
=======
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
>>>>>>> YOURS
{
<<<<<<< MINE
=======
super(null, context, callback);
}
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
>>>>>>> YOURS
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
public void setTable(String table)
{
this.table = table;
}
public void setSocket(Socket socket)
{
this.socket = socket;
}
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
if (socket != null)
socket.close();
}
close(true);
}
}
<<<<<<< MINE
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
public long getSessionId()
=======
protected void closeInternal(boolean success)
>>>>>>> YOURS
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.net.MessagingService;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
import org.cliffc.high_scale_lib.NonBlockingHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.gms.*;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.OutboundTcpConnection;
import org.apache.cassandra.utils.Pair;
/** each context gets its own StreamInSession. So there may be >1 Session per host */
public class StreamInSession extends AbstractStreamSession
{
private static final Logger logger = LoggerFactory.getLogger(StreamInSession.class);
private static ConcurrentMap<Pair<InetAddress, Long>, StreamInSession> sessions = new NonBlockingHashMap<Pair<InetAddress, Long>, StreamInSession>();
private final Set<PendingFile> files = new NonBlockingHashSet<PendingFile>();
private final List<SSTableReader> readers = new ArrayList<SSTableReader>();
private PendingFile current;
private Socket socket;
private volatile int retries;
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
<<<<<<< MINE
private final static AtomicInteger sessionIdCounter = new AtomicInteger(0);
private StreamInSession(Pair<InetAddress, Long> context, Runnable callback)
=======
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
>>>>>>> YOURS
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
<<<<<<< MINE
/**
* The next session id is a combination of a local integer counter and a flag used to avoid collisions
* between session id's generated on different machines. Nodes can may have StreamOutSessions with the
* following contexts:
*
* <1.1.1.1, (stream_in_flag, 6)>
* <1.1.1.1, (stream_out_flag, 6)>
*
* The first is an out stream created in response to a request from node 1.1.1.1. The id (6) was created by
* the requesting node. The second is an out stream created by this node to push to 1.1.1.1. The id (6) was
* created by this node.
*
* Note: The StreamInSession results in a StreamOutSession on the target that uses the StreamInSession sessionId.
*
* @return next StreamInSession sessionId
*/
private static long nextSessionId()
{
return (((long)StreamHeader.STREAM_IN_SOURCE_FLAG << 32) + sessionIdCounter.incrementAndGet());
}
public static StreamInSession create(InetAddress host, Runnable callback)
=======
private StreamInSession(Pair<InetAddress, Long> context, IStreamCallback callback)
>>>>>>> YOURS
{
<<<<<<< MINE
=======
super(null, context, callback);
}
public static StreamInSession create(InetAddress host, IStreamCallback callback)
{
>>>>>>> YOURS
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, nextSessionId());
StreamInSession session = new StreamInSession(context, callback);
sessions.put(context, session);
return session;
}
public static StreamInSession get(InetAddress host, long sessionId)
{
Pair<InetAddress, Long> context = new Pair<InetAddress, Long>(host, sessionId);
StreamInSession session = sessions.get(context);
if (session == null)
{
StreamInSession possibleNew = new StreamInSession(context, null);
if ((session = sessions.putIfAbsent(context, possibleNew)) == null)
session = possibleNew;
}
return session;
}
public void setCurrentFile(PendingFile file)
{
this.current = file;
}
public void setTable(String table)
{
this.table = table;
}
public void setSocket(Socket socket)
{
this.socket = socket;
}
public void addFiles(Collection<PendingFile> files)
{
for (PendingFile file : files)
{
if(logger.isDebugEnabled())
logger.debug("Adding file {} to Stream Request queue", file.getFilename());
this.files.add(file);
}
}
public void finished(PendingFile remoteFile, SSTableReader reader) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {} (from {}). Sending ack to {}", new Object[] {remoteFile, getHost(), this});
assert reader != null;
readers.add(reader);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
logger.debug("ack {} sent for {}", reply, remoteFile);
}
public void retry(PendingFile remoteFile) throws IOException
{
retries++;
if (retries > DatabaseDescriptor.getMaxStreamingRetries())
{
logger.error(String.format("Failed streaming session %d from %s while receiving %s", getSessionId(), getHost().toString(), current),
new IllegalStateException("Too many retries for " + remoteFile));
close(false);
return;
}
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_RETRY);
logger.info("Streaming of file {} for {} failed: requesting a retry.", remoteFile, this);
try
{
sendMessage(reply.getMessage(Gossiper.instance.getVersion(getHost())));
}
catch (IOException e)
{
logger.error("Sending retry message failed, closing session.", e);
close(false);
}
}
public void sendMessage(Message message) throws IOException
{
OutboundTcpConnection.write(message, String.valueOf(getSessionId()), new DataOutputStream(socket.getOutputStream()));
}
public void closeIfFinished() throws IOException
{
if (files.isEmpty())
{
HashMap <ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
try
{
for (SSTableReader sstable : readers)
{
assert sstable.getTableName().equals(table);
// Acquire the reference (for secondary index building) before submitting the index build,
// so it can't get compacted out of existence in between
if (!sstable.acquireReference())
throw new AssertionError("We shouldn't fail acquiring a reference on a sstable that has just been transferred");
ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
if (!cfstores.containsKey(cfs))
cfstores.put(cfs, new ArrayList<SSTableReader>());
cfstores.get(cfs).add(sstable);
}
// add sstables and build secondary indexes
for (Map.Entry<ColumnFamilyStore, List<SSTableReader>> entry : cfstores.entrySet())
{
if (entry.getKey() != null)
{
entry.getKey().addSSTables(entry.getValue());
entry.getKey().indexManager.maybeBuildSecondaryIndexes(entry.getValue(), entry.getKey().indexManager.getIndexedColumns());
}
}
}
finally
{
for (List<SSTableReader> referenced : cfstores.values())
SSTableReader.releaseReferences(referenced);
}
// send reply to source that we're done
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FINISHED);
logger.info("Finished streaming session {} from {}", getSessionId(), getHost());
try
{
if (socket != null)
OutboundTcpConnection.write(reply.getMessage(Gossiper.instance.getVersion(getHost())), context.right.toString(), new DataOutputStream(socket.getOutputStream()));
else
logger.debug("No socket to reply to {} with!", getHost());
}
finally
{
if (socket != null)
socket.close();
}
close(true);
}
}
<<<<<<< MINE
public int getSourceFlag()
{
return (int)(context.right >> 32);
}
public long getSessionId()
=======
protected void closeInternal(boolean success)
>>>>>>> YOURS
{
sessions.remove(context);
if (!success && FailureDetector.instance.isAlive(getHost()))
{
try
{
StreamReply reply = new StreamReply("", getSessionId(), StreamReply.Status.SESSION_FAILURE);
MessagingService.instance().sendOneWay(reply.getMessage(Gossiper.instance.getVersion(getHost())), getHost());
}
catch (IOException ex)
{
logger.error("Error sending streaming session failure notification to " + getHost(), ex);
}
}
}
/** query method to determine which hosts are streaming to this node. */
public static Set<InetAddress> getSources()
{
HashSet<InetAddress> set = new HashSet<InetAddress>();
for (StreamInSession session : sessions.values())
{
set.add(session.getHost());
}
return set;
}
/** query the status of incoming files. */
public static Set<PendingFile> getIncomingFiles(InetAddress host)
{
Set<PendingFile> set = new HashSet<PendingFile>();
for (Map.Entry<Pair<InetAddress, Long>, StreamInSession> entry : sessions.entrySet())
{
if (entry.getKey().left.equals(host))
{
StreamInSession session = entry.getValue();
if (session.current != null)
set.add(session.current);
set.addAll(session.files);
}
}
return set;
}
}
Diff Result
No diff
Case 11 - java_cassandra.rev_5882a_47112..SSTableWriter.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey<?> lastWrittenKey;
private FileMark dataMark;
private SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
private void afterAppend(DecoratedKey<?> decoratedKey, long dataPosition) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
iwriter.afterAppend(decoratedKey, dataPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
public long append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
/*
* The max timestamp is not always collected here (more precisely, row.maxTimestamp() may return Long.MIN_VALUE),
* to avoid deserializing an EchoedRow.
* This is the reason why it is collected first when calling ColumnFamilyStore.createCompactionWriter
* However, for old sstables without timestamp, we still want to update the timestamp (and we know
* that in this case we will not use EchoedRow, since CompactionControler.needsDeserialize() will be true).
*/
sstableMetadataCollector.updateMaxTimestamp(row.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(row.columnCount());
afterAppend(row.key, currentPosition);
return currentPosition;
}
public void append(DecoratedKey<?> decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// serialize index and bloom filter into in-memory structure
ColumnIndexer.RowHeader header = ColumnIndexer.serialize(cf);
// write out row size
dataFile.stream.writeLong(header.serializedSize() + cf.serializedSizeForSSTable());
// write out row header and data
int columnCount = ColumnFamily.serializer().serializeWithIndexes(cf, header, dataFile.stream);
afterAppend(decoratedKey, startPosition);
// track max column timestamp
sstableMetadataCollector.updateMaxTimestamp(cf.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - startPosition);
sstableMetadataCollector.addColumnCount(columnCount);
}
public void append(DecoratedKey<?> decoratedKey, ByteBuffer value) throws IOException
{
long currentPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
assert value.remaining() > 0;
dataFile.stream.writeLong(value.remaining());
ByteBufferUtil.write(value, dataFile.stream);
afterAppend(decoratedKey, currentPosition);
}
public long appendFromStream(DecoratedKey<?> key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// write BF
int bfSize = in.readInt();
dataFile.stream.writeInt(bfSize);
for (int i = 0; i < bfSize; i++)
dataFile.stream.writeByte(in.readByte());
// write index
int indexSize = in.readInt();
dataFile.stream.writeInt(indexSize);
for (int i = 0; i < indexSize; i++)
dataFile.stream.writeByte(in.readByte());
// cf data
dataFile.stream.writeInt(in.readInt());
dataFile.stream.writeLong(in.readLong());
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = cf.getColumnSerializer().deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
afterAppend(key, currentPosition);
return currentPosition;
}
public void updateMaxTimestamp(long timestamp)
{
sstableMetadataCollector.updateMaxTimestamp(timestamp);
}
/**
* After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
*/
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
* Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
*/
class IndexWriter implements Closeable
{
private final SequentialWriter indexFile;
public final SegmentedFile.Builder builder;
public final IndexSummary summary;
public final BloomFilter bf;
private FileMark mark;
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
bf = fpChance == null
? BloomFilter.getFilter(keyCount, 15)
: BloomFilter.getFilter(keyCount, fpChance);
}
public void afterAppend(DecoratedKey<?> key, long dataPosition) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
indexFile.stream.writeLong(dataPosition);
if (logger.isTraceEnabled())
logger.trace("wrote index of " + key + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
* Closes the index and bloomfilter, making the public state of this writer valid for consumption.
*/
public void close() throws IOException
{
// bloom filter
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
BloomFilter.serializer().serialize(bf, stream);
stream.flush();
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey<?> lastWrittenKey;
private FileMark dataMark;
private SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
private void afterAppend(DecoratedKey<?> decoratedKey, long dataPosition) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
iwriter.afterAppend(decoratedKey, dataPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
public long append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
/*
* The max timestamp is not always collected here (more precisely, row.maxTimestamp() may return Long.MIN_VALUE),
* to avoid deserializing an EchoedRow.
* This is the reason why it is collected first when calling ColumnFamilyStore.createCompactionWriter
* However, for old sstables without timestamp, we still want to update the timestamp (and we know
* that in this case we will not use EchoedRow, since CompactionControler.needsDeserialize() will be true).
*/
sstableMetadataCollector.updateMaxTimestamp(row.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(row.columnCount());
afterAppend(row.key, currentPosition);
return currentPosition;
}
public void append(DecoratedKey<?> decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// serialize index and bloom filter into in-memory structure
ColumnIndexer.RowHeader header = ColumnIndexer.serialize(cf);
// write out row size
dataFile.stream.writeLong(header.serializedSize() + cf.serializedSizeForSSTable());
// write out row header and data
int columnCount = ColumnFamily.serializer().serializeWithIndexes(cf, header, dataFile.stream);
afterAppend(decoratedKey, startPosition);
// track max column timestamp
sstableMetadataCollector.updateMaxTimestamp(cf.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - startPosition);
sstableMetadataCollector.addColumnCount(columnCount);
}
public void append(DecoratedKey<?> decoratedKey, ByteBuffer value) throws IOException
{
long currentPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
assert value.remaining() > 0;
dataFile.stream.writeLong(value.remaining());
ByteBufferUtil.write(value, dataFile.stream);
afterAppend(decoratedKey, currentPosition);
}
/**
 * Copies one row from an incoming stream (e.g. a streamed sstable) into this writer,
 * preserving the serialized bytes exactly while observing each column to collect the
 * row's max timestamp. Counter column deltas are marked to be cleared on the way through.
 *
 * @param key      row key; must sort after the last appended key
 * @param metadata column family metadata used to deserialize columns
 * @param dataSize total serialized row size as announced by the sender
 * @param in       source stream positioned at the start of the row body
 * @return the data-file position at which the row starts
 * @throws IOException on any read or write failure
 */
public long appendFromStream(DecoratedKey<?> key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
    long currentPosition = beforeAppend(key);
    ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
    long dataStart = dataFile.getFilePointer();
    // write row size
    dataFile.stream.writeLong(dataSize);
    // copy the row-level bloom filter verbatim
    int bfSize = in.readInt();
    dataFile.stream.writeInt(bfSize);
    copyFully(in, bfSize);
    // copy the row-level column index verbatim
    int indexSize = in.readInt();
    dataFile.stream.writeInt(indexSize);
    copyFully(in, indexSize);
    // cf deletion info: an int followed by a long, copied verbatim
    dataFile.stream.writeInt(in.readInt());
    dataFile.stream.writeLong(in.readLong());
    // column count
    int columnCount = in.readInt();
    dataFile.stream.writeInt(columnCount);
    // deserialize each column to obtain maxTimestamp and immediately serialize it.
    long maxTimestamp = Long.MIN_VALUE;
    ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
    for (int i = 0; i < columnCount; i++)
    {
        // deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
        // data size received, so we must reserialize the exact same data
        IColumn column = cf.getColumnSerializer().deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
        if (column instanceof CounterColumn)
        {
            column = ((CounterColumn) column).markDeltaToBeCleared();
        }
        else if (column instanceof SuperColumn)
        {
            // also mark deltas on any counter subcolumns of a super column
            SuperColumn sc = (SuperColumn) column;
            for (IColumn subColumn : sc.getSubColumns())
            {
                if (subColumn instanceof CounterColumn)
                {
                    IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
                    sc.replace(subColumn, marked);
                }
            }
        }
        maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
        cf.getColumnSerializer().serialize(column, dataFile.stream);
    }
    // dataStart + 8 skips the row-size long written above
    assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
        : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
    sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
    sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
    sstableMetadataCollector.addColumnCount(columnCount);
    afterAppend(key, currentPosition);
    return currentPosition;
}

/**
 * Copies exactly {@code length} bytes from {@code in} to the data file in chunks.
 * Replaces the previous byte-at-a-time read/write loop with buffered I/O; the
 * bytes written are identical. A non-positive length is a no-op, matching the
 * old loop's behavior.
 */
private void copyFully(DataInput in, int length) throws IOException
{
    if (length <= 0)
        return;
    byte[] buffer = new byte[Math.min(length, 4096)];
    int remaining = length;
    while (remaining > 0)
    {
        int chunk = Math.min(remaining, buffer.length);
        in.readFully(buffer, 0, chunk);
        dataFile.stream.write(buffer, 0, chunk);
        remaining -= chunk;
    }
}
/**
 * Folds an externally observed timestamp into the sstable metadata's max timestamp.
 */
public void updateMaxTimestamp(long timestamp)
{
sstableMetadataCollector.updateMaxTimestamp(timestamp);
}
/**
 * After a failure, best-effort cleanup: close the index writer and data file,
 * then delete every temp component already written for this sstable.
 * Deletion failures are logged rather than rethrown.
 */
public void abort()
{
assert descriptor.temporary;
// closeQuietly: we are already on a failure path; don't mask the original error
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
/**
 * Finalizes the sstable and opens it for reading, using the current time as maxDataAge.
 */
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes all components (index, filter, data, statistics, digest), renames
 * them from their tmp names, and opens the result as an SSTableReader.
 * The writer must not be used again after this call.
 *
 * @param maxDataAge timestamp handed to the reader (see no-arg overload)
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
// null out writer state so accidental reuse fails fast
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * If the data file computed a digest while writing, persist it in a
 * sha1sum-compatible format ("hex-digest data-file-name"). No-op when the
 * data file produced no digest (dataFile.digest() returns null).
 */
private void maybeWriteDigest() throws IOException
{
    byte[] digest = dataFile.digest();
    if (digest == null)
        return;
    // Name the *final* (non-tmp) data file, since that is what exists after rename
    Descriptor newdesc = descriptor.asTemporary(false);
    String dataFileName = new File(newdesc.filenameFor(SSTable.COMPONENT_DATA)).getName();
    // Writing output compatible with sha1sum
    SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
    try
    {
        out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
    }
    finally
    {
        // previously the writer leaked if write() threw
        out.close();
    }
}
/**
 * Serializes the collected sstable statistics into the -Statistics component.
 */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
    SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
    try
    {
        SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
    }
    finally
    {
        // previously the writer leaked if serialization threw
        out.close();
    }
}
/**
 * Renames every tmp component to its final (non-temporary) name and returns
 * the final descriptor.
 */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
    // Derive the permanent descriptor, then move each component over to it.
    final Descriptor finalDesc = tmpdesc.asTemporary(false);
    rename(tmpdesc, finalDesc, components);
    return finalDesc;
}
/**
 * Renames each tmp component to its final name. The -Data component is renamed
 * last so that its presence at the final name implies the whole sstable was
 * renamed before any crash. IO failures are rethrown as IOError.
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
 * @return the current write position in the data file.
 */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this
 * object is not valid for consumption until it has been closed.
 */
class IndexWriter implements Closeable
{
    private final SequentialWriter indexFile;
    public final SegmentedFile.Builder builder;
    public final IndexSummary summary;
    public final BloomFilter bf;
    private FileMark mark;

    IndexWriter(long keyCount) throws IOException
    {
        indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
        builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
        summary = new IndexSummary(keyCount);
        Double fpChance = metadata.getBloomFilterFpChance();
        if (fpChance != null && fpChance == 0)
        {
            // paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
            logger.error("Bloom filter FP chance of zero isn't supposed to happen");
            fpChance = null;
        }
        // a null fpChance selects the legacy fixed-size (15 buckets/element) filter
        bf = fpChance == null
           ? BloomFilter.getFilter(keyCount, 15)
           : BloomFilter.getFilter(keyCount, fpChance);
    }

    /**
     * Records one key: adds it to the bloom filter, writes the (key, dataPosition)
     * index entry, offers the entry to the summary and to the mmap-boundary builder.
     */
    public void afterAppend(DecoratedKey<?> key, long dataPosition) throws IOException
    {
        bf.add(key.key);
        long indexPosition = indexFile.getFilePointer();
        ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
        indexFile.stream.writeLong(dataPosition);
        if (logger.isTraceEnabled())
            logger.trace("wrote index of " + key + " at " + indexPosition);
        summary.maybeAddEntry(key, indexPosition);
        builder.addPotentialBoundary(indexPosition);
    }

    /**
     * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
     */
    public void close() throws IOException
    {
        // bloom filter: serialize, flush and fsync before closing
        FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
        DataOutputStream stream = new DataOutputStream(fos);
        try
        {
            BloomFilter.serializer().serialize(bf, stream);
            stream.flush();
            fos.getFD().sync();
        }
        finally
        {
            // previously the stream (and underlying fd) leaked if serialize() threw
            stream.close();
        }
        // index
        long position = indexFile.getFilePointer();
        indexFile.close(); // calls force
        FileUtils.truncate(indexFile.getPath(), position);
        // finalize in-memory index state
        summary.complete();
    }

    /**
     * Remembers the current index position for a later resetAndTruncate().
     */
    public void mark()
    {
        mark = indexFile.mark();
    }

    public void resetAndTruncate() throws IOException
    {
        // we can't un-set the bloom filter addition, but extra keys in there are harmless.
        // we can't reset dbuilder either, but that is the last thing called in afterappend so
        // we assume that if that worked then we won't be trying to reset.
        indexFile.resetAndTruncate(mark);
    }

    @Override
    public String toString()
    {
        return "IndexWriter(" + descriptor + ")";
    }
}
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey<?> lastWrittenKey;
private FileMark dataMark;
private SSTableMetadata.Collector sstableMetadataCollector;
/**
 * Convenience constructor: derives the CF metadata from the filename and uses
 * the globally configured partitioner plus a fresh metadata collector.
 */
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
/**
 * Computes the set of on-disk components this writer will produce for the
 * given column family: the four mandatory ones, plus COMPRESSION_INFO for
 * compressed tables or DIGEST for uncompressed ones.
 */
private static Set<Component> components(CFMetaData metadata)
{
    Set<Component> result = new HashSet<Component>();
    result.add(Component.DATA);
    result.add(Component.FILTER);
    result.add(Component.PRIMARY_INDEX);
    result.add(Component.STATS);
    boolean compressed = metadata.compressionParameters().sstableCompressor != null;
    // For uncompressed tables, it would feel safer to actually add DIGEST later in
    // maybeWriteDigest(), but the components are unmodifiable after construction.
    result.add(compressed ? Component.COMPRESSION_INFO : Component.DIGEST);
    return result;
}
/**
 * Creates a writer for a new (temporary) sstable. Sets up the index writer and
 * either a compressed or a plain data file, depending on the CF's compression
 * settings; plain data files additionally compute a digest for the -Digest component.
 */
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
// digest is only computed for uncompressed data files (see maybeWriteDigest)
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
/**
 * Records the current data-file and index positions so a later
 * resetAndTruncate() can roll back to this point.
 */
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
/**
 * Rolls the data file and index back to the last mark() point, discarding
 * anything appended since. IO failures are rethrown as IOError.
 */
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
 * Performs sanity checks on the given key (non-null and strictly greater than
 * the last written key) and returns the data-file position at which the new
 * row will start.
 */
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
/**
 * Bookkeeping after a row has been written at dataPosition: tracks first/last
 * keys, records the key in the index writer, and offers the position to the
 * data segment-boundary builder.
 */
private void afterAppend(DecoratedKey<?> decoratedKey, long dataPosition) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
iwriter.afterAppend(decoratedKey, dataPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
/**
 * Appends an already-compacted row, which serializes itself directly into the
 * data file, and returns the data-file position at which the row starts.
 */
public long append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// dataStart + 8 skips an 8-byte size field at the start of the row body
// NOTE(review): presumably written inside row.write() -- confirm against AbstractCompactedRow
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
/*
* The max timestamp is not always collected here (more precisely, row.maxTimestamp() may return Long.MIN_VALUE),
* to avoid deserializing an EchoedRow.
* This is the reason why it is collected first when calling ColumnFamilyStore.createCompactionWriter
* However, for old sstables without timestamp, we still want to update the timestamp (and we know
* that in this case we will not use EchoedRow, since CompactionControler.needsDeserialize() will be true).
*/
sstableMetadataCollector.updateMaxTimestamp(row.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(row.columnCount());
afterAppend(row.key, currentPosition);
return currentPosition;
}
public void append(DecoratedKey<?> decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// serialize index and bloom filter into in-memory structure
ColumnIndexer.RowHeader header = ColumnIndexer.serialize(cf);
// write out row size
dataFile.stream.writeLong(header.serializedSize() + cf.serializedSizeForSSTable());
// write out row header and data
int columnCount = ColumnFamily.serializer().serializeWithIndexes(cf, header, dataFile.stream);
afterAppend(decoratedKey, startPosition);
// track max column timestamp
sstableMetadataCollector.updateMaxTimestamp(cf.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - startPosition);
sstableMetadataCollector.addColumnCount(columnCount);
}
public void append(DecoratedKey<?> decoratedKey, ByteBuffer value) throws IOException
{
long currentPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
assert value.remaining() > 0;
dataFile.stream.writeLong(value.remaining());
ByteBufferUtil.write(value, dataFile.stream);
afterAppend(decoratedKey, currentPosition);
}
public long appendFromStream(DecoratedKey<?> key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// write BF
int bfSize = in.readInt();
dataFile.stream.writeInt(bfSize);
for (int i = 0; i < bfSize; i++)
dataFile.stream.writeByte(in.readByte());
// write index
int indexSize = in.readInt();
dataFile.stream.writeInt(indexSize);
for (int i = 0; i < indexSize; i++)
dataFile.stream.writeByte(in.readByte());
// cf data
dataFile.stream.writeInt(in.readInt());
dataFile.stream.writeLong(in.readLong());
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = cf.getColumnSerializer().deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
afterAppend(key, currentPosition);
return currentPosition;
}
public void updateMaxTimestamp(long timestamp)
{
sstableMetadataCollector.updateMaxTimestamp(timestamp);
}
/**
* After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
*/
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
* Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
*/
class IndexWriter implements Closeable
{
private final SequentialWriter indexFile;
public final SegmentedFile.Builder builder;
public final IndexSummary summary;
public final BloomFilter bf;
private FileMark mark;
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
bf = fpChance == null
? BloomFilter.getFilter(keyCount, 15)
: BloomFilter.getFilter(keyCount, fpChance);
}
public void afterAppend(DecoratedKey<?> key, long dataPosition) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
indexFile.stream.writeLong(dataPosition);
if (logger.isTraceEnabled())
logger.trace("wrote index of " + key + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
* Closes the index and bloomfilter, making the public state of this writer valid for consumption.
*/
public void close() throws IOException
{
// bloom filter
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
BloomFilter.serializer().serialize(bf, stream);
stream.flush();
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey<?> lastWrittenKey;
private FileMark dataMark;
private SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
* Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
*/
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
private void afterAppend(DecoratedKey<?> decoratedKey, long dataPosition) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
iwriter.afterAppend(decoratedKey, dataPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
public long append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
/*
* The max timestamp is not always collected here (more precisely, row.maxTimestamp() may return Long.MIN_VALUE),
* to avoid deserializing an EchoedRow.
* This is the reason why it is collected first when calling ColumnFamilyStore.createCompactionWriter
* However, for old sstables without timestamp, we still want to update the timestamp (and we know
* that in this case we will not use EchoedRow, since CompactionControler.needsDeserialize() will be true).
*/
sstableMetadataCollector.updateMaxTimestamp(row.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(row.columnCount());
afterAppend(row.key, currentPosition);
return currentPosition;
}
public void append(DecoratedKey<?> decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// serialize index and bloom filter into in-memory structure
ColumnIndexer.RowHeader header = ColumnIndexer.serialize(cf);
// write out row size
dataFile.stream.writeLong(header.serializedSize() + cf.serializedSizeForSSTable());
// write out row header and data
int columnCount = ColumnFamily.serializer().serializeWithIndexes(cf, header, dataFile.stream);
afterAppend(decoratedKey, startPosition);
// track max column timestamp
sstableMetadataCollector.updateMaxTimestamp(cf.maxTimestamp());
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - startPosition);
sstableMetadataCollector.addColumnCount(columnCount);
}
public void append(DecoratedKey<?> decoratedKey, ByteBuffer value) throws IOException
{
long currentPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
assert value.remaining() > 0;
dataFile.stream.writeLong(value.remaining());
ByteBufferUtil.write(value, dataFile.stream);
afterAppend(decoratedKey, currentPosition);
}
public long appendFromStream(DecoratedKey<?> key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// write BF
int bfSize = in.readInt();
dataFile.stream.writeInt(bfSize);
for (int i = 0; i < bfSize; i++)
dataFile.stream.writeByte(in.readByte());
// write index
int indexSize = in.readInt();
dataFile.stream.writeInt(indexSize);
for (int i = 0; i < indexSize; i++)
dataFile.stream.writeByte(in.readByte());
// cf data
dataFile.stream.writeInt(in.readInt());
dataFile.stream.writeLong(in.readLong());
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = cf.getColumnSerializer().deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
afterAppend(key, currentPosition);
return currentPosition;
}
/**
 * Forwards an externally-observed timestamp into the sstable metadata collector
 * so the finalized statistics reflect data not routed through append().
 */
public void updateMaxTimestamp(long timestamp)
{
sstableMetadataCollector.updateMaxTimestamp(timestamp);
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable.
 * Best-effort: close failures are swallowed quietly and deletion failures are only logged,
 * so abort() itself never throws.
 */
public void abort()
{
// abort() must only run on an sstable that still carries its 'tmp' marker
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
// deliberate swallow: cleanup is best-effort during failure handling
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
/**
 * Convenience overload of {@link #closeAndOpenReader(long)} that stamps the reader
 * with the current wall-clock time as its max data age.
 */
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes the sstable on disk and opens it for reads.
 * Ordering matters: index/filter and data files are closed first, then statistics and
 * digest are written, and only then is the 'tmp' marker renamed away (presence of a
 * final -Data file implies the rename completed). After this call the writer is spent:
 * iwriter and dbuilder are nulled out and no further appends are possible.
 *
 * @param maxDataAge age stamp recorded on the resulting reader
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
// release writer state; this writer must not be reused
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * Writes a sha1sum-compatible digest component ("&lt;hex digest&gt; &lt;data file name&gt;")
 * for the data file. No-op when the data file did not compute a digest (e.g. compressed
 * writers). The file name written is the final (non-temporary) -Data name so the digest
 * remains valid after the tmp rename.
 */
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
try
{
// Writing output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
}
finally
{
// close even if the write fails, so the file handle is never leaked
out.close();
}
}
/**
 * Serializes the collected sstable statistics into the -Statistics component for desc.
 */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
try
{
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
}
finally
{
// guarantee the writer is closed even when serialization throws
out.close();
}
}
/**
 * Strips the 'tmp' marker from the given descriptor and renames every component file
 * to match.
 *
 * @return the final (non-temporary) descriptor
 */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
final Descriptor finalDesc = tmpdesc.asTemporary(false);
rename(tmpdesc, finalDesc, components);
return finalDesc;
}
/**
 * Renames every component file from its tmp name to its final name.
 * The -Data component is deliberately renamed last: its presence under the final name
 * signals that the whole rename completed before any crash.
 *
 * @throws IOError wrapping any IOException from the underlying renames
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
/** @return the current write position in the data file. */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable
{
// primary index component writer
private final SequentialWriter indexFile;
// collects mmap/buffer segment boundaries for the index file
public final SegmentedFile.Builder builder;
// in-memory sampled index, completed on close()
public final IndexSummary summary;
// row-key bloom filter, serialized to the -Filter component on close()
public final BloomFilter bf;
private FileMark mark;
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// null fpChance -> legacy sizing (15 buckets per element); otherwise size by target FP rate
bf = fpChance == null
? BloomFilter.getFilter(keyCount, 15)
: BloomFilter.getFilter(keyCount, fpChance);
}
/**
 * Records one appended row: adds the key to the bloom filter, writes the
 * (key, data position) index entry, and samples it into the summary.
 */
public void afterAppend(DecoratedKey<?> key, long dataPosition) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
indexFile.stream.writeLong(dataPosition);
if (logger.isTraceEnabled())
logger.trace("wrote index of " + key + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException
{
// bloom filter
// NOTE(review): fos/stream are not closed if serialization throws; consider try/finally
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
BloomFilter.serializer().serialize(bf, stream);
stream.flush();
// force the filter bytes to disk before declaring the component complete
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/** Remembers the current index position for a later resetAndTruncate(). */
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
 * Writes the on-disk components of an sstable (data, primary index, bloom filter,
 * statistics, optional compression info / digest). Rows must be appended in
 * partitioner order; call closeAndOpenReader() to finalize and open for reads,
 * or abort() to discard all temporary components after a failure.
 */
public class SSTableWriter extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
// nulled out by closeAndOpenReader(); the writer is single-use
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
// last key appended; used to enforce ascending key order
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
/**
 * Computes the set of components this writer will produce: always data, filter,
 * primary index and stats, plus compression info when compressed, else a digest.
 */
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
// uncompressed files compute a digest so maybeWriteDigest() can emit one
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
/** Remembers current data and index positions for a later resetAndTruncate(). */
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
/** Rolls data and index files back to the last mark(); wraps IOException in IOError. */
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
 * Validates the key (non-null, ascending order) before a row is written.
 * @return the data-file position at which the row will start
 */
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
/**
 * Records bookkeeping for a row that was just written: tracks first/last keys,
 * writes the index entry, and registers a segment boundary at the row start.
 */
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
/** Appends a pre-compacted row (e.g. from compaction) and returns its index entry. */
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// dataStart + 8 skips the long row-size header written by row.write()
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
/** Appends an in-memory column family as one row. */
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
/**
 * Appends a row streamed from another node. Columns are deserialized one at a time
 * (to collect max timestamp, tombstone histogram and the column index) and
 * immediately re-serialized, preserving the exact byte size announced by the sender.
 *
 * @return the data-file position at which the row started
 */
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
// mark counter deltas for clearing: streamed counter shards must not be re-applied locally
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
// MAX_VALUE means "not deleted"; anything else is a tombstone worth histogramming
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
// NOTE(review): could reuse the columnSerializer local captured above
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
// dataStart + 8 skips the long row-size header written above
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
 */
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
// deliberate swallow: cleanup is best-effort during failure handling
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
/** Finalizes and opens the sstable, stamping it with the current time as max data age. */
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes the sstable on disk and opens it for reads. Ordering matters: files are
 * closed, stats and digest written, and only then is the 'tmp' marker renamed away.
 * After this call the writer is spent (iwriter/dbuilder are nulled out).
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * Writes a sha1sum-compatible digest component for the data file, or does nothing
 * when no digest was computed (compressed writers).
 */
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
// NOTE(review): out is not closed if the write throws; consider try/finally
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writing output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
/** Serializes the collected statistics into the -Statistics component. */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
// NOTE(review): out is not closed if serialization throws; consider try/finally
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
/**
 * Strips the 'tmp' marker from the descriptor and renames all component files.
 * @return the final (non-temporary) descriptor
 */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
/**
 * Renames every component from its tmp name to its final name, -Data last so that a
 * final -Data file implies the whole rename completed before any crash.
 * @throws IOError wrapping any IOException from the underlying renames
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
/** @return the current write position in the data file. */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable
{
// primary index component writer
private final SequentialWriter indexFile;
// collects mmap/buffer segment boundaries for the index file
public final SegmentedFile.Builder builder;
// in-memory sampled index, completed on close()
public final IndexSummary summary;
// row-key filter, serialized to the -Filter component on close()
public final Filter bf;
private FileMark mark;
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// null fpChance -> legacy sizing (15 buckets per element); otherwise size by target FP rate
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
: FilterFactory.getFilter(keyCount, fpChance);
}
/**
 * Records one appended row: adds the key to the filter, writes the serialized
 * index entry, and samples the key into the summary.
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException
{
// bloom filter
// NOTE(review): fos/stream are not closed if serialization throws; consider try/finally
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
// force the filter bytes to disk before declaring the component complete
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/** Remembers the current index position for a later resetAndTruncate(). */
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
* After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
*/
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
/**
 * Renames each component file from its tmpdesc name to its newdesc name.
 * -Data is renamed last: its presence under the final name is the marker
 * that the sstable was completely renamed before any crash.
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
    try
    {
        for (Component component : components)
        {
            if (Component.DATA.equals(component))
                continue; // deliberately deferred until every other component is in place
            FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
        }
        FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
}
/**
 * @return the current write position in the data file.
 * NOTE(review): for compressed tables dataFile is a CompressedSequentialWriter —
 * confirm whether its file pointer reflects pre- or post-compression bytes.
 */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
* Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
*/
class IndexWriter implements Closeable
{
private final SequentialWriter indexFile;
// Segment boundaries for mmapping/buffering the index file.
public final SegmentedFile.Builder builder;
// Sampled key -> index-position summary, completed in close().
public final IndexSummary summary;
public final Filter bf;
// Index-file position recorded by mark(), restored by resetAndTruncate().
private FileMark mark;
/**
 * Opens the index file and sizes the bloom filter for the expected key count.
 * A configured FP chance of exactly 0 is treated as "not set" (see comment below).
 */
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// no configured FP chance: fall back to a fixed 15-buckets-per-key filter
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
: FilterFactory.getFilter(keyCount, fpChance);
}
/**
 * Adds the key to the bloom filter and appends its index entry; also feeds
 * the summary and the segment-boundary builder with the entry's position.
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException
{
// bloom filter: serialize, flush, then fsync before closing so it is durable on disk
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
fos.getFD().sync();
stream.close();
// index: truncate to the logical write position after close
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/** Records the current index-file position for a later resetAndTruncate(). */
public void mark()
{
mark = indexFile.mark();
}
/** Rolls the index file back to the last mark() after a failed append. */
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
MergeMethods
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
 * Writes an sstable: rows are appended in partitioner order to the -Data file
 * while the row index, bloom filter and statistics components are maintained
 * alongside. closeAndOpenReader() finalizes all components, strips the 'tmp'
 * marker from the filenames and opens the finished table as an SSTableReader.
 */
public class SSTableWriter extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
// Writes the -Index and -Filter components; set to null once the reader is opened.
private IndexWriter iwriter;
// Accumulates segment boundaries for mmapping/buffering the data file.
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
// Last key appended; used to enforce strictly increasing key order.
private DecoratedKey lastWrittenKey;
// Data-file position recorded by mark(), restored by resetAndTruncate().
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
/** Convenience constructor: metadata from the live schema, global partitioner. */
public SSTableWriter(String filename, long keyCount) throws IOException {
this(filename, keyCount, Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)), StorageService.getPartitioner(), SSTableMetadata.createCollector());
}
/** Components this writer will produce; -Digest only for uncompressed tables. */
private static Set<Component> components(CFMetaData metadata) {
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename, long keyCount, CFMetaData metadata, IPartitioner<?> partitioner, SSTableMetadata.Collector sstableMetadataCollector) throws IOException {
super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
iwriter = new IndexWriter(keyCount);
if (compression) {
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(), descriptor.filenameFor(Component.COMPRESSION_INFO), true, metadata.compressionParameters(), sstableMetadataCollector);
} else {
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
// digest is only computed for uncompressed data; consumed by maybeWriteDigest()
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
/** Records current data and index positions so a failed append can be undone. */
public void mark() {
dataMark = dataFile.mark();
iwriter.mark();
}
/** Rolls data and index files back to the last mark() after a failed append. */
public void resetAndTruncate() {
try {
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
} catch (IOException e) {
throw new IOError(e);
}
}
/**
 * Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
 * (0 for the first row, otherwise the current end of the data file).
 */
private long beforeAppend(DecoratedKey decoratedKey) throws IOException {
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0 : "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
/** Updates first/last keys and writes the row's entry into the index components. */
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException {
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if (null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
/** Appends a pre-compacted row, which serializes itself via row.write(). */
public RowIndexEntry append(AbstractCompactedRow row) throws IOException {
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// per the assert, row.write() is expected to prefix an 8-byte row-size header
// that is excluded from the dataSize it returns
assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
/** Appends an in-memory column family under the given key. */
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException {
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
/**
 * Appends a row streamed from another node, re-serializing column by column so
 * counter deltas can be marked for clearing; @return the row's start position.
 * Note: the metadata parameter shadows the inherited field of the same name.
 */
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException {
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++) {
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn) {
column = ((CounterColumn) column).markDeltaToBeCleared();
} else if (column instanceof SuperColumn) {
// counters may also hide inside super columns; mark those too
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns()) {
if (subColumn instanceof CounterColumn) {
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
// deletionTime < MAX_VALUE means the column carries a tombstone
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE) {
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
 */
public void abort() {
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try {
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
} catch (Exception e) {
// best-effort cleanup: log and continue rather than mask the original failure
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException {
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes every component (index/filter, data, stats, digest), renames away
 * the 'tmp' marker and opens the finished sstable for reading. Ordering
 * matters: all files are complete on disk before rename() publishes them.
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException {
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc, components, metadata, partitioner, ifile, dfile, iwriter.summary, iwriter.bf, maxDataAge, sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
/** Writes a sha1sum-compatible -Digest file; no-op when no digest was computed. */
private void maybeWriteDigest() throws IOException {
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
/** Serializes the collected statistics into the -Stats component. */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException {
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
/** Renames all components from their 'tmp' names and @return the final descriptor. */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components) {
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components) {
try {
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA))) FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
} catch (IOException e) {
throw new IOError(e);
}
}
/** @return the current write position in the data file. */
public long getFilePointer() {
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable {
private final SequentialWriter indexFile;
// Segment boundaries for mmapping/buffering the index file.
public final SegmentedFile.Builder builder;
// Sampled key -> index-position summary, completed in close().
public final IndexSummary summary;
public final Filter bf;
// Index-file position recorded by mark(), restored by resetAndTruncate().
private FileMark mark;
IndexWriter(long keyCount) throws IOException {
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0) {
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// no configured FP chance: fall back to a fixed 15-buckets-per-key filter
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15) : FilterFactory.getFilter(keyCount, fpChance);
}
/** Adds the key to the bloom filter and appends its index entry. */
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException {
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException {
// bloom filter: serialize, flush, then fsync before closing so it is durable on disk
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
// calls force
indexFile.close();
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/** Records the current index-file position for a later resetAndTruncate(). */
public void mark() {
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException {
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString() {
return "IndexWriter(" + descriptor + ")";
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
 * Writes an sstable: rows are appended in partitioner order to the -Data file
 * while the row index, bloom filter and statistics components are maintained
 * alongside. closeAndOpenReader() finalizes all components, strips the 'tmp'
 * marker from the filenames and opens the finished table as an SSTableReader.
 */
public class SSTableWriter extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
// Writes the -Index and -Filter components; set to null once the reader is opened.
private IndexWriter iwriter;
// Accumulates segment boundaries for mmapping/buffering the data file.
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
// Last key appended; used to enforce strictly increasing key order.
private DecoratedKey lastWrittenKey;
// Data-file position recorded by mark(), restored by resetAndTruncate().
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
/** Convenience constructor: metadata from the live schema, global partitioner. */
public SSTableWriter(String filename, long keyCount) throws IOException {
this(filename, keyCount, Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)), StorageService.getPartitioner(), SSTableMetadata.createCollector());
}
/** Components this writer will produce; -Digest only for uncompressed tables. */
private static Set<Component> components(CFMetaData metadata) {
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename, long keyCount, CFMetaData metadata, IPartitioner<?> partitioner, SSTableMetadata.Collector sstableMetadataCollector) throws IOException {
super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
iwriter = new IndexWriter(keyCount);
if (compression) {
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(), descriptor.filenameFor(Component.COMPRESSION_INFO), true, metadata.compressionParameters(), sstableMetadataCollector);
} else {
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
// digest is only computed for uncompressed data; consumed by maybeWriteDigest()
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
/** Records current data and index positions so a failed append can be undone. */
public void mark() {
dataMark = dataFile.mark();
iwriter.mark();
}
/** Rolls data and index files back to the last mark() after a failed append. */
public void resetAndTruncate() {
try {
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
} catch (IOException e) {
throw new IOError(e);
}
}
/**
 * Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
 * (0 for the first row, otherwise the current end of the data file).
 */
private long beforeAppend(DecoratedKey decoratedKey) throws IOException {
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0 : "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
/** Updates first/last keys and writes the row's entry into the index components. */
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException {
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if (null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
/** Appends a pre-compacted row, which serializes itself via row.write(). */
public RowIndexEntry append(AbstractCompactedRow row) throws IOException {
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// per the assert, row.write() is expected to prefix an 8-byte row-size header
// that is excluded from the dataSize it returns
assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
/** Appends an in-memory column family under the given key. */
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException {
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
/**
 * Appends a row streamed from another node, re-serializing column by column so
 * counter deltas can be marked for clearing; @return the row's start position.
 * Note: the metadata parameter shadows the inherited field of the same name.
 */
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException {
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++) {
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn) {
column = ((CounterColumn) column).markDeltaToBeCleared();
} else if (column instanceof SuperColumn) {
// counters may also hide inside super columns; mark those too
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns()) {
if (subColumn instanceof CounterColumn) {
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
// deletionTime < MAX_VALUE means the column carries a tombstone
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE) {
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
 */
public void abort() {
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try {
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
} catch (Exception e) {
// best-effort cleanup: log and continue rather than mask the original failure
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException {
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes every component (index/filter, data, stats, digest), renames away
 * the 'tmp' marker and opens the finished sstable for reading. Ordering
 * matters: all files are complete on disk before rename() publishes them.
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException {
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc, components, metadata, partitioner, ifile, dfile, iwriter.summary, iwriter.bf, maxDataAge, sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
/** Writes a sha1sum-compatible -Digest file; no-op when no digest was computed. */
private void maybeWriteDigest() throws IOException {
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
/** Serializes the collected statistics into the -Stats component. */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException {
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
/** Renames all components from their 'tmp' names and @return the final descriptor. */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components) {
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components) {
try {
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA))) FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
} catch (IOException e) {
throw new IOError(e);
}
}
/** @return the current write position in the data file. */
public long getFilePointer() {
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable {
private final SequentialWriter indexFile;
// Segment boundaries for mmapping/buffering the index file.
public final SegmentedFile.Builder builder;
// Sampled key -> index-position summary, completed in close().
public final IndexSummary summary;
public final Filter bf;
// Index-file position recorded by mark(), restored by resetAndTruncate().
private FileMark mark;
IndexWriter(long keyCount) throws IOException {
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0) {
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// no configured FP chance: fall back to a fixed 15-buckets-per-key filter
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15) : FilterFactory.getFilter(keyCount, fpChance);
}
/** Adds the key to the bloom filter and appends its index entry. */
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException {
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException {
// bloom filter: serialize, flush, then fsync before closing so it is durable on disk
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
// calls force
indexFile.close();
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/** Records the current index-file position for a later resetAndTruncate(). */
public void mark() {
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException {
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString() {
return "IndexWriter(" + descriptor + ")";
}
}
}
// MERGE ARTIFACT ("KeepBothMethods"): a duplicate copy of SSTableWriter follows below.
// Two top-level classes with the same name cannot coexist; the duplicate must be removed
// before this file can compile.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
 * Writes the on-disk components of an sstable (data, primary index, bloom filter,
 * statistics, and either compression info or a digest). Keys must be appended in
 * strictly ascending order; call {@link #closeAndOpenReader()} to finish the table
 * and obtain a reader, or {@link #abort()} to clean up after a failure.
 */
public class SSTableWriter extends SSTable
{
    private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);

    // iwriter/dbuilder are nulled out in closeAndOpenReader() once ownership passes to the reader
    private IndexWriter iwriter;
    private SegmentedFile.Builder dbuilder;
    private final SequentialWriter dataFile;
    private DecoratedKey lastWrittenKey; // last appended key; enforces ascending key order
    private FileMark dataMark;
    private final SSTableMetadata.Collector sstableMetadataCollector;

    public SSTableWriter(String filename, long keyCount) throws IOException
    {
        this(filename,
             keyCount,
             Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
             StorageService.getPartitioner(),
             SSTableMetadata.createCollector());
    }

    /**
     * Components every sstable gets, plus COMPRESSION_INFO for compressed tables
     * or DIGEST for uncompressed ones.
     */
    private static Set<Component> components(CFMetaData metadata)
    {
        Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
        if (metadata.compressionParameters().sstableCompressor != null)
            components.add(Component.COMPRESSION_INFO);
        else
            // it would feel safer to actually add this component later in maybeWriteDigest(),
            // but the components are unmodifiable after construction
            components.add(Component.DIGEST);
        return components;
    }

    public SSTableWriter(String filename,
                         long keyCount,
                         CFMetaData metadata,
                         IPartitioner<?> partitioner,
                         SSTableMetadata.Collector sstableMetadataCollector) throws IOException
    {
        super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
        iwriter = new IndexWriter(keyCount);
        if (compression)
        {
            dbuilder = SegmentedFile.getCompressedBuilder();
            dataFile = CompressedSequentialWriter.open(getFilename(),
                                                      descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                      true,
                                                      metadata.compressionParameters(),
                                                      sstableMetadataCollector);
        }
        else
        {
            dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
            dataFile = SequentialWriter.open(new File(getFilename()), true);
            dataFile.setComputeDigest(); // a digest is only produced for the uncompressed format
        }
        this.sstableMetadataCollector = sstableMetadataCollector;
    }

    /** Records the current data- and index-file positions for a later resetAndTruncate(). */
    public void mark()
    {
        dataMark = dataFile.mark();
        iwriter.mark();
    }

    /** Rewinds data and index files to the last mark(), discarding anything appended since. */
    public void resetAndTruncate()
    {
        try
        {
            dataFile.resetAndTruncate(dataMark);
            iwriter.resetAndTruncate();
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

    /**
     * Performs sanity checks on {@code decoratedKey} and returns the position in the data
     * file before any data is written.
     *
     * NOTE: the original file declared two beforeAppend overloads (DecoratedKey&lt;?&gt; and raw
     * DecoratedKey); both erase to the same signature, which is a compile error, so they are
     * merged here, keeping the assertion (dev-time, rejects duplicates) and the exception
     * path (always on, rejects descending keys).
     *
     * @throws IOException if the key is null or below the last written key
     */
    private long beforeAppend(DecoratedKey decoratedKey) throws IOException
    {
        if (decoratedKey == null)
        {
            throw new IOException("Keys must not be null.");
        }
        assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0 : "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
        if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
        {
            logger.info("Last written key : " + lastWrittenKey);
            logger.info("Current key : " + decoratedKey);
            logger.info("Writing into file " + getFilename());
            throw new IOException("Keys must be written in ascending order.");
        }
        return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
    }

    /** Updates first/last bookkeeping and records the index entry for a row just written. */
    private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
    {
        lastWrittenKey = decoratedKey;
        this.last = lastWrittenKey;
        if (null == this.first)
            this.first = lastWrittenKey;
        if (logger.isTraceEnabled())
            logger.trace("wrote " + decoratedKey + " at " + dataPosition);
        RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
        iwriter.append(decoratedKey, entry);
        dbuilder.addPotentialBoundary(dataPosition);
        return entry;
    }

    /**
     * Appends a pre-compacted row; row.write() emits its own 8-byte size header.
     *
     * @return the index entry created for the row
     */
    public RowIndexEntry append(AbstractCompactedRow row) throws IOException
    {
        long currentPosition = beforeAppend(row.key);
        ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
        long dataStart = dataFile.getFilePointer();
        long dataSize = row.write(dataFile.stream);
        // dataStart + 8: skip the size header written by row.write()
        assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
        sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
        return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
    }

    /** Appends an in-memory column family as a single row. */
    public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
    {
        long startPosition = beforeAppend(decoratedKey);
        ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
        // build column index
        // TODO: build and serialization could be merged
        ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
        // write out row size + data
        dataFile.stream.writeLong(cf.serializedSizeForSSTable());
        ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
        afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
        sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
    }

    /**
     * Streams one row from {@code in} into the data file, re-serializing column by column
     * so counter deltas can be marked for clearing and timestamp/tombstone statistics
     * collected along the way.
     *
     * @return the data-file position at which the row starts
     */
    public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
    {
        long currentPosition = beforeAppend(key);
        ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
        long dataStart = dataFile.getFilePointer();
        // write row size
        dataFile.stream.writeLong(dataSize);
        // cf deletion info (local deletion time + markedForDeleteAt)
        int lct = in.readInt();
        long mfda = in.readLong();
        DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
        dataFile.stream.writeInt(lct);
        dataFile.stream.writeLong(mfda);
        // column count
        int columnCount = in.readInt();
        dataFile.stream.writeInt(columnCount);
        // deserialize each column to obtain maxTimestamp and immediately serialize it.
        long maxTimestamp = Long.MIN_VALUE;
        StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
        ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
        ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
        IColumnSerializer columnSerializer = cf.getColumnSerializer();
        for (int i = 0; i < columnCount; i++)
        {
            // deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
            // data size received, so we must reserialize the exact same data
            IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
            if (column instanceof CounterColumn)
            {
                column = ((CounterColumn) column).markDeltaToBeCleared();
            }
            else if (column instanceof SuperColumn)
            {
                // counters may also be nested inside a super column
                SuperColumn sc = (SuperColumn) column;
                for (IColumn subColumn : sc.getSubColumns())
                {
                    if (subColumn instanceof CounterColumn)
                    {
                        IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
                        sc.replace(subColumn, marked);
                    }
                }
            }
            int deletionTime = column.getLocalDeletionTime();
            if (deletionTime < Integer.MAX_VALUE)
            {
                tombstones.update(deletionTime);
            }
            maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
            cf.getColumnSerializer().serialize(column, dataFile.stream);
            columnIndexer.add(column);
        }
        assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
        sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
        sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
        sstableMetadataCollector.addColumnCount(columnCount);
        sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
        afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
        return currentPosition;
    }

    /**
     * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
     */
    public void abort()
    {
        assert descriptor.temporary;
        FileUtils.closeQuietly(iwriter);
        FileUtils.closeQuietly(dataFile);
        try
        {
            Set<Component> components = SSTable.componentsFor(descriptor);
            if (!components.isEmpty())
                SSTable.delete(descriptor, components);
        }
        catch (Exception e)
        {
            // best-effort cleanup; deletion failure must not mask the original error
            logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
        }
    }

    public SSTableReader closeAndOpenReader() throws IOException
    {
        return closeAndOpenReader(System.currentTimeMillis());
    }

    /**
     * Finishes all components, renames them from their temporary names, and opens the
     * result as a reader. This writer is unusable afterwards.
     */
    public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
    {
        // index and filter
        iwriter.close();
        // main data, close will truncate if necessary
        dataFile.close();
        // write sstable statistics
        SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
        writeMetadata(descriptor, sstableMetadata);
        maybeWriteDigest();
        // remove the 'tmp' marker from all components
        final Descriptor newdesc = rename(descriptor, components);
        // finalize in-memory state for the reader
        SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
        SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
        SSTableReader sstable = SSTableReader.internalOpen(newdesc, components, metadata, partitioner, ifile, dfile, iwriter.summary, iwriter.bf, maxDataAge, sstableMetadata);
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(last);
        // drop references so further use of this writer fails fast
        iwriter = null;
        dbuilder = null;
        return sstable;
    }

    /** Writes a sha1sum-compatible digest file for the data component, if one was computed. */
    private void maybeWriteDigest() throws IOException
    {
        byte[] digest = dataFile.digest();
        if (digest == null)
            return; // compressed tables compute no digest
        SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
        try
        {
            // Writing output compatible with sha1sum
            Descriptor newdesc = descriptor.asTemporary(false);
            String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
            String dataFileName = tmp[tmp.length - 1];
            out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
        }
        finally
        {
            out.close(); // close even if write fails, so the fd is not leaked
        }
    }

    private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
    {
        SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
        try
        {
            SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
        }
        finally
        {
            out.close(); // close even if serialization fails, so the fd is not leaked
        }
    }

    static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
    {
        Descriptor newdesc = tmpdesc.asTemporary(false);
        rename(tmpdesc, newdesc, components);
        return newdesc;
    }

    public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
    {
        try
        {
            // do -Data last because -Data present should mean the sstable was completely renamed before crash
            for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
                FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
            FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

    /** @return the current write position in the data file */
    public long getFilePointer()
    {
        return dataFile.getFilePointer();
    }

    /**
     * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
     */
    class IndexWriter implements Closeable
    {
        private final SequentialWriter indexFile;
        public final SegmentedFile.Builder builder;
        public final IndexSummary summary;
        public final Filter bf;
        private FileMark mark;

        IndexWriter(long keyCount) throws IOException
        {
            indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
            builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
            summary = new IndexSummary(keyCount);
            Double fpChance = metadata.getBloomFilterFpChance();
            if (fpChance != null && fpChance == 0)
            {
                // paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
                logger.error("Bloom filter FP chance of zero isn't supposed to happen");
                fpChance = null;
            }
            // no (valid) configured fp chance: fall back to the default 15-bucket filter
            bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15) : FilterFactory.getFilter(keyCount, fpChance);
        }

        /** Adds the key to the bloom filter and writes its entry to index file, summary and builder. */
        public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
        {
            bf.add(key.key);
            long indexPosition = indexFile.getFilePointer();
            ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
            RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
            if (logger.isTraceEnabled())
                logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
            summary.maybeAddEntry(key, indexPosition);
            builder.addPotentialBoundary(indexPosition);
        }

        /**
         * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
         */
        public void close() throws IOException
        {
            // bloom filter -- flush + fsync before close; close in finally so the fd
            // is not leaked if serialization fails
            FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
            DataOutputStream stream = new DataOutputStream(fos);
            try
            {
                FilterFactory.serialize(bf, stream, descriptor.filterType);
                stream.flush();
                fos.getFD().sync();
            }
            finally
            {
                stream.close(); // also closes fos
            }
            // index
            long position = indexFile.getFilePointer();
            // calls force
            indexFile.close();
            FileUtils.truncate(indexFile.getPath(), position);
            // finalize in-memory index state
            summary.complete();
        }

        /** Remembers the current index-file position for a later resetAndTruncate(). */
        public void mark()
        {
            mark = indexFile.mark();
        }

        public void resetAndTruncate() throws IOException
        {
            // we can't un-set the bloom filter addition, but extra keys in there are harmless.
            // we can't reset dbuilder either, but that is the last thing called in afterappend so
            // we assume that if that worked then we won't be trying to reset.
            indexFile.resetAndTruncate(mark);
        }

        @Override
        public String toString()
        {
            return "IndexWriter(" + descriptor + ")";
        }
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
/**
 * Writes the on-disk components of an sstable (data, primary index, bloom filter,
 * statistics, and either compression info or a digest). Keys must be appended in
 * strictly ascending order; call {@link #closeAndOpenReader()} to finish the table
 * and obtain a reader, or {@link #abort()} to clean up after a failure.
 */
public class SSTableWriter extends SSTable
{
    private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);

    // iwriter/dbuilder are nulled out in closeAndOpenReader() once ownership passes to the reader
    private IndexWriter iwriter;
    private SegmentedFile.Builder dbuilder;
    private final SequentialWriter dataFile;
    private DecoratedKey lastWrittenKey; // last appended key; enforces ascending key order
    private FileMark dataMark;
    private final SSTableMetadata.Collector sstableMetadataCollector;

    public SSTableWriter(String filename, long keyCount) throws IOException
    {
        this(filename,
             keyCount,
             Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
             StorageService.getPartitioner(),
             SSTableMetadata.createCollector());
    }

    /**
     * Components every sstable gets, plus COMPRESSION_INFO for compressed tables
     * or DIGEST for uncompressed ones.
     */
    private static Set<Component> components(CFMetaData metadata)
    {
        Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
        if (metadata.compressionParameters().sstableCompressor != null)
            components.add(Component.COMPRESSION_INFO);
        else
            // it would feel safer to actually add this component later in maybeWriteDigest(),
            // but the components are unmodifiable after construction
            components.add(Component.DIGEST);
        return components;
    }

    public SSTableWriter(String filename,
                         long keyCount,
                         CFMetaData metadata,
                         IPartitioner<?> partitioner,
                         SSTableMetadata.Collector sstableMetadataCollector) throws IOException
    {
        super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
        iwriter = new IndexWriter(keyCount);
        if (compression)
        {
            dbuilder = SegmentedFile.getCompressedBuilder();
            dataFile = CompressedSequentialWriter.open(getFilename(),
                                                      descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                      true,
                                                      metadata.compressionParameters(),
                                                      sstableMetadataCollector);
        }
        else
        {
            dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
            dataFile = SequentialWriter.open(new File(getFilename()), true);
            dataFile.setComputeDigest(); // a digest is only produced for the uncompressed format
        }
        this.sstableMetadataCollector = sstableMetadataCollector;
    }

    /** Records the current data- and index-file positions for a later resetAndTruncate(). */
    public void mark()
    {
        dataMark = dataFile.mark();
        iwriter.mark();
    }

    /** Rewinds data and index files to the last mark(), discarding anything appended since. */
    public void resetAndTruncate()
    {
        try
        {
            dataFile.resetAndTruncate(dataMark);
            iwriter.resetAndTruncate();
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

    /**
     * Performs sanity checks on {@code decoratedKey} and returns the position in the data
     * file before any data is written.
     *
     * NOTE: the original file declared two beforeAppend overloads (DecoratedKey&lt;?&gt; and raw
     * DecoratedKey); both erase to the same signature, which is a compile error, so they are
     * merged here, keeping the assertion (dev-time, rejects duplicates) and the exception
     * path (always on, rejects descending keys).
     *
     * @throws IOException if the key is null or below the last written key
     */
    private long beforeAppend(DecoratedKey decoratedKey) throws IOException
    {
        if (decoratedKey == null)
        {
            throw new IOException("Keys must not be null.");
        }
        assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0 : "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
        if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
        {
            logger.info("Last written key : " + lastWrittenKey);
            logger.info("Current key : " + decoratedKey);
            logger.info("Writing into file " + getFilename());
            throw new IOException("Keys must be written in ascending order.");
        }
        return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
    }

    /** Updates first/last bookkeeping and records the index entry for a row just written. */
    private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
    {
        lastWrittenKey = decoratedKey;
        this.last = lastWrittenKey;
        if (null == this.first)
            this.first = lastWrittenKey;
        if (logger.isTraceEnabled())
            logger.trace("wrote " + decoratedKey + " at " + dataPosition);
        RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
        iwriter.append(decoratedKey, entry);
        dbuilder.addPotentialBoundary(dataPosition);
        return entry;
    }

    /**
     * Appends a pre-compacted row; row.write() emits its own 8-byte size header.
     *
     * @return the index entry created for the row
     */
    public RowIndexEntry append(AbstractCompactedRow row) throws IOException
    {
        long currentPosition = beforeAppend(row.key);
        ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
        long dataStart = dataFile.getFilePointer();
        long dataSize = row.write(dataFile.stream);
        // dataStart + 8: skip the size header written by row.write()
        assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
        sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
        return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
    }

    /** Appends an in-memory column family as a single row. */
    public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
    {
        long startPosition = beforeAppend(decoratedKey);
        ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
        // build column index
        // TODO: build and serialization could be merged
        ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
        // write out row size + data
        dataFile.stream.writeLong(cf.serializedSizeForSSTable());
        ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
        afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
        sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
    }

    /**
     * Streams one row from {@code in} into the data file, re-serializing column by column
     * so counter deltas can be marked for clearing and timestamp/tombstone statistics
     * collected along the way.
     *
     * @return the data-file position at which the row starts
     */
    public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
    {
        long currentPosition = beforeAppend(key);
        ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
        long dataStart = dataFile.getFilePointer();
        // write row size
        dataFile.stream.writeLong(dataSize);
        // cf deletion info (local deletion time + markedForDeleteAt)
        int lct = in.readInt();
        long mfda = in.readLong();
        DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
        dataFile.stream.writeInt(lct);
        dataFile.stream.writeLong(mfda);
        // column count
        int columnCount = in.readInt();
        dataFile.stream.writeInt(columnCount);
        // deserialize each column to obtain maxTimestamp and immediately serialize it.
        long maxTimestamp = Long.MIN_VALUE;
        StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
        ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
        ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
        IColumnSerializer columnSerializer = cf.getColumnSerializer();
        for (int i = 0; i < columnCount; i++)
        {
            // deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
            // data size received, so we must reserialize the exact same data
            IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
            if (column instanceof CounterColumn)
            {
                column = ((CounterColumn) column).markDeltaToBeCleared();
            }
            else if (column instanceof SuperColumn)
            {
                // counters may also be nested inside a super column
                SuperColumn sc = (SuperColumn) column;
                for (IColumn subColumn : sc.getSubColumns())
                {
                    if (subColumn instanceof CounterColumn)
                    {
                        IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
                        sc.replace(subColumn, marked);
                    }
                }
            }
            int deletionTime = column.getLocalDeletionTime();
            if (deletionTime < Integer.MAX_VALUE)
            {
                tombstones.update(deletionTime);
            }
            maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
            cf.getColumnSerializer().serialize(column, dataFile.stream);
            columnIndexer.add(column);
        }
        assert dataSize == dataFile.getFilePointer() - (dataStart + 8) : "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
        sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
        sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
        sstableMetadataCollector.addColumnCount(columnCount);
        sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
        afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
        return currentPosition;
    }

    /**
     * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
     */
    public void abort()
    {
        assert descriptor.temporary;
        FileUtils.closeQuietly(iwriter);
        FileUtils.closeQuietly(dataFile);
        try
        {
            Set<Component> components = SSTable.componentsFor(descriptor);
            if (!components.isEmpty())
                SSTable.delete(descriptor, components);
        }
        catch (Exception e)
        {
            // best-effort cleanup; deletion failure must not mask the original error
            logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
        }
    }

    public SSTableReader closeAndOpenReader() throws IOException
    {
        return closeAndOpenReader(System.currentTimeMillis());
    }

    /**
     * Finishes all components, renames them from their temporary names, and opens the
     * result as a reader. This writer is unusable afterwards.
     */
    public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
    {
        // index and filter
        iwriter.close();
        // main data, close will truncate if necessary
        dataFile.close();
        // write sstable statistics
        SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
        writeMetadata(descriptor, sstableMetadata);
        maybeWriteDigest();
        // remove the 'tmp' marker from all components
        final Descriptor newdesc = rename(descriptor, components);
        // finalize in-memory state for the reader
        SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
        SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
        SSTableReader sstable = SSTableReader.internalOpen(newdesc, components, metadata, partitioner, ifile, dfile, iwriter.summary, iwriter.bf, maxDataAge, sstableMetadata);
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(last);
        // drop references so further use of this writer fails fast
        iwriter = null;
        dbuilder = null;
        return sstable;
    }

    /** Writes a sha1sum-compatible digest file for the data component, if one was computed. */
    private void maybeWriteDigest() throws IOException
    {
        byte[] digest = dataFile.digest();
        if (digest == null)
            return; // compressed tables compute no digest
        SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
        try
        {
            // Writing output compatible with sha1sum
            Descriptor newdesc = descriptor.asTemporary(false);
            String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
            String dataFileName = tmp[tmp.length - 1];
            out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
        }
        finally
        {
            out.close(); // close even if write fails, so the fd is not leaked
        }
    }

    private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
    {
        SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
        try
        {
            SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
        }
        finally
        {
            out.close(); // close even if serialization fails, so the fd is not leaked
        }
    }

    static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
    {
        Descriptor newdesc = tmpdesc.asTemporary(false);
        rename(tmpdesc, newdesc, components);
        return newdesc;
    }

    public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
    {
        try
        {
            // do -Data last because -Data present should mean the sstable was completely renamed before crash
            for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
                FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
            FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

    /** @return the current write position in the data file */
    public long getFilePointer()
    {
        return dataFile.getFilePointer();
    }

    /**
     * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
     */
    class IndexWriter implements Closeable
    {
        private final SequentialWriter indexFile;
        public final SegmentedFile.Builder builder;
        public final IndexSummary summary;
        public final Filter bf;
        private FileMark mark;

        IndexWriter(long keyCount) throws IOException
        {
            indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
            builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
            summary = new IndexSummary(keyCount);
            Double fpChance = metadata.getBloomFilterFpChance();
            if (fpChance != null && fpChance == 0)
            {
                // paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
                logger.error("Bloom filter FP chance of zero isn't supposed to happen");
                fpChance = null;
            }
            // no (valid) configured fp chance: fall back to the default 15-bucket filter
            bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15) : FilterFactory.getFilter(keyCount, fpChance);
        }

        /** Adds the key to the bloom filter and writes its entry to index file, summary and builder. */
        public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
        {
            bf.add(key.key);
            long indexPosition = indexFile.getFilePointer();
            ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
            RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
            if (logger.isTraceEnabled())
                logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
            summary.maybeAddEntry(key, indexPosition);
            builder.addPotentialBoundary(indexPosition);
        }

        /**
         * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
         */
        public void close() throws IOException
        {
            // bloom filter -- flush + fsync before close; close in finally so the fd
            // is not leaked if serialization fails
            FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
            DataOutputStream stream = new DataOutputStream(fos);
            try
            {
                FilterFactory.serialize(bf, stream, descriptor.filterType);
                stream.flush();
                fos.getFD().sync();
            }
            finally
            {
                stream.close(); // also closes fos
            }
            // index
            long position = indexFile.getFilePointer();
            // calls force
            indexFile.close();
            FileUtils.truncate(indexFile.getPath(), position);
            // finalize in-memory index state
            summary.complete();
        }

        /** Remembers the current index-file position for a later resetAndTruncate(). */
        public void mark()
        {
            mark = indexFile.mark();
        }

        public void resetAndTruncate() throws IOException
        {
            // we can't un-set the bloom filter addition, but extra keys in there are harmless.
            // we can't reset dbuilder either, but that is the last thing called in afterappend so
            // we assume that if that worked then we won't be trying to reset.
            indexFile.resetAndTruncate(mark);
        }

        @Override
        public String toString()
        {
            return "IndexWriter(" + descriptor + ")";
        }
    }
}
// MERGE ARTIFACT ("Safe"): a third, reformatted copy of SSTableWriter begins below and is
// truncated at an unresolved merge-conflict marker; it must be removed before compiling.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
/**
 * Convenience constructor: resolves the CFMetaData from the schema via the
 * filename's Descriptor, uses the globally configured partitioner, and a
 * fresh SSTableMetadata collector.
 *
 * @param filename path of the (temporary) sstable data file to create
 * @param keyCount estimated number of keys, used to size the bloom filter and index summary
 */
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
/**
 * Computes the set of on-disk components this writer will produce.
 * Compressed tables get a COMPRESSION_INFO component; uncompressed tables
 * get a DIGEST component instead.
 */
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>();
components.add(Component.DATA);
components.add(Component.FILTER);
components.add(Component.PRIMARY_INDEX);
components.add(Component.STATS);
boolean compressed = metadata.compressionParameters().sstableCompressor != null;
// it would feel safer to actually add the digest component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(compressed ? Component.COMPRESSION_INFO : Component.DIGEST);
return components;
}
/**
 * Primary constructor. Opens the data file (compressed or plain, per the
 * table's compression parameters) and the index writer.
 *
 * @param filename path of the (temporary) sstable data file to create
 * @param keyCount estimated number of keys, used to size the bloom filter and index summary
 * @param metadata column family metadata (compression settings, bloom filter fp chance)
 * @param partitioner partitioner used to order keys
 * @param sstableMetadataCollector accumulates per-sstable statistics as rows are appended
 */
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
// uncompressed data files get a running digest so maybeWriteDigest() can emit a -Digest component
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
/** Records the current position of both the data file and the index writer, for resetAndTruncate(). */
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
/**
 * Rewinds data file and index to the positions recorded by the last mark(),
 * discarding anything written since. Wraps IOException in IOError because
 * callers do not expect a checked exception here.
 */
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
* Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
*/
<<<<<<< MINE
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
=======
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
>>>>>>> YOURS
/**
 * Bookkeeping after a row has been written at dataPosition: tracks last (and,
 * once, first) written key, appends the row's entry to the index writer, and
 * marks a potential mmap-segment boundary at the row start.
 *
 * @return the RowIndexEntry created for the row
 */
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
// the very first key appended becomes the sstable's minimum key
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
/**
 * Appends a pre-compacted row: the row serializes itself (including its size header)
 * directly into the data file.
 *
 * @return the index entry created for the appended row
 */
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// dataStart + 8 skips the long row-size header the row wrote before its data
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
/**
 * Appends an in-memory column family as a row: writes the key, builds the
 * column index, then writes the serialized row size and data.
 */
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
/**
 * Appends a row received from a streaming peer, re-serializing column by column.
 * Columns are deserialized only to collect statistics (maxTimestamp, tombstone
 * histogram), mark counter deltas for clearing, and build the column index;
 * they are immediately re-serialized byte-compatibly (PRESERVE_SIZE) so the
 * dataSize received from the peer stays valid.
 *
 * @param key row key
 * @param metadata column family metadata used to deserialize columns
 * @param dataSize total serialized row data size as announced by the sender
 * @param in stream to read the row from
 * @return the data-file position at which the row starts
 */
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data: local deletion time + markedForDeleteAt, copied through verbatim
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
// counter columns (top-level or inside a super column) must have their local
// delta marked for clearing when crossing node boundaries
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
// a localDeletionTime below MAX_VALUE means the column carries a tombstone
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
// dataStart + 8 skips the long row-size header written above
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
 */
public void abort()
{
// only ever called on a writer for a temporary sstable, never a live one
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
// best-effort cleanup: log and continue rather than masking the original failure
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
/** Finalizes the sstable and opens it for reading, stamping it with the current time as its max data age. */
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes the sstable: closes index/filter and data files, writes statistics
 * and (optionally) the digest, renames all components from their temporary
 * names, and opens an SSTableReader over the result. This writer is no longer
 * usable afterwards (iwriter/dbuilder are nulled out).
 *
 * @param maxDataAge timestamp recorded as the newest possible data in this sstable
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
// release writer state; this writer must not be reused
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * Writes a sha1sum-compatible -Digest component for the data file, if a digest
 * was computed (it is only computed for uncompressed data files; see the
 * constructor's setComputeDigest() call). No-op otherwise.
 */
private void maybeWriteDigest() throws IOException
{
    byte[] digest = dataFile.digest();
    if (digest == null)
        return;
    SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
    try
    {
        // Writing output compatible with sha1sum: "<hex digest> <final data file name>"
        Descriptor newdesc = descriptor.asTemporary(false);
        String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
        String dataFileName = tmp[tmp.length - 1];
        out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
    }
    finally
    {
        // previously leaked the writer if the write above threw
        out.close();
    }
}
/**
 * Serializes the collected sstable statistics into the -Stats component.
 */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
    SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
    try
    {
        SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
    }
    finally
    {
        // previously leaked the writer if serialization threw
        out.close();
    }
}
/**
 * Renames all components from their temporary names to their final names and
 * returns the non-temporary Descriptor.
 */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
/**
 * Renames each component file from tmpdesc's name to newdesc's name,
 * wrapping IOException in IOError for callers.
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
/** @return the current write position in the data file. */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable
{
    private final SequentialWriter indexFile;
    public final SegmentedFile.Builder builder;
    public final IndexSummary summary;
    public final Filter bf;
    private FileMark mark;

    IndexWriter(long keyCount) throws IOException
    {
        indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
        builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
        summary = new IndexSummary(keyCount);
        Double fpChance = metadata.getBloomFilterFpChance();
        if (fpChance != null && fpChance == 0)
        {
            // paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
            logger.error("Bloom filter FP chance of zero isn't supposed to happen");
            fpChance = null;
        }
        // no configured fp chance: fall back to the legacy fixed sizing (15 buckets per element)
        bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
                              : FilterFactory.getFilter(keyCount, fpChance);
    }

    /**
     * Adds the key to the bloom filter and writes its index entry; also feeds
     * the index summary and records a potential mmap-segment boundary.
     */
    public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
    {
        bf.add(key.key);
        long indexPosition = indexFile.getFilePointer();
        ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
        RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
        if (logger.isTraceEnabled())
            logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
        summary.maybeAddEntry(key, indexPosition);
        builder.addPotentialBoundary(indexPosition);
    }

    /**
     * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
     */
    public void close() throws IOException
    {
        // bloom filter: serialize, flush and fsync so the component is durable once visible.
        // try/finally fixes a descriptor leak when serialization or sync threw.
        FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
        try
        {
            DataOutputStream stream = new DataOutputStream(fos);
            FilterFactory.serialize(bf, stream, descriptor.filterType);
            stream.flush();
            fos.getFD().sync();
            stream.close();
        }
        finally
        {
            // no-op if the stream was closed normally above
            FileUtils.closeQuietly(fos);
        }
        // index
        long position = indexFile.getFilePointer();
        indexFile.close(); // calls force
        FileUtils.truncate(indexFile.getPath(), position);
        // finalize in-memory index state
        summary.complete();
    }

    /** Records the current index-file position for resetAndTruncate(). */
    public void mark()
    {
        mark = indexFile.mark();
    }

    public void resetAndTruncate() throws IOException
    {
        // we can't un-set the bloom filter addition, but extra keys in there are harmless.
        // we can't reset dbuilder either, but that is the last thing called in afterappend so
        // we assume that if that worked then we won't be trying to reset.
        indexFile.resetAndTruncate(mark);
    }

    @Override
    public String toString()
    {
        return "IndexWriter(" + descriptor + ")";
    }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable {
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
* Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
*/
<<<<<<< MINE
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
{
if (decoratedKey == null)
{
throw new IOException("Keys must not be null.");
}
if (lastWrittenKey != null && lastWrittenKey.compareTo(decoratedKey) > 0)
{
logger.info("Last written key : " + lastWrittenKey);
logger.info("Current key : " + decoratedKey);
logger.info("Writing into file " + getFilename());
throw new IOException("Keys must be written in ascending order.");
}
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
=======
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
>>>>>>> YOURS
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
* After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
*/
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * Writes a sha1sum-compatible -Digest component for the data file, if a digest
 * was computed (it is only computed for uncompressed data files; see the
 * constructor's setComputeDigest() call). No-op otherwise.
 */
private void maybeWriteDigest() throws IOException
{
    byte[] digest = dataFile.digest();
    if (digest == null)
        return;
    SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
    try
    {
        // Writing output compatible with sha1sum: "<hex digest> <final data file name>"
        Descriptor newdesc = descriptor.asTemporary(false);
        String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
        String dataFileName = tmp[tmp.length - 1];
        out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
    }
    finally
    {
        // previously leaked the writer if the write above threw
        out.close();
    }
}
/**
 * Serializes the collected sstable statistics into the -Stats component.
 */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
    SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
    try
    {
        SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
    }
    finally
    {
        // previously leaked the writer if serialization threw
        out.close();
    }
}
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable
{
    private final SequentialWriter indexFile;
    public final SegmentedFile.Builder builder;
    public final IndexSummary summary;
    public final Filter bf;
    private FileMark mark;

    IndexWriter(long keyCount) throws IOException
    {
        indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
        builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
        summary = new IndexSummary(keyCount);
        Double fpChance = metadata.getBloomFilterFpChance();
        if (fpChance != null && fpChance == 0)
        {
            // paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
            logger.error("Bloom filter FP chance of zero isn't supposed to happen");
            fpChance = null;
        }
        // no configured fp chance: fall back to the legacy fixed sizing (15 buckets per element)
        bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
                              : FilterFactory.getFilter(keyCount, fpChance);
    }

    /**
     * Adds the key to the bloom filter and writes its index entry; also feeds
     * the index summary and records a potential mmap-segment boundary.
     */
    public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
    {
        bf.add(key.key);
        long indexPosition = indexFile.getFilePointer();
        ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
        RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
        if (logger.isTraceEnabled())
            logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
        summary.maybeAddEntry(key, indexPosition);
        builder.addPotentialBoundary(indexPosition);
    }

    /**
     * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
     */
    public void close() throws IOException
    {
        // bloom filter: serialize, flush and fsync so the component is durable once visible.
        // try/finally fixes a descriptor leak when serialization or sync threw.
        FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
        try
        {
            DataOutputStream stream = new DataOutputStream(fos);
            FilterFactory.serialize(bf, stream, descriptor.filterType);
            stream.flush();
            fos.getFD().sync();
            stream.close();
        }
        finally
        {
            // no-op if the stream was closed normally above
            FileUtils.closeQuietly(fos);
        }
        // index
        long position = indexFile.getFilePointer();
        indexFile.close(); // calls force
        FileUtils.truncate(indexFile.getPath(), position);
        // finalize in-memory index state
        summary.complete();
    }

    /** Records the current index-file position for resetAndTruncate(). */
    public void mark()
    {
        mark = indexFile.mark();
    }

    public void resetAndTruncate() throws IOException
    {
        // we can't un-set the bloom filter addition, but extra keys in there are harmless.
        // we can't reset dbuilder either, but that is the last thing called in afterappend so
        // we assume that if that worked then we won't be trying to reset.
        indexFile.resetAndTruncate(mark);
    }

    @Override
    public String toString()
    {
        return "IndexWriter(" + descriptor + ")";
    }
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
/**
 * Rolls the data and index files back to the positions recorded by the last
 * call to mark(), discarding anything appended since.
 *
 * @throws IOError (unchecked) if truncation of either file fails
 */
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
<<<<<<< MINE
/**
* Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
*/
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
=======
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
>>>>>>> YOURS
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
/**
 * Bookkeeping that follows writing a row: tracks first/last keys, writes the
 * index entry and registers a potential mmap-segment boundary.
 *
 * @param decoratedKey the key that was just written
 * @param dataPosition position in the data file where the row starts
 * @param delInfo row-level deletion info recorded in the index entry
 * @param index column index built for the row
 * @return the index entry created for the row
 */
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
// the first key ever written becomes the sstable's minimum key
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
/**
 * Appends a pre-compacted row, letting the row serialize itself (row size,
 * deletion info and columns) into the data file.
 *
 * @param row the compacted row to write
 * @return the index entry created for the row
 * @throws IOException on write failure
 */
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
// dataStart + 8 accounts for the long row-size header included in the write
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
/**
 * Appends an in-memory column family as one row in the data file.
 *
 * @param decoratedKey row key
 * @param cf the columns to serialize
 * @throws IOException on write failure
 */
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
/**
 * Appends a row received from a streaming peer, re-serializing it column by
 * column so counter deltas can be marked for clearing and statistics
 * (timestamps, tombstones) collected on the fly.
 *
 * @param key row key
 * @param metadata schema of the column family being received
 * @param dataSize serialized size of the row as reported by the sender
 * @param in stream positioned at the start of the row's deletion info
 * @return position in the data file at which the row was written
 * @throws IOException on read or write failure
 */
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf deletion info: local deletion time + markedForDeleteAt, echoed back verbatim
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
// mark counter deltas to be cleared, for top-level counter columns and
// for counters nested inside super columns
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
// a local deletion time below MAX_VALUE indicates a tombstone
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
// dataStart + 8 accounts for the long row-size header written above
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
 * After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
 */
public void abort()
{
assert descriptor.temporary;
// close quietly: we are already on a failure path
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
// best-effort cleanup: log the failure but do not rethrow
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
/**
 * Finalizes the sstable and opens it for reading, recording the current time
 * as the sstable's max data age.
 *
 * @see #closeAndOpenReader(long)
 */
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
/**
 * Finalizes all components (index, filter, data, statistics, digest), strips
 * the temporary-name marker from them, and opens the result as a reader.
 *
 * @param maxDataAge timestamp recorded as the newest data in the sstable
 * @return a reader over the completed sstable
 * @throws IOException on write or rename failure
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
// null out writer state so this writer cannot be reused after completion
iwriter = null;
dbuilder = null;
return sstable;
}
/**
 * Writes a sha1sum-compatible -Digest component for the data file, if a
 * digest was computed (dataFile.digest() returns null otherwise, e.g. for
 * compressed data files).
 *
 * @throws IOException on write failure
 */
private void maybeWriteDigest() throws IOException
{
    byte[] digest = dataFile.digest();
    if (digest == null)
        return;
    SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
    try
    {
        // Writing output compatible with sha1sum: "<hex digest> <data file name>";
        // use the final (non-temporary) data file name
        Descriptor newdesc = descriptor.asTemporary(false);
        String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
        String dataFileName = tmp[tmp.length - 1];
        out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
    }
    finally
    {
        // close the writer even if the write fails, so the fd is not leaked
        out.close();
    }
}
/**
 * Serializes the collected sstable statistics to the -Statistics component.
 *
 * @param desc descriptor naming the component files
 * @param sstableMetadata finalized statistics to persist
 * @throws IOException on write failure
 */
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
    SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
    try
    {
        SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
    }
    finally
    {
        // close the writer even if serialization fails, so the fd is not leaked
        out.close();
    }
}
/**
 * Strips the temporary marker from all of the sstable's components.
 *
 * @return the final (non-temporary) descriptor
 */
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
    Descriptor finalDesc = tmpdesc.asTemporary(false);
    rename(tmpdesc, finalDesc, components);
    return finalDesc;
}
/**
 * Renames every component of an sstable from its temporary to its final name.
 *
 * @param tmpdesc descriptor of the temporary files
 * @param newdesc descriptor of the final files
 * @param components the components to rename
 * @throws IOError (unchecked) if any rename fails
 */
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
 * @return the current write position in the data file
 */
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
 * Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
 */
class IndexWriter implements Closeable
{
private final SequentialWriter indexFile;
public final SegmentedFile.Builder builder;
public final IndexSummary summary;
public final Filter bf;
private FileMark mark;
/**
 * @param keyCount estimated number of keys, used to size the summary and bloom filter
 */
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
// with no configured FP chance, fall back to the fixed-size-per-key filter (15)
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
: FilterFactory.getFilter(keyCount, fpChance);
}
/**
 * Writes one index entry: adds the key to the bloom filter, appends the
 * (key, entry) pair to the index file, and feeds the summary and the
 * segment-boundary candidates.
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
 * Closes the index and bloomfilter, making the public state of this writer valid for consumption.
 */
public void close() throws IOException
{
// bloom filter
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
// sync to disk before closing so the filter component is durable
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
/**
 * Records the current index file position for a later resetAndTruncate().
 */
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.sstable;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Pattern;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.*;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.*;
public class SSTableWriter extends SSTable
{
private static final Logger logger = LoggerFactory.getLogger(SSTableWriter.class);
private IndexWriter iwriter;
private SegmentedFile.Builder dbuilder;
private final SequentialWriter dataFile;
private DecoratedKey lastWrittenKey;
private FileMark dataMark;
private final SSTableMetadata.Collector sstableMetadataCollector;
public SSTableWriter(String filename, long keyCount) throws IOException
{
this(filename,
keyCount,
Schema.instance.getCFMetaData(Descriptor.fromFilename(filename)),
StorageService.getPartitioner(),
SSTableMetadata.createCollector());
}
private static Set<Component> components(CFMetaData metadata)
{
Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS));
if (metadata.compressionParameters().sstableCompressor != null)
components.add(Component.COMPRESSION_INFO);
else
// it would feel safer to actually add this component later in maybeWriteDigest(),
// but the components are unmodifiable after construction
components.add(Component.DIGEST);
return components;
}
public SSTableWriter(String filename,
long keyCount,
CFMetaData metadata,
IPartitioner<?> partitioner,
SSTableMetadata.Collector sstableMetadataCollector) throws IOException
{
super(Descriptor.fromFilename(filename),
components(metadata),
metadata,
partitioner);
iwriter = new IndexWriter(keyCount);
if (compression)
{
dbuilder = SegmentedFile.getCompressedBuilder();
dataFile = CompressedSequentialWriter.open(getFilename(),
descriptor.filenameFor(Component.COMPRESSION_INFO),
true,
metadata.compressionParameters(),
sstableMetadataCollector);
}
else
{
dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
dataFile = SequentialWriter.open(new File(getFilename()), true);
dataFile.setComputeDigest();
}
this.sstableMetadataCollector = sstableMetadataCollector;
}
public void mark()
{
dataMark = dataFile.mark();
iwriter.mark();
}
public void resetAndTruncate()
{
try
{
dataFile.resetAndTruncate(dataMark);
iwriter.resetAndTruncate();
}
catch (IOException e)
{
throw new IOError(e);
}
}
<<<<<<< MINE
/**
* Perform sanity checks on @param decoratedKey and @return the position in the data file before any data is written
*/
private long beforeAppend(DecoratedKey<?> decoratedKey) throws IOException
=======
private long beforeAppend(DecoratedKey decoratedKey) throws IOException
>>>>>>> YOURS
{
assert decoratedKey != null : "Keys must not be null";
assert lastWrittenKey == null || lastWrittenKey.compareTo(decoratedKey) < 0
: "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename();
return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
}
private RowIndexEntry afterAppend(DecoratedKey decoratedKey, long dataPosition, DeletionInfo delInfo, ColumnIndex index) throws IOException
{
lastWrittenKey = decoratedKey;
this.last = lastWrittenKey;
if(null == this.first)
this.first = lastWrittenKey;
if (logger.isTraceEnabled())
logger.trace("wrote " + decoratedKey + " at " + dataPosition);
RowIndexEntry entry = RowIndexEntry.create(dataPosition, delInfo, index);
iwriter.append(decoratedKey, entry);
dbuilder.addPotentialBoundary(dataPosition);
return entry;
}
public RowIndexEntry append(AbstractCompactedRow row) throws IOException
{
long currentPosition = beforeAppend(row.key);
ByteBufferUtil.writeWithShortLength(row.key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
long dataSize = row.write(dataFile.stream);
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
return afterAppend(row.key, currentPosition, row.deletionInfo(), row.index());
}
public void append(DecoratedKey decoratedKey, ColumnFamily cf) throws IOException
{
long startPosition = beforeAppend(decoratedKey);
ByteBufferUtil.writeWithShortLength(decoratedKey.key, dataFile.stream);
// build column index
// TODO: build and serialization could be merged
ColumnIndex index = new ColumnIndex.Builder(cf.getComparator(), decoratedKey.key, cf.getColumnCount()).build(cf);
// write out row size + data
dataFile.stream.writeLong(cf.serializedSizeForSSTable());
ColumnFamily.serializer().serializeForSSTable(cf, dataFile.stream);
afterAppend(decoratedKey, startPosition, cf.deletionInfo(), index);
sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
}
public long appendFromStream(DecoratedKey key, CFMetaData metadata, long dataSize, DataInput in) throws IOException
{
long currentPosition = beforeAppend(key);
ByteBufferUtil.writeWithShortLength(key.key, dataFile.stream);
long dataStart = dataFile.getFilePointer();
// write row size
dataFile.stream.writeLong(dataSize);
// cf data
int lct = in.readInt();
long mfda = in.readLong();
DeletionInfo deletionInfo = new DeletionInfo(mfda, lct);
dataFile.stream.writeInt(lct);
dataFile.stream.writeLong(mfda);
// column size
int columnCount = in.readInt();
dataFile.stream.writeInt(columnCount);
// deserialize each column to obtain maxTimestamp and immediately serialize it.
long maxTimestamp = Long.MIN_VALUE;
StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
ColumnFamily cf = ColumnFamily.create(metadata, ArrayBackedSortedColumns.factory());
ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf.getComparator(), key.key, columnCount);
IColumnSerializer columnSerializer = cf.getColumnSerializer();
for (int i = 0; i < columnCount; i++)
{
// deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
// data size received, so we must reserialize the exact same data
IColumn column = columnSerializer.deserialize(in, IColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE);
if (column instanceof CounterColumn)
{
column = ((CounterColumn) column).markDeltaToBeCleared();
}
else if (column instanceof SuperColumn)
{
SuperColumn sc = (SuperColumn) column;
for (IColumn subColumn : sc.getSubColumns())
{
if (subColumn instanceof CounterColumn)
{
IColumn marked = ((CounterColumn) subColumn).markDeltaToBeCleared();
sc.replace(subColumn, marked);
}
}
}
int deletionTime = column.getLocalDeletionTime();
if (deletionTime < Integer.MAX_VALUE)
{
tombstones.update(deletionTime);
}
maxTimestamp = Math.max(maxTimestamp, column.maxTimestamp());
cf.getColumnSerializer().serialize(column, dataFile.stream);
columnIndexer.add(column);
}
assert dataSize == dataFile.getFilePointer() - (dataStart + 8)
: "incorrect row data size " + dataSize + " written to " + dataFile.getPath() + "; correct is " + (dataFile.getFilePointer() - (dataStart + 8));
sstableMetadataCollector.updateMaxTimestamp(maxTimestamp);
sstableMetadataCollector.addRowSize(dataFile.getFilePointer() - currentPosition);
sstableMetadataCollector.addColumnCount(columnCount);
sstableMetadataCollector.mergeTombstoneHistogram(tombstones);
afterAppend(key, currentPosition, deletionInfo, columnIndexer.build());
return currentPosition;
}
/**
* After failure, attempt to close the index writer and data file before deleting all temp components for the sstable
*/
public void abort()
{
assert descriptor.temporary;
FileUtils.closeQuietly(iwriter);
FileUtils.closeQuietly(dataFile);
try
{
Set<Component> components = SSTable.componentsFor(descriptor);
if (!components.isEmpty())
SSTable.delete(descriptor, components);
}
catch (Exception e)
{
logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
}
}
public SSTableReader closeAndOpenReader() throws IOException
{
return closeAndOpenReader(System.currentTimeMillis());
}
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data, close will truncate if necessary
dataFile.close();
// write sstable statistics
SSTableMetadata sstableMetadata = sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName());
writeMetadata(descriptor, sstableMetadata);
maybeWriteDigest();
// remove the 'tmp' marker from all components
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc,
components,
metadata,
partitioner,
ifile,
dfile,
iwriter.summary,
iwriter.bf,
maxDataAge,
sstableMetadata);
sstable.first = getMinimalKey(first);
sstable.last = getMinimalKey(last);
iwriter = null;
dbuilder = null;
return sstable;
}
private void maybeWriteDigest() throws IOException
{
byte[] digest = dataFile.digest();
if (digest == null)
return;
SequentialWriter out = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_DIGEST)), true);
// Writting output compatible with sha1sum
Descriptor newdesc = descriptor.asTemporary(false);
String[] tmp = newdesc.filenameFor(SSTable.COMPONENT_DATA).split(Pattern.quote(File.separator));
String dataFileName = tmp[tmp.length - 1];
out.write(String.format("%s %s", Hex.bytesToHex(digest), dataFileName).getBytes());
out.close();
}
private static void writeMetadata(Descriptor desc, SSTableMetadata sstableMetadata) throws IOException
{
SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(SSTable.COMPONENT_STATS)), true);
SSTableMetadata.serializer.serialize(sstableMetadata, out.stream);
out.close();
}
static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
Descriptor newdesc = tmpdesc.asTemporary(false);
rename(tmpdesc, newdesc, components);
return newdesc;
}
public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
{
try
{
// do -Data last because -Data present should mean the sstable was completely renamed before crash
for (Component component : Sets.difference(components, Collections.singleton(Component.DATA)))
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(component), newdesc.filenameFor(component));
FBUtilities.renameWithConfirm(tmpdesc.filenameFor(Component.DATA), newdesc.filenameFor(Component.DATA));
}
catch (IOException e)
{
throw new IOError(e);
}
}
public long getFilePointer()
{
return dataFile.getFilePointer();
}
/**
* Encapsulates writing the index and filter for an SSTable. The state of this object is not valid until it has been closed.
*/
class IndexWriter implements Closeable
{
private final SequentialWriter indexFile;
public final SegmentedFile.Builder builder;
public final IndexSummary summary;
public final Filter bf;
private FileMark mark;
IndexWriter(long keyCount) throws IOException
{
indexFile = SequentialWriter.open(new File(descriptor.filenameFor(SSTable.COMPONENT_INDEX)), true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
Double fpChance = metadata.getBloomFilterFpChance();
if (fpChance != null && fpChance == 0)
{
// paranoia -- we've had bugs in the thrift <-> avro <-> CfDef dance before, let's not let that break things
logger.error("Bloom filter FP chance of zero isn't supposed to happen");
fpChance = null;
}
bf = fpChance == null ? FilterFactory.getFilter(keyCount, 15)
: FilterFactory.getFilter(keyCount, fpChance);
}
public void append(DecoratedKey key, RowIndexEntry indexEntry) throws IOException
{
bf.add(key.key);
long indexPosition = indexFile.getFilePointer();
ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
if (logger.isTraceEnabled())
logger.trace("wrote index entry: " + indexEntry + " at " + indexPosition);
summary.maybeAddEntry(key, indexPosition);
builder.addPotentialBoundary(indexPosition);
}
/**
* Closes the index and bloomfilter, making the public state of this writer valid for consumption.
*/
public void close() throws IOException
{
// bloom filter
FileOutputStream fos = new FileOutputStream(descriptor.filenameFor(SSTable.COMPONENT_FILTER));
DataOutputStream stream = new DataOutputStream(fos);
FilterFactory.serialize(bf, stream, descriptor.filterType);
stream.flush();
fos.getFD().sync();
stream.close();
// index
long position = indexFile.getFilePointer();
indexFile.close(); // calls force
FileUtils.truncate(indexFile.getPath(), position);
// finalize in-memory index state
summary.complete();
}
public void mark()
{
mark = indexFile.mark();
}
public void resetAndTruncate() throws IOException
{
// we can't un-set the bloom filter addition, but extra keys in there are harmless.
// we can't reset dbuilder either, but that is the last thing called in afterappend so
// we assume that if that worked then we won't be trying to reset.
indexFile.resetAndTruncate(mark);
}
@Override
public String toString()
{
return "IndexWriter(" + descriptor + ")";
}
}
}
Diff Result
No diff
Case 12 - java_cassandra.rev_6a4af_a7b2f..SecondaryIndexManager.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(IColumn column) { }
public void update(IColumn oldColumn, IColumn column) { }
public void remove(IColumn current) { }
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return indexFor(name, indexes) != null;
}
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
return index;
}
return null;
}
public boolean indexes(IColumn column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public SecondaryIndex indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index.
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
// no-op if the column was not indexed
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null when nothing needs building (index already present or already built)
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
            logger.info("Creating new index : {}", cdef);
        }
        else
        {
            // Reuse the existing per-row instance and register the new column with it.
            // The previous message ("Creating new index") was logged in THIS branch,
            // which is exactly the case where no new index is created.
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Adding column {} to existing per-row index", cdef);
        }
    }
    else
    {
        index.init();
        logger.info("Creating new index : {}", cdef);
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately,
    // so we don't have to lock everything while we do the build. it's up to
    // the operator to wait
    // until the index is actually built before using in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * Exact-name lookup of the index registered for a column.
 * @param column the name of indexes column
 * @return the index
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
// Unlike getIndexForColumn, this matches by each index's own indexes()
// predicate rather than by exact map key, so it can resolve composite /
// full column names.
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
 * Remove the index
 */
public void invalidate()
{
// NOTE(review): per-row indexes appear multiple times in indexesByColumn
// (one entry per column), so a shared instance may be invalidated more
// than once — presumably invalidate() is idempotent; confirm in SecondaryIndex.
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
 * Flush all indexes to disk
 */
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return all built indexes (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex candidate = entry.getValue();
        // only report indexes whose build has completed for this column
        if (candidate.isIndexBuilt(entry.getKey()))
            built.add(candidate.getIndexName());
    }
    return built;
}
/**
 * Resolves an index name back to the column it indexes.
 *
 * @param idxName the index name to look up
 * @return the indexed column's name
 * @throws RuntimeException when no registered index has that name
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> candidate : indexesByColumn.entrySet())
    {
        if (candidate.getValue().getIndexName().equals(idxName))
            return candidate.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    ArrayList<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex index : indexesByColumn.values())
    {
        // getIndexCfs() is null for indexes that do not keep their data in a CFS
        ColumnFamilyStore indexCfs = index.getIndexCfs();
        if (indexCfs != null)
            backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity semantics: a per-row index instance is registered once per
    // column, but must appear only once in the result
    Set<SecondaryIndex> result = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index.getIndexCfs() == null)
            result.add(index);
    }
    return result;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    // getIndexes() already deduplicates shared per-row instances, so each
    // index contributes exactly once
    long liveSize = 0;
    for (SecondaryIndex index : getIndexes())
    {
        liveSize += index.getLiveSize();
    }
    return liveSize;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocated: most column families have no row-level indexes
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
// add() returns false for an already-seen class, so each row-level
// index type processes the row exactly once even though the same
// instance is registered under many columns
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column indexes receive one insert per indexed column in the row
for (IColumn column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (IColumn column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
// skip columns that have no index registered
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocated: only needed when a row-level index exists
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
// delete the whole row at most once per row-level index type
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
// per-column indexes delete each column entry individually
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
    // row-level indexes require the mixed updater so whole-row updates happen once
    if (includeRowIndexes && !rowLevelIndexMap.isEmpty())
        return new MixedIndexUpdater(key);
    // no indexes at all: hand back the shared no-op updater
    if (indexesByColumn.isEmpty())
        return nullUpdater;
    return new PerColumnIndexUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // bucket the referenced columns by the concrete index class handling them
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(indexType);
        if (bucket == null)
        {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, bucket);
        }
        bucket.add(expression.column_name);
    }
    // one searcher per index type, created by any index of that type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> bucket : columnsByIndexType.values())
        searchers.add(getIndexForColumn(bucket.iterator().next()).createSecondaryIndexSearcher(bucket));
    return searchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // Searching across different index types at once is not supported yet
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @return the registered indexes whose names are contained in idxNames
 * (duplicates possible for per-row indexes registered under several columns)
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
/** Marks each named index as built (persisted state). */
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
/** Marks each named index as removed (persisted state). */
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/**
 * Validates a column against the index covering it, if any.
 * @return true when the column is acceptable or no index covers it
 */
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name);
return index != null ? index.validate(column) : true;
}
/**
 * Callback interface used by the write path to keep secondary indexes in
 * sync with column-level changes applied to a row.
 */
public static interface Updater
{
// a column was newly inserted
public void insert(IColumn column);
// an existing column was replaced by a newer version
public void update(IColumn oldColumn, IColumn column);
// a column was removed from the row
public void remove(IColumn current);
}
/**
 * Updater used when only per-column indexes exist: every change is
 * forwarded to the index responsible for the affected column, if any.
 */
private class PerColumnIndexUpdater implements Updater
{
// row key the updates apply to
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
// tombstones carry no indexable value
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
// drop the stale entry first, then re-insert unless the new version is a tombstone
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void remove(IColumn column)
{
// removing a tombstone has no index effect
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
/**
 * Updater used when at least one row-level index exists alongside (possibly)
 * per-column indexes.  Per-column changes are forwarded individually, while
 * each row-level index type is asked to re-index the whole row at most once
 * per updater instance.
 */
private class MixedIndexUpdater implements Updater
{
// row key the updates apply to
private final DecoratedKey key;
// row-level index classes already triggered by this updater; prevents
// re-indexing the same row once per registered column
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
// tombstones carry no indexable value
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// row-level: re-index the whole row, but only once per index type
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
// drop the stale entry first, then re-insert unless the new version is a tombstone
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// row-level: re-index the whole row, but only once per index type
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
public void remove(IColumn column)
{
// removing a tombstone has no index effect
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// row-level: re-index the whole row, but only once per index type
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(IColumn column) { }
public void update(IColumn oldColumn, IColumn column) { }
public void remove(IColumn current) { }
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return indexFor(name, indexes) != null;
}
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
return index;
}
return null;
}
public boolean indexes(IColumn column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public SecondaryIndex indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (IColumn column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (IColumn column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name);
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
public void insert(IColumn column);
public void update(IColumn oldColumn, IColumn column);
public void remove(IColumn current);
}
private class PerColumnIndexUpdater implements Updater
{
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
 * Manages all the indexes associated with a given CFS.
 * Different types of indexes can be created across the same CF; per-column
 * indexes get one SecondaryIndex instance per indexed column, while row-level
 * ({@link PerRowSecondaryIndex}) implementations share a single instance
 * across all of their columns (tracked in {@code rowLevelIndexMap}).
 */
public class SecondaryIndexManager
{
    private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);

    /**
     * No-op updater handed out when a CF has no applicable indexes, so callers
     * can invoke the updater unconditionally without null checks.
     */
    public static final Updater nullUpdater = new Updater()
    {
        public void insert(IColumn column) { }
        public void update(IColumn oldColumn, IColumn column) { }
        public void remove(IColumn current) { }
        public void commit() {}
    };

    /**
     * Organizes the indexes by column name.
     * Concurrent + sorted: iterated by readers while writers add/remove indexes.
     */
    private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;

    /**
     * Keeps a single instance of a SecondaryIndex for many columns when the index type
     * has isRowLevelIndex() == true
     *
     * This allows updates to happen to an entire row at once
     */
    private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;

    /**
     * The underlying column family containing the source data for these indexes
     */
    public final ColumnFamilyStore baseCfs;

    public SecondaryIndexManager(ColumnFamilyStore baseCfs)
    {
        indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
        rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
        this.baseCfs = baseCfs;
    }

    /**
     * Drops and adds new indexes associated with the underlying CF so that the
     * set of managed indexes matches the current column metadata, then reloads
     * every surviving index exactly once.
     */
    public void reload()
    {
        // figure out what needs to be added and dropped.
        // future: if/when we have modifiable settings for secondary indexes,
        // they'll need to be handled here.
        Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
        for (ByteBuffer indexedColumn : indexedColumnNames)
        {
            ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
            // drop indexes whose column no longer exists or is no longer indexed
            if (def == null || def.getIndexType() == null)
                removeIndexedColumn(indexedColumn);
        }
        for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
            if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
                addIndexedColumn(cdef);
        // identity set: row-level indexes appear under multiple columns but must
        // only be reloaded once
        Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (reloadedIndexes.add(index))
                index.reload();
        }
    }

    /**
     * @return the names of all managed indexes (may contain fewer entries than
     * there are indexed columns, since row-level indexes are shared).
     */
    public Set<String> allIndexesNames()
    {
        Set<String> names = new HashSet<String>();
        for (SecondaryIndex index : indexesByColumn.values())
            names.add(index.getIndexName());
        return names;
    }

    /**
     * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
     * Does nothing if columns is empty.
     *
     * Caller must acquire and release references to the sstables used here.
     *
     * @param sstables the data to build from
     * @param idxNames the list of columns to index, ordered by comparator
     */
    public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
    {
        if (idxNames.isEmpty())
            return;
        logger.info(String.format("Submitting index build of %s for data in %s",
        idxNames, StringUtils.join(sstables, ", ")));
        SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
        Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
        try
        {
            // block until the build completes
            future.get();
        }
        catch (InterruptedException e)
        {
            // index builds are not expected to be interrupted
            throw new AssertionError(e);
        }
        catch (ExecutionException e)
        {
            throw new RuntimeException(e);
        }
        // persist the freshly built index data before declaring success
        flushIndexesBlocking();
        logger.info("Index build of " + idxNames + " complete");
    }

    /**
     * @return true if any index in {@code indexes} applies to the column {@code name}.
     */
    public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
    {
        return indexFor(name, indexes) != null;
    }

    /**
     * @return the first index in {@code indexes} that applies to the column
     * {@code name}, or null if none does.
     */
    public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
    {
        for (SecondaryIndex index : indexes)
        {
            if (index.indexes(name))
                return index;
        }
        return null;
    }

    public boolean indexes(IColumn column)
    {
        return indexes(column.name());
    }

    public boolean indexes(ByteBuffer name)
    {
        return indexes(name, indexesByColumn.values());
    }

    public SecondaryIndex indexFor(ByteBuffer name)
    {
        return indexFor(name, indexesByColumn.values());
    }

    /**
     * @return true if the indexes can handle the clause.
     */
    public boolean hasIndexFor(List<IndexExpression> clause)
    {
        if (clause == null || clause.isEmpty())
            return false;
        // It doesn't seem a clause can have multiple searchers, but since
        // getIndexSearchersForQuery returns a list ...
        List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
        if (searchers.isEmpty())
            return false;
        // every searcher must be able to serve the clause
        for (SecondaryIndexSearcher searcher : searchers)
            if (!searcher.isIndexing(clause))
                return false;
        return true;
    }

    /**
     * Removes an existing index
     * @param column the indexed column to remove
     */
    public void removeIndexedColumn(ByteBuffer column)
    {
        SecondaryIndex index = indexesByColumn.remove(column);
        if (index == null)
            return;
        // Remove this column from the row level index map
        if (index instanceof PerRowSecondaryIndex)
        {
            index.removeColumnDef(column);
            // If no columns are left on this CF, remove from row level lookup
            if (index.getColumnDefs().isEmpty())
                rowLevelIndexMap.remove(index.getClass());
        }
        index.removeIndex(column);
        SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
    }

    /**
     * Adds and builds a index for a column
     * @param cdef the column definition holding the index data
     * @return a future which the caller can optionally block on signaling the index is built,
     *         or null if the column is already indexed / the index is already built
     */
    public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
    {
        if (indexesByColumn.containsKey(cdef.name))
            return null;
        assert cdef.getIndexType() != null;
        SecondaryIndex index;
        try
        {
            index = SecondaryIndex.createInstance(baseCfs, cdef);
        }
        catch (ConfigurationException e)
        {
            throw new RuntimeException(e);
        }
        // Keep a single instance of the index per-cf for row level indexes
        // since we want all columns to be under the index
        if (index instanceof PerRowSecondaryIndex)
        {
            SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
            if (currentIndex == null)
            {
                // first column for this row-level index type: register, then init
                rowLevelIndexMap.put(index.getClass(), index);
                index.init();
            }
            else
            {
                // reuse the existing instance and just attach the new column
                index = currentIndex;
                index.addColumnDef(cdef);
                // NOTE(review): this branch reuses an existing index rather than
                // creating one, so the "Creating new index" wording looks
                // misleading — confirm intent.
                logger.info("Creating new index : {}",cdef);
            }
        }
        else
        {
            index.init();
        }
        // link in indexedColumns. this means that writes will add new data to
        // the index immediately,
        // so we don't have to lock everything while we do the build. it's up to
        // the operator to wait
        // until the index is actually built before using in queries.
        indexesByColumn.put(cdef.name, index);
        // if we're just linking in the index to indexedColumns on an
        // already-built index post-restart, we're done
        if (index.isIndexBuilt(cdef.name))
            return null;
        return index.buildIndexAsync();
    }

    /**
     * @param column the name of the indexed column
     * @return the index registered for exactly that column name, or null
     */
    public SecondaryIndex getIndexForColumn(ByteBuffer column)
    {
        return indexesByColumn.get(column);
    }

    // Linear scan that asks each index whether it covers the column, unlike
    // getIndexForColumn's exact-name map lookup.
    // NOTE(review): not referenced anywhere in this class — possibly dead code.
    private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
    {
        for (SecondaryIndex index : indexesByColumn.values())
            if (index.indexes(column))
                return index;
        return null;
    }

    /**
     * Remove the index
     */
    public void invalidate()
    {
        for (SecondaryIndex index : indexesByColumn.values())
            index.invalidate();
    }

    /**
     * Flush all indexes to disk
     */
    public void flushIndexesBlocking()
    {
        for (SecondaryIndex index : indexesByColumn.values())
            index.forceBlockingFlush();
    }

    /**
     * @return all built indexes (ready to use)
     */
    public List<String> getBuiltIndexes()
    {
        List<String> indexList = new ArrayList<String>();
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
        {
            SecondaryIndex index = entry.getValue();
            if (index.isIndexBuilt(entry.getKey()))
            {
                indexList.add(entry.getValue().getIndexName());
            }
        }
        return indexList;
    }

    /**
     * @param idxName the index name to look up
     * @return the column name the index with that name is registered under
     * @throws RuntimeException if no index has that name
     */
    public ByteBuffer getColumnByIdxName(String idxName)
    {
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
        {
            if (entry.getValue().getIndexName().equals(idxName))
                return entry.getKey();
        }
        throw new RuntimeException("Unknown Index Name: " + idxName);
    }

    /**
     * @return all CFS from indexes which use a backing CFS internally (KEYS)
     */
    public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
    {
        ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
        for (SecondaryIndex index: indexesByColumn.values())
        {
            ColumnFamilyStore cfs = index.getIndexCfs();
            if (cfs != null)
                cfsList.add(cfs);
        }
        return cfsList;
    }

    /**
     * @return all indexes which do *not* use a backing CFS internally
     */
    public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
    {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index: indexesByColumn.values())
            if (index.getIndexCfs() == null)
                indexes.add(index);
        return indexes;
    }

    /**
     * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
     */
    public Collection<SecondaryIndex> getIndexes()
    {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        indexes.addAll(indexesByColumn.values());
        return indexes;
    }

    /**
     * @return total current ram size of all indexes
     */
    public long getTotalLiveSize()
    {
        long total = 0;
        for (SecondaryIndex index : getIndexes())
            total += index.getLiveSize();
        return total;
    }

    /**
     * When building an index against existing data, add the given row to the index
     *
     * @param key the row key
     * @param cf the current rows data
     */
    public void indexRow(ByteBuffer key, ColumnFamily cf)
    {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (index instanceof PerRowSecondaryIndex)
            {
                // lazily allocated: most CFs have no row-level indexes
                if (appliedRowLevelIndexes == null)
                    appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (appliedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex)index).index(key, cf);
            }
            else
            {
                // per-column indexes are fed one matching column at a time
                for (IColumn column : cf)
                {
                    if (index.indexes(column.name()))
                        ((PerColumnSecondaryIndex) index).insert(key, column);
                }
            }
        }
    }

    /**
     * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
     *
     * @param key the row key
     * @param indexedColumnsInRow all column names in row
     */
    public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
    {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
        for (IColumn column : indexedColumnsInRow)
        {
            SecondaryIndex index = indexesByColumn.get(column.name());
            if (index == null)
                continue;
            if (index instanceof PerRowSecondaryIndex)
            {
                if (cleanedRowLevelIndexes == null)
                    cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                // delete the whole row once per row-level index type
                if (cleanedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex)index).delete(key);
            }
            else
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }
    }

    /**
     * This helper acts as a closure around the indexManager
     * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
     * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
     * fully, other types simply ignore the index updater.
     */
    public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
    {
        // choose the cheapest updater that covers the registered index types
        return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
        ? new MixedIndexUpdater(key)
        : indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
    }

    /**
     * Get a list of IndexSearchers from the union of expression index types
     * @param clause the query clause
     * @return the searchers needed to query the index
     */
    private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
    {
        Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
        //Group columns by type, keyed by the index implementation's class name
        for (IndexExpression ix : clause)
        {
            SecondaryIndex index = getIndexForColumn(ix.column_name);
            // expressions on unindexed columns are simply skipped
            if (index == null)
                continue;
            Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
            if (columns == null)
            {
                columns = new HashSet<ByteBuffer>();
                groupByIndexType.put(index.getClass().getCanonicalName(), columns);
            }
            columns.add(ix.column_name);
        }
        List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
        //create searcher per type; any index of the group can create it, so use the first
        for (Set<ByteBuffer> column : groupByIndexType.values())
            indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
        return indexSearchers;
    }

    /**
     * Performs a search across a number of column indexes
     * TODO: add support for querying across index types
     *
     * @param clause the index query clause
     * @param range the row range to restrict to
     * @param dataFilter the column range to restrict to
     * @return found indexed rows
     */
    public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
    {
        List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
        if (indexSearchers.isEmpty())
            return Collections.emptyList();
        //We currently don't support searching across multiple index types
        if (indexSearchers.size() > 1)
            throw new RuntimeException("Unable to search across multiple secondary index types");
        return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
    }

    /**
     * @param idxNames index names to select
     * @return the managed indexes whose name is in {@code idxNames}
     */
    public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
    {
        List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (idxNames.contains(index.getIndexName()))
                result.add(index);
        }
        return result;
    }

    public void setIndexBuilt(Set<String> idxNames)
    {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexBuilt();
    }

    public void setIndexRemoved(Set<String> idxNames)
    {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexRemoved();
    }

    /**
     * @return false only if the column is indexed and its index rejects the value;
     * unindexed columns always validate.
     */
    public boolean validate(Column column)
    {
        SecondaryIndex index = getIndexForColumn(column.name);
        return index != null ? index.validate(column) : true;
    }

    /**
     * Callback interface through which memtable code reports column changes so
     * the relevant indexes stay in sync.
     */
    public static interface Updater
    {
        /** called when constructing the index against pre-existing data */
        public void insert(IColumn column);
        /** called when updating the index from a memtable */
        public void update(IColumn oldColumn, IColumn column);
        /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
        public void remove(IColumn current);
        /** called after memtable updates are complete (CASSANDRA-5397) */
        public void commit();
    }

    /**
     * Updater used when only per-column indexes are registered: every change is
     * applied to the index immediately, so commit() has nothing to do.
     */
    private class PerColumnIndexUpdater implements Updater
    {
        private final DecoratedKey key;

        public PerColumnIndexUpdater(DecoratedKey key)
        {
            this.key = key;
        }

        public void insert(IColumn column)
        {
            // deleted columns carry no indexable value
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }

        public void update(IColumn oldColumn, IColumn column)
        {
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            // remove the stale entry, then re-insert unless the new value is a tombstone
            ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }

        public void remove(IColumn column)
        {
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }

        public void commit()
        {
            // this is a no-op as per-column index updates are applied immediately
        }
    }

    /**
     * Updater used when at least one row-level index is registered: per-column
     * indexes are still updated immediately, while row-level indexes are deferred
     * (one entry per index) and re-indexed for the whole row at commit().
     */
    private class MixedIndexUpdater implements Updater
    {
        private final DecoratedKey key;
        // row-level indexes that need a full-row re-index at commit();
        // value is the raw row key passed to PerRowSecondaryIndex.index()
        ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();

        public MixedIndexUpdater(DecoratedKey key)
        {
            this.key = key;
        }

        public void insert(IColumn column)
        {
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                // defer row-level indexes until commit(); one update per index suffices
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }

        public void update(IColumn oldColumn, IColumn column)
        {
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }

        public void remove(IColumn column)
        {
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
            else
            {
                // per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
                // than performing lazy updates
            }
        }

        public void commit()
        {
            // flush the deferred row-level updates accumulated above
            for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
            {
                assert update.getKey() instanceof PerRowSecondaryIndex;
                ((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
            }
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(IColumn column) { }
public void update(IColumn oldColumn, IColumn column) { }
public void remove(IColumn current) { }
public void commit() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return indexFor(name, indexes) != null;
}
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
return index;
}
return null;
}
public boolean indexes(IColumn column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public SecondaryIndex indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (IColumn column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (IColumn column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name);
return index != null ? index.validate(column) : true;
}
/**
 * Callback used to keep secondary indexes in sync while base data is
 * written; implementations receive per-column notifications followed by a
 * final commit() once the memtable update completes.
 */
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(IColumn column);
/** called when updating the index from a memtable */
public void update(IColumn oldColumn, IColumn column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(IColumn current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
}
/**
 * Updater used when only per-column indexes exist on the CF: every
 * notification is applied to the matching index immediately, so commit()
 * has nothing left to do.
 */
private class PerColumnIndexUpdater implements Updater
{
    private final DecoratedKey key;

    public PerColumnIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }

    public void insert(IColumn column)
    {
        if (!column.isMarkedForDelete())
        {
            SecondaryIndex index = indexFor(column.name());
            if (index != null)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }
    }

    public void update(IColumn oldColumn, IColumn column)
    {
        SecondaryIndex index = indexFor(column.name());
        if (index != null)
        {
            PerColumnSecondaryIndex perColumn = (PerColumnSecondaryIndex) index;
            perColumn.delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                perColumn.insert(key.key, column);
        }
    }

    public void remove(IColumn column)
    {
        if (!column.isMarkedForDelete())
        {
            SecondaryIndex index = indexFor(column.name());
            if (index != null)
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }

    public void commit()
    {
        // No-op: per-column index updates are applied as they arrive.
    }
}
/**
 * Updater for CFs that mix per-column and row-level (per-row) indexes.
 * Per-column indexes are updated inline; work for per-row indexes is
 * deferred (one entry per index instance) and applied in commit().
 */
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
// Per-row indexes that must re-index this row at commit time; keyed by
// index instance so each index re-reads the row at most once.
ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// defer: the per-row index re-reads the whole row in commit()
deferredUpdates.putIfAbsent(index, key.key);
}
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// defer: the per-row index re-reads the whole row in commit()
deferredUpdates.putIfAbsent(index, key.key);
}
}
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
// than performing lazy updates
}
}
public void commit()
{
// apply the deferred per-row work: each entry asks its index to
// rebuild the row identified by the stored key
for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
{
assert update.getKey() instanceof PerRowSecondaryIndex;
((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
}
}
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
public void remove(Column current) { }
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
 * Drops and adds indexes so the manager matches the current table metadata,
 * then asks every registered index to reload itself.
 */
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// reload each index exactly once: row-level index instances appear in
// indexesByColumn under several columns, hence the identity-based set
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    Iterator<SecondaryIndex> iter = indexesByColumn.values().iterator();
    while (iter.hasNext())
        names.add(iter.next().getIndexName());
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by idxNames from the sstables.
 * Does nothing if idxNames is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;

    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));

    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    try
    {
        future.get();
    }
    catch (InterruptedException e)
    {
        // restore the interrupt status before propagating so callers further
        // up the stack can still observe it
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }

    // make the freshly built index state durable before reporting completion
    flushIndexesBlocking();

    // parameterized logging avoids eager string concatenation
    logger.info("Index build of {} complete", idxNames);
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * Collects every index in the given set that applies to the named column.
 *
 * @param name the column name
 * @param indexes the candidate indexes
 * @return the matching indexes, or an empty list when none match
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // allocate only when at least one index matches
    List<SecondaryIndex> matching = Collections.<SecondaryIndex>emptyList();
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (matching.isEmpty())
            matching = new ArrayList<SecondaryIndex>();
        matching.add(candidate);
    }
    return matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index and deletes its on-disk state.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column.
 *
 * @param cdef the column definition holding the index data
 * @return a future the caller can optionally block on to wait for the index
 *         build, or null when the index is already registered or built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
// reuse the existing row-level instance; just attach the new column.
// NOTE(review): the log below says "Creating new index" on the reuse
// path — presumably it belongs on the branch above; confirm intent.
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We should do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index keyed by this exact column name, or null when the
 *         column is not directly indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex index = entry.getValue();
        if (!index.isIndexBuilt(entry.getKey()))
            continue;
        built.add(index.getIndexName());
    }
    return built;
}
/**
 * Resolves an index name back to the column it indexes.
 *
 * @param idxName the index name
 * @return the name of the indexed column
 * @throws RuntimeException when no index with that name exists
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex index = entry.getValue();
        if (index.getIndexName().equals(idxName))
            return entry.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocated: most CFs have no row-level indexes at all
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column index: insert every column of the row it covers
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all indexed columns present in the row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
// each row-level index class cleans the whole row exactly once
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 *
 * @param key the row being updated
 * @param includeRowIndexes whether row-level (per-row) indexes should also be notified
 * @return a no-op updater when nothing is indexed; otherwise a mixed updater
 *         when row-level indexes exist and are requested, or a per-column one
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // Bucket the referenced columns by the concrete index class handling them
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(indexType);
        if (bucket == null)
        {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, bucket);
        }
        bucket.add(expression.column_name);
    }

    // One searcher per index type, seeded from any column in its bucket
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> bucket : columnsByIndexType.values())
        searchers.add(getIndexForColumn(bucket.iterator().next()).createSecondaryIndexSearcher(bucket));
    return searchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
public void insert(Column column);
public void update(Column oldColumn, Column column);
public void remove(Column current);
}
/**
 * Updater for CFs that have only per-column indexes: every notification is
 * applied immediately to each index covering the column.
 */
private class PerColumnIndexUpdater implements Updater
{
    private final DecoratedKey key;

    public PerColumnIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }

    public void insert(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        List<SecondaryIndex> indexes = indexFor(column.name());
        for (SecondaryIndex index : indexes)
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
    }

    public void update(Column oldColumn, Column column)
    {
        List<SecondaryIndex> indexes = indexFor(column.name());
        for (SecondaryIndex index : indexes)
        {
            PerColumnSecondaryIndex perColumn = (PerColumnSecondaryIndex) index;
            perColumn.delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                perColumn.insert(key.key, column);
        }
    }

    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        List<SecondaryIndex> indexes = indexFor(column.name());
        for (SecondaryIndex index : indexes)
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
    }
}
/**
 * Updater for CFs that mix per-column and row-level (per-row) indexes.
 * Per-column indexes are updated immediately; each row-level index class is
 * asked to re-index the whole row at most once per updater instance.
 */
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
// row-level index classes that have already re-indexed this row; guards
// against re-indexing the same row once per touched column
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// row-level: index the full row, once per index class
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
// row-level: index the full row, once per index class
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// row-level: removal is handled by re-indexing the row as well
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
public void remove(Column current) { }
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    Iterator<SecondaryIndex> iter = indexesByColumn.values().iterator();
    while (iter.hasNext())
        names.add(iter.next().getIndexName());
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by idxNames from the sstables.
 * Does nothing if idxNames is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;

    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));

    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    try
    {
        future.get();
    }
    catch (InterruptedException e)
    {
        // restore the interrupt status before propagating so callers further
        // up the stack can still observe it
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }

    // make the freshly built index state durable before reporting completion
    flushIndexesBlocking();

    // parameterized logging avoids eager string concatenation
    logger.info("Index build of {} complete", idxNames);
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index and deletes its on-disk state.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex index = entry.getValue();
        if (!index.isIndexBuilt(entry.getKey()))
            continue;
        built.add(index.getIndexName());
    }
    return built;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Update entire row only once per row level index: a PerRow index sees the
    // whole ColumnFamily, so applying it per-column would duplicate work.
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocated: most CFs have no row-level indexes
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column index: feed it only the columns it actually covers
            for (Column column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // column names grouped by concrete index class (keyed by canonical class name)
    Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
    //Group columns by type
    for (IndexExpression ix : clause)
    {
        SecondaryIndex index = getIndexForColumn(ix.column_name);
        // expressions on unindexed columns are silently skipped here;
        // the caller decides whether the remaining searchers suffice
        if (index == null)
            continue;
        Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            groupByIndexType.put(index.getClass().getCanonicalName(), columns);
        }
        columns.add(ix.column_name);
    }
    List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
    //create searcher per type: any index of the group can build the searcher
    //for all columns of that group, so take the first one
    for (Set<ByteBuffer> column : groupByIndexType.values())
        indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
    return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
public void insert(Column column);
public void update(Column oldColumn, Column column);
public void remove(Column current);
}
private class PerColumnIndexUpdater implements Updater
{
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
}
MergeMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * Collects every index in {@code indexes} that covers the given column name.
 * The result list is allocated lazily since most columns have no index.
 *
 * @param name the column name to look up
 * @param indexes the candidate indexes to check
 * @return the matching indexes, or an immutable empty list if none match
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    List<SecondaryIndex> matching = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (matching == null)
            matching = new ArrayList<SecondaryIndex>();
        matching.add(candidate);
    }
    if (matching == null)
        return Collections.<SecondaryIndex>emptyList();
    return matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
/**
 * Reverse lookup: finds the column whose index carries the given name.
 *
 * @param idxName the index name to search for
 * @return the indexed column name
 * @throws RuntimeException if no index with that name exists
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (ByteBuffer column : indexesByColumn.keySet())
    {
        SecondaryIndex candidate = indexesByColumn.get(column);
        if (candidate.getIndexName().equals(idxName))
            return column;
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    // getIndexes() already de-duplicates row-level index instances,
    // so each index is counted exactly once.
    long sum = 0L;
    for (SecondaryIndex idx : getIndexes())
    {
        sum += idx.getLiveSize();
    }
    return sum;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // A row-level (PerRow) index covers the whole row, so it must be deleted
    // at most once per index class, no matter how many of its columns appear here.
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocated: most CFs have no row-level indexes
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            // per-column indexes are deleted column by column
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/**
 * Checks the column's value against the index that covers it, if any.
 * A column with no attached index is trivially valid.
 */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    if (index == null)
        return true;
    return index.validate(column);
}
public static interface Updater {
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void commit();
    /** called when constructing the index against pre-existing data */
    public void insert(Column column);
    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);
}
/**
 * Updater used when every index on the CF is per-column: each insert/update/remove
 * is pushed to the matching indexes immediately, so commit() has nothing to do.
 */
private class PerColumnIndexUpdater implements Updater {
    // row key the updates apply to
    private final DecoratedKey key;
    public PerColumnIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }
    public void commit()
    {
        // this is a no-op as per-column index updates are applied immediately
    }
    public void insert(Column column)
    {
        // tombstones are not indexed
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
    }
    public void update(Column oldColumn, Column column)
    {
        // replace the old entry; skip re-insert if the new column is a tombstone
        for (SecondaryIndex index : indexFor(column.name()))
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }
    }
    public void remove(Column column)
    {
        // a tombstone was never indexed, so there is nothing to remove
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
    }
}
/**
 * Updater used when the CF has at least one row-level (PerRow) index, possibly
 * mixed with per-column indexes.  Per-column updates are applied immediately;
 * per-row updates are deferred and applied once in commit().
 */
private class MixedIndexUpdater implements Updater {
    private final DecoratedKey key;
    // Row-level indexes that need (re)indexing for this key, applied in commit().
    // ConcurrentHashMap because AtomicSortedColumns may apply updates concurrently.
    ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
    public MixedIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }
    public void insert(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        // BUGFIX: indexFor(name) returns List<SecondaryIndex>, not a single
        // index; iterate over every index covering this column (the original
        // code assigned the list to a SecondaryIndex and would not compile).
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            else
                deferredUpdates.putIfAbsent(index, key.key);
        }
    }
    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }
    }
    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
            // per-row secondary indexes are assumed to keep the index up-to-date
            // at insert time, rather than performing lazy updates
        }
    }
    public void commit()
    {
        // apply the deferred row-level updates exactly once per index
        for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
        {
            assert update.getKey() instanceof PerRowSecondaryIndex;
            ((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
        }
    }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    // nothing to build
    if (idxNames.isEmpty())
        return;
    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));
    // run the build on the compaction executor and block until it completes
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    try
    {
        future.get();
    }
    catch (InterruptedException e)
    {
        // NOTE(review): interruption is treated as a programming error here and
        // the interrupt flag is not restored — presumably intentional for this
        // blocking administrative operation; confirm before reusing the pattern.
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
    // make the freshly built index data durable before reporting completion
    flushIndexesBlocking();
    logger.info("Index build of " + idxNames + " complete");
}
/**
 * @return true if at least one index in {@code indexes} applies to the given column name
 */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    List<SecondaryIndex> applicable = indexFor(name, indexes);
    return !applicable.isEmpty();
}
/**
 * Collects every index in {@code indexes} that applies to the given column name.
 * The list is allocated lazily; an immutable empty list is returned when nothing matches.
 *
 * @return the matching indexes, possibly empty, never null
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    List<SecondaryIndex> hits = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (hits == null)
            hits = new ArrayList<SecondaryIndex>();
        hits.add(candidate);
    }
    if (hits == null)
        return Collections.<SecondaryIndex>emptyList();
    return hits;
}
/** @return true if at least one registered index applies to this column */
public boolean indexes(Column column)
{
return indexes(column.name());
}
/** @return true if at least one registered index applies to the given column name */
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
/** @return every registered index that applies to the given column name (possibly empty) */
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
    if (clause == null || clause.isEmpty())
        return false;
    // A clause should map to a single searcher, but since
    // getIndexSearchersForQuery hands back a list, check them all.
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    for (SecondaryIndexSearcher searcher : searchers)
    {
        if (!searcher.isIndexing(clause))
            return false;
    }
    return true;
}
/**
 * Removes an existing index. No-op when the column has no registered index.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
// record the removal so the index is not considered "built" after restart
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column. No-op when the column is already indexed.
 *
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null when nothing needs building
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        }
        else
        {
            // reuse the existing per-row index instance; the previous log message
            // ("Creating new index") was misleading in this branch
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Adding {} to existing per-row index", cdef);
        }
    }
    else
    {
        // TODO: We should do better than throw a RuntimeException
        if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
            throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately, so we don't have to lock everything while we
    // do the build. it's up to the operator to wait until the index is
    // actually built before using it in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * Looks up the index registered for exactly this column name.
 *
 * @param column the name of the indexed column
 * @return the index, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
// Like getIndexForColumn, but asks each index whether it covers the name
// (SecondaryIndex.indexes) instead of requiring an exact map hit.
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
 * Invalidate every registered index (removes the index data).
 */
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
 * Flush all indexes to disk, blocking until each flush completes.
 */
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return the names of all indexes whose build has completed (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        if (entry.getValue().isIndexBuilt(entry.getKey()))
            built.add(entry.getValue().getIndexName());
    }
    return built;
}
/**
 * Maps an index name back to the column it indexes.
 *
 * @param idxName the index name to look up
 * @return the name of the column the index is registered under
 * @throws RuntimeException if no index with that name exists
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> candidate : indexesByColumn.entrySet())
    {
        SecondaryIndex index = candidate.getValue();
        if (index.getIndexName().equals(idxName))
            return candidate.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    ArrayList<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex index : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = index.getIndexCfs();
        if (indexCfs == null)
            continue;
        backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
 * @return all of the secondary indexes, with or without a backing ColumnFamilyStore,
 *         de-duplicated (per-row indexes appear once even when registered for many columns)
 */
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    long sum = 0;
    // getIndexes() de-duplicates per-row index instances, so each is counted once
    for (SecondaryIndex index : getIndexes())
        sum += index.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index.
 *
 * @param key the row key
 * @param cf the current row's data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocate the tracking set; add() returning true means this
// index class has not processed the row yet
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column indexes see each matching column individually
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
// a per-row index deletes the whole row at once, so do it only once per index class
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 *
 * @param key the row being updated
 * @param includeRowIndexes whether per-row indexes should also receive updates
 * @return a no-op updater when nothing is indexed, otherwise an updater
 *         covering per-column (and, if requested, per-row) indexes
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 *
 * @param clause the query clause
 * @return the searchers needed to query the index, one per index type involved
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // bucket the clause's columns by the concrete index class that serves them
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(indexType);
        if (bucket == null)
        {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, bucket);
        }
        bucket.add(expression.column_name);
    }
    // create one searcher per index type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> bucket : columnsByIndexType.values())
        searchers.add(getIndexForColumn(bucket.iterator().next()).createSecondaryIndexSearcher(bucket));
    return searchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param maxResults the maximum number of rows to return
 * @param dataFilter the column range to restrict to
 * @param countCQL3Rows whether result counting is done in CQL3 rows
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames the index names to select
 * @return the registered indexes whose names appear in idxNames
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> selected = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex candidate : indexesByColumn.values())
    {
        if (idxNames.contains(candidate.getIndexName()))
            selected.add(candidate);
    }
    return selected;
}
/** Mark the named indexes as built. */
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
/** Mark the named indexes as removed (no longer built). */
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/**
 * @return whether the column's value is acceptable to the index covering it;
 *         trivially true for unindexed columns
 */
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater {
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
/** called when constructing the index against pre-existing data */
public void insert(Column column);
/** called when updating the index from a memtable */
public void update(Column oldColumn, Column column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(Column current);
}
// Updater used when only per-column indexes are involved: every operation is
// applied to the matching PerColumnSecondaryIndex instances immediately.
private class PerColumnIndexUpdater implements Updater {
// the row being updated
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void commit()
{
// this is a no-op as per-column index updates are applied immediately
}
public void insert(Column column)
{
// deleted columns carry no live value to index
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(Column oldColumn, Column column)
{
// replace the old entry; only re-insert when the new column is live
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
// already-deleted columns were never (re)indexed, so nothing to remove
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
// Updater used when both per-column and per-row indexes are registered:
// per-column indexes are updated immediately, per-row indexes are recorded
// and re-indexed once at commit() time.
//
// NOTE: the original assigned indexFor(column.name()) -- a List<SecondaryIndex> --
// to a single SecondaryIndex, which does not compile; it also null-checked a
// method that never returns null. Fixed by iterating the list, mirroring
// PerColumnIndexUpdater.
private class MixedIndexUpdater implements Updater {
    // the row being updated
    private final DecoratedKey key;
    // per-row indexes touched by this update; value is the row key to re-index at commit()
    ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
    public MixedIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }
    public void insert(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            else
                deferredUpdates.putIfAbsent(index, key.key);
        }
    }
    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }
    }
    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            // per-row secondary indexes are assumed to keep the index up-to-date
            // at insert time, rather than performing lazy updates
        }
    }
    public void commit()
    {
        // apply the deferred per-row updates exactly once per index instance
        for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
        {
            assert update.getKey() instanceof PerRowSecondaryIndex;
            ((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
        }
    }
}
}
KeepBothMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater {
/** called when constructing the index against pre-existing data */
/** called when updating the index from a memtable */
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
public void insert(Column column);
public void update(Column oldColumn, Column column);
public void remove(Column current);
}
private class PerColumnIndexUpdater implements Updater {
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void commit()
{
// this is a no-op as per-column index updates are applied immediately
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
/**
 * Updater used when both per-column and per-row (row-level) indexes cover
 * the row being mutated. Per-column indexes are updated immediately;
 * per-row indexes are expensive, so the row key is recorded once per index
 * in {@code deferredUpdates} and each index is invoked a single time from
 * {@link #commit()}.
 *
 * NOTE(review): the previous body was an unresolved merge — it carried
 * duplicate IColumn/Column overloads, assigned the List returned by
 * indexFor(..) to a scalar, and referenced an undefined field
 * {@code appliedRowLevelIndexes}; none of it compiled. This resolution
 * matches the Updater interface (Column-based) and the deferred-update
 * design the commit() method already assumed.
 */
private class MixedIndexUpdater implements Updater {
    private final DecoratedKey key;
    // at most one deferred (re)index per row-level index instance
    private final ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();

    public MixedIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }

    public void insert(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            else
                deferredUpdates.putIfAbsent(index, key.key);
        }
    }

    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                // remove the stale entry, then re-add unless the new value is a tombstone
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }
    }

    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
            // per-row secondary indexes are assumed to keep the index up-to-date
            // at insert time, rather than performing lazy updates
        }
    }

    public void commit()
    {
        // apply each deferred row-level reindex exactly once
        for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
        {
            assert update.getKey() instanceof PerRowSecondaryIndex;
            ((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
        }
    }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/**
 * @return the distinct names of every registered index
 */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        names.add(it.next().getIndexName());
    return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * Collects the indexes from {@code indexes} that cover the given column name.
 * The result list is allocated lazily so the common no-match case stays cheap.
 *
 * @param name the column name being looked up
 * @param indexes the candidate indexes to filter
 * @return the matching indexes, or an immutable empty list when none match
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    List<SecondaryIndex> hits = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (hits == null)
            hits = new ArrayList<SecondaryIndex>();
        hits.add(candidate);
    }
    if (hits == null)
        return Collections.<SecondaryIndex>emptyList();
    return hits;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 *
 * @param key the row key the returned updater is bound to
 * @param includeRowIndexes whether row-level (per-row) indexes should also be driven
 * @return the cheapest updater that covers the registered indexes: the shared
 *         no-op updater when nothing is indexed, a per-column updater otherwise,
 *         or a mixed updater when row-level indexes exist and were requested
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater {
/** called when constructing the index against pre-existing data */
/** called when updating the index from a memtable */
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
public void insert(Column column);
public void update(Column oldColumn, Column column);
public void remove(Column current);
}
private class PerColumnIndexUpdater implements Updater {
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void commit()
{
// this is a no-op as per-column index updates are applied immediately
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
/**
 * Updater used when both per-column and per-row (row-level) indexes cover
 * the row being mutated. Per-column indexes are updated immediately;
 * per-row indexes are expensive, so the row key is recorded once per index
 * in {@code deferredUpdates} and each index is invoked a single time from
 * {@link #commit()}.
 *
 * NOTE(review): the previous body was an unresolved merge — it carried
 * duplicate IColumn/Column overloads, assigned the List returned by
 * indexFor(..) to a scalar, and referenced an undefined field
 * {@code appliedRowLevelIndexes}; none of it compiled. This resolution
 * matches the Updater interface (Column-based) and the deferred-update
 * design the commit() method already assumed.
 */
private class MixedIndexUpdater implements Updater {
    private final DecoratedKey key;
    // at most one deferred (re)index per row-level index instance
    private final ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();

    public MixedIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }

    public void insert(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            else
                deferredUpdates.putIfAbsent(index, key.key);
        }
    }

    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                // remove the stale entry, then re-add unless the new value is a tombstone
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
            else
            {
                deferredUpdates.putIfAbsent(index, key.key);
            }
        }
    }

    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
        {
            if (index instanceof PerColumnSecondaryIndex)
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
            // per-row secondary indexes are assumed to keep the index up-to-date
            // at insert time, rather than performing lazy updates
        }
    }

    public void commit()
    {
        // apply each deferred row-level reindex exactly once
        for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
        {
            assert update.getKey() instanceof PerRowSecondaryIndex;
            ((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
        }
    }
}
}
Safe
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
 * Flush all indexes to disk
 * (blocks until each index's flush has completed)
 */
public void flushIndexesBlocking()
{
    for (SecondaryIndex index : indexesByColumn.values())
        index.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building and are ready to use
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex idx = entry.getValue();
        if (idx.isIndexBuilt(entry.getKey()))
            built.add(idx.getIndexName());
    }
    return built;
}
/**
 * Resolves an index name back to the column name it indexes.
 *
 * @param idxName the index name to look up
 * @return the name of the column the index covers
 * @throws RuntimeException if no index with the given name exists
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> candidate : indexesByColumn.entrySet())
    {
        if (candidate.getValue().getIndexName().equals(idxName))
            return candidate.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
 * @return all CFS from indexes which use a backing ColumnFamilyStore internally (e.g. KEYS indexes)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    ArrayList<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = idx.getIndexCfs();
        if (indexCfs != null)
            backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // we use identity map because per row indexes use same instance across many columns
    // (equals()-based sets could otherwise collapse or duplicate entries)
    Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index: indexesByColumn.values())
        if (index.getIndexCfs() == null)
            indexes.add(index);
    return indexes;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
 * Each distinct index instance appears exactly once, even when it is registered under several columns.
 */
public Collection<SecondaryIndex> getIndexes()
{
    // we use identity map because per row indexes use same instance across many columns
    Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    indexes.addAll(indexesByColumn.values());
    return indexes;
}
/**
 * @return total current ram size, in bytes, summed over every distinct index
 */
public long getTotalLiveSize()
{
    long sum = 0;
    for (SecondaryIndex idx : getIndexes())
        sum += idx.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Update entire row only once per row level index
    // (many columns can map onto the same PerRowSecondaryIndex instance, but the
    // whole row must be handed to it exactly once)
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocate the dedup set; add() returns false for repeat classes
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column indexes receive each matching column individually
            for (Column column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // Update entire row only once per row level index
    // (several columns may share one PerRowSecondaryIndex instance)
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        // columns without a direct index entry have nothing to clean
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocate the dedup set; add() is false for repeat classes
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            // per-column indexes drop each column entry individually
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 *
 * @param key the row the returned updater applies mutations to
 * @param includeRowIndexes whether row-level (per-row) indexes should be notified too
 * @return an Updater bound to the key; the no-op nullUpdater when nothing is indexed
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
    // MixedIndexUpdater handles both per-column and row-level indexes;
    // PerColumnIndexUpdater is the cheaper path when no row-level indexes exist.
    return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
           ? new MixedIndexUpdater(key)
           : indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
    //Group columns by type
    for (IndexExpression ix : clause)
    {
        SecondaryIndex index = getIndexForColumn(ix.column_name);
        // expressions on unindexed columns are simply skipped here
        if (index == null)
            continue;
        Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            groupByIndexType.put(index.getClass().getCanonicalName(), columns);
        }
        columns.add(ix.column_name);
    }
    List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
    //create searcher per type
    // (any index of a group can create the group's searcher, so use the first column's)
    for (Set<ByteBuffer> column : groupByIndexType.values())
        indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
    return indexSearchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 * @throws RuntimeException if the clause spans more than one index type (unsupported)
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
    if (indexSearchers.isEmpty())
        return Collections.emptyList();
    //We currently don't support searching across multiple index types
    if (indexSearchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * Selects the registered indexes whose names appear in the given set.
 *
 * @param idxNames index names to select
 * @return every matching index (possibly empty)
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idxNames.contains(idx.getIndexName()))
            matches.add(idx);
    }
    return matches;
}
/** Marks each of the named indexes as built. */
public void setIndexBuilt(Set<String> idxNames)
{
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexBuilt();
}
/** Marks each of the named indexes as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexRemoved();
}
/**
 * @return true when the column is not directly indexed, or when its covering
 *         index accepts the value; false when the index rejects it
 */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    return index != null ? index.validate(column) : true;
}
/**
 * Callback interface through which Memtable mutations keep the secondary
 * indexes for a single row in sync.
 */
public static interface Updater {
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void commit();
    /** called when constructing the index against pre-existing data, or when inserting from a memtable */
    public void insert(Column column);
    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);
}
/**
 * Updater used when only per-column indexes are registered: every mutation is
 * applied to the matching indexes immediately, so commit() has nothing to do.
 */
private class PerColumnIndexUpdater implements Updater {
    // the row all updates apply to
    private final DecoratedKey key;
    public PerColumnIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }
    public void commit()
    {
        // this is a no-op as per-column index updates are applied immediately
    }
    public void insert(Column column)
    {
        // tombstones are never inserted into the index
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
    }
    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            // drop the stale entry, then re-insert unless the new value is a tombstone
            ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }
    }
    public void remove(Column column)
    {
        // tombstones were never inserted, so there is nothing to remove
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
    }
}
private class MixedIndexUpdater implements Updater {
private final DecoratedKey key;
ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
<<<<<<< MINE
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
=======
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
>>>>>>> YOURS
<<<<<<< MINE
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
=======
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
>>>>>>> YOURS
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
// than performing lazy updates
}
}
public void commit()
{
for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
{
assert update.getKey() instanceof PerRowSecondaryIndex;
((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater {
/** called when constructing the index against pre-existing data */
/** called when updating the index from a memtable */
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
public void insert(Column column);
public void update(Column oldColumn, Column column);
public void remove(Column current);
}
private class PerColumnIndexUpdater implements Updater {
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void commit()
{
// this is a no-op as per-column index updates are applied immediately
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
private class MixedIndexUpdater implements Updater {
private final DecoratedKey key;
ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
<<<<<<< MINE
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
=======
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
>>>>>>> YOURS
<<<<<<< MINE
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
=======
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
>>>>>>> YOURS
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
// than performing lazy updates
}
}
public void commit()
{
for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
{
assert update.getKey() instanceof PerRowSecondaryIndex;
((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
// Creates an (initially empty) index manager for the given base column family.
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF.
 */
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
// drop indexes whose column no longer exists or is no longer indexed
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// identity set: row-level indexes share one instance across many columns,
// so reload each distinct instance exactly once
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/**
 * @return the names of every registered index.
 */
public Set<String> allIndexesNames()
{
    HashSet<String> result = new HashSet<String>();
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        result.add(it.next().getIndexName());
    return result;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
// block until the build finishes
future.get();
}
catch (InterruptedException e)
{
// NOTE(review): interrupt is treated as a programming error and the flag is
// not re-set — confirm this is intended before changing.
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
// True when at least one index in the given collection covers the named column.
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * Finds every index in the given collection that applies to the named column.
 *
 * @param name the column name to match
 * @param indexes the candidate indexes
 * @return the matching indexes, or an empty list when none apply
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // allocate the result lazily: most columns are not indexed
    List<SecondaryIndex> matches = Collections.<SecondaryIndex>emptyList();
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (matches.isEmpty())
            matches = new ArrayList<SecondaryIndex>();
        matches.add(candidate);
    }
    return matches;
}
// True when any registered index covers this column.
public boolean indexes(Column column)
{
return indexes(column.name());
}
// True when any registered index covers the named column.
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
// All registered indexes that cover the named column.
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
// every searcher must be able to serve the clause, not just one of them
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index.
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column.
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
// idempotent: a second registration for the same column is a no-op
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
// reuse the shared instance and just register the additional column
index = currentIndex;
index.addColumnDef(cdef);
// NOTE(review): this logs "Creating new index" on the reuse path, which
// looks misplaced — confirm intent before moving/changing the message.
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We should do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
 * Exact-name lookup of the index registered for a column.
 *
 * @param column the name of the indexed column
 * @return the index, or null when the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
// Unlike getIndexForColumn's exact-key lookup, this scans all indexes using
// SecondaryIndex.indexes() matching; returns the first match or null.
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
 * Invalidates every registered index.
 */
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
 * Flush all indexes to disk; blocks until every flush completes.
 */
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building (ready to use).
 */
public List<String> getBuiltIndexes()
{
    ArrayList<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex candidate = entry.getValue();
        if (!candidate.isIndexBuilt(entry.getKey()))
            continue;
        built.add(candidate.getIndexName());
    }
    return built;
}
/**
 * Resolves the column a named index was created on.
 *
 * @param idxName the index name to look up
 * @return the indexed column's name
 * @throws RuntimeException when no index with that name exists
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> candidate : indexesByColumn.entrySet())
    {
        SecondaryIndex index = candidate.getValue();
        if (index.getIndexName().equals(idxName))
            return candidate.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
 * @return the backing ColumnFamilyStore of every index that stores its data in a CFS (KEYS).
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    List<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex index : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = index.getIndexCfs();
        if (indexCfs == null)
            continue;
        backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally (deduplicated).
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore, deduplicated by instance.
 */
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
 * @return total current ram size of all distinct indexes.
 */
public long getTotalLiveSize()
{
    long sum = 0L;
    Iterator<SecondaryIndex> it = getIndexes().iterator();
    while (it.hasNext())
        sum += it.next().getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index.
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// allocated lazily: most CFs have no row-level indexes
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column indexes get one insert per matching column in the row
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
// allocated lazily; the whole-row delete happens once per index class
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
// row-level indexes require the deferring MixedIndexUpdater; otherwise use
// the cheaper per-column updater, or a no-op when nothing is indexed at all
return (includeRowIndexes && !rowLevelIndexMap.isEmpty())
? new MixedIndexUpdater(key)
: indexesByColumn.isEmpty() ? nullUpdater : new PerColumnIndexUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
// Group the clause's columns by index implementation class (canonical name)
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
// expressions on unindexed columns are simply skipped
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
// create one searcher per index type, seeded via any of that type's columns
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
 * Performs a search across a number of column indexes.
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
// the single searcher handles the entire clause
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * Selects the registered indexes whose names appear in the given set.
 *
 * @param idxNames the index names to look up
 * @return the matching indexes (a shared row-level instance may appear more than once)
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    ArrayList<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex candidate : indexesByColumn.values())
    {
        if (!idxNames.contains(candidate.getIndexName()))
            continue;
        matches.add(candidate);
    }
    return matches;
}
// Marks each named index as built.
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
// Marks each named index as removed.
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/**
 * Checks a column value against the index covering it, if any.
 *
 * @param column the column to validate
 * @return false only when an index exists for the column and rejects the value
 */
public boolean validate(Column column)
{
    SecondaryIndex candidate = getIndexForColumn(column.name());
    if (candidate == null)
        return true;
    return candidate.validate(column);
}
public static interface Updater
{
<<<<<<< MINE
/** called when constructing the index against pre-existing data */
public void insert(IColumn column);
=======
public void insert(Column column);
>>>>>>> YOURS
<<<<<<< MINE
/** called when updating the index from a memtable */
public void update(IColumn oldColumn, IColumn column);
=======
public void update(Column oldColumn, Column column);
>>>>>>> YOURS
<<<<<<< MINE
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(IColumn current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
=======
public void remove(Column current);
>>>>>>> YOURS
}
/**
 * Updater used when only per-column indexes exist for this CF: every
 * operation is applied to the relevant indexes immediately, so commit()
 * has nothing left to do.
 */
private class PerColumnIndexUpdater implements Updater
{
private final DecoratedKey key;
public PerColumnIndexUpdater(DecoratedKey key)
{
this.key = key;
}
// constructing the index against pre-existing data
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
// updating the index from a memtable: drop the old entry, add the new one
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
// lazy-updating the index during compaction
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
public void commit()
{
// this is a no-op as per-column index updates are applied immediately
}
}
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
=======
if (index instanceof PerColumnSecondaryIndex)
{
>>>>>>> YOURS
((PerColumnSecondaryIndex) index).insert(key.key, column);
<<<<<<< MINE
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
=======
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
>>>>>>> YOURS
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
// than performing lazy updates
}
}
public void commit()
{
for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
{
assert update.getKey() instanceof PerRowSecondaryIndex;
((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
=======
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
>>>>>>> YOURS
}
}
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
<<<<<<< MINE
public void remove(IColumn current) { }
public void commit() {}
=======
public void remove(Column current) { }
>>>>>>> YOURS
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Deletes all columns from all indexes for this row — for when cleanup rips a
 * row out entirely.
 *
 * Row-level indexes are told to drop the row at most once per index class;
 * column-level indexes are handed each column individually.
 *
 * @param key                 the row key
 * @param indexedColumnsInRow all indexed column names in the row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // Lazily allocated: tracks which row-level index classes were already cleaned.
    Set<Class<? extends SecondaryIndex>> rowLevelAlreadyCleaned = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (!(index instanceof PerRowSecondaryIndex))
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
            continue;
        }
        if (rowLevelAlreadyCleaned == null)
            rowLevelAlreadyCleaned = new HashSet<Class<? extends SecondaryIndex>>();
        if (rowLevelAlreadyCleaned.add(index.getClass()))
            ((PerRowSecondaryIndex) index).delete(key);
    }
}
/**
 * This helper acts as a closure around the indexManager and row key so that,
 * down in Memtable's ColumnFamily implementation, the index can get updated.
 * Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully; other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key, boolean includeRowIndexes)
{
    if (includeRowIndexes && !rowLevelIndexMap.isEmpty())
        return new MixedIndexUpdater(key);
    if (indexesByColumn.isEmpty())
        return nullUpdater;
    return new PerColumnIndexUpdater(key);
}
/**
 * Gets a list of IndexSearchers from the union of expression index types.
 *
 * @param clause the query clause
 * @return one searcher per distinct index implementation referenced by the clause
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // Bucket the referenced columns by the canonical class name of their index.
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String type = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(type);
        if (bucket == null)
        {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(type, bucket);
        }
        bucket.add(expression.column_name);
    }
    // Create one searcher per index type; any index of that type can build it.
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes.
 * TODO: add support for querying across index types
 *
 * @param clause        the index query clause
 * @param range         the row range to restrict to
 * @param maxResults    maximum number of results to return
 * @param dataFilter    the column range to restrict to
 * @param countCQL3Rows whether results are counted as CQL3 rows
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // Searching across several index types at once is currently unsupported.
    if (searchers.size() != 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames the index names to look up
 * @return the registered indexes whose {@code getIndexName()} appears in idxNames
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex candidate : indexesByColumn.values())
    {
        if (idxNames.contains(candidate.getIndexName()))
            matches.add(candidate);
    }
    return matches;
}
/** Marks each index named in idxNames as built. */
public void setIndexBuilt(Set<String> idxNames)
{
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexBuilt();
}
/** Marks each index named in idxNames as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexRemoved();
}
/**
 * Validates the column against the index covering its name, if any.
 *
 * @return true when no index covers the column, or when the covering index accepts it
 */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    // Unindexed columns are always valid; otherwise defer to the index.
    return index == null || index.validate(column);
}
public static interface Updater
{
<<<<<<< MINE
/** called when constructing the index against pre-existing data */
public void insert(IColumn column);
=======
public void insert(Column column);
>>>>>>> YOURS
<<<<<<< MINE
/** called when updating the index from a memtable */
public void update(IColumn oldColumn, IColumn column);
=======
public void update(Column oldColumn, Column column);
>>>>>>> YOURS
<<<<<<< MINE
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(IColumn current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void commit();
=======
public void remove(Column current);
>>>>>>> YOURS
}
/**
 * Updater used when only column-level indexes are registered: every change is
 * pushed straight into the matching PerColumnSecondaryIndex instances, so
 * commit() has nothing left to do.
 */
private class PerColumnIndexUpdater implements Updater
{
    private final DecoratedKey key;

    public PerColumnIndexUpdater(DecoratedKey key)
    {
        this.key = key;
    }

    public void insert(Column column)
    {
        // Tombstones are not inserted into column-level indexes.
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
    }

    public void update(Column oldColumn, Column column)
    {
        for (SecondaryIndex index : indexFor(column.name()))
        {
            PerColumnSecondaryIndex perColumn = (PerColumnSecondaryIndex) index;
            // Drop the stale entry, then re-add the live value (if any).
            perColumn.delete(key.key, oldColumn);
            if (!column.isMarkedForDelete())
                perColumn.insert(key.key, column);
        }
    }

    public void remove(Column column)
    {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name()))
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
    }

    public void commit()
    {
        // No-op: per-column index updates are applied immediately.
    }
}
private class MixedIndexUpdater implements Updater
{
private final DecoratedKey key;
ConcurrentHashMap<SecondaryIndex, ByteBuffer> deferredUpdates = new ConcurrentHashMap<SecondaryIndex, ByteBuffer>();
public MixedIndexUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
}
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
=======
if (index instanceof PerColumnSecondaryIndex)
{
>>>>>>> YOURS
((PerColumnSecondaryIndex) index).insert(key.key, column);
<<<<<<< MINE
}
else
{
deferredUpdates.putIfAbsent(index, key.key);
=======
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
>>>>>>> YOURS
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
// per-row secondary indexes are assumed to keep the index up-to-date at insert time, rather
// than performing lazy updates
}
}
public void commit()
{
for (Map.Entry<SecondaryIndex, ByteBuffer> update : deferredUpdates.entrySet())
{
assert update.getKey() instanceof PerRowSecondaryIndex;
((PerRowSecondaryIndex) update.getKey()).index(update.getValue());
=======
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
else
{
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key.key);
}
>>>>>>> YOURS
}
}
}
}
Diff Result
No diff
Case 13 - java_cassandra.rev_748b0_c9376..UpdateParameters.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.cql3.statements.ColumnGroupMap;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.Pair;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final List<ByteBuffer> variables;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, ColumnGroupMap> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Column makeColumn(ByteBuffer name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return Column.create(name, value, timestamp, ttl, metadata);
}
public Column makeCounter(ByteBuffer name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new CounterUpdateColumn(name, delta, System.currentTimeMillis());
}
public Column makeTombstone(ByteBuffer name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new DeletedColumn(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp - 1, localDeletionTime);
}
public List<Pair<ByteBuffer, Column>> getPrefetchedList(ByteBuffer rowKey, ByteBuffer cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
ColumnGroupMap m = prefetchedLists.get(rowKey);
return m == null ? Collections.<Pair<ByteBuffer, Column>>emptyList() : m.getCollection(cql3ColumnName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.cql3.statements.ColumnGroupMap;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.Pair;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final List<ByteBuffer> variables;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, ColumnGroupMap> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Column makeColumn(ByteBuffer name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return Column.create(name, value, timestamp, ttl, metadata);
}
public Column makeCounter(ByteBuffer name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new CounterUpdateColumn(name, delta, System.currentTimeMillis());
}
public Column makeTombstone(ByteBuffer name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new DeletedColumn(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp - 1, localDeletionTime);
}
public List<Pair<ByteBuffer, Column>> getPrefetchedList(ByteBuffer rowKey, ByteBuffer cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
ColumnGroupMap m = prefetchedLists.get(rowKey);
return m == null ? Collections.<Pair<ByteBuffer, Column>>emptyList() : m.getCollection(cql3ColumnName);
}
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.cql3.statements.ColumnGroupMap;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.Pair;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final List<ByteBuffer> variables;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, ColumnGroupMap> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public Column makeColumn(ByteBuffer name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return Column.create(name, value, timestamp, ttl, metadata);
}
public Column makeCounter(ByteBuffer name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new CounterUpdateColumn(name, delta, System.currentTimeMillis());
}
public Column makeTombstone(ByteBuffer name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new DeletedColumn(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp - 1, localDeletionTime);
}
public List<Pair<ByteBuffer, Column>> getPrefetchedList(ByteBuffer rowKey, ByteBuffer cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
ColumnGroupMap m = prefetchedLists.get(rowKey);
return m == null ? Collections.<Pair<ByteBuffer, Column>>emptyList() : m.getCollection(cql3ColumnName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.cql3.statements.ColumnGroupMap;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.Pair;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final List<ByteBuffer> variables;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, ColumnGroupMap> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public Column makeColumn(ByteBuffer name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return Column.create(name, value, timestamp, ttl, metadata);
}
public Column makeCounter(ByteBuffer name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new CounterUpdateColumn(name, delta, System.currentTimeMillis());
}
public Column makeTombstone(ByteBuffer name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name);
return new DeletedColumn(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
{
QueryProcessor.validateCellName(start);
QueryProcessor.validateCellName(end);
return new RangeTombstone(start, end, timestamp - 1, localDeletionTime);
}
public List<Pair<ByteBuffer, Column>> getPrefetchedList(ByteBuffer rowKey, ByteBuffer cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
ColumnGroupMap m = prefetchedLists.get(rowKey);
return m == null ? Collections.<Pair<ByteBuffer, Column>>emptyList() : m.getCollection(cql3ColumnName);
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
MergeMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
/**
 * A simple container that simplifies passing parameters for collection methods.
 *
 * NOTE(review): the recorded merge output assigned the nonexistent field
 * 'variables' (the field and parameter are 'options') and dropped the
 * constructor's 'throws InvalidRequestException' clause while still throwing
 * that checked exception; both defects are corrected here by combining the
 * QueryOptions-based structure from one parent with the timestamp validation
 * from the other.
 */
public class UpdateParameters {

    public final CFMetaData metadata;
    public final QueryOptions options;
    public final long timestamp;
    private final int ttl;
    public final int localDeletionTime;

    // For lists operation that require a read-before-write. Will be null otherwise.
    private final Map<ByteBuffer, CQL3Row> prefetchedLists;

    public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
    throws InvalidRequestException {
        this.metadata = metadata;
        this.options = options;
        this.timestamp = timestamp;
        this.ttl = ttl;
        this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
        this.prefetchedLists = prefetchedLists;
        // We use MIN_VALUE internally to mean the absence of a timestamp (in Selection,
        // in sstable stats, ...), so exclude it to avoid potential confusion.
        if (timestamp == Long.MIN_VALUE)
            throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
    }

    /** Creates a regular cell after validating the cell name. */
    public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException {
        QueryProcessor.validateCellName(name, metadata.comparator);
        return AbstractCell.create(name, value, timestamp, ttl, metadata);
    }

    /** Creates a counter-update cell after validating the cell name. */
    public Cell makeCounter(CellName name, long delta) throws InvalidRequestException {
        QueryProcessor.validateCellName(name, metadata.comparator);
        return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
    }

    /** Creates a tombstone cell after validating the cell name. */
    public Cell makeTombstone(CellName name) throws InvalidRequestException {
        QueryProcessor.validateCellName(name, metadata.comparator);
        return new BufferDeletedCell(name, localDeletionTime, timestamp);
    }

    /** Creates a range tombstone covering the slice at this update's timestamp. */
    public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException {
        QueryProcessor.validateComposite(slice.start, metadata.comparator);
        QueryProcessor.validateComposite(slice.finish, metadata.comparator);
        return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
    }

    /** Creates a range tombstone at timestamp - 1, used to overwrite a whole collection. */
    public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException {
        QueryProcessor.validateComposite(slice.start, metadata.comparator);
        QueryProcessor.validateComposite(slice.finish, metadata.comparator);
        return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
    }

    /** @return the prefetched collection cells for the row/column, or an empty list. */
    public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName) {
        if (prefetchedLists == null)
            return Collections.emptyList();
        CQL3Row row = prefetchedLists.get(rowKey);
        return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters {
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists) {
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName) {
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
KeepBothMethods
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters {
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists) throws InvalidRequestException {
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists) {
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName) {
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters {
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists) throws InvalidRequestException {
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists) {
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int) (System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException {
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException {
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName) {
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
Safe
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters {
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
<<<<<<< MINE
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
=======
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
>>>>>>> YOURS
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters {
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
<<<<<<< MINE
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
}
=======
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
{
this.metadata = metadata;
this.variables = variables;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
>>>>>>> YOURS
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
<<<<<<< MINE
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
=======
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
>>>>>>> YOURS
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.ColumnSlice;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.utils.FBUtilities;
/**
* A simple container that simplify passing parameters for collections methods.
*/
public class UpdateParameters
{
public final CFMetaData metadata;
public final QueryOptions options;
public final long timestamp;
private final int ttl;
public final int localDeletionTime;
// For lists operation that require a read-before-write. Will be null otherwise.
private final Map<ByteBuffer, CQL3Row> prefetchedLists;
<<<<<<< MINE
public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
throws InvalidRequestException
=======
public UpdateParameters(CFMetaData metadata, QueryOptions options, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
>>>>>>> YOURS
{
this.metadata = metadata;
this.options = options;
this.timestamp = timestamp;
this.ttl = ttl;
this.localDeletionTime = (int)(System.currentTimeMillis() / 1000);
this.prefetchedLists = prefetchedLists;
// We use MIN_VALUE internally to mean the absence of of timestamp (in Selection, in sstable stats, ...), so exclude
// it to avoid potential confusion.
if (timestamp == Long.MIN_VALUE)
throw new InvalidRequestException(String.format("Out of bound timestamp, must be in [%d, %d]", Long.MIN_VALUE + 1, Long.MAX_VALUE));
}
public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return AbstractCell.create(name, value, timestamp, ttl, metadata);
}
public Cell makeCounter(CellName name, long delta) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferCounterUpdateCell(name, delta, FBUtilities.timestampMicros());
}
public Cell makeTombstone(CellName name) throws InvalidRequestException
{
QueryProcessor.validateCellName(name, metadata.comparator);
return new BufferDeletedCell(name, localDeletionTime, timestamp);
}
public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
}
public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
{
QueryProcessor.validateComposite(slice.start, metadata.comparator);
QueryProcessor.validateComposite(slice.finish, metadata.comparator);
return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
}
public List<Cell> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
{
if (prefetchedLists == null)
return Collections.emptyList();
CQL3Row row = prefetchedLists.get(rowKey);
return row == null ? Collections.<Cell>emptyList() : row.getCollection(cql3ColumnName);
}
}
Diff Result
No diff
Case 14 - java_cassandra.rev_86a07_41ef8..UpdateStatement.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
import static org.apache.cassandra.cql.QueryProcessor.validateKey;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
    private CFDefinition cfDef;
    // Provided for an UPDATE
    private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations;
    private final List<Relation> whereClause;
    // Provided for an INSERT
    private final List<ColumnIdentifier> columnNames;
    private final List<Term.Raw> columnValues;
    // Filled in by prepare(): non-PK column operations and bound PRIMARY KEY values respectively.
    private final List<Operation> processedColumns = new ArrayList<Operation>();
    private final Map<ColumnIdentifier, List<Term>> processedKeys = new HashMap<ColumnIdentifier, List<Term>>();
    // Used for COMPACT STORAGE tables that have no value column: writes an empty value cell.
    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
    /**
     * Creates a new UpdateStatement from a column family name, columns map, consistency
     * level, and key term. This is the UPDATE form of the statement; the INSERT-specific
     * fields (columnNames/columnValues) are left null.
     *
     * @param name column family being operated on
     * @param operations a map of column operations to perform
     * @param whereClause the where clause
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     */
    public UpdateStatement(CFName name,
                           List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations,
                           List<Relation> whereClause,
                           Attributes attrs)
    {
        super(name, attrs);
        this.operations = operations;
        this.whereClause = whereClause;
        // Null marks this statement as an UPDATE (see prepare()).
        this.columnNames = null;
        this.columnValues = null;
    }
    /**
     * Creates a new UpdateStatement from a column family name, a consistency level,
     * key, and lists of column names and values. It is intended for use with the
     * alternate update format, <code>INSERT</code>; the UPDATE-specific fields
     * (operations/whereClause) are left null.
     *
     * @param name column family being operated on
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     * @param columnNames list of column names
     * @param columnValues list of column values (corresponds to names)
     */
    public UpdateStatement(CFName name,
                           Attributes attrs,
                           List<ColumnIdentifier> columnNames,
                           List<Term.Raw> columnValues)
    {
        super(name, attrs);
        this.columnNames = columnNames;
        this.columnValues = columnValues;
        // Null marks this statement as an INSERT (see prepare()).
        this.operations = null;
        this.whereClause = null;
    }
protected void validateConsistency(ConsistencyLevel cl) throws InvalidRequestException
{
if (type == Type.COUNTER)
cl.validateCounterForWrite(cfDef.cfm);
else
cl.validateForWrite(cfDef.cfm.ksName);
}
    /** {@inheritDoc} */
    public Collection<IMutation> getMutations(List<ByteBuffer> variables, boolean local, ConsistencyLevel cl, long now)
    throws RequestExecutionException, RequestValidationException
    {
        // Bind the partition key values (several keys when IN was used on the last PK component).
        List<ByteBuffer> keys = buildKeyNames(cfDef, processedKeys, variables);
        ColumnNameBuilder builder = cfDef.getColumnNameBuilder();
        // Bind the clustering column values into the shared cell-name prefix builder.
        buildColumnNames(cfDef, processedKeys, builder, variables, true);
        // Lists SET operation incurs a read.
        Set<ByteBuffer> toRead = null;
        for (Operation op : processedColumns)
        {
            if (op.requiresRead())
            {
                if (toRead == null)
                    toRead = new TreeSet<ByteBuffer>(UTF8Type.instance);
                toRead.add(op.columnName.key);
            }
        }
        // Only perform the pre-read when at least one operation actually needs it.
        Map<ByteBuffer, ColumnGroupMap> rows = toRead != null ? readRows(keys, builder, toRead, (CompositeType)cfDef.cfm.comparator, local, cl) : null;
        Collection<IMutation> mutations = new LinkedList<IMutation>();
        UpdateParameters params = new UpdateParameters(variables, getTimestamp(now), getTimeToLive(), rows);
        // One mutation per partition key.
        for (ByteBuffer key: keys)
            mutations.add(mutationForKey(cfDef, key, builder, params, cl));
        return mutations;
    }
    // Returns the first empty component or null if none are
    // (i.e. the first clustering column for which no value was provided). Values that
    // are present are bound and appended to builder in schema order.
    static CFDefinition.Name buildColumnNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, ColumnNameBuilder builder, List<ByteBuffer> variables, boolean requireAllComponent)
    throws InvalidRequestException
    {
        CFDefinition.Name firstEmpty = null;
        for (CFDefinition.Name name : cfDef.columns.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
            {
                firstEmpty = name;
                // Composite (non-compact) tables require every clustering component to be set.
                if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
                    throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            }
            else if (firstEmpty != null)
            {
                // A later component is set while an earlier one is missing: reject.
                throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
            }
            else
            {
                assert values.size() == 1; // We only allow IN for row keys so far
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
                builder.add(val);
            }
        }
        return firstEmpty;
    }
    /** Builds the partition key(s) this statement applies to by binding the processed PK values. */
    static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
    throws InvalidRequestException
    {
        ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
        List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
        // Every partition key component is mandatory; multiple values (IN) are only
        // allowed on the last component, where each value yields a distinct key.
        for (CFDefinition.Name name : cfDef.keys.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
                throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            if (keyBuilder.remainingCount() == 1)
            {
                // Last component: build one complete key per provided value.
                for (Term t : values)
                {
                    ByteBuffer val = t.bindAndGet(variables);
                    if (val == null)
                        throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                    keys.add(keyBuilder.copy().add(val).build());
                }
            }
            else
            {
                if (values.size() > 1)
                    throw new InvalidRequestException("IN is only supported on the last column of the partition key");
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                keyBuilder.add(val);
            }
        }
        return keys;
    }
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm.cfName);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
    /**
     * Validates the statement against the current schema and binds its terms;
     * fills in cfDef, type, processedKeys and processedColumns. Handles both the
     * INSERT form (operations == null) and the UPDATE form.
     */
    public ParsedStatement.Prepared prepare(ColumnSpecification[] boundNames) throws InvalidRequestException
    {
        // Deal here with the keyspace overwrite thingy to avoid mistake
        CFMetaData metadata = validateColumnFamily(keyspace(), columnFamily());
        cfDef = metadata.getCfDef();
        // A commutative default validator means this is a counter table.
        type = metadata.getDefaultValidator().isCommutative() ? Type.COUNTER : Type.LOGGED;
        if (operations == null)
        {
            // Created from an INSERT
            if (type == Type.COUNTER)
                throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
            if (columnNames.size() != columnValues.size())
                throw new InvalidRequestException("Unmatched column names/values");
            if (columnNames.isEmpty())
                throw new InvalidRequestException("No columns provided to INSERT");
            for (int i = 0; i < columnNames.size(); i++)
            {
                CFDefinition.Name name = cfDef.get(columnNames.get(i));
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
                // For UPDATES, the parser validates we don't set the same value twice but we must check it here for INSERT
                for (int j = 0; j < i; j++)
                    if (name.name.equals(columnNames.get(j)))
                        throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
                Term.Raw value = columnValues.get(i);
                // PRIMARY KEY columns become processedKeys entries; other columns become SET-value operations.
                switch (name.kind)
                {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        Term t = value.prepare(name);
                        t.collectMarkerSpecification(boundNames);
                        if (processedKeys.put(name.name, Collections.singletonList(t)) != null)
                            throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
                        break;
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        Operation operation = new Operation.SetValue(value).prepare(name);
                        operation.collectMarkerSpecification(boundNames);
                        processedColumns.add(operation);
                        break;
                }
            }
        }
        else
        {
            // Created from an UPDATE
            for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : operations)
            {
                CFDefinition.Name name = cfDef.get(entry.left);
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
                Operation operation = entry.right.prepare(name);
                operation.collectMarkerSpecification(boundNames);
                switch (name.kind)
                {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // PK columns may only appear in the WHERE clause, never in SET.
                        throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        processedColumns.add(operation);
                        break;
                }
            }
            // For the UPDATE form, the PK values come from the WHERE clause.
            processKeys(cfDef, whereClause, processedKeys, boundNames);
        }
        return new ParsedStatement.Prepared(this, Arrays.<ColumnSpecification>asList(boundNames));
    }
public ParsedStatement.Prepared prepare() throws InvalidRequestException
{
ColumnSpecification[] names = new ColumnSpecification[getBoundsTerms()];
return prepare(names);
}
    // Reused by DeleteStatement
    /**
     * Extracts and binds the PRIMARY KEY values from the WHERE clause relations into
     * {@code processed}. Only EQ is accepted, plus IN on partition key columns.
     *
     * @throws InvalidRequestException on a non-PK column, an unsupported operator,
     *         or a column restricted more than once
     */
    static void processKeys(CFDefinition cfDef, List<Relation> keys, Map<ColumnIdentifier, List<Term>> processed, ColumnSpecification[] names) throws InvalidRequestException
    {
        for (Relation rel : keys)
        {
            CFDefinition.Name name = cfDef.get(rel.getEntity());
            if (name == null)
                throw new InvalidRequestException(String.format("Unknown key identifier %s", rel.getEntity()));
            switch (name.kind)
            {
                case KEY_ALIAS:
                case COLUMN_ALIAS:
                    List<Term.Raw> rawValues;
                    if (rel.operator() == Relation.Type.EQ)
                        rawValues = Collections.singletonList(rel.getValue());
                    else if (name.kind == CFDefinition.Name.Kind.KEY_ALIAS && rel.operator() == Relation.Type.IN)
                        rawValues = rel.getInValues();
                    else
                        throw new InvalidRequestException(String.format("Invalid operator %s for PRIMARY KEY part %s", rel.operator(), name));
                    // Bind each raw value and record its marker specification.
                    List<Term> values = new ArrayList<Term>(rawValues.size());
                    for (Term.Raw raw : rawValues)
                    {
                        Term t = raw.prepare(name);
                        t.collectMarkerSpecification(names);
                        values.add(t);
                    }
                    if (processed.put(name.name, values) != null)
                        throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
                    break;
                case VALUE_ALIAS:
                case COLUMN_METADATA:
                    throw new InvalidRequestException(String.format("Non PRIMARY KEY %s found in where clause", name));
            }
        }
    }
public String toString()
{
return String.format("UpdateStatement(name=%s, keys=%s, columns=%s, timestamp=%s, timeToLive=%s)",
cfName,
whereClause,
operations,
isSetTimestamp() ? getTimestamp(-1) : "<now>",
getTimeToLive());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
import static org.apache.cassandra.cql.QueryProcessor.validateKey;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
    private CFDefinition cfDef;
    // Provided for an UPDATE
    private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations;
    private final List<Relation> whereClause;
    // Provided for an INSERT
    private final List<ColumnIdentifier> columnNames;
    private final List<Term.Raw> columnValues;
    // Filled in by prepare(): non-PK column operations and bound PRIMARY KEY values respectively.
    private final List<Operation> processedColumns = new ArrayList<Operation>();
    private final Map<ColumnIdentifier, List<Term>> processedKeys = new HashMap<ColumnIdentifier, List<Term>>();
    // Used for COMPACT STORAGE tables that have no value column: writes an empty value cell.
    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
    /**
     * Creates a new UpdateStatement from a column family name, columns map, consistency
     * level, and key term. This is the UPDATE form of the statement; the INSERT-specific
     * fields (columnNames/columnValues) are left null.
     *
     * @param name column family being operated on
     * @param operations a map of column operations to perform
     * @param whereClause the where clause
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     */
    public UpdateStatement(CFName name,
                           List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations,
                           List<Relation> whereClause,
                           Attributes attrs)
    {
        super(name, attrs);
        this.operations = operations;
        this.whereClause = whereClause;
        // Null marks this statement as an UPDATE (see prepare()).
        this.columnNames = null;
        this.columnValues = null;
    }
    /**
     * Creates a new UpdateStatement from a column family name, a consistency level,
     * key, and lists of column names and values. It is intended for use with the
     * alternate update format, <code>INSERT</code>; the UPDATE-specific fields
     * (operations/whereClause) are left null.
     *
     * @param name column family being operated on
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     * @param columnNames list of column names
     * @param columnValues list of column values (corresponds to names)
     */
    public UpdateStatement(CFName name,
                           Attributes attrs,
                           List<ColumnIdentifier> columnNames,
                           List<Term.Raw> columnValues)
    {
        super(name, attrs);
        this.columnNames = columnNames;
        this.columnValues = columnValues;
        // Null marks this statement as an INSERT (see prepare()).
        this.operations = null;
        this.whereClause = null;
    }
protected void validateConsistency(ConsistencyLevel cl) throws InvalidRequestException
{
if (type == Type.COUNTER)
cl.validateCounterForWrite(cfDef.cfm);
else
cl.validateForWrite(cfDef.cfm.ksName);
}
    /** {@inheritDoc} */
    public Collection<IMutation> getMutations(List<ByteBuffer> variables, boolean local, ConsistencyLevel cl, long now)
    throws RequestExecutionException, RequestValidationException
    {
        // Bind the partition key values (several keys when IN was used on the last PK component).
        List<ByteBuffer> keys = buildKeyNames(cfDef, processedKeys, variables);
        ColumnNameBuilder builder = cfDef.getColumnNameBuilder();
        // Bind the clustering column values into the shared cell-name prefix builder.
        buildColumnNames(cfDef, processedKeys, builder, variables, true);
        // Lists SET operation incurs a read.
        Set<ByteBuffer> toRead = null;
        for (Operation op : processedColumns)
        {
            if (op.requiresRead())
            {
                if (toRead == null)
                    toRead = new TreeSet<ByteBuffer>(UTF8Type.instance);
                toRead.add(op.columnName.key);
            }
        }
        // Only perform the pre-read when at least one operation actually needs it.
        Map<ByteBuffer, ColumnGroupMap> rows = toRead != null ? readRows(keys, builder, toRead, (CompositeType)cfDef.cfm.comparator, local, cl) : null;
        Collection<IMutation> mutations = new LinkedList<IMutation>();
        UpdateParameters params = new UpdateParameters(variables, getTimestamp(now), getTimeToLive(), rows);
        // One mutation per partition key.
        for (ByteBuffer key: keys)
            mutations.add(mutationForKey(cfDef, key, builder, params, cl));
        return mutations;
    }
    // Returns the first empty component or null if none are
    // (i.e. the first clustering column for which no value was provided). Values that
    // are present are bound and appended to builder in schema order.
    static CFDefinition.Name buildColumnNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, ColumnNameBuilder builder, List<ByteBuffer> variables, boolean requireAllComponent)
    throws InvalidRequestException
    {
        CFDefinition.Name firstEmpty = null;
        for (CFDefinition.Name name : cfDef.columns.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
            {
                firstEmpty = name;
                // Composite (non-compact) tables require every clustering component to be set.
                if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
                    throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            }
            else if (firstEmpty != null)
            {
                // A later component is set while an earlier one is missing: reject.
                throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
            }
            else
            {
                assert values.size() == 1; // We only allow IN for row keys so far
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
                builder.add(val);
            }
        }
        return firstEmpty;
    }
    /** Builds the partition key(s) this statement applies to by binding the processed PK values. */
    static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
    throws InvalidRequestException
    {
        ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
        List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
        // Every partition key component is mandatory; multiple values (IN) are only
        // allowed on the last component, where each value yields a distinct key.
        for (CFDefinition.Name name : cfDef.keys.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
                throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            if (keyBuilder.remainingCount() == 1)
            {
                // Last component: build one complete key per provided value.
                for (Term t : values)
                {
                    ByteBuffer val = t.bindAndGet(variables);
                    if (val == null)
                        throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                    keys.add(keyBuilder.copy().add(val).build());
                }
            }
            else
            {
                if (values.size() > 1)
                    throw new InvalidRequestException("IN is only supported on the last column of the partition key");
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                keyBuilder.add(val);
            }
        }
        return keys;
    }
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm.cfName);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
    /**
     * Validates the statement against the current schema and binds its terms;
     * fills in cfDef, type, processedKeys and processedColumns. Handles both the
     * INSERT form (operations == null) and the UPDATE form.
     */
    public ParsedStatement.Prepared prepare(ColumnSpecification[] boundNames) throws InvalidRequestException
    {
        // Deal here with the keyspace overwrite thingy to avoid mistake
        CFMetaData metadata = validateColumnFamily(keyspace(), columnFamily());
        cfDef = metadata.getCfDef();
        // A commutative default validator means this is a counter table.
        type = metadata.getDefaultValidator().isCommutative() ? Type.COUNTER : Type.LOGGED;
        if (operations == null)
        {
            // Created from an INSERT
            if (type == Type.COUNTER)
                throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
            if (columnNames.size() != columnValues.size())
                throw new InvalidRequestException("Unmatched column names/values");
            if (columnNames.isEmpty())
                throw new InvalidRequestException("No columns provided to INSERT");
            for (int i = 0; i < columnNames.size(); i++)
            {
                CFDefinition.Name name = cfDef.get(columnNames.get(i));
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
                // For UPDATES, the parser validates we don't set the same value twice but we must check it here for INSERT
                for (int j = 0; j < i; j++)
                    if (name.name.equals(columnNames.get(j)))
                        throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
                Term.Raw value = columnValues.get(i);
                // PRIMARY KEY columns become processedKeys entries; other columns become SET-value operations.
                switch (name.kind)
                {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        Term t = value.prepare(name);
                        t.collectMarkerSpecification(boundNames);
                        if (processedKeys.put(name.name, Collections.singletonList(t)) != null)
                            throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
                        break;
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        Operation operation = new Operation.SetValue(value).prepare(name);
                        operation.collectMarkerSpecification(boundNames);
                        processedColumns.add(operation);
                        break;
                }
            }
        }
        else
        {
            // Created from an UPDATE
            for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : operations)
            {
                CFDefinition.Name name = cfDef.get(entry.left);
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
                Operation operation = entry.right.prepare(name);
                operation.collectMarkerSpecification(boundNames);
                switch (name.kind)
                {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // PK columns may only appear in the WHERE clause, never in SET.
                        throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        processedColumns.add(operation);
                        break;
                }
            }
            // For the UPDATE form, the PK values come from the WHERE clause.
            processKeys(cfDef, whereClause, processedKeys, boundNames);
        }
        return new ParsedStatement.Prepared(this, Arrays.<ColumnSpecification>asList(boundNames));
    }
public ParsedStatement.Prepared prepare() throws InvalidRequestException
{
ColumnSpecification[] names = new ColumnSpecification[getBoundsTerms()];
return prepare(names);
}
    // Reused by DeleteStatement
    /**
     * Extracts and binds the PRIMARY KEY values from the WHERE clause relations into
     * {@code processed}. Only EQ is accepted, plus IN on partition key columns.
     *
     * @throws InvalidRequestException on a non-PK column, an unsupported operator,
     *         or a column restricted more than once
     */
    static void processKeys(CFDefinition cfDef, List<Relation> keys, Map<ColumnIdentifier, List<Term>> processed, ColumnSpecification[] names) throws InvalidRequestException
    {
        for (Relation rel : keys)
        {
            CFDefinition.Name name = cfDef.get(rel.getEntity());
            if (name == null)
                throw new InvalidRequestException(String.format("Unknown key identifier %s", rel.getEntity()));
            switch (name.kind)
            {
                case KEY_ALIAS:
                case COLUMN_ALIAS:
                    List<Term.Raw> rawValues;
                    if (rel.operator() == Relation.Type.EQ)
                        rawValues = Collections.singletonList(rel.getValue());
                    else if (name.kind == CFDefinition.Name.Kind.KEY_ALIAS && rel.operator() == Relation.Type.IN)
                        rawValues = rel.getInValues();
                    else
                        throw new InvalidRequestException(String.format("Invalid operator %s for PRIMARY KEY part %s", rel.operator(), name));
                    // Bind each raw value and record its marker specification.
                    List<Term> values = new ArrayList<Term>(rawValues.size());
                    for (Term.Raw raw : rawValues)
                    {
                        Term t = raw.prepare(name);
                        t.collectMarkerSpecification(names);
                        values.add(t);
                    }
                    if (processed.put(name.name, values) != null)
                        throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
                    break;
                case VALUE_ALIAS:
                case COLUMN_METADATA:
                    throw new InvalidRequestException(String.format("Non PRIMARY KEY %s found in where clause", name));
            }
        }
    }
public String toString()
{
return String.format("UpdateStatement(name=%s, keys=%s, columns=%s, timestamp=%s, timeToLive=%s)",
cfName,
whereClause,
operations,
isSetTimestamp() ? getTimestamp(-1) : "<now>",
getTimeToLive());
}
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
import static org.apache.cassandra.cql.QueryProcessor.validateKey;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
    private CFDefinition cfDef;
    // Provided for an UPDATE
    private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations;
    private final List<Relation> whereClause;
    // Provided for an INSERT
    private final List<ColumnIdentifier> columnNames;
    private final List<Term.Raw> columnValues;
    // Filled in by prepare(): non-PK column operations and bound PRIMARY KEY values respectively.
    private final List<Operation> processedColumns = new ArrayList<Operation>();
    private final Map<ColumnIdentifier, List<Term>> processedKeys = new HashMap<ColumnIdentifier, List<Term>>();
    // Used for COMPACT STORAGE tables that have no value column: writes an empty value cell.
    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
    /**
     * Creates a new UpdateStatement from a column family name, columns map, consistency
     * level, and key term. This is the UPDATE form of the statement; the INSERT-specific
     * fields (columnNames/columnValues) are left null.
     *
     * @param name column family being operated on
     * @param operations a map of column operations to perform
     * @param whereClause the where clause
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     */
    public UpdateStatement(CFName name,
                           List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations,
                           List<Relation> whereClause,
                           Attributes attrs)
    {
        super(name, attrs);
        this.operations = operations;
        this.whereClause = whereClause;
        // Null marks this statement as an UPDATE (see prepare()).
        this.columnNames = null;
        this.columnValues = null;
    }
    /**
     * Creates a new UpdateStatement from a column family name, a consistency level,
     * key, and lists of column names and values. It is intended for use with the
     * alternate update format, <code>INSERT</code>; the UPDATE-specific fields
     * (operations/whereClause) are left null.
     *
     * @param name column family being operated on
     * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
     * @param columnNames list of column names
     * @param columnValues list of column values (corresponds to names)
     */
    public UpdateStatement(CFName name,
                           Attributes attrs,
                           List<ColumnIdentifier> columnNames,
                           List<Term.Raw> columnValues)
    {
        super(name, attrs);
        this.columnNames = columnNames;
        this.columnValues = columnValues;
        // Null marks this statement as an INSERT (see prepare()).
        this.operations = null;
        this.whereClause = null;
    }
protected void validateConsistency(ConsistencyLevel cl) throws InvalidRequestException
{
if (type == Type.COUNTER)
cl.validateCounterForWrite(cfDef.cfm);
else
cl.validateForWrite(cfDef.cfm.ksName);
}
    /** {@inheritDoc} */
    public Collection<IMutation> getMutations(List<ByteBuffer> variables, boolean local, ConsistencyLevel cl, long now)
    throws RequestExecutionException, RequestValidationException
    {
        // Bind the partition key values (several keys when IN was used on the last PK component).
        List<ByteBuffer> keys = buildKeyNames(cfDef, processedKeys, variables);
        ColumnNameBuilder builder = cfDef.getColumnNameBuilder();
        // Bind the clustering column values into the shared cell-name prefix builder.
        buildColumnNames(cfDef, processedKeys, builder, variables, true);
        // Lists SET operation incurs a read.
        Set<ByteBuffer> toRead = null;
        for (Operation op : processedColumns)
        {
            if (op.requiresRead())
            {
                if (toRead == null)
                    toRead = new TreeSet<ByteBuffer>(UTF8Type.instance);
                toRead.add(op.columnName.key);
            }
        }
        // Only perform the pre-read when at least one operation actually needs it.
        Map<ByteBuffer, ColumnGroupMap> rows = toRead != null ? readRows(keys, builder, toRead, (CompositeType)cfDef.cfm.comparator, local, cl) : null;
        Collection<IMutation> mutations = new LinkedList<IMutation>();
        UpdateParameters params = new UpdateParameters(variables, getTimestamp(now), getTimeToLive(), rows);
        // One mutation per partition key.
        for (ByteBuffer key: keys)
            mutations.add(mutationForKey(cfDef, key, builder, params, cl));
        return mutations;
    }
    // Returns the first empty component or null if none are
    // (i.e. the first clustering column for which no value was provided). Values that
    // are present are bound and appended to builder in schema order.
    static CFDefinition.Name buildColumnNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, ColumnNameBuilder builder, List<ByteBuffer> variables, boolean requireAllComponent)
    throws InvalidRequestException
    {
        CFDefinition.Name firstEmpty = null;
        for (CFDefinition.Name name : cfDef.columns.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
            {
                firstEmpty = name;
                // Composite (non-compact) tables require every clustering component to be set.
                if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
                    throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            }
            else if (firstEmpty != null)
            {
                // A later component is set while an earlier one is missing: reject.
                throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
            }
            else
            {
                assert values.size() == 1; // We only allow IN for row keys so far
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
                builder.add(val);
            }
        }
        return firstEmpty;
    }
    /** Builds the partition key(s) this statement applies to by binding the processed PK values. */
    static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
    throws InvalidRequestException
    {
        ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
        List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
        // Every partition key component is mandatory; multiple values (IN) are only
        // allowed on the last component, where each value yields a distinct key.
        for (CFDefinition.Name name : cfDef.keys.values())
        {
            List<Term> values = processed.get(name.name);
            if (values == null || values.isEmpty())
                throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
            if (keyBuilder.remainingCount() == 1)
            {
                // Last component: build one complete key per provided value.
                for (Term t : values)
                {
                    ByteBuffer val = t.bindAndGet(variables);
                    if (val == null)
                        throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                    keys.add(keyBuilder.copy().add(val).build());
                }
            }
            else
            {
                if (values.size() > 1)
                    throw new InvalidRequestException("IN is only supported on the last column of the partition key");
                ByteBuffer val = values.get(0).bindAndGet(variables);
                if (val == null)
                    throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
                keyBuilder.add(val);
            }
        }
        return keys;
    }
/**
 * Compute a row mutation for a single key
 *
 * @param cfDef the table definition
 * @param key the serialized partition key the mutation targets
 * @param builder clustering-name prefix shared by all updated columns
 * @param params timestamp/TTL and prefetched rows for this execution
 * @param cl consistency level (only used to build a CounterMutation)
 * @return row mutation
 *
 * @throws InvalidRequestException on the wrong request
 */
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
// NOTE(review): the key is validated twice — once through the statically imported
// cql.QueryProcessor.validateKey and once through the cql3 QueryProcessor. Confirm
// the second call is intentional and not a merge leftover.
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
// Compact tables carry values in the column name itself, so the clustering prefix must be non-empty.
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
/**
 * Validates this statement against the table metadata and collects bind-marker
 * specifications into {@code boundNames}. Handles both the INSERT form
 * (operations == null: column names/values lists) and the UPDATE form
 * (SET operations + WHERE clause).
 *
 * @param boundNames output array receiving a specification per bound term
 * @throws InvalidRequestException on unknown/duplicated columns or illegal usage
 */
public ParsedStatement.Prepared prepare(ColumnSpecification[] boundNames) throws InvalidRequestException
{
// Deal here with the keyspace overwrite thingy to avoid mistake
CFMetaData metadata = validateColumnFamily(keyspace(), columnFamily());
cfDef = metadata.getCfDef();
type = metadata.getDefaultValidator().isCommutative() ? Type.COUNTER : Type.LOGGED;
if (operations == null)
{
// Created from an INSERT
if (type == Type.COUNTER)
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// For UPDATES, the parser validates we don't set the same value twice but we must check it here for INSERT
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY part: recorded as the (single) key value, duplicates rejected.
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
if (processedKeys.put(name.name, Collections.singletonList(t)) != null)
throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
// Regular column: an INSERT value is equivalent to a SET to that value.
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
processedColumns.add(operation);
break;
}
}
}
else
{
// Created from an UPDATE
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : operations)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
processedColumns.add(operation);
break;
}
}
// Key restrictions come from the WHERE clause in the UPDATE form.
processKeys(cfDef, whereClause, processedKeys, boundNames);
}
return new ParsedStatement.Prepared(this, Arrays.<ColumnSpecification>asList(boundNames));
}
/**
 * Prepares this statement when no caller-supplied specification array exists,
 * allocating one slot per bound term and delegating to the array-taking overload.
 */
public ParsedStatement.Prepared prepare() throws InvalidRequestException
{
    return prepare(new ColumnSpecification[getBoundsTerms()]);
}
// Reused by DeleteStatement
/**
 * Turns the WHERE-clause relations into prepared terms per PRIMARY KEY column.
 * Only EQ is accepted on key parts, plus IN on partition-key columns; relations
 * on non-key columns are rejected.
 *
 * @param cfDef the table definition
 * @param keys the WHERE-clause relations
 * @param processed output map receiving the prepared terms per key column
 * @param names output array receiving bind-marker specifications
 * @throws InvalidRequestException on unknown columns, bad operators, or duplicates
 */
static void processKeys(CFDefinition cfDef, List<Relation> keys, Map<ColumnIdentifier, List<Term>> processed, ColumnSpecification[] names) throws InvalidRequestException
{
for (Relation rel : keys)
{
CFDefinition.Name name = cfDef.get(rel.getEntity());
if (name == null)
throw new InvalidRequestException(String.format("Unknown key identifier %s", rel.getEntity()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
List<Term.Raw> rawValues;
if (rel.operator() == Relation.Type.EQ)
rawValues = Collections.singletonList(rel.getValue());
// IN is only supported on partition-key (KEY_ALIAS) columns.
else if (name.kind == CFDefinition.Name.Kind.KEY_ALIAS && rel.operator() == Relation.Type.IN)
rawValues = rel.getInValues();
else
throw new InvalidRequestException(String.format("Invalid operator %s for PRIMARY KEY part %s", rel.operator(), name));
List<Term> values = new ArrayList<Term>(rawValues.size());
for (Term.Raw raw : rawValues)
{
Term t = raw.prepare(name);
t.collectMarkerSpecification(names);
values.add(t);
}
if (processed.put(name.name, values) != null)
throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Non PRIMARY KEY %s found in where clause", name));
}
}
}
/** Debug representation: table, where clause, operations, timestamp and TTL. */
public String toString()
{
    // "<now>" stands in for a timestamp resolved at execution time.
    Object ts = isSetTimestamp() ? getTimestamp(-1) : "<now>";
    return "UpdateStatement(name=" + cfName
         + ", keys=" + whereClause
         + ", columns=" + operations
         + ", timestamp=" + ts
         + ", timeToLive=" + getTimeToLive() + ")";
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
import static org.apache.cassandra.cql.QueryProcessor.validateKey;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
private CFDefinition cfDef;
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations;
private final List<Relation> whereClause;
// Provided for an INSERT
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
private final List<Operation> processedColumns = new ArrayList<Operation>();
private final Map<ColumnIdentifier, List<Term>> processedKeys = new HashMap<ColumnIdentifier, List<Term>>();
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
 * Creates a new UpdateStatement from a column family name, columns map, consistency
 * level, and key term. This is the UPDATE form of the statement.
 *
 * @param name column family being operated on
 * @param operations a map of column operations to perform
 * @param whereClause the where clause
 * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
 */
public UpdateStatement(CFName name,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations,
List<Relation> whereClause,
Attributes attrs)
{
super(name, attrs);
this.operations = operations;
this.whereClause = whereClause;
// INSERT-only fields are left null; prepare() uses their nullity to pick the code path.
this.columnNames = null;
this.columnValues = null;
}
/**
 * Creates a new UpdateStatement from a column family name, a consistency level,
 * key, and lists of column names and values. It is intended for use with the
 * alternate update format, <code>INSERT</code>.
 *
 * @param name column family being operated on
 * @param columnNames list of column names
 * @param columnValues list of column values (corresponds to names)
 * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
 */
public UpdateStatement(CFName name,
Attributes attrs,
List<ColumnIdentifier> columnNames,
List<Term.Raw> columnValues)
{
super(name, attrs);
this.columnNames = columnNames;
this.columnValues = columnValues;
// UPDATE-only fields are left null; prepare() keys the INSERT path off operations == null.
this.operations = null;
this.whereClause = null;
}
/**
 * Checks that the requested consistency level is legal for writes to this table,
 * using the counter-specific validation for counter tables.
 *
 * @throws InvalidRequestException if the level is not usable for this write
 */
protected void validateConsistency(ConsistencyLevel cl) throws InvalidRequestException
{
if (type == Type.COUNTER)
cl.validateCounterForWrite(cfDef.cfm);
else
cl.validateForWrite(cfDef.cfm.ksName);
}
/**
 * {@inheritDoc}
 *
 * Builds one mutation per targeted partition key: binds the keys and clustering
 * prefix, prefetches rows for operations that need a read (e.g. list SETs),
 * then delegates per-key work to mutationForKey().
 */
public Collection<IMutation> getMutations(List<ByteBuffer> variables, boolean local, ConsistencyLevel cl, long now)
throws RequestExecutionException, RequestValidationException
{
List<ByteBuffer> keys = buildKeyNames(cfDef, processedKeys, variables);
ColumnNameBuilder builder = cfDef.getColumnNameBuilder();
buildColumnNames(cfDef, processedKeys, builder, variables, true);
// Lists SET operation incurs a read.
Set<ByteBuffer> toRead = null;
for (Operation op : processedColumns)
{
if (op.requiresRead())
{
// Lazily allocate: most statements need no read at all.
if (toRead == null)
toRead = new TreeSet<ByteBuffer>(UTF8Type.instance);
toRead.add(op.columnName.key);
}
}
Map<ByteBuffer, ColumnGroupMap> rows = toRead != null ? readRows(keys, builder, toRead, (CompositeType)cfDef.cfm.comparator, local, cl) : null;
Collection<IMutation> mutations = new LinkedList<IMutation>();
UpdateParameters params = new UpdateParameters(variables, getTimestamp(now), getTimeToLive(), rows);
for (ByteBuffer key: keys)
mutations.add(mutationForKey(cfDef, key, builder, params, cl));
return mutations;
}
/**
 * Binds the processed clustering-key parts into {@code builder}, in declaration
 * order. Once a component is missing, no later component may be set.
 *
 * @param requireAllComponent when true, a missing component on a non-compact
 *        composite table is an error rather than a truncated prefix
 * @return the first empty component, or null if none are
 * @throws InvalidRequestException on missing/null parts or gaps in the prefix
 */
static CFDefinition.Name buildColumnNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, ColumnNameBuilder builder, List<ByteBuffer> variables, boolean requireAllComponent)
throws InvalidRequestException
{
CFDefinition.Name firstEmpty = null;
for (CFDefinition.Name name : cfDef.columns.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
{
firstEmpty = name;
if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
}
else if (firstEmpty != null)
{
// A later component is set while an earlier one is missing: invalid prefix.
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
}
else
{
assert values.size() == 1; // We only allow IN for row keys so far
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
builder.add(val);
}
}
return firstEmpty;
}
/**
 * Builds the list of partition keys for this statement by binding each processed
 * PRIMARY KEY part in declaration order. An IN restriction is only legal on the
 * last partition-key component, where it fans out into one key per value.
 *
 * @param cfDef the table definition
 * @param processed prepared terms per PRIMARY KEY column (from processKeys/prepare)
 * @param variables bound values for prepared-statement markers
 * @return one serialized key per partition targeted by the statement
 * @throws InvalidRequestException if a key part is missing or null, or IN is used
 *         on a non-terminal component
 */
static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
throws InvalidRequestException
{
ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
for (CFDefinition.Name name : cfDef.keys.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
// Last remaining component: each value completes a full key (handles IN fan-out).
if (keyBuilder.remainingCount() == 1)
{
for (Term t : values)
{
ByteBuffer val = t.bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keys.add(keyBuilder.copy().add(val).build());
}
}
else
{
// Non-terminal component: exactly one value allowed; append it to the shared prefix.
if (values.size() > 1)
throw new InvalidRequestException("IN is only supported on the last column of the partition key");
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keyBuilder.add(val);
}
}
return keys;
}
/**
 * Compute a row mutation for a single key
 *
 * @param cfDef the table definition
 * @param key the serialized partition key the mutation targets
 * @param builder clustering-name prefix shared by all updated columns
 * @param params timestamp/TTL and prefetched rows for this execution
 * @param cl consistency level (only used to build a CounterMutation)
 * @return row mutation
 *
 * @throws InvalidRequestException on the wrong request
 */
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
// NOTE(review): the key is validated twice — once through the statically imported
// cql.QueryProcessor.validateKey and once through the cql3 QueryProcessor. Confirm
// the second call is intentional and not a merge leftover.
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
// Compact tables carry values in the column name itself, so the clustering prefix must be non-empty.
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
/**
 * Validates this statement against the table metadata and collects bind-marker
 * specifications into {@code boundNames}. Handles both the INSERT form
 * (operations == null: column names/values lists) and the UPDATE form
 * (SET operations + WHERE clause).
 *
 * @param boundNames output array receiving a specification per bound term
 * @throws InvalidRequestException on unknown/duplicated columns or illegal usage
 */
public ParsedStatement.Prepared prepare(ColumnSpecification[] boundNames) throws InvalidRequestException
{
// Deal here with the keyspace overwrite thingy to avoid mistake
CFMetaData metadata = validateColumnFamily(keyspace(), columnFamily());
cfDef = metadata.getCfDef();
type = metadata.getDefaultValidator().isCommutative() ? Type.COUNTER : Type.LOGGED;
if (operations == null)
{
// Created from an INSERT
if (type == Type.COUNTER)
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// For UPDATES, the parser validates we don't set the same value twice but we must check it here for INSERT
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY part: recorded as the (single) key value, duplicates rejected.
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
if (processedKeys.put(name.name, Collections.singletonList(t)) != null)
throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
// Regular column: an INSERT value is equivalent to a SET to that value.
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
processedColumns.add(operation);
break;
}
}
}
else
{
// Created from an UPDATE
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : operations)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
processedColumns.add(operation);
break;
}
}
// Key restrictions come from the WHERE clause in the UPDATE form.
processKeys(cfDef, whereClause, processedKeys, boundNames);
}
return new ParsedStatement.Prepared(this, Arrays.<ColumnSpecification>asList(boundNames));
}
/**
 * Prepares this statement when no caller-supplied specification array exists,
 * allocating one slot per bound term and delegating to the array-taking overload.
 */
public ParsedStatement.Prepared prepare() throws InvalidRequestException
{
    return prepare(new ColumnSpecification[getBoundsTerms()]);
}
// Reused by DeleteStatement
/**
 * Turns the WHERE-clause relations into prepared terms per PRIMARY KEY column.
 * Only EQ is accepted on key parts, plus IN on partition-key columns; relations
 * on non-key columns are rejected.
 *
 * @param cfDef the table definition
 * @param keys the WHERE-clause relations
 * @param processed output map receiving the prepared terms per key column
 * @param names output array receiving bind-marker specifications
 * @throws InvalidRequestException on unknown columns, bad operators, or duplicates
 */
static void processKeys(CFDefinition cfDef, List<Relation> keys, Map<ColumnIdentifier, List<Term>> processed, ColumnSpecification[] names) throws InvalidRequestException
{
for (Relation rel : keys)
{
CFDefinition.Name name = cfDef.get(rel.getEntity());
if (name == null)
throw new InvalidRequestException(String.format("Unknown key identifier %s", rel.getEntity()));
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
List<Term.Raw> rawValues;
if (rel.operator() == Relation.Type.EQ)
rawValues = Collections.singletonList(rel.getValue());
// IN is only supported on partition-key (KEY_ALIAS) columns.
else if (name.kind == CFDefinition.Name.Kind.KEY_ALIAS && rel.operator() == Relation.Type.IN)
rawValues = rel.getInValues();
else
throw new InvalidRequestException(String.format("Invalid operator %s for PRIMARY KEY part %s", rel.operator(), name));
List<Term> values = new ArrayList<Term>(rawValues.size());
for (Term.Raw raw : rawValues)
{
Term t = raw.prepare(name);
t.collectMarkerSpecification(names);
values.add(t);
}
if (processed.put(name.name, values) != null)
throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", name));
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
throw new InvalidRequestException(String.format("Non PRIMARY KEY %s found in where clause", name));
}
}
}
/** Debug representation: table, where clause, operations, timestamp and TTL. */
public String toString()
{
    // "<now>" stands in for a timestamp resolved at execution time.
    Object ts = isSetTimestamp() ? getTimestamp(-1) : "<now>";
    return "UpdateStatement(name=" + cfName
         + ", keys=" + whereClause
         + ", columns=" + operations
         + ", timestamp=" + ts
         + ", timeToLive=" + getTimeToLive() + ")";
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
 * Private: instances are only created through ParsedInsert/ParsedUpdate.prepareInternal().
 */
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
/**
 * UPDATE/INSERT write concrete cells, so every clustering-key component must be
 * supplied (unlike ranged deletes, which may stop at a prefix).
 */
protected boolean requireFullClusteringKey()
{
return true;
}
/**
 * Builds the ColumnFamily holding all column updates for a single partition key.
 *
 * @param key the serialized partition key being updated
 * @param builder clustering-name prefix shared by all updated columns
 * @param params timestamp/TTL and prefetched rows for this execution
 * @return the populated (unsorted) column family
 * @throws InvalidRequestException on invalid compact-storage usage
 */
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
// Compact tables carry values in the column name itself, so the clustering prefix must be non-empty.
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
/**
 * Raw (parser-produced) form of an INSERT; prepareInternal() turns it into an
 * UpdateStatement by treating each provided value as a SET on that column.
 */
public static class ParsedInsert extends ModificationStatement.Parsed
{
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
* A parsed <code>INSERT</code> statement.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// The parser can't catch duplicate columns in an INSERT, so check pairwise here.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY part: recorded as a key value on the statement.
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
// Regular column: an INSERT value is equivalent to a SET to that value.
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
/**
 * Raw (parser-produced) form of an UPDATE; prepareInternal() validates the SET
 * operations and WHERE clause and produces an UpdateStatement.
 */
public static class ParsedUpdate extends ModificationStatement.Parsed
{
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
private final List<Relation> whereClause;
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param attrs additional attributes for statement (timestamp, timeToLive)
* @param updates a map of column operations to perform
* @param whereClause the where clause
*/
public ParsedUpdate(CFName name,
Attributes.Raw attrs,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
List<Relation> whereClause,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
{
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY parts can only be restricted in WHERE, never assigned in SET.
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
 * Private: instances are only created through ParsedInsert/ParsedUpdate.prepareInternal().
 */
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
/**
 * UPDATE/INSERT write concrete cells, so every clustering-key component must be
 * supplied (unlike ranged deletes, which may stop at a prefix).
 */
protected boolean requireFullClusteringKey()
{
return true;
}
/**
 * Builds the ColumnFamily holding all column updates for a single partition key.
 *
 * @param key the serialized partition key being updated
 * @param builder clustering-name prefix shared by all updated columns
 * @param params timestamp/TTL and prefetched rows for this execution
 * @return the populated (unsorted) column family
 * @throws InvalidRequestException on invalid compact-storage usage
 */
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
// Compact tables carry values in the column name itself, so the clustering prefix must be non-empty.
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
/**
 * Raw (parser-produced) form of an INSERT; prepareInternal() turns it into an
 * UpdateStatement by treating each provided value as a SET on that column.
 */
public static class ParsedInsert extends ModificationStatement.Parsed
{
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
* A parsed <code>INSERT</code> statement.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// The parser can't catch duplicate columns in an INSERT, so check pairwise here.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY part: recorded as a key value on the statement.
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
// Regular column: an INSERT value is equivalent to a SET to that value.
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
/**
 * Raw (parser-produced) form of an UPDATE; prepareInternal() validates the SET
 * operations and WHERE clause and produces an UpdateStatement.
 */
public static class ParsedUpdate extends ModificationStatement.Parsed
{
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
private final List<Relation> whereClause;
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param attrs additional attributes for statement (timestamp, timeToLive)
* @param updates a map of column operations to perform
* @param whereClause the where clause
*/
public ParsedUpdate(CFName name,
Attributes.Raw attrs,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
List<Relation> whereClause,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
{
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
// PRIMARY KEY parts can only be restricted in WHERE, never assigned in SET.
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
// MergeMethods — merge-tool variant marker (not Java code); the variant below merges both key methods into updateForKey
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
/**
 * An <code>UPDATE</code> (or <code>INSERT</code>) statement parsed from a CQL query.
 */
public class UpdateStatement extends ModificationStatement {

    // Writes an empty value into a compact table's value column. This is the
    // substitute "row marker" used when a COMPACT STORAGE table's INSERT/UPDATE
    // sets only the primary key (see CASSANDRA-5648 and updateForKey below).
    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));

    private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs) {
        super(boundTerms, cfm, attrs);
    }

    /**
     * Computes the columns to write for a single partition key.
     *
     * @param key the partition key (validated before use)
     * @param builder the clustering prefix identifying the row being written
     * @param params per-update parameters (timestamp, TTL, bound values)
     * @return the ColumnFamily holding the columns to apply for this key
     * @throws InvalidRequestException if a mandatory PRIMARY KEY part is missing,
     *         or a COMPACT STORAGE table's value column is not set
     */
    public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params) throws InvalidRequestException {
        // NOTE(review): both validations kept from the merged branches — they look
        // redundant; confirm whether one can be dropped.
        validateKey(key);
        QueryProcessor.validateKey(key);
        RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
        ColumnFamily cf = rm.addOrGet(cfDef.cfm);
        // For non-compact composite tables, write a row marker so the row exists
        // even when no regular column is set (and so that
        // 'DELETE FROM t WHERE k = 1' does remove the row entirely).
        if (cfDef.isComposite && !cfDef.isCompact) {
            ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
            cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
        }
        if (cfDef.isCompact) {
            if (builder.componentCount() == 0)
                throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
            if (cfDef.value == null) {
                // No declared value column: only the PK can have been set, which must
                // have passed through validation.
                assert processedColumns.isEmpty();
                setToEmptyOperation.execute(key, cf, builder.copy(), params);
            } else {
                // compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
                if (processedColumns.isEmpty())
                    throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
                for (Operation op : processedColumns)
                    op.execute(key, cf, builder.copy(), params);
            }
        } else {
            for (Operation op : processedColumns)
                op.execute(key, cf, builder.copy(), params);
        }
        // BUG FIX: the method is declared to return ColumnFamily but previously
        // returned `type == Type.COUNTER ? new CounterMutation(rm, cl) : rm`,
        // i.e. a mutation, which does not compile. Return the populated
        // ColumnFamily; wrapping it into a (counter) mutation is the caller's job.
        return cf;
    }

    /**
     * Updates address exactly one row, so every clustering column must be bound.
     */
    protected boolean requireFullClusteringKey() {
        return true;
    }

    /**
     * A parsed <code>INSERT</code> statement.
     */
    public static class ParsedInsert extends ModificationStatement.Parsed {
        private final List<ColumnIdentifier> columnNames;
        private final List<Term.Raw> columnValues;

        /**
         * @param name column family being operated on
         * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
         * @param columnNames list of column names
         * @param columnValues list of column values (corresponds to names)
         * @param ifNotExists whether the insert is guarded by IF NOT EXISTS
         */
        public ParsedInsert(CFName name, Attributes.Raw attrs, List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues, boolean ifNotExists) {
            super(name, attrs, null, ifNotExists);
            this.columnNames = columnNames;
            this.columnValues = columnValues;
        }

        /**
         * Validates the column name/value lists and records each column either as
         * a primary-key value or as a SET-value operation.
         */
        protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
            UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
            // Created from an INSERT
            if (stmt.isCounter())
                throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
            if (columnNames.size() != columnValues.size())
                throw new InvalidRequestException("Unmatched column names/values");
            if (columnNames.isEmpty())
                throw new InvalidRequestException("No columns provided to INSERT");
            for (int i = 0; i < columnNames.size(); i++) {
                CFDefinition.Name name = cfDef.get(columnNames.get(i));
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
                // Reject the same column appearing twice in the column list.
                for (int j = 0; j < i; j++)
                    if (name.name.equals(columnNames.get(j)))
                        throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
                Term.Raw value = columnValues.get(i);
                switch (name.kind) {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // Primary-key component: stored as a key value, not an operation.
                        Term t = value.prepare(name);
                        t.collectMarkerSpecification(boundNames);
                        stmt.addKeyValue(name.name, t);
                        break;
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        // Regular column: an INSERT value is equivalent to SET col = value.
                        Operation operation = new Operation.SetValue(value).prepare(name);
                        operation.collectMarkerSpecification(boundNames);
                        stmt.addOperation(operation);
                        break;
                }
            }
            return stmt;
        }
    }

    /**
     * A parsed <code>UPDATE</code> statement: SET assignments plus a WHERE clause.
     */
    public static class ParsedUpdate extends ModificationStatement.Parsed {
        // Provided for an UPDATE
        private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
        private final List<Relation> whereClause;

        /**
         * @param name column family being operated on
         * @param attrs additional attributes for statement (timestamp, timeToLive)
         * @param updates the column operations to perform (SET part)
         * @param whereClause the where clause
         * @param conditions IF conditions for a conditional update (may be null)
         */
        public ParsedUpdate(CFName name, Attributes.Raw attrs, List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates, List<Relation> whereClause, List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions) {
            super(name, attrs, conditions, false);
            this.updates = updates;
            this.whereClause = whereClause;
        }

        /**
         * Resolves every SET assignment (rejecting writes to PRIMARY KEY parts)
         * and processes the WHERE clause.
         */
        protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
            UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
            for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates) {
                CFDefinition.Name name = cfDef.get(entry.left);
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
                Operation operation = entry.right.prepare(name);
                operation.collectMarkerSpecification(boundNames);
                switch (name.kind) {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // The primary key is addressed by the WHERE clause, never by SET.
                        throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        stmt.addOperation(operation);
                        break;
                }
            }
            stmt.processWhereClause(whereClause, boundNames);
            return stmt;
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
/**
 * An <code>UPDATE</code> (or <code>INSERT</code>) statement parsed from a CQL query.
 */
public class UpdateStatement extends ModificationStatement {

    // Writes an empty value into a compact table's value column. This is the
    // substitute "row marker" used when a COMPACT STORAGE table's INSERT/UPDATE
    // sets only the primary key (see CASSANDRA-5648 and updateForKey below).
    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));

    private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs) {
        super(boundTerms, cfm, attrs);
    }

    /**
     * Computes the columns to write for a single partition key.
     *
     * @param key the partition key (validated before use)
     * @param builder the clustering prefix identifying the row being written
     * @param params per-update parameters (timestamp, TTL, bound values)
     * @return the ColumnFamily holding the columns to apply for this key
     * @throws InvalidRequestException if a mandatory PRIMARY KEY part is missing,
     *         or a COMPACT STORAGE table's value column is not set
     */
    public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params) throws InvalidRequestException {
        // NOTE(review): both validations kept from the merged branches — they look
        // redundant; confirm whether one can be dropped.
        validateKey(key);
        QueryProcessor.validateKey(key);
        RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
        ColumnFamily cf = rm.addOrGet(cfDef.cfm);
        // For non-compact composite tables, write a row marker so the row exists
        // even when no regular column is set (and so that
        // 'DELETE FROM t WHERE k = 1' does remove the row entirely).
        if (cfDef.isComposite && !cfDef.isCompact) {
            ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
            cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
        }
        if (cfDef.isCompact) {
            if (builder.componentCount() == 0)
                throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
            if (cfDef.value == null) {
                // No declared value column: only the PK can have been set, which must
                // have passed through validation.
                assert processedColumns.isEmpty();
                setToEmptyOperation.execute(key, cf, builder.copy(), params);
            } else {
                // compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
                if (processedColumns.isEmpty())
                    throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
                for (Operation op : processedColumns)
                    op.execute(key, cf, builder.copy(), params);
            }
        } else {
            for (Operation op : processedColumns)
                op.execute(key, cf, builder.copy(), params);
        }
        // BUG FIX: the method is declared to return ColumnFamily but previously
        // returned `type == Type.COUNTER ? new CounterMutation(rm, cl) : rm`,
        // i.e. a mutation, which does not compile. Return the populated
        // ColumnFamily; wrapping it into a (counter) mutation is the caller's job.
        return cf;
    }

    /**
     * Updates address exactly one row, so every clustering column must be bound.
     */
    protected boolean requireFullClusteringKey() {
        return true;
    }

    /**
     * A parsed <code>INSERT</code> statement.
     */
    public static class ParsedInsert extends ModificationStatement.Parsed {
        private final List<ColumnIdentifier> columnNames;
        private final List<Term.Raw> columnValues;

        /**
         * @param name column family being operated on
         * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
         * @param columnNames list of column names
         * @param columnValues list of column values (corresponds to names)
         * @param ifNotExists whether the insert is guarded by IF NOT EXISTS
         */
        public ParsedInsert(CFName name, Attributes.Raw attrs, List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues, boolean ifNotExists) {
            super(name, attrs, null, ifNotExists);
            this.columnNames = columnNames;
            this.columnValues = columnValues;
        }

        /**
         * Validates the column name/value lists and records each column either as
         * a primary-key value or as a SET-value operation.
         */
        protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
            UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
            // Created from an INSERT
            if (stmt.isCounter())
                throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
            if (columnNames.size() != columnValues.size())
                throw new InvalidRequestException("Unmatched column names/values");
            if (columnNames.isEmpty())
                throw new InvalidRequestException("No columns provided to INSERT");
            for (int i = 0; i < columnNames.size(); i++) {
                CFDefinition.Name name = cfDef.get(columnNames.get(i));
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
                // Reject the same column appearing twice in the column list.
                for (int j = 0; j < i; j++)
                    if (name.name.equals(columnNames.get(j)))
                        throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
                Term.Raw value = columnValues.get(i);
                switch (name.kind) {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // Primary-key component: stored as a key value, not an operation.
                        Term t = value.prepare(name);
                        t.collectMarkerSpecification(boundNames);
                        stmt.addKeyValue(name.name, t);
                        break;
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        // Regular column: an INSERT value is equivalent to SET col = value.
                        Operation operation = new Operation.SetValue(value).prepare(name);
                        operation.collectMarkerSpecification(boundNames);
                        stmt.addOperation(operation);
                        break;
                }
            }
            return stmt;
        }
    }

    /**
     * A parsed <code>UPDATE</code> statement: SET assignments plus a WHERE clause.
     */
    public static class ParsedUpdate extends ModificationStatement.Parsed {
        // Provided for an UPDATE
        private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
        private final List<Relation> whereClause;

        /**
         * @param name column family being operated on
         * @param attrs additional attributes for statement (timestamp, timeToLive)
         * @param updates the column operations to perform (SET part)
         * @param whereClause the where clause
         * @param conditions IF conditions for a conditional update (may be null)
         */
        public ParsedUpdate(CFName name, Attributes.Raw attrs, List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates, List<Relation> whereClause, List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions) {
            super(name, attrs, conditions, false);
            this.updates = updates;
            this.whereClause = whereClause;
        }

        /**
         * Resolves every SET assignment (rejecting writes to PRIMARY KEY parts)
         * and processes the WHERE clause.
         */
        protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
            UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
            for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates) {
                CFDefinition.Name name = cfDef.get(entry.left);
                if (name == null)
                    throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
                Operation operation = entry.right.prepare(name);
                operation.collectMarkerSpecification(boundNames);
                switch (name.kind) {
                    case KEY_ALIAS:
                    case COLUMN_ALIAS:
                        // The primary key is addressed by the WHERE clause, never by SET.
                        throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
                    case VALUE_ALIAS:
                    case COLUMN_METADATA:
                        stmt.addOperation(operation);
                        break;
                }
            }
            stmt.processWhereClause(whereClause, boundNames);
            return stmt;
        }
    }
}
// KeepBothMethods — merge-tool variant marker (not Java code); the variant below keeps both mutationForKey and updateForKey
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement {
// Provided for an UPDATE
// Provided for an INSERT
// Setter that writes an empty value; used as the substitute "row marker" when a
// COMPACT STORAGE table's statement sets only the primary key (see CASSANDRA-5648).
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param operations a map of column operations to perform
* @param whereClause the where clause
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/**
* Creates a new UpdateStatement from a column family name, a consistency level,
* key, and lists of column names and values. It is intended for use with the
* alternate update format, <code>INSERT</code>.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/** {@inheritDoc} */
// Returns the first empty component or null if none are
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
// NOTE(review): this private method is never called anywhere in the visible
// class — it reads like a merge leftover superseded by updateForKey below
// (which builds columns from getOperations() instead of processedColumns and
// leaves mutation wrapping to the caller). Confirm before removing.
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl) throws InvalidRequestException {
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
// Row marker for non-compact composite tables: keeps the row alive even with no
// regular column set (while
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact) {
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact) {
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null) {
// No declared value column: only the PK can have been set, which must
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
} else {
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns) op.execute(key, cf, builder.copy(), params);
}
} else {
for (Operation op : processedColumns) op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs) {
super(boundTerms, cfm, attrs);
}
// Updates address exactly one row, so every clustering column must be bound.
protected boolean requireFullClusteringKey() {
return true;
}
// Computes the columns to write for a single partition key; the caller wraps
// the returned ColumnFamily into the appropriate mutation.
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params) throws InvalidRequestException {
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper()) {
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact) {
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null) {
// No declared value column: only the PK can have been set, which must
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
} else {
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates) update.execute(key, cf, builder.copy(), params);
}
} else {
for (Operation update : updates) update.execute(key, cf, builder.copy(), params);
}
return cf;
}
public static class ParsedInsert extends ModificationStatement.Parsed {
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
* A parsed <code>INSERT</code> statement.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
* @param ifNotExists whether the insert is guarded by IF NOT EXISTS
*/
public ParsedInsert(CFName name, Attributes.Raw attrs, List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues, boolean ifNotExists) {
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
// Validates names/values and records each column as either a key value or a
// SET-value operation on the resulting UpdateStatement.
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++) {
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// Reject the same column appearing twice in the column list.
for (int j = 0; j < i; j++) if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch(name.kind) {
case KEY_ALIAS:
case COLUMN_ALIAS:
// Primary-key component: stored as a key value, not an operation.
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
// Regular column: an INSERT value is equivalent to SET col = value.
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
public static class ParsedUpdate extends ModificationStatement.Parsed {
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
private final List<Relation> whereClause;
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param attrs additional attributes for statement (timestamp, timeToLive)
* @param updates a map of column operations to perform
* @param whereClause the where clause
* @param conditions IF conditions for a conditional update (may be null)
*/
public ParsedUpdate(CFName name, Attributes.Raw attrs, List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates, List<Relation> whereClause, List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions) {
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
// Resolves every SET assignment (rejecting writes to PRIMARY KEY parts) and
// processes the WHERE clause.
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates) {
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch(name.kind) {
case KEY_ALIAS:
case COLUMN_ALIAS:
// The primary key is addressed by the WHERE clause, never by SET.
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement {
// Provided for an UPDATE
// Provided for an INSERT
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param operations a map of column operations to perform
* @param whereClause the where clause
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/**
* Creates a new UpdateStatement from a column family name, a consistency level,
* key, and lists of column names and values. It is intended for use with the
* alternate update format, <code>INSERT</code>.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/** {@inheritDoc} */
// Returns the first empty component or null if none are
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
/**
 * Builds the mutation for a single partition key: validates the key, applies
 * every column operation into a fresh RowMutation's ColumnFamily, and wraps the
 * result in a CounterMutation for counter statements.
 */
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl) throws InvalidRequestException {
    validateKey(key);
    QueryProcessor.validateKey(key);
    RowMutation mutation = new RowMutation(cfDef.cfm.ksName, key);
    ColumnFamily columns = mutation.addOrGet(cfDef.cfm);

    // Row marker for non-compact composite tables: keeps the row alive even when
    // no regular column is written (while
    // 'DELETE FROM t WHERE k = 1' does remove the row entirely)
    boolean needsRowMarker = cfDef.isComposite && !cfDef.isCompact;
    if (needsRowMarker) {
        ByteBuffer markerName = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
        columns.addColumn(params.makeColumn(markerName, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    }

    if (!cfDef.isCompact) {
        for (Operation op : processedColumns)
            op.execute(key, columns, builder.copy(), params);
    } else {
        if (builder.componentCount() == 0)
            throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
        if (cfDef.value != null) {
            // compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
            if (processedColumns.isEmpty())
                throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
            for (Operation op : processedColumns)
                op.execute(key, columns, builder.copy(), params);
        } else {
            // Only the PK was set, which must
            // have passed through validation
            assert processedColumns.isEmpty();
            setToEmptyOperation.execute(key, columns, builder.copy(), params);
        }
    }

    if (type == Type.COUNTER)
        return new CounterMutation(mutation, cl);
    return mutation;
}
// Private: instances are built only via the ParsedInsert/ParsedUpdate
// prepareInternal methods below.
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs) {
super(boundTerms, cfm, attrs);
}
/**
 * Always true: an UPDATE/INSERT addresses exactly one row, so every
 * clustering column must be bound in the WHERE clause / column list.
 */
protected boolean requireFullClusteringKey() {
return true;
}
/**
 * Computes the columns to write for one partition key and returns them; the
 * caller is responsible for wrapping the ColumnFamily into a mutation.
 */
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params) throws InvalidRequestException {
    CFDefinition cfDef = cfm.getCfDef();
    ColumnFamily result = UnsortedColumns.factory.create(cfm);

    // We never insert markers for Super CF as this would confuse the thrift side.
    boolean wantsRowMarker = cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper();
    if (wantsRowMarker) {
        ByteBuffer markerName = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
        result.addColumn(params.makeColumn(markerName, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    }

    List<Operation> ops = getOperations();
    if (!cfDef.isCompact) {
        for (Operation op : ops)
            op.execute(key, result, builder.copy(), params);
        return result;
    }

    if (builder.componentCount() == 0)
        throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));

    if (cfDef.value == null) {
        // Only the PK was set, which must
        // have passed through validation
        assert ops.isEmpty();
        setToEmptyOperation.execute(key, result, builder.copy(), params);
    } else {
        // compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
        if (ops.isEmpty())
            throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
        for (Operation op : ops)
            op.execute(key, result, builder.copy(), params);
    }
    return result;
}
/**
 * A parsed <code>INSERT</code> statement: parallel lists of column names and
 * values, optionally guarded by IF NOT EXISTS.
 */
public static class ParsedInsert extends ModificationStatement.Parsed {
    private final List<ColumnIdentifier> columnNames;
    private final List<Term.Raw> columnValues;

    /**
     * @param name         column family being operated on
     * @param attrs        additional attributes for statement (CL, timestamp, timeToLive)
     * @param columnNames  list of column names
     * @param columnValues list of column values (corresponds to names)
     * @param ifNotExists  whether the insert is guarded by IF NOT EXISTS
     */
    public ParsedInsert(CFName name, Attributes.Raw attrs, List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues, boolean ifNotExists) {
        super(name, attrs, null, ifNotExists);
        this.columnValues = columnValues;
        this.columnNames = columnNames;
    }

    /**
     * Validates the name/value lists and records each column either as a
     * primary-key value or as a SET-value operation.
     */
    protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException {
        UpdateStatement insert = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);

        // Created from an INSERT; counter tables only accept UPDATE.
        if (insert.isCounter())
            throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
        if (columnNames.size() != columnValues.size())
            throw new InvalidRequestException("Unmatched column names/values");
        if (columnNames.isEmpty())
            throw new InvalidRequestException("No columns provided to INSERT");

        for (int i = 0; i < columnNames.size(); i++) {
            CFDefinition.Name column = cfDef.get(columnNames.get(i));
            if (column == null)
                throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
            // A column may be named at most once in the INSERT column list.
            if (columnNames.subList(0, i).contains(column.name))
                throw new InvalidRequestException(String.format("Multiple definitions found for column %s", column));

            Term.Raw value = columnValues.get(i);
            switch (column.kind) {
                case KEY_ALIAS:
                case COLUMN_ALIAS:
                    // Primary-key component: stored as a key value, not an operation.
                    Term term = value.prepare(column);
                    term.collectMarkerSpecification(boundNames);
                    insert.addKeyValue(column.name, term);
                    break;
                case VALUE_ALIAS:
                case COLUMN_METADATA:
                    // Regular column: an INSERT value is equivalent to SET col = value.
                    Operation op = new Operation.SetValue(value).prepare(column);
                    op.collectMarkerSpecification(boundNames);
                    insert.addOperation(op);
                    break;
            }
        }
        return insert;
    }
}
/**
 * Parsed form of a CQL3 UPDATE statement: a list of SET assignments plus a
 * WHERE clause, prepared into an UpdateStatement.
 */
public static class ParsedUpdate extends ModificationStatement.Parsed
{
    // Provided for an UPDATE
    private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
    private final List<Relation> whereClause;

    /**
     * Creates a new parsed UPDATE from a column family name, SET assignments,
     * and a WHERE clause.
     *
     * @param name column family being operated on
     * @param attrs additional attributes for statement (timestamp, timeToLive)
     * @param updates the column operations to perform
     * @param whereClause the where clause
     * @param conditions IF-clause conditions, or null when unconditional
     */
    public ParsedUpdate(CFName name, Attributes.Raw attrs, List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates, List<Relation> whereClause, List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
    {
        super(name, attrs, conditions, false);
        this.updates = updates;
        this.whereClause = whereClause;
    }

    protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
    {
        UpdateStatement prepared = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);

        for (Pair<ColumnIdentifier, Operation.RawUpdate> update : updates)
        {
            CFDefinition.Name name = cfDef.get(update.left);
            if (name == null)
                throw new InvalidRequestException(String.format("Unknown identifier %s", update.left));

            Operation op = update.right.prepare(name);
            op.collectMarkerSpecification(boundNames);

            // PRIMARY KEY parts may only appear in WHERE, never in SET.
            switch (name.kind)
            {
                case KEY_ALIAS:
                case COLUMN_ALIAS:
                    throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", update.left));
                case VALUE_ALIAS:
                case COLUMN_METADATA:
                    prepared.addOperation(op);
                    break;
            }
        }

        prepared.processWhereClause(whereClause, boundNames);
        return prepared;
    }
}
}
// ==== Variant label: "Safe" merge output ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement {
// Provided for an UPDATE
// Provided for an INSERT
// Pre-built operation writing an empty value; used by updateForKey() on
// COMPACT STORAGE tables with no compact value column, where the PK write
// itself must materialize the row.
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param operations a map of column operations to perform
* @param whereClause the where clause
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/**
* Creates a new UpdateStatement from a column family name, a consistency level,
* key, and lists of column names and values. It is intended for use with the
* alternate update format, <code>INSERT</code>.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/** {@inheritDoc} */
// Returns the first empty component or null if none are
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
<<<<<<< MINE
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
=======
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
>>>>>>> YOURS
// Reused by DeleteStatement
/**
 * Internal constructor (reused by DeleteStatement, see comment above);
 * simply forwards to ModificationStatement.
 *
 * @param boundTerms number of bind markers (?) in the prepared statement
 * @param cfm metadata of the column family being updated
 * @param attrs statement attributes (timestamp, time-to-live)
 */
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
/**
 * Always true: an UPDATE/INSERT must bind every clustering-key component
 * (enforced by the ModificationStatement machinery that calls this).
 */
protected boolean requireFullClusteringKey()
{
return true;
}
/**
 * Parsed form of "INSERT INTO cf (names...) VALUES (values...)". Prepared
 * into an UpdateStatement, since CQL3 INSERT and UPDATE share one execution
 * path.
 */
public static class ParsedInsert extends ModificationStatement.Parsed {
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
 * A parsed <code>INSERT</code> statement.
 *
 * @param name column family being operated on
 * @param columnNames list of column names
 * @param columnValues list of column values (corresponds to names)
 * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
 * @param ifNotExists whether an IF NOT EXISTS condition was specified
 */
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// Reject the same column appearing twice in the INSERT column list.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
// PRIMARY KEY parts become key values; other columns become SetValue operations.
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
/**
 * Parsed form of a CQL3 UPDATE statement (SET assignments plus WHERE clause),
 * prepared into an UpdateStatement.
 */
public static class ParsedUpdate extends ModificationStatement.Parsed {
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
private final List<Relation> whereClause;
/**
 * Creates a new UpdateStatement from a column family name, columns map, consistency
 * level, and key term.
 *
 * @param name column family being operated on
 * @param attrs additional attributes for statement (timestamp, timeToLive)
 * @param updates a map of column operations to perform
 * @param whereClause the where clause
 * @param conditions IF-clause conditions, or null when unconditional
 */
public ParsedUpdate(CFName name,
Attributes.Raw attrs,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
List<Relation> whereClause,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
{
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
// PRIMARY KEY parts may only appear in WHERE, never in SET.
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement {
// Provided for an UPDATE
// Provided for an INSERT
// Pre-built operation writing an empty value; used by updateForKey() on
// COMPACT STORAGE tables with no compact value column, where the PK write
// itself must materialize the row.
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param operations a map of column operations to perform
* @param whereClause the where clause
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/**
* Creates a new UpdateStatement from a column family name, a consistency level,
* key, and lists of column names and values. It is intended for use with the
* alternate update format, <code>INSERT</code>.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
/** {@inheritDoc} */
// Returns the first empty component or null if none are
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
<<<<<<< MINE
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
=======
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
if (cfDef.isComposite && !cfDef.isCompact)
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert processedColumns.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (processedColumns.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation op : processedColumns)
op.execute(key, cf, builder.copy(), params);
}
return type == Type.COUNTER ? new CounterMutation(rm, cl) : rm;
}
>>>>>>> YOURS
// Reused by DeleteStatement
/**
 * Internal constructor (reused by DeleteStatement, see comment above);
 * simply forwards to ModificationStatement.
 *
 * @param boundTerms number of bind markers (?) in the prepared statement
 * @param cfm metadata of the column family being updated
 * @param attrs statement attributes (timestamp, time-to-live)
 */
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
/**
 * Always true: an UPDATE/INSERT must bind every clustering-key component
 * (enforced by the ModificationStatement machinery that calls this).
 */
protected boolean requireFullClusteringKey()
{
return true;
}
/**
 * Parsed form of "INSERT INTO cf (names...) VALUES (values...)". Prepared
 * into an UpdateStatement, since CQL3 INSERT and UPDATE share one execution
 * path.
 */
public static class ParsedInsert extends ModificationStatement.Parsed {
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
 * A parsed <code>INSERT</code> statement.
 *
 * @param name column family being operated on
 * @param columnNames list of column names
 * @param columnValues list of column values (corresponds to names)
 * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
 * @param ifNotExists whether an IF NOT EXISTS condition was specified
 */
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// Reject the same column appearing twice in the INSERT column list.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
// PRIMARY KEY parts become key values; other columns become SetValue operations.
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
/**
 * Parsed form of a CQL3 UPDATE statement (SET assignments plus WHERE clause),
 * prepared into an UpdateStatement.
 */
public static class ParsedUpdate extends ModificationStatement.Parsed {
// Provided for an UPDATE
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
private final List<Relation> whereClause;
/**
 * Creates a new UpdateStatement from a column family name, columns map, consistency
 * level, and key term.
 *
 * @param name column family being operated on
 * @param attrs additional attributes for statement (timestamp, timeToLive)
 * @param updates a map of column operations to perform
 * @param whereClause the where clause
 * @param conditions IF-clause conditions, or null when unconditional
 */
public ParsedUpdate(CFName name,
Attributes.Raw attrs,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
List<Relation> whereClause,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
{
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
// PRIMARY KEY parts may only appear in WHERE, never in SET.
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
// ==== Variant label: "Unstructured" merge output ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
// Pre-built operation writing an empty value; used by updateForKey() on
// COMPACT STORAGE tables with no compact value column, where the PK write
// itself must materialize the row.
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
/**
 * Internal constructor; simply forwards to ModificationStatement.
 *
 * @param boundTerms number of bind markers (?) in the prepared statement
 * @param cfm metadata of the column family being updated
 * @param attrs statement attributes (timestamp, time-to-live)
 */
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
/**
 * Always true: an UPDATE/INSERT must bind every clustering-key component
 * (enforced by the ModificationStatement machinery that calls this).
 */
protected boolean requireFullClusteringKey()
{
return true;
}
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
<<<<<<< MINE
CFDefinition.Name firstEmpty = null;
for (CFDefinition.Name name : cfDef.columns.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
{
firstEmpty = name;
if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
}
else if (firstEmpty != null)
{
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
}
else
{
assert values.size() == 1; // We only allow IN for row keys so far
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
builder.add(val);
}
}
return firstEmpty;
}
static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
throws InvalidRequestException
{
ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
for (CFDefinition.Name name : cfDef.keys.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
if (keyBuilder.remainingCount() == 1)
{
for (Term t : values)
{
ByteBuffer val = t.bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keys.add(keyBuilder.copy().add(val).build());
}
}
else
{
if (values.size() > 1)
throw new InvalidRequestException("IN is only supported on the last column of the partition key");
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keyBuilder.add(val);
}
}
return keys;
}
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
=======
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
>>>>>>> YOURS
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
/**
 * Parsed form of "INSERT INTO cf (names...) VALUES (values...)". Prepared
 * into an UpdateStatement, since CQL3 INSERT and UPDATE share one execution
 * path.
 */
public static class ParsedInsert extends ModificationStatement.Parsed
{
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
 * A parsed <code>INSERT</code> statement.
 *
 * @param name column family being operated on
 * @param columnNames list of column names
 * @param columnValues list of column values (corresponds to names)
 * @param attrs additional attributes for statement (CL, timestamp, timeToLive)
 * @param ifNotExists whether an IF NOT EXISTS condition was specified
 */
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// Reject the same column appearing twice in the INSERT column list.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
// PRIMARY KEY parts become key values; other columns become SetValue operations.
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
public static class ParsedUpdate extends ModificationStatement.Parsed
{
    // SET assignments provided by the UPDATE, in declaration order.
    private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
    // Raw WHERE relations; resolved against the schema in prepareInternal.
    private final List<Relation> whereClause;

    /**
     * Creates a new UpdateStatement from a column family name, columns map, consistency
     * level, and key term.
     *
     * @param name column family being operated on
     * @param attrs additional attributes for statement (timestamp, timeToLive)
     * @param updates a map of column operations to perform
     * @param whereClause the where clause
     * @param conditions IF conditions for a conditional update
     */
    public ParsedUpdate(CFName name,
                        Attributes.Raw attrs,
                        List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
                        List<Relation> whereClause,
                        List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
    {
        super(name, attrs, conditions, false);
        this.updates = updates;
        this.whereClause = whereClause;
    }

    /**
     * Resolves each SET assignment against the schema, rejecting assignments
     * to primary-key columns, then processes the WHERE clause.
     */
    protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
    {
        UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);

        for (Pair<ColumnIdentifier, Operation.RawUpdate> assignment : updates)
        {
            ColumnIdentifier target = assignment.left;
            CFDefinition.Name name = cfDef.get(target);
            if (name == null)
                throw new InvalidRequestException(String.format("Unknown identifier %s", target));

            Operation operation = assignment.right.prepare(name);
            operation.collectMarkerSpecification(boundNames);

            switch (name.kind)
            {
                case KEY_ALIAS:
                case COLUMN_ALIAS:
                    // PK columns are selected through WHERE, never assigned in SET.
                    throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", target));
                case VALUE_ALIAS:
                case COLUMN_METADATA:
                    stmt.addOperation(operation);
                    break;
            }
        }

        stmt.processWhereClause(whereClause, boundNames);
        return stmt;
    }
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.cql3.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Pair;
/**
* An <code>UPDATE</code> statement parsed from a CQL query statement.
*
*/
public class UpdateStatement extends ModificationStatement
{
// Shared setter that writes an empty value; used by updateForKey for COMPACT
// STORAGE tables with no value column outside the PK, so the row still exists.
private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
// Private: instances are built only via the ParsedInsert/ParsedUpdate
// factories below, which perform all schema validation first.
private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
{
super(boundTerms, cfm, attrs);
}
// UPDATE/INSERT always addresses a single CQL row, so every clustering
// column must be provided (unlike, say, a range DELETE).
protected boolean requireFullClusteringKey()
{
return true;
}
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
<<<<<<< MINE
CFDefinition.Name firstEmpty = null;
for (CFDefinition.Name name : cfDef.columns.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
{
firstEmpty = name;
if (requireAllComponent && cfDef.isComposite && !cfDef.isCompact)
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
}
else if (firstEmpty != null)
{
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s since %s is set", firstEmpty.name, name.name));
}
else
{
assert values.size() == 1; // We only allow IN for row keys so far
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", name));
builder.add(val);
}
}
return firstEmpty;
}
static List<ByteBuffer> buildKeyNames(CFDefinition cfDef, Map<ColumnIdentifier, List<Term>> processed, List<ByteBuffer> variables)
throws InvalidRequestException
{
ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
for (CFDefinition.Name name : cfDef.keys.values())
{
List<Term> values = processed.get(name.name);
if (values == null || values.isEmpty())
throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", name));
if (keyBuilder.remainingCount() == 1)
{
for (Term t : values)
{
ByteBuffer val = t.bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keys.add(keyBuilder.copy().add(val).build());
}
}
else
{
if (values.size() > 1)
throw new InvalidRequestException("IN is only supported on the last column of the partition key");
ByteBuffer val = values.get(0).bindAndGet(variables);
if (val == null)
throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name));
keyBuilder.add(val);
}
}
return keys;
}
/**
* Compute a row mutation for a single key
*
* @return row mutation
*
* @throws InvalidRequestException on the wrong request
*/
private IMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params, ConsistencyLevel cl)
throws InvalidRequestException
{
validateKey(key);
QueryProcessor.validateKey(key);
RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
ColumnFamily cf = rm.addOrGet(cfDef.cfm);
=======
CFDefinition cfDef = cfm.getCfDef();
ColumnFamily cf = UnsortedColumns.factory.create(cfm);
>>>>>>> YOURS
// Inserting the CQL row marker (see #4361)
// We always need to insert a marker, because of the following situation:
// CREATE TABLE t ( k int PRIMARY KEY, c text );
// INSERT INTO t(k, c) VALUES (1, 1)
// DELETE c FROM t WHERE k = 1;
// SELECT * FROM t;
// The last query should return one row (but with c == null). Adding
// the marker with the insert make sure the semantic is correct (while making sure a
// 'DELETE FROM t WHERE k = 1' does remove the row entirely)
//
// We never insert markers for Super CF as this would confuse the thrift side.
if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
{
ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
}
List<Operation> updates = getOperations();
if (cfDef.isCompact)
{
if (builder.componentCount() == 0)
throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));
if (cfDef.value == null)
{
// compact + no compact value implies there is no column outside the PK. So no operation could
// have passed through validation
assert updates.isEmpty();
setToEmptyOperation.execute(key, cf, builder.copy(), params);
}
else
{
// compact means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
if (updates.isEmpty())
throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
}
else
{
for (Operation update : updates)
update.execute(key, cf, builder.copy(), params);
}
return cf;
}
public static class ParsedInsert extends ModificationStatement.Parsed
{
// Column identifiers and their positionally-matched values from the INSERT.
private final List<ColumnIdentifier> columnNames;
private final List<Term.Raw> columnValues;
/**
* A parsed <code>INSERT</code> statement.
*
* @param name column family being operated on
* @param columnNames list of column names
* @param columnValues list of column values (corresponds to names)
* @param attrs additional attributes for statement (CL, timestamp, timeToLive)
*/
public ParsedInsert(CFName name,
Attributes.Raw attrs,
List<ColumnIdentifier> columnNames, List<Term.Raw> columnValues,
boolean ifNotExists)
{
super(name, attrs, null, ifNotExists);
this.columnNames = columnNames;
this.columnValues = columnValues;
}
// Validates the parsed column/value lists against the schema and builds the
// executable UpdateStatement: PK parts become key values, everything else a
// SetValue operation.
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
// Created from an INSERT
if (stmt.isCounter())
throw new InvalidRequestException("INSERT statement are not allowed on counter tables, use UPDATE instead");
if (columnNames.size() != columnValues.size())
throw new InvalidRequestException("Unmatched column names/values");
if (columnNames.isEmpty())
throw new InvalidRequestException("No columns provided to INSERT");
for (int i = 0; i < columnNames.size(); i++)
{
CFDefinition.Name name = cfDef.get(columnNames.get(i));
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));
// Quadratic duplicate check; fine for the small column lists of one statement.
for (int j = 0; j < i; j++)
if (name.name.equals(columnNames.get(j)))
throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));
Term.Raw value = columnValues.get(i);
switch (name.kind)
{
case KEY_ALIAS:
case COLUMN_ALIAS:
Term t = value.prepare(name);
t.collectMarkerSpecification(boundNames);
stmt.addKeyValue(name.name, t);
break;
case VALUE_ALIAS:
case COLUMN_METADATA:
Operation operation = new Operation.SetValue(value).prepare(name);
operation.collectMarkerSpecification(boundNames);
stmt.addOperation(operation);
break;
}
}
return stmt;
}
}
public static class ParsedUpdate extends ModificationStatement.Parsed
{
// Provided for an UPDATE
// SET assignments in declaration order.
private final List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates;
// Raw WHERE relations; resolved in prepareInternal.
private final List<Relation> whereClause;
/**
* Creates a new UpdateStatement from a column family name, columns map, consistency
* level, and key term.
*
* @param name column family being operated on
* @param attrs additional attributes for statement (timestamp, timeToLive)
* @param updates a map of column operations to perform
* @param whereClause the where clause
* @param conditions IF conditions for a conditional update
*/
public ParsedUpdate(CFName name,
Attributes.Raw attrs,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> updates,
List<Relation> whereClause,
List<Pair<ColumnIdentifier, Operation.RawUpdate>> conditions)
{
super(name, attrs, conditions, false);
this.updates = updates;
this.whereClause = whereClause;
}
// Resolves each SET assignment against the schema (rejecting assignments to
// primary-key columns), then processes the WHERE clause.
protected ModificationStatement prepareInternal(CFDefinition cfDef, ColumnSpecification[] boundNames, Attributes attrs) throws InvalidRequestException
{
UpdateStatement stmt = new UpdateStatement(getBoundsTerms(), cfDef.cfm, attrs);
for (Pair<ColumnIdentifier, Operation.RawUpdate> entry : updates)
{
CFDefinition.Name name = cfDef.get(entry.left);
if (name == null)
throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
Operation operation = entry.right.prepare(name);
operation.collectMarkerSpecification(boundNames);
switch (name.kind)
{
// PK columns are selected through WHERE, never assigned in SET.
case KEY_ALIAS:
case COLUMN_ALIAS:
throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
case VALUE_ALIAS:
case COLUMN_METADATA:
stmt.addOperation(operation);
break;
}
}
stmt.processWhereClause(whereClause, boundNames);
return stmt;
}
}
}
Diff Result
No diff
Case 15 - java_cassandra.rev_95cf9_ce642.SecondaryIndexManager.java
Base
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
// No-op updater handed out when the CFS has no indexes; callers can drive the
// update path unconditionally instead of null-checking.
public static final Updater nullUpdater = new Updater()
{
public void insert(IColumn column) { }
public void update(IColumn oldColumn, IColumn column) { }
public void remove(IColumn current) { }
public void updateRowLevelIndexes() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// Identity set: per-row indexes share one instance across many columns, so
// dedupe by reference to reload each physical index exactly once.
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/**
* @return the names of all indexes registered on this CFS.
*/
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
// NOTE(review): interrupt status is not restored before throwing — confirm
// the "should never happen" assumption is intended here.
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
// Make the freshly built index data durable before reporting completion.
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
/** @return true if any index in {@code indexes} applies to column {@code name}. */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return indexFor(name, indexes) != null;
}
/** @return the first index in {@code indexes} that applies to {@code name}, or null. */
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
return index;
}
return null;
}
public boolean indexes(IColumn column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public SecondaryIndex indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes an existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, remove it from the row-level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
// NOTE(review): this logs on the reuse path (existing row-level index gains
// a column), not on creation of a new index — message looks misplaced; confirm.
logger.info("Creating new index : {}",cdef);
}
}
else
{
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
// Linear-scan variant honouring SecondaryIndex.indexes() rather than an exact
// map-key match.
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
/**
* @return the indexed column name for the index named {@code idxName}
* @throws RuntimeException if no index with that name exists
*/
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// Lazily allocated: most tables have no row-level indexes.
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (IColumn column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (IColumn column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key)
{
return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
? nullUpdater
: new StandardUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/** @return the indexes whose names appear in {@code idxNames}. */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/** @return whether {@code column} passes its index's validation; true if unindexed. */
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name);
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(IColumn column);
/** called when updating the index from a memtable */
public void update(IColumn oldColumn, IColumn column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(IColumn current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
// Default Updater: routes each column mutation to its per-column index, and
// triggers row-level indexes once after memtable updates complete.
private class StandardUpdater implements Updater
{
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
// Tombstones are not inserted into per-column indexes.
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
// Remove the stale entry first; re-insert only if the new column is live.
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
/**
 * Manages all the indexes associated with a given CFS
 * Different types of indexes can be created across the same CF
 */
public class SecondaryIndexManager
{
    private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);

    /** No-op updater used when the base CF has no indexes to maintain. */
    public static final Updater nullUpdater = new Updater()
    {
        public void insert(IColumn column) { }
        public void update(IColumn oldColumn, IColumn column) { }
        public void remove(IColumn current) { }
        public void updateRowLevelIndexes() {}
    };

    /**
     * Organizes the indexes by column name
     */
    private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;

    /**
     * Keeps a single instance of a SecondaryIndex for many columns when the index type
     * has isRowLevelIndex() == true
     *
     * This allows updates to happen to an entire row at once
     */
    private final Map<Class<? extends SecondaryIndex>, SecondaryIndex> rowLevelIndexMap;

    /**
     * The underlying column family containing the source data for these indexes
     */
    public final ColumnFamilyStore baseCfs;

    public SecondaryIndexManager(ColumnFamilyStore baseCfs)
    {
        indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
        rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
        this.baseCfs = baseCfs;
    }

    /**
     * Drops and adds new indexes associated with the underlying CF
     */
    public void reload()
    {
        // figure out what needs to be added and dropped.
        // future: if/when we have modifiable settings for secondary indexes,
        // they'll need to be handled here.
        Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
        for (ByteBuffer indexedColumn : indexedColumnNames)
        {
            ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
            if (def == null || def.getIndexType() == null)
                removeIndexedColumn(indexedColumn);
        }
        for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
            if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
                addIndexedColumn(cdef);
        // reload each underlying index exactly once; identity set because row-level
        // indexes share one instance across many columns
        Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (reloadedIndexes.add(index))
                index.reload();
        }
    }

    /** @return the names of every registered index */
    public Set<String> allIndexesNames()
    {
        Set<String> names = new HashSet<String>();
        for (SecondaryIndex index : indexesByColumn.values())
            names.add(index.getIndexName());
        return names;
    }

    /**
     * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
     * Does nothing if columns is empty.
     *
     * Caller must acquire and release references to the sstables used here.
     *
     * @param sstables the data to build from
     * @param idxNames the list of columns to index, ordered by comparator
     */
    public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
    {
        if (idxNames.isEmpty())
            return;
        logger.info(String.format("Submitting index build of %s for data in %s",
                                  idxNames, StringUtils.join(sstables, ", ")));
        SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
        Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
        try
        {
            future.get();
        }
        catch (InterruptedException e)
        {
            throw new AssertionError(e);
        }
        catch (ExecutionException e)
        {
            throw new RuntimeException(e);
        }
        flushIndexesBlocking();
        logger.info("Index build of " + idxNames + " complete");
    }

    /** @return true if one of the given indexes covers {@code name} */
    public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
    {
        return indexFor(name, indexes) != null;
    }

    /** @return the first of the given indexes covering {@code name}, or null if none does */
    public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
    {
        for (SecondaryIndex index : indexes)
        {
            if (index.indexes(name))
                return index;
        }
        return null;
    }

    /** @return true if any registered index covers the column */
    public boolean indexes(IColumn column)
    {
        return indexes(column.name());
    }

    /** @return true if any registered index covers the column name */
    public boolean indexes(ByteBuffer name)
    {
        return indexes(name, indexesByColumn.values());
    }

    /** @return the registered index covering {@code name}, or null */
    public SecondaryIndex indexFor(ByteBuffer name)
    {
        return indexFor(name, indexesByColumn.values());
    }

    /**
     * @return true if the indexes can handle the clause.
     */
    public boolean hasIndexFor(List<IndexExpression> clause)
    {
        if (clause == null || clause.isEmpty())
            return false;
        // It doesn't seem a clause can have multiple searchers, but since
        // getIndexSearchersForQuery returns a list ...
        List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
        if (searchers.isEmpty())
            return false;
        for (SecondaryIndexSearcher searcher : searchers)
            if (!searcher.isIndexing(clause))
                return false;
        return true;
    }

    /**
     * Removes a existing index
     * @param column the indexed column to remove
     */
    public void removeIndexedColumn(ByteBuffer column)
    {
        SecondaryIndex index = indexesByColumn.remove(column);
        if (index == null)
            return;
        // Remove this column from from row level index map
        if (index instanceof PerRowSecondaryIndex)
        {
            index.removeColumnDef(column);
            //If now columns left on this CF remove from row level lookup
            if (index.getColumnDefs().isEmpty())
                rowLevelIndexMap.remove(index.getClass());
        }
        index.removeIndex(column);
        SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
    }

    /**
     * Adds and builds a index for a column
     * @param cdef the column definition holding the index data
     * @return a future which the caller can optionally block on signaling the index is built
     */
    public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
    {
        if (indexesByColumn.containsKey(cdef.name))
            return null;
        assert cdef.getIndexType() != null;
        SecondaryIndex index;
        try
        {
            index = SecondaryIndex.createInstance(baseCfs, cdef);
        }
        catch (ConfigurationException e)
        {
            throw new RuntimeException(e);
        }
        // Keep a single instance of the index per-cf for row level indexes
        // since we want all columns to be under the index
        if (index instanceof PerRowSecondaryIndex)
        {
            SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
            if (currentIndex == null)
            {
                rowLevelIndexMap.put(index.getClass(), index);
                index.init();
            }
            else
            {
                index = currentIndex;
                index.addColumnDef(cdef);
                logger.info("Creating new index : {}",cdef);
            }
        }
        else
        {
            index.init();
        }
        // link in indexedColumns. this means that writes will add new data to
        // the index immediately,
        // so we don't have to lock everything while we do the build. it's up to
        // the operator to wait
        // until the index is actually built before using in queries.
        indexesByColumn.put(cdef.name, index);
        // if we're just linking in the index to indexedColumns on an
        // already-built index post-restart, we're done
        if (index.isIndexBuilt(cdef.name))
            return null;
        return index.buildIndexAsync();
    }

    /**
     *
     * @param column the name of indexes column
     * @return the index
     */
    public SecondaryIndex getIndexForColumn(ByteBuffer column)
    {
        return indexesByColumn.get(column);
    }

    // Like getIndexForColumn but delegates the match to SecondaryIndex.indexes()
    // instead of exact-key lookup.
    private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
    {
        for (SecondaryIndex index : indexesByColumn.values())
            if (index.indexes(column))
                return index;
        return null;
    }

    /**
     * Remove the index
     */
    public void invalidate()
    {
        for (SecondaryIndex index : indexesByColumn.values())
            index.invalidate();
    }

    /**
     * Flush all indexes to disk
     */
    public void flushIndexesBlocking()
    {
        for (SecondaryIndex index : indexesByColumn.values())
            index.forceBlockingFlush();
    }

    /**
     * @return all built indexes (ready to use)
     */
    public List<String> getBuiltIndexes()
    {
        List<String> indexList = new ArrayList<String>();
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
        {
            SecondaryIndex index = entry.getValue();
            if (index.isIndexBuilt(entry.getKey()))
            {
                indexList.add(entry.getValue().getIndexName());
            }
        }
        return indexList;
    }

    /**
     * @return the column name indexed under the given index name
     * @throws RuntimeException if no index with that name is registered
     */
    public ByteBuffer getColumnByIdxName(String idxName)
    {
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
        {
            if (entry.getValue().getIndexName().equals(idxName))
                return entry.getKey();
        }
        throw new RuntimeException("Unknown Index Name: " + idxName);
    }

    /**
     * @return all CFS from indexes which use a backing CFS internally (KEYS)
     */
    public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
    {
        ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
        for (SecondaryIndex index: indexesByColumn.values())
        {
            ColumnFamilyStore cfs = index.getIndexCfs();
            if (cfs != null)
                cfsList.add(cfs);
        }
        return cfsList;
    }

    /**
     * @return all indexes which do *not* use a backing CFS internally
     */
    public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
    {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index: indexesByColumn.values())
            if (index.getIndexCfs() == null)
                indexes.add(index);
        return indexes;
    }

    /**
     * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
     */
    public Collection<SecondaryIndex> getIndexes()
    {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        indexes.addAll(indexesByColumn.values());
        return indexes;
    }

    /**
     * @return total current ram size of all indexes
     */
    public long getTotalLiveSize()
    {
        long total = 0;
        for (SecondaryIndex index : getIndexes())
            total += index.getLiveSize();
        return total;
    }

    /**
     * When building an index against existing data, add the given row to the index
     *
     * @param key the row key
     * @param cf the current rows data
     */
    public void indexRow(ByteBuffer key, ColumnFamily cf)
    {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (index instanceof PerRowSecondaryIndex)
            {
                if (appliedRowLevelIndexes == null)
                    appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (appliedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex)index).index(key, cf);
            }
            else
            {
                for (IColumn column : cf)
                {
                    if (index.indexes(column.name()))
                        ((PerColumnSecondaryIndex) index).insert(key, column);
                }
            }
        }
    }

    /**
     * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
     *
     * @param key the row key
     * @param indexedColumnsInRow all column names in row
     */
    public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
    {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
        for (IColumn column : indexedColumnsInRow)
        {
            SecondaryIndex index = indexesByColumn.get(column.name());
            if (index == null)
                continue;
            if (index instanceof PerRowSecondaryIndex)
            {
                if (cleanedRowLevelIndexes == null)
                    cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (cleanedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex)index).delete(key);
            }
            else
            {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }
    }

    /**
     * This helper acts as a closure around the indexManager
     * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
     * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
     * fully, other types simply ignore the index updater.
     */
    public Updater updaterFor(final DecoratedKey key)
    {
        return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
               ? nullUpdater
               : new StandardUpdater(key);
    }

    /**
     * Get a list of IndexSearchers from the union of expression index types
     * @param clause the query clause
     * @return the searchers needed to query the index
     */
    private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
    {
        Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
        //Group columns by type
        for (IndexExpression ix : clause)
        {
            SecondaryIndex index = getIndexForColumn(ix.column_name);
            if (index == null)
                continue;
            Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
            if (columns == null)
            {
                columns = new HashSet<ByteBuffer>();
                groupByIndexType.put(index.getClass().getCanonicalName(), columns);
            }
            columns.add(ix.column_name);
        }
        List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
        //create searcher per type
        for (Set<ByteBuffer> column : groupByIndexType.values())
            indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
        return indexSearchers;
    }

    /**
     * Performs a search across a number of column indexes
     * TODO: add support for querying across index types
     *
     * @param clause the index query clause
     * @param range the row range to restrict to
     * @param dataFilter the column range to restrict to
     * @return found indexed rows
     */
    public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
    {
        List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
        if (indexSearchers.isEmpty())
            return Collections.emptyList();
        //We currently don't support searching across multiple index types
        if (indexSearchers.size() > 1)
            throw new RuntimeException("Unable to search across multiple secondary index types");
        return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
    }

    /** @return the registered indexes whose names appear in {@code idxNames} */
    public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
    {
        List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
        for (SecondaryIndex index : indexesByColumn.values())
        {
            if (idxNames.contains(index.getIndexName()))
                result.add(index);
        }
        return result;
    }

    /** Marks the named indexes as built in the system table. */
    public void setIndexBuilt(Set<String> idxNames)
    {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexBuilt();
    }

    /** Marks the named indexes as removed in the system table. */
    public void setIndexRemoved(Set<String> idxNames)
    {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexRemoved();
    }

    /** @return true if the column's value is acceptable to its index (or the column is unindexed) */
    public boolean validate(Column column)
    {
        SecondaryIndex index = getIndexForColumn(column.name);
        return index != null ? index.validate(column) : true;
    }

    public static interface Updater
    {
        /** called when constructing the index against pre-existing data */
        public void insert(IColumn column);

        /** called when updating the index from a memtable */
        public void update(IColumn oldColumn, IColumn column);

        /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
        public void remove(IColumn current);

        /** called after memtable updates are complete (CASSANDRA-5397) */
        public void updateRowLevelIndexes();
    }

    /** Updater that applies per-column changes for a single row key as the memtable is modified. */
    private class StandardUpdater implements Updater
    {
        private final DecoratedKey key;

        public StandardUpdater(DecoratedKey key)
        {
            this.key = key;
        }

        public void insert(IColumn column)
        {
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }

        public void update(IColumn oldColumn, IColumn column)
        {
            // unchanged column: nothing to re-index
            if (oldColumn.equals(column))
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
            {
                // insert the new value before removing the old one, so we never have a period
                // where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            }
        }

        public void remove(IColumn column)
        {
            if (column.isMarkedForDelete())
                return;
            SecondaryIndex index = indexFor(column.name());
            if (index == null)
                return;
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }

        public void updateRowLevelIndexes()
        {
            for (SecondaryIndex index : rowLevelIndexMap.values())
                ((PerRowSecondaryIndex) index).index(key.key);
        }
    }
}
Left
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
// No-op updater used when the base CF has no indexes to maintain.
public static final Updater nullUpdater = new Updater()
{
public void insert(IColumn column) { }
public void update(IColumn oldColumn, IColumn column) { }
public void remove(IColumn current) { }
public void updateRowLevelIndexes() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// identity set so each shared row-level index instance is reloaded only once
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
try
{
future.get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
throw new RuntimeException(e);
}
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
/** @return true if one of the given indexes covers the column name */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return indexFor(name, indexes) != null;
}
/** @return the first of the given indexes covering the column name, or null if none does */
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
return index;
}
return null;
}
/** @return true if any registered index covers the column */
public boolean indexes(IColumn column)
{
return indexes(column.name());
}
/** @return true if any registered index covers the column name */
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
/** @return the registered index covering the column name, or null */
public SecondaryIndex indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
// reuse the existing row-level instance and just register the new column on it
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
// Like getIndexForColumn but delegates the match to SecondaryIndex.indexes()
// rather than an exact-key map lookup.
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
for (SecondaryIndex index : indexesByColumn.values())
if (index.indexes(column))
return index;
return null;
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
/**
* @return the column name indexed under the given index name
* @throws RuntimeException if no index with that name is registered
*/
public ByteBuffer getColumnByIdxName(String idxName)
{
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
if (entry.getValue().getIndexName().equals(idxName))
return entry.getKey();
}
throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column index: add each covered column individually
for (IColumn column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (IColumn column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key)
{
return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
? nullUpdater
: new StandardUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/** @return the registered indexes whose names appear in idxNames */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
/** Marks the named indexes as built. */
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
/** Marks the named indexes as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/** @return true if the column's value is acceptable to its index (or the column is unindexed) */
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name);
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(IColumn column);
/** called when updating the index from a memtable */
public void update(IColumn oldColumn, IColumn column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(IColumn current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
// Applies per-column index maintenance for a single row key as its memtable CF is mutated.
private class StandardUpdater implements Updater
{
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
// unchanged column: nothing to re-index
if (oldColumn.equals(column))
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
// insert the new value before removing the old one, so we never have a period
// where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
}
}
public void remove(IColumn column)
{
if (column.isMarkedForDelete())
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.IndexExpression;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
// No-op updater handed out by updaterFor() when the CF has no indexes at all.
public static final Updater nullUpdater = new Updater()
{
    public void insert(IColumn column) { }
    public void update(IColumn oldColumn, IColumn column) { }
    public void remove(IColumn current) { }
    public void updateRowLevelIndexes() {}
};
/**
 * Organizes the indexes by column name
 */
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
 * Keeps a single instance of a SecondaryIndex for many columns when the index type
 * has isRowLevelIndex() == true
 *
 * This allows updates to happen to an entire row at once
 */
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
 * The underlying column family containing the source data for these indexes
 */
public final ColumnFamilyStore baseCfs;
/** Creates an (initially empty) index manager for the given base column family. */
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
    this.baseCfs = baseCfs;
    this.indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    this.rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
}
/**
 * Drops and adds new indexes associated with the underlying CF.
 * Index instances that survive the diff are asked to reload themselves.
 */
public void reload()
{
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    // NOTE: keySet() is a live view of the concurrent map, so removals below
    // are reflected by the contains() check further down.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    for (ByteBuffer indexedColumn : indexedColumnNames)
    {
        // drop indexes whose column definition disappeared or lost its index
        ColumnDefinition def = baseCfs.metadata.getColumn_metadata().get(indexedColumn);
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // add indexes for newly indexed columns
    for (ColumnDefinition cdef : baseCfs.metadata.getColumn_metadata().values())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // reload each surviving index exactly once; identity set because row-level
    // indexes are registered under multiple columns
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>(indexesByColumn.size());
    for (SecondaryIndex idx : indexesByColumn.values())
        names.add(idx.getIndexName());
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;
    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    try
    {
        future.get();
    }
    catch (InterruptedException e)
    {
        // An interrupt here is unexpected, hence the AssertionError; restore the
        // interrupt status first so code further up the stack can still observe it.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException(e);
    }
    // flush so the freshly built index is durable before declaring it complete
    flushIndexesBlocking();
    logger.info("Index build of " + idxNames + " complete");
}
/** @return true if any of the given indexes applies to the named column */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    return indexFor(name, indexes) != null;
}
/** @return the first of the given indexes that applies to the named column, or null */
public SecondaryIndex indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    for (SecondaryIndex candidate : indexes)
        if (candidate.indexes(name))
            return candidate;
    return null;
}
/** @return true if any registered index applies to the given column */
public boolean indexes(IColumn column)
{
    return indexes(column.name());
}
/** @return true if any registered index applies to the named column */
public boolean indexes(ByteBuffer name)
{
    return indexes(name, indexesByColumn.values());
}
/** @return the first registered index that applies to the named column, or null */
public SecondaryIndex indexFor(ByteBuffer name)
{
    return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the registered indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
    if (clause == null || clause.isEmpty())
        return false;
    // It doesn't seem a clause can have multiple searchers, but since
    // getIndexSearchersForQuery returns a list ...
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    Iterator<SecondaryIndexSearcher> iter = searchers.iterator();
    while (iter.hasNext())
        if (!iter.next().isIndexing(clause))
            return false;
    return true;
}
/**
 * Removes an existing index.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
    SecondaryIndex index = indexesByColumn.remove(column);
    if (index == null)
        return;
    // Remove this column from the row level index map
    if (index instanceof PerRowSecondaryIndex)
    {
        index.removeColumnDef(column);
        // If no columns are left on this CF, remove from row level lookup
        if (index.getColumnDefs().isEmpty())
            rowLevelIndexMap.remove(index.getClass());
    }
    // drop the index's state, then clear its "built" marker in the system table
    index.removeIndex(column);
    SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds a index for a column
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null when the column is already indexed or the index is already built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        }
        else
        {
            // reuse the existing instance and just register the extra column on it
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Creating new index : {}",cdef);
        }
    }
    else
    {
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately,
    // so we don't have to lock everything while we do the build. it's up to
    // the operator to wait
    // until the index is actually built before using in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index registered for exactly this column name, or null if none
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
    return indexesByColumn.get(column);
}
/**
 * Finds the first index whose indexes() predicate matches the given full
 * column name; identical behavior to indexFor(ByteBuffer), so just delegate.
 */
private SecondaryIndex getIndexForFullColumnName(ByteBuffer column)
{
    return indexFor(column, indexesByColumn.values());
}
/**
 * Invalidates every registered index (the manager's CF is going away).
 */
public void invalidate()
{
    for (SecondaryIndex idx : indexesByColumn.values())
        idx.invalidate();
}
/**
 * Flushes every registered index to disk, blocking until complete.
 */
public void flushIndexesBlocking()
{
    for (SecondaryIndex idx : indexesByColumn.values())
        idx.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex idx = entry.getValue();
        if (idx.isIndexBuilt(entry.getKey()))
            built.add(idx.getIndexName());
    }
    return built;
}
/**
 * Maps an index name back to the column it indexes.
 *
 * @throws RuntimeException if no registered index has the given name
 */
public ByteBuffer getColumnByIdxName(String idxName)
{
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        if (entry.getValue().getIndexName().equals(idxName))
            return entry.getKey();
    }
    throw new RuntimeException("Unknown Index Name: " + idxName);
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    ArrayList<ColumnFamilyStore> stores = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore backing = idx.getIndexCfs();
        if (backing != null)
            stores.add(backing);
    }
    return stores;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity set: per-row indexes register the same instance under many columns
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idx.getIndexCfs() == null)
            unique.add(idx);
    }
    return unique;
}
/**
 * @return every distinct index instance, whether or not it is CFS-backed
 */
public Collection<SecondaryIndex> getIndexes()
{
    // identity set: per-row indexes appear under multiple columns but are one instance
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    unique.addAll(indexesByColumn.values());
    return unique;
}
/**
 * @return total current ram size of all distinct indexes
 */
public long getTotalLiveSize()
{
    long sum = 0;
    for (SecondaryIndex idx : getIndexes())
        sum += idx.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Update entire row only once per row level index class
    // (set is lazily allocated since most CFs have no row-level indexes)
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            // add() returns false for a class that was already applied
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column indexes see each matching column individually
            for (IColumn column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<IColumn> indexedColumnsInRow)
{
    // Update entire row only once per row level index class
    // (set is lazily allocated since most CFs have no row-level indexes)
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (IColumn column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            // add() returns false for a class that was already cleaned
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager and row key so that,
 * down in Memtable's ColumnFamily implementation, the index can get updated.
 * Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully; other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key)
{
    if (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types,
 * one searcher per distinct index implementation class referenced by the clause.
 *
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // group the clause's columns by the canonical class name of their index
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String type = index.getClass().getCanonicalName();
        Set<ByteBuffer> columns = columnsByIndexType.get(type);
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            columnsByIndexType.put(type, columns);
        }
        columns.add(expression.column_name);
    }
    // create one searcher per index type, built by any index of that type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // searching across multiple index types is not supported yet
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/** @return every registered index whose name appears in idxNames */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex idx : indexesByColumn.values())
        if (idxNames.contains(idx.getIndexName()))
            matches.add(idx);
    return matches;
}
/** Marks each named index as built in the system table. */
public void setIndexBuilt(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexBuilt();
}
/** Marks each named index as removed in the system table. */
public void setIndexRemoved(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexRemoved();
}
/** @return true when the column is not indexed, or its index accepts the value */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name);
    return index == null || index.validate(column);
}
/**
 * Callback through which write paths notify the indexes of column changes.
 * Implementations receive the raw column objects; row-level bookkeeping is
 * finalized via updateRowLevelIndexes().
 */
public static interface Updater
{
    /** called when constructing the index against pre-existing data */
    public void insert(IColumn column);
    /** called when updating the index from a memtable */
    public void update(IColumn oldColumn, IColumn column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(IColumn current);
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void updateRowLevelIndexes();
}
// Default Updater: routes per-column changes to the matching PerColumnSecondaryIndex
// and, once memtable updates complete, re-indexes the row in every row-level index.
private class StandardUpdater implements Updater
{
    // row key all the column changes apply to
    private final DecoratedKey key;
    public StandardUpdater(DecoratedKey key)
    {
        this.key = key;
    }
    public void insert(IColumn column)
    {
        // tombstones carry no indexable value
        if (column.isMarkedForDelete())
            return;
        SecondaryIndex index = indexFor(column.name());
        if (index == null)
            return;
        // row-level indexes are handled in updateRowLevelIndexes() instead
        if (index instanceof PerColumnSecondaryIndex)
            ((PerColumnSecondaryIndex) index).insert(key.key, column);
    }
    public void update(IColumn oldColumn, IColumn column)
    {
        // nothing to do when the value did not change
        if (oldColumn.equals(column))
            return;
        SecondaryIndex index = indexFor(column.name());
        if (index == null)
            return;
        if (index instanceof PerColumnSecondaryIndex)
        {
            // insert the new value before removing the old one, so we never have a period
            // where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
            if (!column.isMarkedForDelete())
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
            ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
        }
    }
    public void remove(IColumn column)
    {
        // deleted columns are not present in the index
        if (column.isMarkedForDelete())
            return;
        SecondaryIndex index = indexFor(column.name());
        if (index == null)
            return;
        if (index instanceof PerColumnSecondaryIndex)
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
    }
    public void updateRowLevelIndexes()
    {
        // every registered row-level index re-indexes the whole row once
        for (SecondaryIndex index : rowLevelIndexMap.values())
            ((PerRowSecondaryIndex) index).index(key.key);
    }
}
}
// ===== Right: revised version of SecondaryIndexManager (Column-based API) =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
// No-op updater handed out by updaterFor() when the CF has no indexes at all.
public static final Updater nullUpdater = new Updater()
{
    public void insert(Column column) { }
    public void update(Column oldColumn, Column column) { }
    public void remove(Column current) { }
    public void updateRowLevelIndexes() {}
};
/**
 * Organizes the indexes by column name
 */
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
 * Keeps a single instance of a SecondaryIndex for many columns when the index type
 * has isRowLevelIndex() == true
 *
 * This allows updates to happen to an entire row at once
 */
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
 * The underlying column family containing the source data for these indexes
 */
public final ColumnFamilyStore baseCfs;
/** Creates an (initially empty) index manager for the given base column family. */
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
    this.baseCfs = baseCfs;
    this.indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    this.rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
}
/**
 * Drops and adds new indexes associated with the underlying CF.
 * Index instances that survive the diff are asked to reload themselves.
 */
public void reload()
{
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    // NOTE: keySet() is a live view of the concurrent map, so removals below
    // are reflected by the contains() check further down.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    for (ByteBuffer indexedColumn : indexedColumnNames)
    {
        // drop indexes whose column definition disappeared or lost its index
        ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // TODO: allow all ColumnDefinition type
    for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // reload each surviving index exactly once; identity set because row-level
    // indexes are registered under multiple columns
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>(indexesByColumn.size());
    for (SecondaryIndex idx : indexesByColumn.values())
        names.add(idx.getIndexName());
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;
    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));
    SecondaryIndexBuilder indexBuilder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    FBUtilities.waitOnFuture(CompactionManager.instance.submitIndexBuild(indexBuilder));
    // flush so the freshly built index is durable before declaring it complete
    flushIndexesBlocking();
    logger.info("Index build of " + idxNames + " complete");
}
/** @return true if at least one of the given indexes applies to the named column */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    return !indexFor(name, indexes).isEmpty();
}
/** @return every index among the given ones that applies to the named column (possibly empty) */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // list is lazily allocated: the common case is no match
    List<SecondaryIndex> matching = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (matching == null)
            matching = new ArrayList<SecondaryIndex>();
        matching.add(candidate);
    }
    return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
/** @return true if any registered index applies to the given column */
public boolean indexes(Column column)
{
    return indexes(column.name());
}
/** @return true if any registered index applies to the named column */
public boolean indexes(ByteBuffer name)
{
    return indexes(name, indexesByColumn.values());
}
/** @return every registered index that applies to the named column */
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
    return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the registered indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
    if (clause == null || clause.isEmpty())
        return false;
    // It doesn't seem a clause can have multiple searchers, but since
    // getIndexSearchersForQuery returns a list ...
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    Iterator<SecondaryIndexSearcher> iter = searchers.iterator();
    while (iter.hasNext())
        if (!iter.next().isIndexing(clause))
            return false;
    return true;
}
/**
 * Removes an existing index.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
    SecondaryIndex index = indexesByColumn.remove(column);
    if (index == null)
        return;
    // Remove this column from the row level index map
    if (index instanceof PerRowSecondaryIndex)
    {
        index.removeColumnDef(column);
        // If no columns are left on this CF, remove from row level lookup
        if (index.getColumnDefs().isEmpty())
            rowLevelIndexMap.remove(index.getClass());
    }
    // drop the index's state, then clear its "built" marker in the system table
    index.removeIndex(column);
    SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds a index for a column
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null when the column is already indexed or the index is already built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        }
        else
        {
            // reuse the existing instance and just register the extra column on it
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Creating new index : {}",cdef);
        }
    }
    else
    {
        // TODO: We should do better than throw a RuntimeException
        if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
            throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately,
    // so we don't have to lock everything while we do the build. it's up to
    // the operator to wait
    // until the index is actually built before using in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index registered for exactly this column name, or null if none
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
    return indexesByColumn.get(column);
}
/**
 * Invalidates every registered index (the manager's CF is going away).
 */
public void invalidate()
{
    for (SecondaryIndex idx : indexesByColumn.values())
        idx.invalidate();
}
/**
 * Flushes every registered index to disk, blocking until complete.
 */
public void flushIndexesBlocking()
{
    for (SecondaryIndex idx : indexesByColumn.values())
        idx.forceBlockingFlush();
}
/**
 * @return the names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex idx = entry.getValue();
        if (idx.isIndexBuilt(entry.getKey()))
            built.add(idx.getIndexName());
    }
    return built;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    ArrayList<ColumnFamilyStore> stores = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore backing = idx.getIndexCfs();
        if (backing != null)
            stores.add(backing);
    }
    return stores;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity set: per-row indexes register the same instance under many columns
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idx.getIndexCfs() == null)
            unique.add(idx);
    }
    return unique;
}
/**
 * @return every distinct index instance, whether or not it is CFS-backed
 */
public Collection<SecondaryIndex> getIndexes()
{
    // identity set: per-row indexes appear under multiple columns but are one instance
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    unique.addAll(indexesByColumn.values());
    return unique;
}
/**
 * @return total current ram size of all distinct indexes
 */
public long getTotalLiveSize()
{
    long sum = 0;
    for (SecondaryIndex idx : getIndexes())
        sum += idx.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Update entire row only once per row level index class
    // (set is lazily allocated since most CFs have no row-level indexes)
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            // add() returns false for a class that was already applied
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column indexes see each matching column individually
            for (Column column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // Update entire row only once per row level index class
    // (set is lazily allocated since most CFs have no row-level indexes)
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            // add() returns false for a class that was already cleaned
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager and row key so that,
 * down in Memtable's ColumnFamily implementation, the index can get updated.
 * Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully; other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key)
{
    if (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types,
 * one searcher per distinct index implementation class referenced by the clause.
 *
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // group the clause's columns by the canonical class name of their index
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String type = index.getClass().getCanonicalName();
        Set<ByteBuffer> columns = columnsByIndexType.get(type);
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            columnsByIndexType.put(type, columns);
        }
        columns.add(expression.column_name);
    }
    // create one searcher per index type, built by any index of that type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // searching across multiple index types is not supported yet
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames the index names to look up
 * @return every registered index whose name appears in the given set
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex candidate : indexesByColumn.values())
    {
        if (!idxNames.contains(candidate.getIndexName()))
            continue;
        matches.add(candidate);
    }
    return matches;
}
/** Marks each index named in idxNames as built. */
public void setIndexBuilt(Set<String> idxNames)
{
    Collection<SecondaryIndex> targets = getIndexesByNames(idxNames);
    for (SecondaryIndex target : targets)
        target.setIndexBuilt();
}
/** Marks each index named in idxNames as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
    Collection<SecondaryIndex> targets = getIndexesByNames(idxNames);
    for (SecondaryIndex target : targets)
        target.setIndexRemoved();
}
/** @return false only if an index covers the column and rejects its value. */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    if (index == null)
        return true; // unindexed columns are trivially valid
    return index.validate(column);
}
/** Callback interface used to feed per-column changes into the indexes for one row. */
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(Column column);
/** called when updating the index from a memtable */
public void update(Column oldColumn, Column column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(Column current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
/**
 * Updater bound to a single row key. Routes per-column changes to the
 * PerColumnSecondaryIndex instances covering each column, and re-indexes the
 * row in every row-level index once memtable updates are complete.
 */
private class StandardUpdater implements Updater
{
// the row all callbacks apply to
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
// tombstones are not added to per-column indexes
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
// drop the stale entry, then re-add unless the new column is a tombstone
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
public void remove(Column column)
{
// a tombstone was never inserted, so there is nothing to remove
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
// row-level indexes see the whole row at once, after all column updates
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
public void remove(Column current) { }
public void updateRowLevelIndexes() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
/** @param baseCfs the column family store whose data these indexes cover */
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF
 */
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
// drop indexes whose column no longer exists or is no longer indexed
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// identity set: a shared row-level index instance appears once per covered
// column in indexesByColumn, but must be reloaded only once
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
/** @return the names of all registered indexes */
public Set<String> allIndexesNames()
{
    Set<String> collected = new HashSet<String>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        collected.add(idx.getIndexName());
    }
    return collected;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
// block until the build completes, then flush the freshly built index data
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
FBUtilities.waitOnFuture(future);
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
/** @return true if any index in {@code indexes} covers the given column name */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * @return the subset of {@code indexes} that cover the given column name;
 *         an immutable empty list when none match
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // Lazily allocated: most columns are not indexed, so avoid an
    // ArrayList allocation on the common miss path.
    List<SecondaryIndex> found = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (found == null)
            found = new ArrayList<SecondaryIndex>();
        found.add(candidate);
    }
    if (found == null)
        return Collections.<SecondaryIndex>emptyList();
    return found;
}
/** @return true if the column is covered by any registered index */
public boolean indexes(Column column)
{
return indexes(column.name());
}
/** @return true if the column name is covered by any registered index */
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
/** @return every registered index covering the given column name */
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
// every searcher must be able to serve the clause
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, drop the shared instance from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column.
 *
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null if the column is already indexed or the index is already built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
// first column for this row-level index type: register and initialize it
// (createInstance is assumed to have attached cdef already -- TODO confirm)
rowLevelIndexMap.put(index.getClass(), index);
index.init();
// BUGFIX: this creation log previously lived in the else branch below,
// where no new index is actually created
logger.info("Creating new index : {}", cdef);
}
else
{
// reuse the shared instance; just attach the additional column to it
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Adding column {} to existing row-level index", cdef);
}
}
else
{
// TODO: We should do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index for the column, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
/**
 * Remove the index (invalidates every registered index instance)
 */
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
 * Flush all indexes to disk.
 * Note: a row-level index instance shared by several columns appears once per
 * column in indexesByColumn and may therefore be flushed more than once.
 */
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
 * @return all built indexes (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex idx = entry.getValue();
        if (idx.isIndexBuilt(entry.getKey()))
            built.add(idx.getIndexName());
    }
    return built;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    List<ColumnFamilyStore> backed = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = idx.getIndexCfs();
        if (indexCfs != null)
            backed.add(indexCfs);
    }
    return backed;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity set: a per-row index instance is shared across many columns
    // and must appear only once in the result
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idx.getIndexCfs() == null)
            unique.add(idx);
    }
    return unique;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes()
{
    // identity set: a per-row index instance is shared across many columns
    // and must appear only once in the result
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
        unique.add(idx);
    return unique;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    long sum = 0L;
    for (SecondaryIndex idx : getIndexes())
    {
        sum += idx.getLiveSize();
    }
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocated: most CFs have no row-level indexes
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// per-column index: insert each column it covers
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
// lazily allocated: most CFs have no row-level indexes
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key)
{
    // With nothing indexed, hand back the shared no-op updater instead of allocating.
    boolean nothingToIndex = indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty();
    if (nothingToIndex)
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 * Expressions on columns that have no index are silently skipped.
 * @param clause the query clause
 * @return one searcher per distinct index implementation class involved
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
// Group columns by index implementation, keyed by canonical class name
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue; // unindexed column: this expression cannot be served by an index
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
// create searcher per type; any column of a group resolves to the same index class
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param maxResults maximum number of results to return
 * @param dataFilter the column range to restrict to
 * @param countCQL3Rows whether maxResults is counted in CQL3 rows
 * @return found indexed rows
 * @throws RuntimeException if the clause spans more than one index type
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames the index names to look up
 * @return every registered index whose name appears in the given set
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> selected = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex idx : indexesByColumn.values())
        if (idxNames.contains(idx.getIndexName()))
            selected.add(idx);
    return selected;
}
/** Marks each index named in idxNames as built. */
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
/** Marks each index named in idxNames as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/** @return false only if an index covers the column and rejects its value. */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    return index == null || index.validate(column);
}
/** Callback interface used to feed per-column changes into the indexes for one row. */
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(Column column);
/** called when updating the index from a memtable */
public void update(Column oldColumn, Column column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(Column current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
/**
 * Updater bound to a single row key. Routes per-column changes to the
 * PerColumnSecondaryIndex instances covering each column, and re-indexes the
 * row in every row-level index once memtable updates are complete.
 */
private class StandardUpdater implements Updater
{
// the row all callbacks apply to
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
// tombstones are not added to per-column indexes
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
// drop the stale entry, then re-add unless the new column is a tombstone
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
public void remove(Column column)
{
// a tombstone was never inserted, so there is nothing to remove
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
// row-level indexes see the whole row at once, after all column updates
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
// ---- MergeMethods: concatenation marker between file revisions (not Java code) ----
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
/** Shared no-op updater returned when the CF has no indexes to maintain. */
public static final Updater nullUpdater = new Updater() {
public void insert(Column column) {
}
public void update(Column oldColumn, Column column) {
}
public void remove(Column current) {
}
public void updateRowLevelIndexes() {
}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>, SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
/** @param baseCfs the column family store whose data these indexes cover */
public SecondaryIndexManager(ColumnFamilyStore baseCfs) {
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF
 */
public void reload() {
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames) {
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
// drop indexes whose column no longer exists or is no longer indexed
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns()) if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
// identity set so a shared row-level index instance is reloaded only once
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values()) {
if (reloadedIndexes.add(index))
index.reload();
}
}
/** @return the names of all registered indexes */
public Set<String> allIndexesNames() {
    Set<String> collected = new HashSet<String>();
    for (SecondaryIndex idx : indexesByColumn.values()) {
        collected.add(idx.getIndexName());
    }
    return collected;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames) {
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s", idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
// block until the build completes, then flush the freshly built index data
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
FBUtilities.waitOnFuture(future);
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
/** @return true if any index in {@code indexes} covers the given column name */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes) {
return !indexFor(name, indexes).isEmpty();
}
/**
 * @return the subset of {@code indexes} that cover the given column name;
 *         an immutable empty list when none match
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes) {
    // Lazily allocated to avoid a list allocation on the common miss path.
    List<SecondaryIndex> hits = null;
    for (SecondaryIndex candidate : indexes) {
        if (!candidate.indexes(name))
            continue;
        if (hits == null)
            hits = new ArrayList<SecondaryIndex>();
        hits.add(candidate);
    }
    return hits == null ? Collections.<SecondaryIndex>emptyList() : hits;
}
/** @return true if the column is covered by any registered index */
public boolean indexes(Column column) {
return indexes(column.name());
}
/** @return true if the column name is covered by any registered index */
public boolean indexes(ByteBuffer name) {
return indexes(name, indexesByColumn.values());
}
/** @return every registered index covering the given column name */
public List<SecondaryIndex> indexFor(ByteBuffer name) {
return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause) {
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
// every searcher must be able to serve the clause
for (SecondaryIndexSearcher searcher : searchers) if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
 * Removes an existing index
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column) {
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex) {
index.removeColumnDef(column);
// If no columns are left on this CF, drop the shared instance from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column.
 *
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null if the column is already indexed or the index is already built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef) {
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try {
index = SecondaryIndex.createInstance(baseCfs, cdef);
} catch (ConfigurationException e) {
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes,
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex) {
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null) {
// first column for this row-level index type: register and initialize it
// BUGFIX: creation log moved here from the reuse branch below
rowLevelIndexMap.put(index.getClass(), index);
index.init();
logger.info("Creating new index : {}", cdef);
} else {
// reuse the shared instance; just attach the additional column to it
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Adding column {} to existing row-level index", cdef);
}
} else {
// TODO: We should do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately, so we don't have to lock everything while we
// do the build. it's up to the operator to wait until the index is
// actually built before using it in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index for the column, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column) {
return indexesByColumn.get(column);
}
/**
 * Remove the index (invalidates every registered index instance)
 */
public void invalidate() {
for (SecondaryIndex index : indexesByColumn.values()) index.invalidate();
}
/**
 * Flush all indexes to disk.
 * Note: a row-level index instance shared by several columns appears once per
 * column in indexesByColumn and may therefore be flushed more than once.
 */
public void flushIndexesBlocking() {
for (SecondaryIndex index : indexesByColumn.values()) index.forceBlockingFlush();
}
/**
 * @return all built indexes (ready to use)
 */
public List<String> getBuiltIndexes() {
    List<String> ready = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> e : indexesByColumn.entrySet()) {
        SecondaryIndex idx = e.getValue();
        if (idx.isIndexBuilt(e.getKey()))
            ready.add(idx.getIndexName());
    }
    return ready;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs() {
    List<ColumnFamilyStore> backed = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values()) {
        ColumnFamilyStore indexCfs = idx.getIndexCfs();
        if (indexCfs != null)
            backed.add(indexCfs);
    }
    return backed;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs() {
    // identity set: a per-row index instance is shared across many columns
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values()) {
        if (idx.getIndexCfs() == null)
            unique.add(idx);
    }
    return unique;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes() {
    // identity set: a per-row index instance is shared across many columns
    Set<SecondaryIndex> unique = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
        unique.add(idx);
    return unique;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize() {
    long sum = 0L;
    for (SecondaryIndex idx : getIndexes()) {
        sum += idx.getLiveSize();
    }
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf) {
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values()) {
if (index instanceof PerRowSecondaryIndex) {
// lazily allocated: most CFs have no row-level indexes
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).index(key, cf);
} else {
// per-column index: insert each column it covers
for (Column column : cf) {
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow) {
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow) {
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex) {
// lazily allocated: most CFs have no row-level indexes
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex) index).delete(key);
} else {
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key) {
    // With nothing indexed, hand back the shared no-op updater instead of allocating.
    if (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 * Expressions on columns that have no index are silently skipped.
 * @param clause the query clause
 * @return one searcher per distinct index implementation class involved
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause) {
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
// Group columns by index implementation, keyed by canonical class name
for (IndexExpression ix : clause) {
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null) {
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
// create searcher per type; any column of a group resolves to the same index class
for (Set<ByteBuffer> column : groupByIndexType.values()) indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
 * Performs a search across a number of column indexes.
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows) {
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // Searching across several index implementations at once is unsupported.
    if (searchers.size() != 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/** @return the registered indexes whose names appear in {@code idxNames} */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames) {
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex index : indexesByColumn.values())
        if (idxNames.contains(index.getIndexName()))
            matches.add(index);
    return matches;
}
/** Marks each named index as built. */
public void setIndexBuilt(Set<String> idxNames) {
    for (SecondaryIndex index : getIndexesByNames(idxNames)) {
        index.setIndexBuilt();
    }
}
/** Marks each named index as removed. */
public void setIndexRemoved(Set<String> idxNames) {
    for (SecondaryIndex index : getIndexesByNames(idxNames)) {
        index.setIndexRemoved();
    }
}
/** @return true when the column is unindexed, or its index accepts the value */
public boolean validate(Column column) {
    SecondaryIndex index = getIndexForColumn(column.name());
    if (index == null)
        return true; // unindexed columns are always valid
    return index.validate(column);
}
/**
 * Receives per-column notifications as a row's data changes so that
 * secondary indexes can be kept in sync with the base data.
 */
public static interface Updater {
    /** called when constructing the index against pre-existing data */
    public void insert(Column column);

    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);

    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);

    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void updateRowLevelIndexes();
}
/**
 * Updater bound to a single row key; routes column-level changes to every
 * matching per-column index and row-level refreshes to every per-row index.
 */
private class StandardUpdater implements Updater {
    private final DecoratedKey key;

    public StandardUpdater(DecoratedKey key) {
        this.key = key;
    }

    public void update(Column oldColumn, Column column) {
        if (oldColumn.equals(column))
            return;
        // BUG FIX: indexFor(name) returns a (possibly empty) List and never null;
        // the original assigned it to a single SecondaryIndex and null-checked it,
        // which does not even compile. Iterate all matches, as insert()/remove() do.
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex) {
                // Insert the new value before deleting the old one so the row is
                // never invisible to both queries; see CASSANDRA-5540.
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            }
        }
    }

    public void insert(Column column) {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }
    }

    public void remove(Column column) {
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }

    public void updateRowLevelIndexes() {
        for (SecondaryIndex index : rowLevelIndexMap.values())
            ((PerRowSecondaryIndex) index).index(key.key);
    }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
/**
 * Manages all the indexes associated with a given CFS.
 * Different types of indexes can be created across the same CF.
 */
public class SecondaryIndexManager {
    private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);

    /** Shared no-op updater handed out when the CF has no indexes to maintain. */
    public static final Updater nullUpdater = new Updater() {
        public void insert(Column column) {
        }

        public void update(Column oldColumn, Column column) {
        }

        public void remove(Column current) {
        }

        public void updateRowLevelIndexes() {
        }
    };

    /**
     * Organizes the indexes by column name
     */
    private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;

    /**
     * Keeps a single instance of a SecondaryIndex for many columns when the index type
     * has isRowLevelIndex() == true
     *
     * This allows updates to happen to an entire row at once
     */
    private final Map<Class<? extends SecondaryIndex>, SecondaryIndex> rowLevelIndexMap;

    /**
     * The underlying column family containing the source data for these indexes
     */
    public final ColumnFamilyStore baseCfs;

    public SecondaryIndexManager(ColumnFamilyStore baseCfs) {
        indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
        rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
        this.baseCfs = baseCfs;
    }

    /**
     * Drops and adds new indexes associated with the underlying CF
     */
    public void reload() {
        // figure out what needs to be added and dropped.
        // future: if/when we have modifiable settings for secondary indexes,
        // they'll need to be handled here.
        Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
        // removal during iteration is safe: ConcurrentSkipListMap views are weakly consistent
        for (ByteBuffer indexedColumn : indexedColumnNames) {
            ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
            if (def == null || def.getIndexType() == null)
                removeIndexedColumn(indexedColumn);
        }

        // TODO: allow all ColumnDefinition type
        for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
            if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
                addIndexedColumn(cdef);

        // identity set: a row-level index instance is registered under many columns,
        // so make sure each instance is reloaded only once
        Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (reloadedIndexes.add(index))
                index.reload();
        }
    }

    /** @return the names of every registered index */
    public Set<String> allIndexesNames() {
        Set<String> names = new HashSet<String>();
        for (SecondaryIndex index : indexesByColumn.values())
            names.add(index.getIndexName());
        return names;
    }

    /**
     * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
     * Does nothing if columns is empty.
     *
     * Caller must acquire and release references to the sstables used here.
     *
     * @param sstables the data to build from
     * @param idxNames the list of columns to index, ordered by comparator
     */
    public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames) {
        if (idxNames.isEmpty())
            return;

        // Parameterized SLF4J logging instead of String.format / string concatenation.
        logger.info("Submitting index build of {} for data in {}", idxNames, StringUtils.join(sstables, ", "));
        SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
        Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
        FBUtilities.waitOnFuture(future);
        flushIndexesBlocking();
        logger.info("Index build of {} complete", idxNames);
    }

    /** @return true if at least one index in {@code indexes} covers column {@code name} */
    public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes) {
        return !indexFor(name, indexes).isEmpty();
    }

    /** @return the subset of {@code indexes} that cover the given column name; never null */
    public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes) {
        // lazily allocated: the common case is no matching index
        List<SecondaryIndex> matching = null;
        for (SecondaryIndex index : indexes) {
            if (index.indexes(name)) {
                if (matching == null)
                    matching = new ArrayList<SecondaryIndex>();
                matching.add(index);
            }
        }
        return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
    }

    /** @return true if any registered index covers the given column */
    public boolean indexes(Column column) {
        return indexes(column.name());
    }

    /** @return true if any registered index covers the given column name */
    public boolean indexes(ByteBuffer name) {
        return indexes(name, indexesByColumn.values());
    }

    /** @return every registered index covering the given column name */
    public List<SecondaryIndex> indexFor(ByteBuffer name) {
        return indexFor(name, indexesByColumn.values());
    }

    /**
     * @return true if the indexes can handle the clause.
     */
    public boolean hasIndexFor(List<IndexExpression> clause) {
        if (clause == null || clause.isEmpty())
            return false;

        // It doesn't seem a clause can have multiple searchers, but since
        // getIndexSearchersForQuery returns a list ...
        List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
        if (searchers.isEmpty())
            return false;
        for (SecondaryIndexSearcher searcher : searchers)
            if (!searcher.isIndexing(clause))
                return false;
        return true;
    }

    /**
     * Removes an existing index
     * @param column the indexed column to remove
     */
    public void removeIndexedColumn(ByteBuffer column) {
        SecondaryIndex index = indexesByColumn.remove(column);
        if (index == null)
            return;

        // Remove this column from the row level index map
        if (index instanceof PerRowSecondaryIndex) {
            index.removeColumnDef(column);
            // If no columns are left on this CF, remove it from the row level lookup
            if (index.getColumnDefs().isEmpty())
                rowLevelIndexMap.remove(index.getClass());
        }

        index.removeIndex(column);
        // persist the removal so the index is not considered built after restart
        SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
    }

    /**
     * Adds and builds a index for a column
     * @param cdef the column definition holding the index data
     * @return a future which the caller can optionally block on signaling the index is built
     */
    public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef) {
        if (indexesByColumn.containsKey(cdef.name))
            return null;

        assert cdef.getIndexType() != null;

        SecondaryIndex index;
        try {
            index = SecondaryIndex.createInstance(baseCfs, cdef);
        } catch (ConfigurationException e) {
            throw new RuntimeException(e);
        }

        // Row-level indexes share one instance across all indexed columns,
        // since we want all columns to be under the index
        if (index instanceof PerRowSecondaryIndex) {
            SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
            if (currentIndex == null) {
                rowLevelIndexMap.put(index.getClass(), index);
                index.init();
            } else {
                index = currentIndex;
                index.addColumnDef(cdef);
                // NOTE(review): logged when attaching a column to an *existing* row-level
                // index; the "Creating new index" wording is arguably misleading — confirm.
                logger.info("Creating new index : {}", cdef);
            }
        } else {
            // TODO: We should do better than throw a RuntimeException
            if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
                throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
            index.init();
        }

        // link in indexedColumns. this means that writes will add new data to
        // the index immediately, so we don't have to lock everything while we
        // do the build. it's up to the operator to wait until the index is
        // actually built before using in queries.
        indexesByColumn.put(cdef.name, index);

        // already-built index post-restart, we're done
        if (index.isIndexBuilt(cdef.name))
            return null;

        return index.buildIndexAsync();
    }

    /**
     * @param column the name of the indexed column
     * @return the index for that column, or null if the column is not indexed
     */
    public SecondaryIndex getIndexForColumn(ByteBuffer column) {
        return indexesByColumn.get(column);
    }

    /**
     * Invalidates every registered index.
     */
    public void invalidate() {
        for (SecondaryIndex index : indexesByColumn.values())
            index.invalidate();
    }

    /**
     * Flush all indexes to disk
     */
    public void flushIndexesBlocking() {
        for (SecondaryIndex index : indexesByColumn.values())
            index.forceBlockingFlush();
    }

    /**
     * @return all built indexes (ready to use)
     */
    public List<String> getBuiltIndexes() {
        List<String> indexList = new ArrayList<String>();
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet()) {
            SecondaryIndex index = entry.getValue();
            if (index.isIndexBuilt(entry.getKey())) {
                indexList.add(index.getIndexName());
            }
        }
        return indexList;
    }

    /**
     * @return all CFS from indexes which use a backing CFS internally (KEYS)
     */
    public Collection<ColumnFamilyStore> getIndexesBackedByCfs() {
        ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
        for (SecondaryIndex index : indexesByColumn.values()) {
            ColumnFamilyStore cfs = index.getIndexCfs();
            if (cfs != null)
                cfsList.add(cfs);
        }
        return cfsList;
    }

    /**
     * @return all indexes which do *not* use a backing CFS internally
     */
    public Collection<SecondaryIndex> getIndexesNotBackedByCfs() {
        // we use an identity set because per-row indexes use the same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values())
            if (index.getIndexCfs() == null)
                indexes.add(index);
        return indexes;
    }

    /**
     * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
     */
    public Collection<SecondaryIndex> getIndexes() {
        // we use an identity set because per-row indexes use the same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        indexes.addAll(indexesByColumn.values());
        return indexes;
    }

    /**
     * @return total current ram size of all indexes
     */
    public long getTotalLiveSize() {
        long total = 0;
        for (SecondaryIndex index : getIndexes())
            total += index.getLiveSize();
        return total;
    }

    /**
     * When building an index against existing data, add the given row to the index
     *
     * @param key the row key
     * @param cf the current rows data
     */
    public void indexRow(ByteBuffer key, ColumnFamily cf) {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (index instanceof PerRowSecondaryIndex) {
                if (appliedRowLevelIndexes == null)
                    appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (appliedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex) index).index(key, cf);
            } else {
                // per-column indexes receive each covered column individually
                for (Column column : cf) {
                    if (index.indexes(column.name()))
                        ((PerColumnSecondaryIndex) index).insert(key, column);
                }
            }
        }
    }

    /**
     * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
     *
     * @param key the row key
     * @param indexedColumnsInRow all column names in row
     */
    public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow) {
        // Notify each row level index only once per row
        Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
        for (Column column : indexedColumnsInRow) {
            SecondaryIndex index = indexesByColumn.get(column.name());
            if (index == null)
                continue;
            if (index instanceof PerRowSecondaryIndex) {
                if (cleanedRowLevelIndexes == null)
                    cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (cleanedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex) index).delete(key);
            } else {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }
    }

    /**
     * This helper acts as a closure around the indexManager
     * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
     * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
     * fully, other types simply ignore the index updater.
     */
    public Updater updaterFor(final DecoratedKey key) {
        return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty()) ? nullUpdater : new StandardUpdater(key);
    }

    /**
     * Get a list of IndexSearchers from the union of expression index types
     * @param clause the query clause
     * @return the searchers needed to query the index
     */
    private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause) {
        Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();

        // Group columns by the index implementation that serves them
        for (IndexExpression ix : clause) {
            SecondaryIndex index = getIndexForColumn(ix.column_name);
            if (index == null)
                continue;
            Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
            if (columns == null) {
                columns = new HashSet<ByteBuffer>();
                groupByIndexType.put(index.getClass().getCanonicalName(), columns);
            }
            columns.add(ix.column_name);
        }

        // create one searcher per index type
        List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
        for (Set<ByteBuffer> column : groupByIndexType.values())
            indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
        return indexSearchers;
    }

    /**
     * Performs a search across a number of column indexes
     * TODO: add support for querying across index types
     *
     * @param clause the index query clause
     * @param range the row range to restrict to
     * @param dataFilter the column range to restrict to
     * @return found indexed rows
     */
    public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows) {
        List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);

        if (indexSearchers.isEmpty())
            return Collections.emptyList();

        // We currently don't support searching across multiple index types
        if (indexSearchers.size() > 1)
            throw new RuntimeException("Unable to search across multiple secondary index types");

        return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
    }

    /** @return the registered indexes whose names appear in {@code idxNames} */
    public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames) {
        List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (idxNames.contains(index.getIndexName()))
                result.add(index);
        }
        return result;
    }

    /** Marks each named index as built. */
    public void setIndexBuilt(Set<String> idxNames) {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexBuilt();
    }

    /** Marks each named index as removed. */
    public void setIndexRemoved(Set<String> idxNames) {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexRemoved();
    }

    /** @return true when the column is unindexed, or its index accepts the value */
    public boolean validate(Column column) {
        SecondaryIndex index = getIndexForColumn(column.name());
        return index != null ? index.validate(column) : true;
    }

    /**
     * Receives per-column notifications as a row's data changes so that
     * secondary indexes can be kept in sync with the base data.
     */
    public static interface Updater {
        /** called when constructing the index against pre-existing data */
        public void insert(Column column);

        /** called when updating the index from a memtable */
        public void update(Column oldColumn, Column column);

        /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
        public void remove(Column current);

        /** called after memtable updates are complete (CASSANDRA-5397) */
        public void updateRowLevelIndexes();
    }

    /**
     * Updater bound to a single row key; routes column-level changes to every
     * matching per-column index and row-level refreshes to every per-row index.
     */
    private class StandardUpdater implements Updater {
        private final DecoratedKey key;

        public StandardUpdater(DecoratedKey key) {
            this.key = key;
        }

        public void update(Column oldColumn, Column column) {
            if (oldColumn.equals(column))
                return;

            // BUG FIX: indexFor(name) returns a (possibly empty) List and never null;
            // the original assigned it to a single SecondaryIndex and null-checked it,
            // which does not even compile. Iterate all matches, as insert()/remove() do.
            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex) {
                    // Insert the new value before deleting the old one so the row is
                    // never invisible to both queries; see CASSANDRA-5540
                    if (!column.isMarkedForDelete())
                        ((PerColumnSecondaryIndex) index).insert(key.key, column);
                    ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                }
            }
        }

        public void insert(Column column) {
            if (column.isMarkedForDelete())
                return;

            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex)
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
        }

        public void remove(Column column) {
            if (column.isMarkedForDelete())
                return;

            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex)
                    ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }

        public void updateRowLevelIndexes() {
            for (SecondaryIndex index : rowLevelIndexMap.values())
                ((PerRowSecondaryIndex) index).index(key.key);
        }
    }
}
// KeepBothMethods — stray merge/variant marker separating two duplicated copies of this file; not valid Java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);

/** Shared no-op updater handed out when the CF has no indexes to maintain. */
public static final Updater nullUpdater = new Updater() {
    public void insert(Column column) {
    }

    public void update(Column oldColumn, Column column) {
    }

    public void remove(Column current) {
    }

    public void updateRowLevelIndexes() {
    }
};

/**
 * Organizes the indexes by column name
 */
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;

/**
 * Keeps a single instance of a SecondaryIndex for many columns when the index type
 * has isRowLevelIndex() == true
 *
 * This allows updates to happen to an entire row at once
 */
private final Map<Class<? extends SecondaryIndex>, SecondaryIndex> rowLevelIndexMap;

/**
 * The underlying column family containing the source data for these indexes
 */
public final ColumnFamilyStore baseCfs;

/** @param baseCfs the column family store whose data these indexes cover */
public SecondaryIndexManager(ColumnFamilyStore baseCfs) {
    indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    // NOTE(review): rowLevelIndexMap is a plain HashMap; writes go through
    // synchronized addIndexedColumn() but reads elsewhere are unsynchronized — confirm intended.
    rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
    this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF
 */
public void reload() {
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    // removal during iteration is safe: ConcurrentSkipListMap views are weakly consistent
    for (ByteBuffer indexedColumn : indexedColumnNames) {
        ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // TODO: allow all ColumnDefinition type
    for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // identity set: a row-level index instance is registered under many columns,
    // so make sure each instance is reloaded only once
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values()) {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the name of every registered index on this CF */
public Set<String> allIndexesNames() {
    HashSet<String> indexNames = new HashSet<String>();
    for (SecondaryIndex registered : indexesByColumn.values()) {
        indexNames.add(registered.getIndexName());
    }
    return indexNames;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames) {
    if (idxNames.isEmpty())
        return;

    // Parameterized SLF4J logging instead of String.format / string concatenation;
    // the rendered message text is unchanged.
    logger.info("Submitting index build of {} for data in {}", idxNames, StringUtils.join(sstables, ", "));
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    FBUtilities.waitOnFuture(future);
    flushIndexesBlocking();
    logger.info("Index build of {} complete", idxNames);
}
/** @return true if at least one index in {@code indexes} covers column {@code name} */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes) {
    return !indexFor(name, indexes).isEmpty();
}
/**
 * Selects the indexes in {@code indexes} that cover the given column name.
 *
 * @return the matching indexes, or an empty list when none match; never null
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes) {
    // Lazily allocated: the common case is no matching index at all.
    List<SecondaryIndex> matching = null;
    for (SecondaryIndex candidate : indexes) {
        if (!candidate.indexes(name))
            continue;
        if (matching == null)
            matching = new ArrayList<SecondaryIndex>();
        matching.add(candidate);
    }
    if (matching == null)
        return Collections.<SecondaryIndex>emptyList();
    return matching;
}
/** @return true if any registered index covers the given column */
public boolean indexes(Column column) {
    return indexes(column.name());
}
/** @return true if any registered index covers the given column name */
public boolean indexes(ByteBuffer name) {
    return indexes(name, indexesByColumn.values());
}
/** @return every registered index covering the given column name; never null */
public List<SecondaryIndex> indexFor(ByteBuffer name) {
    return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause) {
    if (clause == null || clause.isEmpty())
        return false;
    // A clause seemingly maps to a single searcher, but
    // getIndexSearchersForQuery returns a list, so check every one.
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    for (SecondaryIndexSearcher searcher : searchers) {
        if (!searcher.isIndexing(clause))
            return false;
    }
    return true;
}
/**
 * Removes an existing index
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column) {
    SecondaryIndex index = indexesByColumn.remove(column);
    if (index == null)
        return;
    // Remove this column from the row level index map
    if (index instanceof PerRowSecondaryIndex) {
        index.removeColumnDef(column);
        // If no columns are left on this CF, remove it from the row level lookup
        if (index.getColumnDefs().isEmpty())
            rowLevelIndexMap.remove(index.getClass());
    }
    index.removeIndex(column);
    // persist the removal so the index is not considered built after restart
    SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds a index for a column
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef) {
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    } catch (ConfigurationException e) {
        throw new RuntimeException(e);
    }
    // Row-level indexes share one instance across all indexed columns,
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex) {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null) {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        } else {
            index = currentIndex;
            index.addColumnDef(cdef);
            // NOTE(review): logged when attaching a column to an *existing* row-level
            // index; the "Creating new index" wording is arguably misleading — confirm.
            logger.info("Creating new index : {}", cdef);
        }
    } else {
        // TODO: We should do better than throw a RuntimeException
        if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
            throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately, so we don't have to lock everything while we do
    // the build. it's up to the operator to wait until the index is actually
    // built before using in queries.
    indexesByColumn.put(cdef.name, index);
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * @param column the name of the indexed column
 * @return the index for that column, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column) {
    return indexesByColumn.get(column);
}
/**
 * Invalidates every registered index.
 */
public void invalidate() {
    for (SecondaryIndex registered : indexesByColumn.values()) {
        registered.invalidate();
    }
}
/**
 * Flush all indexes to disk
 */
public void flushIndexesBlocking() {
    for (SecondaryIndex registered : indexesByColumn.values()) {
        registered.forceBlockingFlush();
    }
}
/**
 * @return all built indexes (ready to use)
 */
public List<String> getBuiltIndexes() {
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet()) {
        SecondaryIndex registered = entry.getValue();
        if (registered.isIndexBuilt(entry.getKey()))
            built.add(registered.getIndexName());
    }
    return built;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs() {
    List<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex registered : indexesByColumn.values()) {
        ColumnFamilyStore indexCfs = registered.getIndexCfs();
        if (indexCfs != null)
            backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs() {
    // identity semantics: a row-level index instance is registered under many columns
    Set<SecondaryIndex> unbacked = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex registered : indexesByColumn.values()) {
        if (registered.getIndexCfs() == null)
            unbacked.add(registered);
    }
    return unbacked;
}
/**
 * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes() {
    // identity semantics: a row-level index instance is registered under many columns
    Set<SecondaryIndex> all = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    all.addAll(indexesByColumn.values());
    return all;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize() {
    long sum = 0;
    for (SecondaryIndex registered : getIndexes())
        sum += registered.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf) {
    // Each row-level index class must see the row exactly once.
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values()) {
        if (!(index instanceof PerRowSecondaryIndex)) {
            // Per-column indexes receive each covered column individually.
            for (Column column : cf) {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
            continue;
        }
        if (appliedRowLevelIndexes == null)
            appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
        if (appliedRowLevelIndexes.add(index.getClass()))
            ((PerRowSecondaryIndex) index).index(key, cf);
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all column names in row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow) {
    // Update entire row only once per row level index
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow) {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex) {
            // row-level indexes delete the whole row at most once per index class
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex) index).delete(key);
        } else {
            // per-column index: remove this one column's entry
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager
 * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
 * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully, other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key) {
    // with no indexes at all there is nothing to maintain, so hand back the no-op updater
    if (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause) {
    // bucket the restricted columns by the concrete index implementation serving them
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause) {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(indexType);
        if (bucket == null) {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, bucket);
        }
        bucket.add(expression.column_name);
    }
    // one searcher per index implementation
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param maxResults maximum number of rows to return
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows) {
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // We currently don't support searching across multiple index types
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    return searchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames names to look up
 * @return the registered indexes whose names appear in {@code idxNames}
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames) {
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex index : indexesByColumn.values())
        if (idxNames.contains(index.getIndexName()))
            matches.add(index);
    return matches;
}
/** Marks each of the named indexes as built. */
public void setIndexBuilt(Set<String> idxNames) {
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexBuilt();
}
/** Marks each of the named indexes as removed. */
public void setIndexRemoved(Set<String> idxNames) {
    for (SecondaryIndex index : getIndexesByNames(idxNames))
        index.setIndexRemoved();
}
/**
 * @return true when the column is not indexed, or when its index accepts the value.
 */
public boolean validate(Column column) {
    SecondaryIndex index = getIndexForColumn(column.name());
    // an unindexed column is trivially valid
    return index == null || index.validate(column);
}
/**
 * Receives per-column mutations for a single row so the secondary indexes
 * can be kept in sync with the base data (see updaterFor).
 */
public static interface Updater {
    /** called when constructing the index against pre-existing data */
    public void insert(Column column);
    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void updateRowLevelIndexes();
}
/**
 * Updater bound to a single row key; routes column-level mutations to the
 * per-column indexes and triggers row-level indexes once updates complete.
 */
private class StandardUpdater implements Updater {
    private final DecoratedKey key;

    public StandardUpdater(DecoratedKey key) {
        this.key = key;
    }

    public void insert(Column column) {
        // deleted columns carry no indexable value
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).insert(key.key, column);
        }
    }

    public void update(Column oldColumn, Column column) {
        // identical old/new values: nothing to do
        if (oldColumn.equals(column))
            return;
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex) {
                // insert the new entry before deleting the old one so there is never a window
                // where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
                if (!column.isMarkedForDelete())
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
                ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
            }
        }
    }

    public void remove(Column column) {
        // tombstones were never inserted into the index, so there is nothing to remove
        if (column.isMarkedForDelete())
            return;
        for (SecondaryIndex index : indexFor(column.name())) {
            if (index instanceof PerColumnSecondaryIndex)
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }

    public void updateRowLevelIndexes() {
        for (SecondaryIndex index : rowLevelIndexMap.values())
            ((PerRowSecondaryIndex) index).index(key.key);
    }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
    private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);

    /** No-op updater handed out when the base CF has no indexes to maintain. */
    public static final Updater nullUpdater = new Updater() {
        public void insert(Column column) {
        }

        public void update(Column oldColumn, Column column) {
        }

        public void remove(Column current) {
        }

        public void updateRowLevelIndexes() {
        }
    };

    /**
     * Organizes the indexes by column name
     */
    private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;

    /**
     * Keeps a single instance of a SecondaryIndex for many columns when the index type
     * has isRowLevelIndex() == true
     *
     * This allows updates to happen to an entire row at once
     */
    private final Map<Class<? extends SecondaryIndex>, SecondaryIndex> rowLevelIndexMap;

    /**
     * The underlying column family containing the source data for these indexes
     */
    public final ColumnFamilyStore baseCfs;

    public SecondaryIndexManager(ColumnFamilyStore baseCfs) {
        indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
        rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
        this.baseCfs = baseCfs;
    }

    /**
     * Drops and adds new indexes associated with the underlying CF
     */
    public void reload() {
        // figure out what needs to be added and dropped.
        // future: if/when we have modifiable settings for secondary indexes,
        // they'll need to be handled here.
        Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
        for (ByteBuffer indexedColumn : indexedColumnNames) {
            ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
            // drop indexes whose column no longer exists or is no longer indexed
            if (def == null || def.getIndexType() == null)
                removeIndexedColumn(indexedColumn);
        }
        // TODO: allow all ColumnDefinition type
        for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
            if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
                addIndexedColumn(cdef);
        // reload each index instance exactly once, even if it covers several columns
        Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (reloadedIndexes.add(index))
                index.reload();
        }
    }

    public Set<String> allIndexesNames() {
        Set<String> names = new HashSet<String>();
        for (SecondaryIndex index : indexesByColumn.values())
            names.add(index.getIndexName());
        return names;
    }

    /**
     * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
     * Does nothing if columns is empty.
     *
     * Caller must acquire and release references to the sstables used here.
     *
     * @param sstables the data to build from
     * @param idxNames the list of columns to index, ordered by comparator
     */
    public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames) {
        if (idxNames.isEmpty())
            return;
        logger.info(String.format("Submitting index build of %s for data in %s", idxNames, StringUtils.join(sstables, ", ")));
        SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
        Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
        FBUtilities.waitOnFuture(future);
        flushIndexesBlocking();
        logger.info("Index build of " + idxNames + " complete");
    }

    public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes) {
        return !indexFor(name, indexes).isEmpty();
    }

    public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes) {
        // allocate lazily; the common case is no matching index
        List<SecondaryIndex> matching = null;
        for (SecondaryIndex index : indexes) {
            if (index.indexes(name)) {
                if (matching == null)
                    matching = new ArrayList<SecondaryIndex>();
                matching.add(index);
            }
        }
        return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
    }

    public boolean indexes(Column column) {
        return indexes(column.name());
    }

    public boolean indexes(ByteBuffer name) {
        return indexes(name, indexesByColumn.values());
    }

    public List<SecondaryIndex> indexFor(ByteBuffer name) {
        return indexFor(name, indexesByColumn.values());
    }

    /**
     * @return true if the indexes can handle the clause.
     */
    public boolean hasIndexFor(List<IndexExpression> clause) {
        if (clause == null || clause.isEmpty())
            return false;
        // It doesn't seem a clause can have multiple searchers, but since
        // getIndexSearchersForQuery returns a list ...
        List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
        if (searchers.isEmpty())
            return false;
        for (SecondaryIndexSearcher searcher : searchers)
            if (!searcher.isIndexing(clause))
                return false;
        return true;
    }

    /**
     * Removes an existing index
     * @param column the indexed column to remove
     */
    public void removeIndexedColumn(ByteBuffer column) {
        SecondaryIndex index = indexesByColumn.remove(column);
        if (index == null)
            return;
        // Remove this column from the row level index map
        if (index instanceof PerRowSecondaryIndex) {
            index.removeColumnDef(column);
            // If no columns are left on this CF, remove it from the row level lookup
            if (index.getColumnDefs().isEmpty())
                rowLevelIndexMap.remove(index.getClass());
        }
        index.removeIndex(column);
        SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
    }

    /**
     * Adds and builds a index for a column
     * @param cdef the column definition holding the index data
     * @return a future which the caller can optionally block on signaling the index is built
     */
    public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef) {
        if (indexesByColumn.containsKey(cdef.name))
            return null;
        assert cdef.getIndexType() != null;
        SecondaryIndex index;
        try {
            index = SecondaryIndex.createInstance(baseCfs, cdef);
        } catch (ConfigurationException e) {
            throw new RuntimeException(e);
        }
        // Keep a single instance of the index per-cf for row level indexes
        // since we want all columns to be under the index
        if (index instanceof PerRowSecondaryIndex) {
            SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
            if (currentIndex == null) {
                rowLevelIndexMap.put(index.getClass(), index);
                index.init();
            } else {
                index = currentIndex;
                index.addColumnDef(cdef);
                logger.info("Creating new index : {}", cdef);
            }
        } else {
            // TODO: We should do better than throw a RuntimeException
            if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
                throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
            index.init();
        }
        // link in indexedColumns. this means that writes will add new data to
        // the index immediately,
        // so we don't have to lock everything while we do the build. it's up to
        // the operator to wait
        // until the index is actually built before using in queries.
        indexesByColumn.put(cdef.name, index);
        // if we're just linking in the index to indexedColumns on an
        // already-built index post-restart, we're done
        if (index.isIndexBuilt(cdef.name))
            return null;
        return index.buildIndexAsync();
    }

    /**
     *
     * @param column the name of indexes column
     * @return the index
     */
    public SecondaryIndex getIndexForColumn(ByteBuffer column) {
        return indexesByColumn.get(column);
    }

    /**
     * Remove the index
     */
    public void invalidate() {
        for (SecondaryIndex index : indexesByColumn.values())
            index.invalidate();
    }

    /**
     * Flush all indexes to disk
     */
    public void flushIndexesBlocking() {
        for (SecondaryIndex index : indexesByColumn.values())
            index.forceBlockingFlush();
    }

    /**
     * @return all built indexes (ready to use)
     */
    public List<String> getBuiltIndexes() {
        List<String> indexList = new ArrayList<String>();
        for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet()) {
            SecondaryIndex index = entry.getValue();
            if (index.isIndexBuilt(entry.getKey()))
                indexList.add(index.getIndexName());
        }
        return indexList;
    }

    /**
     * @return all CFS from indexes which use a backing CFS internally (KEYS)
     */
    public Collection<ColumnFamilyStore> getIndexesBackedByCfs() {
        ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
        for (SecondaryIndex index : indexesByColumn.values()) {
            ColumnFamilyStore cfs = index.getIndexCfs();
            if (cfs != null)
                cfsList.add(cfs);
        }
        return cfsList;
    }

    /**
     * @return all indexes which do *not* use a backing CFS internally
     */
    public Collection<SecondaryIndex> getIndexesNotBackedByCfs() {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        for (SecondaryIndex index : indexesByColumn.values())
            if (index.getIndexCfs() == null)
                indexes.add(index);
        return indexes;
    }

    /**
     * @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
     */
    public Collection<SecondaryIndex> getIndexes() {
        // we use identity map because per row indexes use same instance across many columns
        Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
        indexes.addAll(indexesByColumn.values());
        return indexes;
    }

    /**
     * @return total current ram size of all indexes
     */
    public long getTotalLiveSize() {
        long total = 0;
        for (SecondaryIndex index : getIndexes())
            total += index.getLiveSize();
        return total;
    }

    /**
     * When building an index against existing data, add the given row to the index
     *
     * @param key the row key
     * @param cf the current rows data
     */
    public void indexRow(ByteBuffer key, ColumnFamily cf) {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (index instanceof PerRowSecondaryIndex) {
                if (appliedRowLevelIndexes == null)
                    appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (appliedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex) index).index(key, cf);
            } else {
                for (Column column : cf) {
                    if (index.indexes(column.name()))
                        ((PerColumnSecondaryIndex) index).insert(key, column);
                }
            }
        }
    }

    /**
     * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
     *
     * @param key the row key
     * @param indexedColumnsInRow all column names in row
     */
    public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow) {
        // Update entire row only once per row level index
        Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
        for (Column column : indexedColumnsInRow) {
            SecondaryIndex index = indexesByColumn.get(column.name());
            if (index == null)
                continue;
            if (index instanceof PerRowSecondaryIndex) {
                if (cleanedRowLevelIndexes == null)
                    cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
                if (cleanedRowLevelIndexes.add(index.getClass()))
                    ((PerRowSecondaryIndex) index).delete(key);
            } else {
                ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }
    }

    /**
     * This helper acts as a closure around the indexManager
     * and row key to ensure that down in Memtable's ColumnFamily implementation, the index
     * can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
     * fully, other types simply ignore the index updater.
     */
    public Updater updaterFor(final DecoratedKey key) {
        return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty()) ? nullUpdater : new StandardUpdater(key);
    }

    /**
     * Get a list of IndexSearchers from the union of expression index types
     * @param clause the query clause
     * @return the searchers needed to query the index
     */
    private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause) {
        Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
        // Group columns by type
        for (IndexExpression ix : clause) {
            SecondaryIndex index = getIndexForColumn(ix.column_name);
            if (index == null)
                continue;
            Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
            if (columns == null) {
                columns = new HashSet<ByteBuffer>();
                groupByIndexType.put(index.getClass().getCanonicalName(), columns);
            }
            columns.add(ix.column_name);
        }
        // create searcher per type
        List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
        for (Set<ByteBuffer> column : groupByIndexType.values())
            indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
        return indexSearchers;
    }

    /**
     * Performs a search across a number of column indexes
     * TODO: add support for querying across index types
     *
     * @param clause the index query clause
     * @param range the row range to restrict to
     * @param dataFilter the column range to restrict to
     * @return found indexed rows
     */
    public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows) {
        List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
        if (indexSearchers.isEmpty())
            return Collections.emptyList();
        // We currently don't support searching across multiple index types
        if (indexSearchers.size() > 1)
            throw new RuntimeException("Unable to search across multiple secondary index types");
        return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
    }

    public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames) {
        List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
        for (SecondaryIndex index : indexesByColumn.values()) {
            if (idxNames.contains(index.getIndexName()))
                result.add(index);
        }
        return result;
    }

    public void setIndexBuilt(Set<String> idxNames) {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexBuilt();
    }

    public void setIndexRemoved(Set<String> idxNames) {
        for (SecondaryIndex index : getIndexesByNames(idxNames))
            index.setIndexRemoved();
    }

    public boolean validate(Column column) {
        SecondaryIndex index = getIndexForColumn(column.name());
        return index != null ? index.validate(column) : true;
    }

    public static interface Updater {
        /** called when constructing the index against pre-existing data */
        public void insert(Column column);
        /** called when updating the index from a memtable */
        public void update(Column oldColumn, Column column);
        /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
        public void remove(Column current);
        /** called after memtable updates are complete (CASSANDRA-5397) */
        public void updateRowLevelIndexes();
    }

    /**
     * Updater bound to a single row key; routes per-column mutations to the
     * per-column indexes and triggers row-level indexes once updates complete.
     */
    private class StandardUpdater implements Updater {
        private final DecoratedKey key;

        public StandardUpdater(DecoratedKey key) {
            this.key = key;
        }

        public void insert(Column column) {
            if (column.isMarkedForDelete())
                return;
            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex)
                    ((PerColumnSecondaryIndex) index).insert(key.key, column);
            }
        }

        public void update(Column oldColumn, Column column) {
            // identical old/new values: nothing to do
            if (oldColumn.equals(column))
                return;
            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex) {
                    // insert the new entry before deleting the old one so there is never a window
                    // where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
                    if (!column.isMarkedForDelete())
                        ((PerColumnSecondaryIndex) index).insert(key.key, column);
                    ((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
                }
            }
        }

        public void remove(Column column) {
            if (column.isMarkedForDelete())
                return;
            for (SecondaryIndex index : indexFor(column.name())) {
                if (index instanceof PerColumnSecondaryIndex)
                    ((PerColumnSecondaryIndex) index).delete(key.key, column);
            }
        }

        public void updateRowLevelIndexes() {
            for (SecondaryIndex index : rowLevelIndexMap.values())
                ((PerRowSecondaryIndex) index).index(key.key);
        }
    }
}
// Safe
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
public void remove(Column current) { }
public void updateRowLevelIndexes() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
// Creates an index manager for the given base column family; starts with no indexes registered.
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
    indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
    this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF
 */
public void reload()
{
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    for (ByteBuffer indexedColumn : indexedColumnNames)
    {
        ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
        // drop indexes whose column no longer exists or is no longer indexed
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // TODO: allow all ColumnDefinition type
    for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // reload each index instance exactly once, even when it is registered for several columns
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the names of every registered index */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    for (SecondaryIndex index : indexesByColumn.values())
    {
        names.add(index.getIndexName());
    }
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;
    logger.info(String.format("Submitting index build of %s for data in %s",
                              idxNames, StringUtils.join(sstables, ", ")));
    // build runs on the compaction executor; we block here until it finishes
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    FBUtilities.waitOnFuture(future);
    // persist what was built before declaring success
    flushIndexesBlocking();
    logger.info("Index build of " + idxNames + " complete");
}
// @return true when at least one of the given indexes covers the named column
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    return !indexFor(name, indexes).isEmpty();
}
/**
 * @return the subset of the given indexes that cover the named column (possibly empty).
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // allocate lazily: the common case is that no index matches
    List<SecondaryIndex> matches = null;
    for (SecondaryIndex candidate : indexes)
    {
        if (candidate.indexes(name))
        {
            if (matches == null)
                matches = new ArrayList<SecondaryIndex>();
            matches.add(candidate);
        }
    }
    if (matches == null)
        return Collections.<SecondaryIndex>emptyList();
    return matches;
}
// Convenience overload: checks whether any registered index covers this column.
public boolean indexes(Column column)
{
    return indexes(column.name());
}
// Convenience overload: checks the named column against all registered indexes.
public boolean indexes(ByteBuffer name)
{
    return indexes(name, indexesByColumn.values());
}
// Convenience overload: all registered indexes covering the named column.
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
    return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
    if (clause == null || clause.isEmpty())
        return false;
    // It doesn't seem a clause can have multiple searchers, but since
    // getIndexSearchersForQuery returns a list ...
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    // every searcher must be able to serve the clause
    for (SecondaryIndexSearcher searcher : searchers)
        if (!searcher.isIndexing(clause))
            return false;
    return true;
}
/**
 * Removes an existing index
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
    SecondaryIndex index = indexesByColumn.remove(column);
    if (index == null)
        return;
    // Remove this column from the row level index map
    if (index instanceof PerRowSecondaryIndex)
    {
        index.removeColumnDef(column);
        // If no columns are left on this CF, remove it from the row level lookup
        if (index.getColumnDefs().isEmpty())
            rowLevelIndexMap.remove(index.getClass());
    }
    index.removeIndex(column);
    SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds a index for a column
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    // already registered: nothing to do
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        }
        else
        {
            // reuse the existing instance and just attach the new column to it
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Creating new index : {}",cdef);
        }
    }
    else
    {
        // TODO: We should do better than throw a RuntimeException
        if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
            throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately,
    // so we don't have to lock everything while we do the build. it's up to
    // the operator to wait
    // until the index is actually built before using in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * Looks up the index registered for the given column name.
 *
 * @param column the name of the indexed column
 * @return the index, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
    SecondaryIndex registered = indexesByColumn.get(column);
    return registered;
}
/**
 * Invalidates every registered index (e.g. when the base CF is dropped).
 */
public void invalidate()
{
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        it.next().invalidate();
}
/**
 * Flushes all indexes to disk, blocking until each flush completes.
 */
public void flushIndexesBlocking()
{
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        it.next().forceBlockingFlush();
}
/**
 * @return names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex index = entry.getValue();
        if (!index.isIndexBuilt(entry.getKey()))
            continue;
        built.add(index.getIndexName());
    }
    return built;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    List<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = idx.getIndexCfs();
        if (indexCfs != null)
            backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity set: row-level indexes share one instance across many columns
    Set<SecondaryIndex> unbacked = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idx.getIndexCfs() != null)
            continue;
        unbacked.add(idx);
    }
    return unbacked;
}
/**
 * @return every distinct secondary index instance, whether or not it is
 *         backed by a secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes()
{
    // identity set: per-row indexes appear once even though many columns map to them
    Set<SecondaryIndex> distinct = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
        distinct.add(idx);
    return distinct;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    long sum = 0L;
    for (SecondaryIndex idx : getIndexes())
        sum = sum + idx.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index.
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Each row-level index class is applied at most once per row, since a
    // single PerRowSecondaryIndex instance covers many columns.
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocated: most CFs have no row-level indexes
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column index: insert each column it covers
            for (Column column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all indexed columns present in the row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // Delete the entire row only once per row-level index class (shared instances)
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager and row key so that,
 * down in Memtable's ColumnFamily implementation, the index can get updated.
 * Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully; other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key)
{
    boolean nothingIndexed = indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty();
    if (nothingIndexed)
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 *
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    // group the clause's columns by the canonical class name of their index
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> columns = columnsByIndexType.get(indexType);
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, columns);
        }
        columns.add(expression.column_name);
    }
    // one searcher per index type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes.
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param maxResults maximum number of rows to return
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // searching across multiple index types is not supported yet
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    SecondaryIndexSearcher searcher = searchers.get(0);
    return searcher.search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames index names to look up
 * @return the registered indexes whose names appear in idxNames
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (!idxNames.contains(idx.getIndexName()))
            continue;
        matches.add(idx);
    }
    return matches;
}
/** Marks each index named in idxNames as built. */
public void setIndexBuilt(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexBuilt();
}
/** Marks each index named in idxNames as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexRemoved();
}
/**
 * Validates the column against its index, if any.
 *
 * @param column the column to validate
 * @return true if the column is not indexed, or if its index accepts the value
 */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    // `cond ? x : true` is just a disjunction; unindexed columns are trivially valid
    return index == null || index.validate(column);
}
public static interface Updater {
    /** called when constructing the index against pre-existing data */
    public void insert(Column column);
    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void updateRowLevelIndexes();
}
private class StandardUpdater implements Updater {
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
<<<<<<< MINE
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
=======
public void update(IColumn oldColumn, IColumn column)
{
if (oldColumn.equals(column))
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
// insert the new value before removing the old one, so we never have a period
// where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
}
}
>>>>>>> YOURS
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager {
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
/** No-op updater returned when the CF has no indexes at all. */
public static final Updater nullUpdater = new Updater()
{
    public void insert(Column column) { }
    public void update(Column oldColumn, Column column) { }
    public void remove(Column current) { }
    public void updateRowLevelIndexes() {}
};
/**
 * Organizes the indexes by column name
 */
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
 * Keeps a single instance of a SecondaryIndex for many columns when the index type
 * has isRowLevelIndex() == true
 *
 * This allows updates to happen to an entire row at once
 */
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
 * The underlying column family containing the source data for these indexes
 */
public final ColumnFamilyStore baseCfs;

/**
 * @param baseCfs the column family whose columns these indexes cover
 */
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
    indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
    this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF.
 */
public void reload()
{
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    // NOTE: keySet() is a live view of the concurrent map, so removing entries
    // while iterating is safe and the contains() check below sees removals.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    for (ByteBuffer indexedColumn : indexedColumnNames)
    {
        ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // TODO: allow all ColumnDefinition type
    for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // reload each distinct instance once (identity set, because row-level
    // indexes are shared across many columns)
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the names of every registered index. */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    for (SecondaryIndex idx : indexesByColumn.values())
        names.add(idx.getIndexName());
    return names;
}
/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Does nothing if columns is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the list of columns to index, ordered by comparator
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;
    // use SLF4J parameterized logging rather than eager String.format / concatenation
    logger.info("Submitting index build of {} for data in {}", idxNames, StringUtils.join(sstables, ", "));
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    FBUtilities.waitOnFuture(future);
    // make the freshly built index data durable before reporting completion
    flushIndexesBlocking();
    logger.info("Index build of {} complete", idxNames);
}
/** @return true if at least one of the given indexes covers the named column. */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    List<SecondaryIndex> covering = indexFor(name, indexes);
    return !covering.isEmpty();
}
/** @return the subset of the given indexes that cover the named column; never null. */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
    // allocate the result lazily: the common case is no matching index
    List<SecondaryIndex> matching = Collections.<SecondaryIndex>emptyList();
    for (SecondaryIndex candidate : indexes)
    {
        if (!candidate.indexes(name))
            continue;
        if (matching.isEmpty())
            matching = new ArrayList<SecondaryIndex>();
        matching.add(candidate);
    }
    return matching;
}
/** @return true if the given column is covered by any registered index. */
public boolean indexes(Column column)
{
    return indexes(column.name());
}
/** @return true if the named column is covered by any registered index. */
public boolean indexes(ByteBuffer name)
{
    return indexes(name, indexesByColumn.values());
}
/** @return every registered index covering the named column. */
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
    return indexFor(name, indexesByColumn.values());
}
/**
 * @return true if the indexes can handle the clause.
 */
public boolean hasIndexFor(List<IndexExpression> clause)
{
    if (clause == null || clause.isEmpty())
        return false;
    // It doesn't seem a clause can have multiple searchers, but since
    // getIndexSearchersForQuery returns a list ...
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return false;
    boolean allCanIndex = true;
    for (SecondaryIndexSearcher searcher : searchers)
        allCanIndex = allCanIndex && searcher.isIndexing(clause);
    return allCanIndex;
}
/**
 * Removes an existing index.
 *
 * @param column the indexed column to remove
 */
public void removeIndexedColumn(ByteBuffer column)
{
    SecondaryIndex index = indexesByColumn.remove(column);
    if (index == null)
        return;
    // Row-level index instances are shared across many columns: detach this
    // column first, and only drop the shared instance when no columns remain.
    if (index instanceof PerRowSecondaryIndex)
    {
        index.removeColumnDef(column);
        // If no columns are left on this CF, remove it from the row-level lookup
        if (index.getColumnDefs().isEmpty())
            rowLevelIndexMap.remove(index.getClass());
    }
    index.removeIndex(column);
    // Record the removal so the index is no longer reported as built after restart
    SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
 * Adds and builds an index for a column.
 *
 * Synchronized so concurrent callers cannot race on registering the same
 * column or on sharing a row-level index instance.
 *
 * @param cdef the column definition holding the index data
 * @return a future which the caller can optionally block on signaling the index is built,
 *         or null if the column is already indexed or the index is already built
 */
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
    if (indexesByColumn.containsKey(cdef.name))
        return null;
    assert cdef.getIndexType() != null;
    SecondaryIndex index;
    try
    {
        index = SecondaryIndex.createInstance(baseCfs, cdef);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
    // Keep a single instance of the index per-cf for row level indexes
    // since we want all columns to be under the index
    if (index instanceof PerRowSecondaryIndex)
    {
        SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
        if (currentIndex == null)
        {
            rowLevelIndexMap.put(index.getClass(), index);
            index.init();
        }
        else
        {
            // Reuse the shared instance and just register the new column on it.
            // NOTE(review): the log message says "Creating new index" but this
            // branch reuses an existing instance — consider rewording.
            index = currentIndex;
            index.addColumnDef(cdef);
            logger.info("Creating new index : {}",cdef);
        }
    }
    else
    {
        // TODO: We should do better than throw a RuntimeException
        if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
            throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
        index.init();
    }
    // link in indexedColumns. this means that writes will add new data to
    // the index immediately, so we don't have to lock everything while we do
    // the build. it's up to the operator to wait until the index is actually
    // built before using it in queries.
    indexesByColumn.put(cdef.name, index);
    // if we're just linking in the index to indexedColumns on an
    // already-built index post-restart, we're done
    if (index.isIndexBuilt(cdef.name))
        return null;
    return index.buildIndexAsync();
}
/**
 * Looks up the index registered for the given column name.
 *
 * @param column the name of the indexed column
 * @return the index, or null if the column is not indexed
 */
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
    SecondaryIndex registered = indexesByColumn.get(column);
    return registered;
}
/**
 * Invalidates every registered index (e.g. when the base CF is dropped).
 */
public void invalidate()
{
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        it.next().invalidate();
}
/**
 * Flushes all indexes to disk, blocking until each flush completes.
 */
public void flushIndexesBlocking()
{
    Iterator<SecondaryIndex> it = indexesByColumn.values().iterator();
    while (it.hasNext())
        it.next().forceBlockingFlush();
}
/**
 * @return names of all indexes that have finished building (ready to use)
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex index = entry.getValue();
        if (!index.isIndexBuilt(entry.getKey()))
            continue;
        built.add(index.getIndexName());
    }
    return built;
}
/**
 * @return all CFS from indexes which use a backing CFS internally (KEYS)
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    List<ColumnFamilyStore> backing = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = idx.getIndexCfs();
        if (indexCfs != null)
            backing.add(indexCfs);
    }
    return backing;
}
/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
    // identity set: row-level indexes share one instance across many columns
    Set<SecondaryIndex> unbacked = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (idx.getIndexCfs() != null)
            continue;
        unbacked.add(idx);
    }
    return unbacked;
}
/**
 * @return every distinct secondary index instance, whether or not it is
 *         backed by a secondary ColumnFamilyStore.
 */
public Collection<SecondaryIndex> getIndexes()
{
    // identity set: per-row indexes appear once even though many columns map to them
    Set<SecondaryIndex> distinct = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex idx : indexesByColumn.values())
        distinct.add(idx);
    return distinct;
}
/**
 * @return total current ram size of all indexes
 */
public long getTotalLiveSize()
{
    long sum = 0L;
    for (SecondaryIndex idx : getIndexes())
        sum = sum + idx.getLiveSize();
    return sum;
}
/**
 * When building an index against existing data, add the given row to the index.
 *
 * @param key the row key
 * @param cf the current rows data
 */
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
    // Each row-level index class is applied at most once per row, since a
    // single PerRowSecondaryIndex instance covers many columns.
    Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (index instanceof PerRowSecondaryIndex)
        {
            // lazily allocated: most CFs have no row-level indexes
            if (appliedRowLevelIndexes == null)
                appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (appliedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).index(key, cf);
        }
        else
        {
            // per-column index: insert each column it covers
            for (Column column : cf)
            {
                if (index.indexes(column.name()))
                    ((PerColumnSecondaryIndex) index).insert(key, column);
            }
        }
    }
}
/**
 * Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
 *
 * @param key the row key
 * @param indexedColumnsInRow all indexed columns present in the row
 */
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
    // Delete the entire row only once per row-level index class (shared instances)
    Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
    for (Column column : indexedColumnsInRow)
    {
        SecondaryIndex index = indexesByColumn.get(column.name());
        if (index == null)
            continue;
        if (index instanceof PerRowSecondaryIndex)
        {
            if (cleanedRowLevelIndexes == null)
                cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
            if (cleanedRowLevelIndexes.add(index.getClass()))
                ((PerRowSecondaryIndex)index).delete(key);
        }
        else
        {
            ((PerColumnSecondaryIndex) index).delete(key.key, column);
        }
    }
}
/**
 * This helper acts as a closure around the indexManager and row key so that,
 * down in Memtable's ColumnFamily implementation, the index can get updated.
 * Note: only a CF backed by AtomicSortedColumns implements this behaviour
 * fully; other types simply ignore the index updater.
 */
public Updater updaterFor(final DecoratedKey key)
{
    boolean nothingIndexed = indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty();
    if (nothingIndexed)
        return nullUpdater;
    return new StandardUpdater(key);
}
/**
 * Get a list of IndexSearchers from the union of expression index types.
 *
 * @param clause the query clause
 * @return the searchers needed to query the index
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    // group the clause's columns by the canonical class name of their index
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> columns = columnsByIndexType.get(indexType);
        if (columns == null)
        {
            columns = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, columns);
        }
        columns.add(expression.column_name);
    }
    // one searcher per index type
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
 * Performs a search across a number of column indexes.
 * TODO: add support for querying across index types
 *
 * @param clause the index query clause
 * @param range the row range to restrict to
 * @param maxResults maximum number of rows to return
 * @param dataFilter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
    if (searchers.isEmpty())
        return Collections.emptyList();
    // searching across multiple index types is not supported yet
    if (searchers.size() > 1)
        throw new RuntimeException("Unable to search across multiple secondary index types");
    SecondaryIndexSearcher searcher = searchers.get(0);
    return searcher.search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames index names to look up
 * @return the registered indexes whose names appear in idxNames
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex idx : indexesByColumn.values())
    {
        if (!idxNames.contains(idx.getIndexName()))
            continue;
        matches.add(idx);
    }
    return matches;
}
/** Marks each index named in idxNames as built. */
public void setIndexBuilt(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexBuilt();
}
/** Marks each index named in idxNames as removed. */
public void setIndexRemoved(Set<String> idxNames)
{
    for (SecondaryIndex idx : getIndexesByNames(idxNames))
        idx.setIndexRemoved();
}
/**
 * Validates the column against its index, if any.
 *
 * @param column the column to validate
 * @return true if the column is not indexed, or if its index accepts the value
 */
public boolean validate(Column column)
{
    SecondaryIndex index = getIndexForColumn(column.name());
    // `cond ? x : true` is just a disjunction; unindexed columns are trivially valid
    return index == null || index.validate(column);
}
public static interface Updater {
    /** called when constructing the index against pre-existing data */
    public void insert(Column column);
    /** called when updating the index from a memtable */
    public void update(Column oldColumn, Column column);
    /** called when lazy-updating the index during compaction (CASSANDRA-2897) */
    public void remove(Column current);
    /** called after memtable updates are complete (CASSANDRA-5397) */
    public void updateRowLevelIndexes();
}
private class StandardUpdater implements Updater {
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
<<<<<<< MINE
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
=======
public void update(IColumn oldColumn, IColumn column)
{
if (oldColumn.equals(column))
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
{
// insert the new value before removing the old one, so we never have a period
// where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
}
}
>>>>>>> YOURS
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
/** No-op updater returned when the CF has no indexes at all. */
public static final Updater nullUpdater = new Updater()
{
    public void insert(Column column) { }
    public void update(Column oldColumn, Column column) { }
    public void remove(Column current) { }
    public void updateRowLevelIndexes() {}
};
/**
 * Organizes the indexes by column name
 */
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
 * Keeps a single instance of a SecondaryIndex for many columns when the index type
 * has isRowLevelIndex() == true
 *
 * This allows updates to happen to an entire row at once
 */
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
 * The underlying column family containing the source data for these indexes
 */
public final ColumnFamilyStore baseCfs;

/**
 * @param baseCfs the column family whose columns these indexes cover
 */
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
    indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
    rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
    this.baseCfs = baseCfs;
}
/**
 * Drops and adds new indexes associated with the underlying CF.
 */
public void reload()
{
    // figure out what needs to be added and dropped.
    // future: if/when we have modifiable settings for secondary indexes,
    // they'll need to be handled here.
    // NOTE: keySet() is a live view of the concurrent map, so removing entries
    // while iterating is safe and the contains() check below sees removals.
    Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
    for (ByteBuffer indexedColumn : indexedColumnNames)
    {
        ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
        if (def == null || def.getIndexType() == null)
            removeIndexedColumn(indexedColumn);
    }
    // TODO: allow all ColumnDefinition type
    for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
        if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
            addIndexedColumn(cdef);
    // reload each distinct instance once (identity set, because row-level
    // indexes are shared across many columns)
    Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : indexesByColumn.values())
    {
        if (reloadedIndexes.add(index))
            index.reload();
    }
}
/** @return the names of every registered index. */
public Set<String> allIndexesNames()
{
    Set<String> names = new HashSet<String>();
    for (SecondaryIndex idx : indexesByColumn.values())
        names.add(idx.getIndexName());
    return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
/**
 * Does a full, blocking rebuild of the indexes named in idxNames from the given
 * sstables, then flushes the indexes. No-op when idxNames is empty.
 *
 * Caller must acquire and release references to the sstables used here.
 *
 * @param sstables the data to build from
 * @param idxNames the names of the indexes to (re)build
 */
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
    if (idxNames.isEmpty())
        return;
    // Use SLF4J parameterized logging rather than String.format/concatenation,
    // so the message is only rendered when INFO is enabled.
    logger.info("Submitting index build of {} for data in {}", idxNames, StringUtils.join(sstables, ", "));
    SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
    Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
    FBUtilities.waitOnFuture(future);
    // Make the freshly built entries durable before reporting completion.
    flushIndexesBlocking();
    logger.info("Index build of {} complete", idxNames);
}
/** @return true if any index in the given collection applies to the column name. */
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
/**
 * @return the subset of the given indexes that apply to the column name;
 * an immutable empty list when none match (the list is allocated lazily).
 */
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
/** @return true if any registered index applies to the column. */
public boolean indexes(Column column)
{
return indexes(column.name());
}
/** @return true if any registered index applies to the column name. */
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
/** @return all registered indexes that apply to the column name. */
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
// Every searcher must be able to serve the clause, not just one of them.
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from the row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
// If no columns are left on this CF, drop it from the row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
// Idempotent: a column that is already indexed is left untouched.
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
// First column for this row-level index class: register and initialize it.
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
// Reuse the existing instance and just attach the new column to it.
index = currentIndex;
index.addColumnDef(cdef);
// NOTE(review): this logs on the reuse path, so "Creating new index" looks
// misleading ("adding column to existing index" seems intended) — confirm.
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We should do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
/**
* Invalidate every registered index.
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
// Row-level indexes shared across columns are flushed once per column entry;
// forceBlockingFlush is assumed safe to invoke repeatedly — TODO confirm.
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
/**
 * @return the names of all indexes that report themselves built for the column
 * they are registered under (i.e. ready to serve queries).
 */
public List<String> getBuiltIndexes()
{
    List<String> built = new ArrayList<String>();
    for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
    {
        SecondaryIndex candidate = entry.getValue();
        if (candidate.isIndexBuilt(entry.getKey()))
            built.add(candidate.getIndexName());
    }
    return built;
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
/**
 * @return the backing ColumnFamilyStore of every index that has one
 * (indexes whose getIndexCfs() is null are skipped).
 */
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
    List<ColumnFamilyStore> backed = new ArrayList<ColumnFamilyStore>();
    for (SecondaryIndex index : indexesByColumn.values())
    {
        ColumnFamilyStore indexCfs = index.getIndexCfs();
        if (indexCfs != null)
            backed.add(indexCfs);
    }
    return backed;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
/**
 * @return the summed live size of all distinct index instances
 * (deduplicated via getIndexes(), so shared row-level indexes count once).
 */
public long getTotalLiveSize()
{
    long totalLive = 0L;
    for (SecondaryIndex index : getIndexes())
    {
        totalLive += index.getLiveSize();
    }
    return totalLive;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
// Lazily allocated: most tables won't have row-level indexes at all.
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
// add() returns false for a class we've already handed the whole row to.
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
// Per-column index: feed it only the columns it actually covers.
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
// Row-level indexes drop the whole row once, regardless of column count.
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key)
{
// With no indexes at all, hand out the shared no-op updater to avoid allocation.
return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
? nullUpdater
: new StandardUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
/**
 * Get a list of IndexSearchers from the union of expression index types.
 *
 * @param clause the query clause
 * @return one searcher per concrete index implementation referenced by the
 * clause, each fed all of that implementation's columns
 */
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
    // Bucket the referenced columns by the concrete index class handling them;
    // expressions on unindexed columns are skipped.
    Map<String, Set<ByteBuffer>> columnsByIndexType = new HashMap<String, Set<ByteBuffer>>();
    for (IndexExpression expression : clause)
    {
        SecondaryIndex index = getIndexForColumn(expression.column_name);
        if (index == null)
            continue;
        String indexType = index.getClass().getCanonicalName();
        Set<ByteBuffer> bucket = columnsByIndexType.get(indexType);
        if (bucket == null)
        {
            bucket = new HashSet<ByteBuffer>();
            columnsByIndexType.put(indexType, bucket);
        }
        bucket.add(expression.column_name);
    }
    // Create one searcher per bucket, using any member column to reach the index.
    List<SecondaryIndexSearcher> searchers = new ArrayList<SecondaryIndexSearcher>(columnsByIndexType.size());
    for (Set<ByteBuffer> columns : columnsByIndexType.values())
        searchers.add(getIndexForColumn(columns.iterator().next()).createSecondaryIndexSearcher(columns));
    return searchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
// No searcher means no expression in the clause touches an indexed column.
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
// Exactly one searcher: delegate the whole query to it.
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
/**
 * @param idxNames index names to look up
 * @return the registered indexes whose name appears in idxNames
 * (one entry per registered column, so shared instances may repeat)
 */
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
    List<SecondaryIndex> matches = new ArrayList<SecondaryIndex>();
    for (SecondaryIndex candidate : indexesByColumn.values())
    {
        if (idxNames.contains(candidate.getIndexName()))
            matches.add(candidate);
    }
    return matches;
}
/** Mark each named index as built. */
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
/** Mark each named index as removed (no longer built). */
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
/**
 * @return false only if the column is indexed and its index rejects the value;
 * unindexed columns always validate.
 */
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
/** Receives per-column mutations so indexes can be kept in sync with writes. */
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(Column column);
/** called when updating the index from a memtable */
public void update(Column oldColumn, Column column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(Column current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
private class StandardUpdater implements Updater
{
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
if (oldColumn.equals(column))
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
// insert the new value before removing the old one, so we never have a period
// where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
if (!column.isMarkedForDelete())
=======
if (index instanceof PerColumnSecondaryIndex)
>>>>>>> YOURS
((PerColumnSecondaryIndex) index).insert(key.key, column);
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ReducingKeyIterator;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.utils.FBUtilities;
/**
* Manages all the indexes associated with a given CFS
* Different types of indexes can be created across the same CF
*/
public class SecondaryIndexManager
{
private static final Logger logger = LoggerFactory.getLogger(SecondaryIndexManager.class);
public static final Updater nullUpdater = new Updater()
{
public void insert(Column column) { }
public void update(Column oldColumn, Column column) { }
public void remove(Column current) { }
public void updateRowLevelIndexes() {}
};
/**
* Organizes the indexes by column name
*/
private final ConcurrentNavigableMap<ByteBuffer, SecondaryIndex> indexesByColumn;
/**
* Keeps a single instance of a SecondaryIndex for many columns when the index type
* has isRowLevelIndex() == true
*
* This allows updates to happen to an entire row at once
*/
private final Map<Class<? extends SecondaryIndex>,SecondaryIndex> rowLevelIndexMap;
/**
* The underlying column family containing the source data for these indexes
*/
public final ColumnFamilyStore baseCfs;
public SecondaryIndexManager(ColumnFamilyStore baseCfs)
{
indexesByColumn = new ConcurrentSkipListMap<ByteBuffer, SecondaryIndex>();
rowLevelIndexMap = new HashMap<Class<? extends SecondaryIndex>, SecondaryIndex>();
this.baseCfs = baseCfs;
}
/**
* Drops and adds new indexes associated with the underlying CF
*/
public void reload()
{
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes,
// they'll need to be handled here.
Collection<ByteBuffer> indexedColumnNames = indexesByColumn.keySet();
for (ByteBuffer indexedColumn : indexedColumnNames)
{
ColumnDefinition def = baseCfs.metadata.getColumnDefinition(indexedColumn);
if (def == null || def.getIndexType() == null)
removeIndexedColumn(indexedColumn);
}
// TODO: allow all ColumnDefinition type
for (ColumnDefinition cdef : baseCfs.metadata.allColumns())
if (cdef.getIndexType() != null && !indexedColumnNames.contains(cdef.name))
addIndexedColumn(cdef);
Set<SecondaryIndex> reloadedIndexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index : indexesByColumn.values())
{
if (reloadedIndexes.add(index))
index.reload();
}
}
public Set<String> allIndexesNames()
{
Set<String> names = new HashSet<String>();
for (SecondaryIndex index : indexesByColumn.values())
names.add(index.getIndexName());
return names;
}
/**
* Does a full, blocking rebuild of the indexes specified by columns from the sstables.
* Does nothing if columns is empty.
*
* Caller must acquire and release references to the sstables used here.
*
* @param sstables the data to build from
* @param idxNames the list of columns to index, ordered by comparator
*/
public void maybeBuildSecondaryIndexes(Collection<SSTableReader> sstables, Set<String> idxNames)
{
if (idxNames.isEmpty())
return;
logger.info(String.format("Submitting index build of %s for data in %s",
idxNames, StringUtils.join(sstables, ", ")));
SecondaryIndexBuilder builder = new SecondaryIndexBuilder(baseCfs, idxNames, new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
FBUtilities.waitOnFuture(future);
flushIndexesBlocking();
logger.info("Index build of " + idxNames + " complete");
}
public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
return !indexFor(name, indexes).isEmpty();
}
public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
{
List<SecondaryIndex> matching = null;
for (SecondaryIndex index : indexes)
{
if (index.indexes(name))
{
if (matching == null)
matching = new ArrayList<SecondaryIndex>();
matching.add(index);
}
}
return matching == null ? Collections.<SecondaryIndex>emptyList() : matching;
}
public boolean indexes(Column column)
{
return indexes(column.name());
}
public boolean indexes(ByteBuffer name)
{
return indexes(name, indexesByColumn.values());
}
public List<SecondaryIndex> indexFor(ByteBuffer name)
{
return indexFor(name, indexesByColumn.values());
}
/**
* @return true if the indexes can handle the clause.
*/
public boolean hasIndexFor(List<IndexExpression> clause)
{
if (clause == null || clause.isEmpty())
return false;
// It doesn't seem a clause can have multiple searchers, but since
// getIndexSearchersForQuery returns a list ...
List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(clause);
if (searchers.isEmpty())
return false;
for (SecondaryIndexSearcher searcher : searchers)
if (!searcher.isIndexing(clause))
return false;
return true;
}
/**
* Removes a existing index
* @param column the indexed column to remove
*/
public void removeIndexedColumn(ByteBuffer column)
{
SecondaryIndex index = indexesByColumn.remove(column);
if (index == null)
return;
// Remove this column from from row level index map
if (index instanceof PerRowSecondaryIndex)
{
index.removeColumnDef(column);
//If now columns left on this CF remove from row level lookup
if (index.getColumnDefs().isEmpty())
rowLevelIndexMap.remove(index.getClass());
}
index.removeIndex(column);
SystemTable.setIndexRemoved(baseCfs.metadata.ksName, index.getNameForSystemTable(column));
}
/**
* Adds and builds a index for a column
* @param cdef the column definition holding the index data
* @return a future which the caller can optionally block on signaling the index is built
*/
public synchronized Future<?> addIndexedColumn(ColumnDefinition cdef)
{
if (indexesByColumn.containsKey(cdef.name))
return null;
assert cdef.getIndexType() != null;
SecondaryIndex index;
try
{
index = SecondaryIndex.createInstance(baseCfs, cdef);
}
catch (ConfigurationException e)
{
throw new RuntimeException(e);
}
// Keep a single instance of the index per-cf for row level indexes
// since we want all columns to be under the index
if (index instanceof PerRowSecondaryIndex)
{
SecondaryIndex currentIndex = rowLevelIndexMap.get(index.getClass());
if (currentIndex == null)
{
rowLevelIndexMap.put(index.getClass(), index);
index.init();
}
else
{
index = currentIndex;
index.addColumnDef(cdef);
logger.info("Creating new index : {}",cdef);
}
}
else
{
// TODO: We sould do better than throw a RuntimeException
if (cdef.getIndexType() == IndexType.CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex)
throw new RuntimeException("Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed");
index.init();
}
// link in indexedColumns. this means that writes will add new data to
// the index immediately,
// so we don't have to lock everything while we do the build. it's up to
// the operator to wait
// until the index is actually built before using in queries.
indexesByColumn.put(cdef.name, index);
// if we're just linking in the index to indexedColumns on an
// already-built index post-restart, we're done
if (index.isIndexBuilt(cdef.name))
return null;
return index.buildIndexAsync();
}
/**
*
* @param column the name of indexes column
* @return the index
*/
public SecondaryIndex getIndexForColumn(ByteBuffer column)
{
return indexesByColumn.get(column);
}
/**
* Remove the index
*/
public void invalidate()
{
for (SecondaryIndex index : indexesByColumn.values())
index.invalidate();
}
/**
* Flush all indexes to disk
*/
public void flushIndexesBlocking()
{
for (SecondaryIndex index : indexesByColumn.values())
index.forceBlockingFlush();
}
/**
* @return all built indexes (ready to use)
*/
public List<String> getBuiltIndexes()
{
List<String> indexList = new ArrayList<String>();
for (Map.Entry<ByteBuffer, SecondaryIndex> entry : indexesByColumn.entrySet())
{
SecondaryIndex index = entry.getValue();
if (index.isIndexBuilt(entry.getKey()))
{
indexList.add(entry.getValue().getIndexName());
}
}
return indexList;
}
/**
* @return all CFS from indexes which use a backing CFS internally (KEYS)
*/
public Collection<ColumnFamilyStore> getIndexesBackedByCfs()
{
ArrayList<ColumnFamilyStore> cfsList = new ArrayList<ColumnFamilyStore>();
for (SecondaryIndex index: indexesByColumn.values())
{
ColumnFamilyStore cfs = index.getIndexCfs();
if (cfs != null)
cfsList.add(cfs);
}
return cfsList;
}
/**
* @return all indexes which do *not* use a backing CFS internally
*/
public Collection<SecondaryIndex> getIndexesNotBackedByCfs()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
for (SecondaryIndex index: indexesByColumn.values())
if (index.getIndexCfs() == null)
indexes.add(index);
return indexes;
}
/**
* @return all of the secondary indexes without distinction to the (non-)backed by secondary ColumnFamilyStore.
*/
public Collection<SecondaryIndex> getIndexes()
{
// we use identity map because per row indexes use same instance across many columns
Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
indexes.addAll(indexesByColumn.values());
return indexes;
}
/**
* @return total current ram size of all indexes
*/
public long getTotalLiveSize()
{
long total = 0;
for (SecondaryIndex index : getIndexes())
total += index.getLiveSize();
return total;
}
/**
* When building an index against existing data, add the given row to the index
*
* @param key the row key
* @param cf the current rows data
*/
public void indexRow(ByteBuffer key, ColumnFamily cf)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> appliedRowLevelIndexes = null;
for (SecondaryIndex index : indexesByColumn.values())
{
if (index instanceof PerRowSecondaryIndex)
{
if (appliedRowLevelIndexes == null)
appliedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (appliedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).index(key, cf);
}
else
{
for (Column column : cf)
{
if (index.indexes(column.name()))
((PerColumnSecondaryIndex) index).insert(key, column);
}
}
}
}
/**
* Delete all columns from all indexes for this row. For when cleanup rips a row out entirely.
*
* @param key the row key
* @param indexedColumnsInRow all column names in row
*/
public void deleteFromIndexes(DecoratedKey key, List<Column> indexedColumnsInRow)
{
// Update entire row only once per row level index
Set<Class<? extends SecondaryIndex>> cleanedRowLevelIndexes = null;
for (Column column : indexedColumnsInRow)
{
SecondaryIndex index = indexesByColumn.get(column.name());
if (index == null)
continue;
if (index instanceof PerRowSecondaryIndex)
{
if (cleanedRowLevelIndexes == null)
cleanedRowLevelIndexes = new HashSet<Class<? extends SecondaryIndex>>();
if (cleanedRowLevelIndexes.add(index.getClass()))
((PerRowSecondaryIndex)index).delete(key);
}
else
{
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
}
/**
* This helper acts as a closure around the indexManager
* and row key to ensure that down in Memtable's ColumnFamily implementation, the index
* can get updated. Note: only a CF backed by AtomicSortedColumns implements this behaviour
* fully, other types simply ignore the index updater.
*/
public Updater updaterFor(final DecoratedKey key)
{
return (indexesByColumn.isEmpty() && rowLevelIndexMap.isEmpty())
? nullUpdater
: new StandardUpdater(key);
}
/**
* Get a list of IndexSearchers from the union of expression index types
* @param clause the query clause
* @return the searchers needed to query the index
*/
private List<SecondaryIndexSearcher> getIndexSearchersForQuery(List<IndexExpression> clause)
{
Map<String, Set<ByteBuffer>> groupByIndexType = new HashMap<String, Set<ByteBuffer>>();
//Group columns by type
for (IndexExpression ix : clause)
{
SecondaryIndex index = getIndexForColumn(ix.column_name);
if (index == null)
continue;
Set<ByteBuffer> columns = groupByIndexType.get(index.getClass().getCanonicalName());
if (columns == null)
{
columns = new HashSet<ByteBuffer>();
groupByIndexType.put(index.getClass().getCanonicalName(), columns);
}
columns.add(ix.column_name);
}
List<SecondaryIndexSearcher> indexSearchers = new ArrayList<SecondaryIndexSearcher>(groupByIndexType.size());
//create searcher per type
for (Set<ByteBuffer> column : groupByIndexType.values())
indexSearchers.add(getIndexForColumn(column.iterator().next()).createSecondaryIndexSearcher(column));
return indexSearchers;
}
/**
* Performs a search across a number of column indexes
* TODO: add support for querying across index types
*
* @param clause the index query clause
* @param range the row range to restrict to
* @param dataFilter the column range to restrict to
* @return found indexed rows
*/
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IDiskAtomFilter dataFilter, boolean countCQL3Rows)
{
List<SecondaryIndexSearcher> indexSearchers = getIndexSearchersForQuery(clause);
if (indexSearchers.isEmpty())
return Collections.emptyList();
//We currently don't support searching across multiple index types
if (indexSearchers.size() > 1)
throw new RuntimeException("Unable to search across multiple secondary index types");
return indexSearchers.get(0).search(clause, range, maxResults, dataFilter, countCQL3Rows);
}
public Collection<SecondaryIndex> getIndexesByNames(Set<String> idxNames)
{
List<SecondaryIndex> result = new ArrayList<SecondaryIndex>();
for (SecondaryIndex index : indexesByColumn.values())
{
if (idxNames.contains(index.getIndexName()))
result.add(index);
}
return result;
}
public void setIndexBuilt(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexBuilt();
}
public void setIndexRemoved(Set<String> idxNames)
{
for (SecondaryIndex index : getIndexesByNames(idxNames))
index.setIndexRemoved();
}
public boolean validate(Column column)
{
SecondaryIndex index = getIndexForColumn(column.name());
return index != null ? index.validate(column) : true;
}
public static interface Updater
{
/** called when constructing the index against pre-existing data */
public void insert(Column column);
/** called when updating the index from a memtable */
public void update(Column oldColumn, Column column);
/** called when lazy-updating the index during compaction (CASSANDRA-2897) */
public void remove(Column current);
/** called after memtable updates are complete (CASSANDRA-5397) */
public void updateRowLevelIndexes();
}
private class StandardUpdater implements Updater
{
private final DecoratedKey key;
public StandardUpdater(DecoratedKey key)
{
this.key = key;
}
public void insert(Column column)
{
if (column.isMarkedForDelete())
return;
<<<<<<< MINE
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
public void update(IColumn oldColumn, IColumn column)
{
if (oldColumn.equals(column))
return;
SecondaryIndex index = indexFor(column.name());
if (index == null)
return;
if (index instanceof PerColumnSecondaryIndex)
=======
for (SecondaryIndex index : indexFor(column.name()))
>>>>>>> YOURS
{
<<<<<<< MINE
// insert the new value before removing the old one, so we never have a period
// where the row is invisible to both queries (the opposite seems preferable); see CASSANDRA-5540
if (!column.isMarkedForDelete())
=======
if (index instanceof PerColumnSecondaryIndex)
>>>>>>> YOURS
((PerColumnSecondaryIndex) index).insert(key.key, column);
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
}
}
public void update(Column oldColumn, Column column)
{
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
{
((PerColumnSecondaryIndex) index).delete(key.key, oldColumn);
if (!column.isMarkedForDelete())
((PerColumnSecondaryIndex) index).insert(key.key, column);
}
}
}
public void remove(Column column)
{
if (column.isMarkedForDelete())
return;
for (SecondaryIndex index : indexFor(column.name()))
{
if (index instanceof PerColumnSecondaryIndex)
((PerColumnSecondaryIndex) index).delete(key.key, column);
}
}
public void updateRowLevelIndexes()
{
for (SecondaryIndex index : rowLevelIndexMap.values())
((PerRowSecondaryIndex) index).index(key.key);
}
}
}
Diff Result
No diff
Case 16 - java_cassandra.rev_e1b10_769fe..SSTableNamesIterator.java
Base
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOError;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements IColumnIterator
{
private static Logger logger = LoggerFactory.getLogger(SSTableNamesIterator.class);
private ColumnFamily cf;
private Iterator<IColumn> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.columns = columns;
this.key = key;
FileDataInput file = sstable.getFileDataInput(key);
if (file == null)
return;
try
{
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
read(sstable, file);
}
catch (IOException e)
{
sstable.markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(file);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
private void read(SSTableReader sstable, FileDataInput file)
throws IOException
{
Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);
List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer();
try {
cf = serializer.deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);
} catch (Exception e) {
throw new IOException
(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList == null)
readSimpleColumns(file, columns, filteredColumnNames);
else
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList);
// create an iterator view of the columns we read
iter = cf.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames) throws IOException
{
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
if (n++ > filteredColumnNames.size())
break;
}
}
}
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList)
throws IOException
{
file.readInt(); // column count
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
if (comparator.compare(name, indexInfo.firstName) < 0)
continue;
ranges.add(indexInfo);
}
FileMark mark = file.mark();
for (IndexHelper.IndexInfo indexInfo : ranges)
{
file.reset(mark);
FileUtils.skipBytesFully(file, indexInfo.offset);
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
// we check vs the original Set, not the filtered List, for efficiency
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
}
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected IColumn computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOError;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements IColumnIterator
{
private static Logger logger = LoggerFactory.getLogger(SSTableNamesIterator.class);
private ColumnFamily cf;
private Iterator<IColumn> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.columns = columns;
this.key = key;
FileDataInput file = sstable.getFileDataInput(key);
if (file == null)
return;
try
{
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
read(sstable, file);
}
catch (IOException e)
{
sstable.markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(file);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
private void read(SSTableReader sstable, FileDataInput file)
throws IOException
{
Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);
List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer();
try {
cf = serializer.deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);
} catch (Exception e) {
throw new IOException
(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList == null)
readSimpleColumns(file, columns, filteredColumnNames);
else
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList);
// create an iterator view of the columns we read
iter = cf.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames) throws IOException
{
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
if (n++ > filteredColumnNames.size())
break;
}
}
}
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList)
throws IOException
{
file.readInt(); // column count
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
if (comparator.compare(name, indexInfo.firstName) < 0)
continue;
ranges.add(indexInfo);
}
FileMark mark = file.mark();
for (IndexHelper.IndexInfo indexInfo : ranges)
{
file.reset(mark);
FileUtils.skipBytesFully(file, indexInfo.offset);
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
// we check vs the original Set, not the filtered List, for efficiency
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
}
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected IColumn computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
Left
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOError;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements ISSTableColumnIterator
{
private static Logger logger = LoggerFactory.getLogger(SSTableNamesIterator.class);
private ColumnFamily cf;
private Iterator<IColumn> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
private final SSTableReader sstable;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
FileDataInput file = sstable.getFileDataInput(key);
if (file == null)
return;
try
{
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
read(sstable, file);
}
catch (IOException e)
{
sstable.markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(file);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
private void read(SSTableReader sstable, FileDataInput file)
throws IOException
{
Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);
List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer();
try {
cf = serializer.deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);
} catch (Exception e) {
throw new IOException
(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList == null)
readSimpleColumns(file, columns, filteredColumnNames);
else
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList);
// create an iterator view of the columns we read
iter = cf.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames) throws IOException
{
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
if (n++ > filteredColumnNames.size())
break;
}
}
}
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList)
throws IOException
{
file.readInt(); // column count
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
if (comparator.compare(name, indexInfo.firstName) < 0)
continue;
ranges.add(indexInfo);
}
FileMark mark = file.mark();
for (IndexHelper.IndexInfo indexInfo : ranges)
{
file.reset(mark);
FileUtils.skipBytesFully(file, indexInfo.offset);
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
// we check vs the original Set, not the filtered List, for efficiency
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
}
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected IColumn computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOError;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements ISSTableColumnIterator
{
private static Logger logger = LoggerFactory.getLogger(SSTableNamesIterator.class);
private ColumnFamily cf;
private Iterator<IColumn> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
private final SSTableReader sstable;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
FileDataInput file = sstable.getFileDataInput(key);
if (file == null)
return;
try
{
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
read(sstable, file);
}
catch (IOException e)
{
sstable.markSuspect();
throw new IOError(e);
}
finally
{
FileUtils.closeQuietly(file);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
private void read(SSTableReader sstable, FileDataInput file)
throws IOException
{
Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);
List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer();
try {
cf = serializer.deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);
} catch (Exception e) {
throw new IOException
(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList == null)
readSimpleColumns(file, columns, filteredColumnNames);
else
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList);
// create an iterator view of the columns we read
iter = cf.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames) throws IOException
{
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
if (n++ > filteredColumnNames.size())
break;
}
}
}
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList)
throws IOException
{
file.readInt(); // column count
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
if (comparator.compare(name, indexInfo.firstName) < 0)
continue;
ranges.add(indexInfo);
}
FileMark mark = file.mark();
for (IndexHelper.IndexInfo indexInfo : ranges)
{
file.reset(mark);
FileUtils.skipBytesFully(file, indexInfo.offset);
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
{
IColumn column = cf.getColumnSerializer().deserialize(file);
// we check vs the original Set, not the filtered List, for efficiency
if (columnNames.contains(column.name()))
{
cf.addColumn(column);
}
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected IColumn computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
Right
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.columniterator;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements OnDiskAtomIterator
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.columniterator;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
/**
 * Iterator over the on-disk atoms of a single row in one SSTable, restricted to an
 * explicit set of requested column names. All qualifying atoms are read eagerly in
 * the constructor; iteration afterwards just walks the in-memory result list.
 */
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements OnDiskAtomIterator
{
    // Container carrying row-level deletion info; atoms themselves are returned via iter.
    private ColumnFamily cf;
    private final SSTableReader sstable;
    // Set only when we opened the data file ourselves (so we know to close it).
    private FileDataInput fileToClose;
    // Atoms read in the constructor; null means "nothing read" and computeNext ends immediately.
    private Iterator<OnDiskAtom> iter;
    public final SortedSet<ByteBuffer> columns;
    public final DecoratedKey key;

    /**
     * Looks {@code key} up in the sstable index and eagerly reads the requested columns.
     * Any file handle opened by this constructor is closed before it returns.
     *
     * @throws CorruptSSTableException if reading fails; the sstable is marked suspect first.
     */
    public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
    {
        assert columns != null;
        this.sstable = sstable;
        this.columns = columns;
        this.key = key;

        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
        if (indexEntry == null)
            return;

        try
        {
            read(sstable, null, indexEntry);
        }
        catch (IOException e)
        {
            sstable.markSuspect();
            throw new CorruptSSTableException(e, sstable.getFilename());
        }
        finally
        {
            // Only close a handle we opened ourselves; a caller-supplied file is the caller's to manage.
            if (fileToClose != null)
                FileUtils.closeQuietly(fileToClose);
        }
    }

    /**
     * Variant that reuses a caller-provided file handle and a pre-resolved index entry.
     * The caller retains ownership of {@code file} (it is not closed here).
     */
    public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
    {
        assert columns != null;
        this.sstable = sstable;
        this.columns = columns;
        this.key = key;

        try
        {
            read(sstable, file, indexEntry);
        }
        catch (IOException e)
        {
            sstable.markSuspect();
            throw new CorruptSSTableException(e, sstable.getFilename());
        }
    }

    /** Opens the data file at {@code position}, remembering the handle so the constructor can close it. */
    private FileDataInput createFileDataInput(long position)
    {
        fileToClose = sstable.getFileDataInput(position);
        return fileToClose;
    }

    /**
     * Reads the requested columns for this row into an in-memory list and installs
     * {@link #iter} over it. Handles both the legacy layout (bloom filter + column
     * index serialized inline in the data file) and the promoted-index layout where
     * they live in the index entry.
     */
    private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
    throws IOException
    {
        Filter bf;
        List<IndexHelper.IndexInfo> indexList;

        // If the entry is not indexed or the index is not promoted, read from the row start
        if (!indexEntry.isIndexed())
        {
            if (file == null)
                file = createFileDataInput(indexEntry.position);
            else
                file.seek(indexEntry.position);
            DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
                                                             sstable.descriptor,
                                                             ByteBufferUtil.readWithShortLength(file));
            assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
            SSTableReader.readRowSize(file, sstable.descriptor);
        }

        if (sstable.descriptor.version.hasPromotedIndexes)
        {
            // Promoted layout: bloom filter and column index come from the index entry, not the data file.
            bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
            indexList = indexEntry.columnsIndex();
        }
        else
        {
            // Legacy layout: both are serialized inline; reading them advances the file position.
            assert file != null;
            bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
            indexList = IndexHelper.deserializeIndex(file);
        }

        if (!indexEntry.isIndexed())
        {
            // we can stop early if bloom filter says none of the columns actually exist -- but,
            // we can't stop before initializing the cf above, in case there's a relevant tombstone
            ColumnFamilySerializer serializer = ColumnFamily.serializer;
            try
            {
                cf = ColumnFamily.create(sstable.metadata);
                cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
            }
            catch (Exception e)
            {
                throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
            }
        }
        else
        {
            cf = ColumnFamily.create(sstable.metadata);
            cf.delete(indexEntry.deletionInfo());
        }

        List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
        // Drop names the bloom filter rules out; filters have no false negatives, so this is safe.
        List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
        for (ByteBuffer name : columns)
        {
            if (bf == null || bf.isPresent(name))
            {
                filteredColumnNames.add(name);
            }
        }
        if (filteredColumnNames.isEmpty())
            return;

        if (indexList.isEmpty())
        {
            readSimpleColumns(file, columns, filteredColumnNames, result);
        }
        else
        {
            long basePosition;
            if (sstable.descriptor.version.hasPromotedIndexes)
            {
                // Index offsets are relative to the row start recorded in the index entry.
                basePosition = indexEntry.position;
            }
            else
            {
                assert file != null;
                file.readInt(); // column count
                basePosition = file.getFilePointer();
            }
            readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
        }

        // create an iterator view of the columns we read
        iter = result.iterator();
    }

    /**
     * Linear scan for rows without a column index: deserialize every atom and keep
     * the requested columns plus all non-IColumn atoms (e.g. range tombstones).
     */
    private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
    {
        OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
        int columns = file.readInt();
        int n = 0;
        for (int i = 0; i < columns; i++)
        {
            OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
            if (column instanceof IColumn)
            {
                if (columnNames.contains(column.name()))
                {
                    result.add(column);
                    // NOTE(review): matches never exceed filteredColumnNames.size() (bloom filters
                    // have no false negatives), so this early-exit looks unreachable -- confirm.
                    if (n++ > filteredColumnNames.size())
                        break;
                }
            }
            else
            {
                result.add(column);
            }
        }
    }

    /**
     * Indexed read path: compute the distinct index blocks that may contain the
     * requested names, then seek to and scan each block exactly once.
     */
    private void readIndexedColumns(CFMetaData metadata,
                                    FileDataInput file,
                                    SortedSet<ByteBuffer> columnNames,
                                    List<ByteBuffer> filteredColumnNames,
                                    List<IndexHelper.IndexInfo> indexList,
                                    long basePosition,
                                    List<OnDiskAtom> result)
    throws IOException
    {
        /* get the various column ranges we have to read */
        AbstractType<?> comparator = metadata.comparator;
        List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
        int lastIndexIdx = -1;
        for (ByteBuffer name : filteredColumnNames)
        {
            int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
            if (index == indexList.size())
                continue;
            IndexHelper.IndexInfo indexInfo = indexList.get(index);
            // Check the index block does contain the column names and that we haven't inserted this block yet.
            if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
                continue;
            ranges.add(indexInfo);
            lastIndexIdx = index;
        }
        if (ranges.isEmpty())
            return;

        for (IndexHelper.IndexInfo indexInfo : ranges)
        {
            long positionToSeek = basePosition + indexInfo.offset;

            // With new promoted indexes, our first seek in the data file will happen at that point.
            if (file == null)
                file = createFileDataInput(positionToSeek);

            OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
            file.seek(positionToSeek);
            FileMark mark = file.mark();

            // TODO only completely deserialize columns we are interested in
            while (file.bytesPastMark(mark) < indexInfo.width)
            {
                OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
                // we check vs the original Set, not the filtered List, for efficiency
                if (!(column instanceof IColumn) || columnNames.contains(column.name()))
                    result.add(column);
            }
        }
    }

    /** @return the partition key this iterator was created for. */
    public DecoratedKey getKey()
    {
        return key;
    }

    /** @return the column family container holding row-level deletion info (may be null if the key was absent). */
    public ColumnFamily getColumnFamily()
    {
        return cf;
    }

    protected OnDiskAtom computeNext()
    {
        if (iter == null || !iter.hasNext())
            return endOfData();
        return iter.next();
    }
}
MergeMethods
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
KeepBothMethods
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
/**
 * Yields the next atom read for the requested names; signals end-of-data when the
 * row was absent (iter was never initialized) or all read atoms have been returned.
 */
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
// Safe
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
<<<<<<< MINE
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
=======
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
>>>>>>> YOURS
public SSTableReader getSStable()
{
return sstable;
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
package org.apache.cassandra.db.columniterator;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator <<<<<<< MINE
implements ISSTableColumnIterator
=======
implements OnDiskAtomIterator
>>>>>>> YOURS
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
<<<<<<< MINE
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
=======
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
{
read(sstable, file);
}
catch (IOException ioe)
{
sstable.markSuspect();
throw new IOError(ioe);
}
}
>>>>>>> YOURS
public SSTableReader getSStable()
{
return sstable;
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
int columns = file.readInt();
int n = 0;
for (int i = 0; i < columns; i++)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
if (column instanceof IColumn)
{
if (columnNames.contains(column.name()))
{
result.add(column);
if (n++ > filteredColumnNames.size())
break;
}
}
else
{
result.add(column);
}
}
}
private void readIndexedColumns(CFMetaData metadata,
FileDataInput file,
SortedSet<ByteBuffer> columnNames,
List<ByteBuffer> filteredColumnNames,
List<IndexHelper.IndexInfo> indexList,
long basePosition,
List<OnDiskAtom> result)
throws IOException
{
/* get the various column ranges we have to read */
AbstractType<?> comparator = metadata.comparator;
List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
int lastIndexIdx = -1;
for (ByteBuffer name : filteredColumnNames)
{
int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// Check the index block does contain the column names and that we haven't inserted this block yet.
if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
continue;
ranges.add(indexInfo);
lastIndexIdx = index;
}
if (ranges.isEmpty())
return;
for (IndexHelper.IndexInfo indexInfo : ranges)
{
long positionToSeek = basePosition + indexInfo.offset;
// With new promoted indexes, our first seek in the data file will happen at that point.
if (file == null)
file = createFileDataInput(positionToSeek);
OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
file.seek(positionToSeek);
FileMark mark = file.mark();
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.width)
{
OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
// we check vs the original Set, not the filtered List, for efficiency
if (!(column instanceof IColumn) || columnNames.contains(column.name()))
result.add(column);
}
}
}
public DecoratedKey getKey()
{
return key;
}
public ColumnFamily getColumnFamily()
{
return cf;
}
protected OnDiskAtom computeNext()
{
if (iter == null || !iter.hasNext())
return endOfData();
return iter.next();
}
}
// Unstructured
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.columniterator;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements ISSTableColumnIterator
{
private ColumnFamily cf;
private final SSTableReader sstable;
private FileDataInput fileToClose;
private Iterator<OnDiskAtom> iter;
public final SortedSet<ByteBuffer> columns;
public final DecoratedKey key;
// (merge residue: duplicate declaration of 'sstable' removed; the field is declared once above)
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
if (indexEntry == null)
return;
try
{
read(sstable, null, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
finally
{
if (fileToClose != null)
FileUtils.closeQuietly(fileToClose);
}
}
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
assert columns != null;
this.sstable = sstable;
this.columns = columns;
this.key = key;
try
{
read(sstable, file, indexEntry);
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, sstable.getFilename());
}
}
private FileDataInput createFileDataInput(long position)
{
fileToClose = sstable.getFileDataInput(position);
return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
if (!indexEntry.isIndexed())
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
/**
 * Reads a non-indexed row sequentially, keeping atoms whose name is in
 * {@code columnNames}. Atoms that are not columns (e.g. range tombstones)
 * are always kept.
 *
 * @param file                input positioned at the serialized column count
 * @param columnNames         requested names; membership is tested against this full set
 * @param filteredColumnNames bloom-filter-passing names; only its size is used, as an early-exit bound
 * @param result              output list matching atoms are appended to
 */
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
    OnDiskAtom.Serializer serializer = cf.getOnDiskSerializer();
    int columnCount = file.readInt();
    int matched = 0;
    for (int read = 0; read < columnCount; read++)
    {
        OnDiskAtom atom = serializer.deserializeFromSSTable(file, sstable.descriptor.version);
        if (!(atom instanceof IColumn))
        {
            // Non-column atoms (tombstones) are unconditionally retained.
            result.add(atom);
            continue;
        }
        if (!columnNames.contains(atom.name()))
            continue;
        result.add(atom);
        // Same early-exit bound as the original: stop once the match count
        // has gone past the filtered-name count.
        if (matched++ > filteredColumnNames.size())
            break;
    }
}
/**
 * Reads the requested columns from an indexed row: first selects the index
 * blocks that may contain a requested name, then seeks to each selected block
 * and deserializes every atom within its width.
 *
 * @param metadata            table metadata; supplies the column name comparator
 * @param file                data input, may be null; opened lazily at the first seek position
 * @param columnNames         requested names (full set, used for membership tests)
 * @param filteredColumnNames names that passed the bloom filter; drives index-block selection
 * @param indexList           deserialized column index entries for this row
 * @param basePosition        file offset that the index entries' offsets are relative to
 * @param result              output list matching atoms are appended to
 */
private void readIndexedColumns(CFMetaData metadata,
                                FileDataInput file,
                                SortedSet<ByteBuffer> columnNames,
                                List<ByteBuffer> filteredColumnNames,
                                List<IndexHelper.IndexInfo> indexList,
                                long basePosition,
                                List<OnDiskAtom> result)
throws IOException
{
    /* get the various column ranges we have to read */
    AbstractType<?> comparator = metadata.comparator;
    List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
    int lastIndexIdx = -1;
    for (ByteBuffer name : filteredColumnNames)
    {
        int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
        if (index == indexList.size())
            continue; // name sorts after the last block; nothing on disk for it
        IndexHelper.IndexInfo indexInfo = indexList.get(index);
        // Check the index block does contain the column names and that we haven't inserted this block yet.
        if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
            continue;
        ranges.add(indexInfo);
        lastIndexIdx = index;
    }
    if (ranges.isEmpty())
        return;
    for (IndexHelper.IndexInfo indexInfo : ranges)
    {
        long positionToSeek = basePosition + indexInfo.offset;
        // With new promoted indexes, our first seek in the data file will happen at that point.
        if (file == null)
            file = createFileDataInput(positionToSeek);
        OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
        file.seek(positionToSeek);
        FileMark mark = file.mark();
        // TODO only completely deserialize columns we are interested in
        while (file.bytesPastMark(mark) < indexInfo.width)
        {
            OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
            // we check vs the original Set, not the filtered List, for efficiency
            if (!(column instanceof IColumn) || columnNames.contains(column.name()))
                result.add(column);
        }
    }
}
/** @return the partition key this iterator was created for. */
public DecoratedKey getKey()
{
    return key;
}

/** @return the column family carrying deletion info and read columns; null when nothing was read. */
public ColumnFamily getColumnFamily()
{
    return cf;
}
/**
 * Returns the next pre-read atom, or signals exhaustion when nothing was
 * read ({@code iter == null}) or the read atoms are drained.
 */
protected OnDiskAtom computeNext()
{
    return (iter != null && iter.hasNext()) ? iter.next() : endOfData();
}
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.columniterator;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilySerializer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DeletionInfo;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.IndexHelper;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.util.FileDataInput;
import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.Filter;
<<<<<<< MINE
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements ISSTableColumnIterator
=======
public class SSTableNamesIterator extends SimpleAbstractColumnIterator implements OnDiskAtomIterator
>>>>>>> YOURS
{
// Column family carrying this row's deletion info plus the columns read.
private ColumnFamily cf;
// SSTable the row is read from. NOTE: was declared twice in the merged
// source (a compile error); the duplicate declaration has been removed.
private final SSTableReader sstable;
// File handle opened by createFileDataInput(), closed by the constructor.
private FileDataInput fileToClose;
// Iterator over the atoms read by read(); null until read() completes.
private Iterator<OnDiskAtom> iter;
// Requested column names, sorted by the table comparator.
public final SortedSet<ByteBuffer> columns;
// Partition key being read.
public final DecoratedKey key;
/**
 * Creates an iterator that looks the key up in the sstable's index and, when
 * found, reads the requested columns. Opens (and always closes) its own data
 * file handle.
 *
 * Fix: the merged source assigned {@code this.sstable} twice, which is a
 * compile error for a final field; the duplicate assignment is removed.
 *
 * @param sstable sstable to read from
 * @param key     partition key to look up
 * @param columns requested column names; must not be null
 */
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
    assert columns != null;
    this.sstable = sstable;
    this.columns = columns;
    this.key = key;
    RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
    if (indexEntry == null)
        return; // key not present in this sstable; iterator stays empty
    try
    {
        read(sstable, null, indexEntry);
    }
    catch (IOException e)
    {
        // Read failure here likely means on-disk corruption; flag the sstable.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
    finally
    {
        if (fileToClose != null)
            FileUtils.closeQuietly(fileToClose);
    }
}
/**
 * Creates an iterator over an externally managed, already-open data file.
 * Unlike the key-lookup constructor, this one does not close the file.
 *
 * @param sstable    sstable the row lives in
 * @param file       shared data file handle supplied by the caller
 * @param key        partition key being read
 * @param columns    requested column names; must not be null
 * @param indexEntry index entry locating the row in the data file
 */
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
    assert columns != null;
    this.sstable = sstable;
    this.columns = columns;
    this.key = key;
    try
    {
        read(sstable, file, indexEntry);
    }
    catch (IOException e)
    {
        // Read failure here likely means on-disk corruption; flag the sstable.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}
/**
 * Opens a new data-file input positioned at {@code position} and remembers it
 * in {@link #fileToClose} so the owning constructor can close it when done.
 */
private FileDataInput createFileDataInput(long position)
{
    fileToClose = sstable.getFileDataInput(position);
    return fileToClose;
}
private void read(SSTableReader sstable, FileDataInput file, RowIndexEntry indexEntry)
throws IOException
{
Filter bf;
List<IndexHelper.IndexInfo> indexList;
// If the entry is not indexed or the index is not promoted, read from the row start
if (!indexEntry.isIndexed())
{
if (file == null)
file = createFileDataInput(indexEntry.position);
else
file.seek(indexEntry.position);
DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner,
sstable.descriptor,
ByteBufferUtil.readWithShortLength(file));
assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath());
SSTableReader.readRowSize(file, sstable.descriptor);
}
if (sstable.descriptor.version.hasPromotedIndexes)
{
bf = indexEntry.isIndexed() ? indexEntry.bloomFilter() : null;
indexList = indexEntry.columnsIndex();
}
else
{
assert file != null;
bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.version.filterType);
indexList = IndexHelper.deserializeIndex(file);
}
<<<<<<< MINE
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns)
{
this.sstable = sstable;
assert columns != null;
this.columns = columns;
this.key = key;
try
=======
if (!indexEntry.isIndexed())
>>>>>>> YOURS
{
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer;
try
{
cf = ColumnFamily.create(sstable.metadata);
cf.delete(DeletionInfo.serializer().deserializeFromSSTable(file, sstable.descriptor.version));
}
catch (Exception e)
{
throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
}
}
else
{
<<<<<<< MINE
sstable.markSuspect();
throw new IOError(ioe);
}
}
public SSTableReader getSStable()
{
return sstable;
}
private void read(SSTableReader sstable, FileDataInput file)
throws IOException
{
Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);
List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);
// we can stop early if bloom filter says none of the columns actually exist -- but,
// we can't stop before initializing the cf above, in case there's a relevant tombstone
ColumnFamilySerializer serializer = ColumnFamily.serializer();
try {
cf = serializer.deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);
} catch (Exception e) {
throw new IOException
(serializer + " failed to deserialize " + sstable.getColumnFamilyName() + " with " + sstable.metadata + " from " + file, e);
=======
cf = ColumnFamily.create(sstable.metadata);
cf.delete(indexEntry.deletionInfo());
>>>>>>> YOURS
}
List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
List<ByteBuffer> filteredColumnNames = new ArrayList<ByteBuffer>(columns.size());
for (ByteBuffer name : columns)
{
if (bf == null || bf.isPresent(name))
{
filteredColumnNames.add(name);
}
}
if (filteredColumnNames.isEmpty())
return;
if (indexList.isEmpty())
{
readSimpleColumns(file, columns, filteredColumnNames, result);
}
else
{
long basePosition;
if (sstable.descriptor.version.hasPromotedIndexes)
{
basePosition = indexEntry.position;
}
else
{
assert file != null;
file.readInt(); // column count
basePosition = file.getFilePointer();
}
readIndexedColumns(sstable.metadata, file, columns, filteredColumnNames, indexList, basePosition, result);
}
// create an iterator view of the columns we read
iter = result.iterator();
}
/**
 * Reads a non-indexed row sequentially, keeping atoms whose name is in
 * {@code columnNames}. Atoms that are not columns (e.g. tombstones) are
 * always kept.
 *
 * @param file                input positioned at the serialized column count
 * @param columnNames         requested names; membership is tested against this full set
 * @param filteredColumnNames bloom-filter-passing names; only its size is used, as an early-exit bound
 * @param result              output list matching atoms are appended to
 */
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<OnDiskAtom> result) throws IOException
{
    OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
    int columns = file.readInt();
    int n = 0;
    for (int i = 0; i < columns; i++)
    {
        OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
        if (column instanceof IColumn)
        {
            if (columnNames.contains(column.name()))
            {
                result.add(column);
                // NOTE(review): this breaks only once n exceeds the filtered
                // size by one -- looks like an off-by-one early-exit bound;
                // confirm intent before changing.
                if (n++ > filteredColumnNames.size())
                    break;
            }
        }
        else
        {
            result.add(column);
        }
    }
}
/**
 * Reads the requested columns from an indexed row: first selects the index
 * blocks that may contain a requested name, then seeks to each selected block
 * and deserializes every atom within its width.
 *
 * @param metadata            table metadata; supplies the column name comparator
 * @param file                data input, may be null; opened lazily at the first seek position
 * @param columnNames         requested names (full set, used for membership tests)
 * @param filteredColumnNames names that passed the bloom filter; drives index-block selection
 * @param indexList           deserialized column index entries for this row
 * @param basePosition        file offset that the index entries' offsets are relative to
 * @param result              output list matching atoms are appended to
 */
private void readIndexedColumns(CFMetaData metadata,
                                FileDataInput file,
                                SortedSet<ByteBuffer> columnNames,
                                List<ByteBuffer> filteredColumnNames,
                                List<IndexHelper.IndexInfo> indexList,
                                long basePosition,
                                List<OnDiskAtom> result)
throws IOException
{
    /* get the various column ranges we have to read */
    AbstractType<?> comparator = metadata.comparator;
    List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
    int lastIndexIdx = -1;
    for (ByteBuffer name : filteredColumnNames)
    {
        int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
        if (index == indexList.size())
            continue; // name sorts after the last block; nothing on disk for it
        IndexHelper.IndexInfo indexInfo = indexList.get(index);
        // Check the index block does contain the column names and that we haven't inserted this block yet.
        if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx)
            continue;
        ranges.add(indexInfo);
        lastIndexIdx = index;
    }
    if (ranges.isEmpty())
        return;
    for (IndexHelper.IndexInfo indexInfo : ranges)
    {
        long positionToSeek = basePosition + indexInfo.offset;
        // With new promoted indexes, our first seek in the data file will happen at that point.
        if (file == null)
            file = createFileDataInput(positionToSeek);
        OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
        file.seek(positionToSeek);
        FileMark mark = file.mark();
        // TODO only completely deserialize columns we are interested in
        while (file.bytesPastMark(mark) < indexInfo.width)
        {
            OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
            // we check vs the original Set, not the filtered List, for efficiency
            if (!(column instanceof IColumn) || columnNames.contains(column.name()))
                result.add(column);
        }
    }
}
/** @return the partition key this iterator was created for. */
public DecoratedKey getKey()
{
    return key;
}

/** @return the column family carrying deletion info and read columns; null when the key was not found. */
public ColumnFamily getColumnFamily()
{
    return cf;
}
/**
 * Returns the next pre-read atom, or signals exhaustion when nothing was
 * read ({@code iter == null}) or the read atoms are drained.
 */
protected OnDiskAtom computeNext()
{
    if (iter == null || !iter.hasNext())
        return endOfData();
    return iter.next();
}
}
Diff Result
No diff
Case 17 - java_gradle.rev_16c71_dcc4b..ExternalResourceResolver.java
Base
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.ModuleVersionIdentifier;
import org.gradle.api.artifacts.ModuleVersionSelector;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.DefaultModuleVersionIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.resolution.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
/**
 * Creates a resolver that locates module descriptors and artifacts in an
 * external-resource repository using configured ivy/artifact patterns.
 */
public ExternalResourceResolver(String name,
                                ExternalResourceRepository repository,
                                VersionLister versionLister,
                                LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
                                MetaDataParser metaDataParser,
                                ResolverStrategy resolverStrategy) {
    this.name = name;
    this.versionLister = versionLister;
    this.repository = repository;
    this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
    this.metaDataParser = metaDataParser;
    this.resolverStrategy = resolverStrategy;
}

/** @return an identifier derived from this resolver's configuration. */
public String getId() {
    return DependencyResolverIdentifier.forExternalResourceResolver(this);
}

public String getName() {
    return name;
}

public void setName(String name) {
    this.name = name;
}

/** Always false for external resolvers. */
public boolean isDynamicResolveMode() {
    return false;
}

public String toString() {
    return String.format("Repository '%s'", getName());
}

public void setRepositoryChain(RepositoryChain resolver) {
    this.repositoryChain = resolver;
}

protected ExternalResourceRepository getRepository() {
    return repository;
}

/** @return whether the backing cache treats this repository as local. */
public boolean isLocal() {
    return repositoryCacheManager.isLocal();
}
/**
 * Lists published versions of the requested module: first from metadata files
 * matched by the ivy patterns, then -- when descriptor-less modules are
 * allowed -- from artifact files matched by the artifact patterns.
 */
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
    ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
    VersionList versionList = versionLister.getVersionList(module);
    // List modules based on metadata files
    ModuleVersionArtifactMetaData metaDataArtifact = getMetaDataArtifactFor(dependency);
    listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
    // List modules with missing metadata files
    if (isAllownomd()) {
        for (ModuleVersionArtifactMetaData otherArtifact : getDefaultMetaData(dependency).getArtifacts()) {
            listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
        }
    }
    DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
    for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
        moduleVersions.add(listedVersion.getVersion());
    }
    result.listed(moduleVersions);
}

/** Visits every pattern with the given artifact, accumulating versions into {@code versionList}. */
private void listVersionsForAllPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, VersionList versionList) {
    for (String pattern : patternList) {
        ResourcePattern resourcePattern = toResourcePattern(pattern);
        versionList.visit(resourcePattern, artifact);
    }
}
public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
    resolveStaticDependency(dependency, result, createArtifactResolver());
}

/**
 * Resolves a fixed version: first via a metadata file, then -- if descriptors
 * may be missing -- by probing for the artifacts themselves. Exactly one of
 * resolved()/missing() is invoked on {@code result}.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
    MutableModuleVersionMetaData metaDataArtifactMetaData = findMetaDataArtifact(dependency, artifactResolver);
    if (metaDataArtifactMetaData != null) {
        LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
        result.resolved(metaDataArtifactMetaData, null);
        return;
    }
    if (isAllownomd()) {
        MutableModuleVersionMetaData defaultArtifactMetaData = findDefaultArtifact(dependency, artifactResolver);
        if (defaultArtifactMetaData != null) {
            LOGGER.debug("Artifact file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
            result.resolved(defaultArtifactMetaData, null);
            return;
        }
    }
    LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
    result.missing();
}

/**
 * Downloads and parses the module descriptor, or returns null when this
 * resolver has no metadata artifact or the resource does not exist. When
 * consistency checking is enabled, verifies the parsed id matches the request.
 */
protected MutableModuleVersionMetaData findMetaDataArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
    ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(dependency);
    if (artifact == null) {
        return null;
    }
    ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
    if (metaDataResource == null) {
        return null;
    }
    MutableModuleVersionMetaData moduleVersionMetaData = getArtifactMetadata(artifact, metaDataResource);
    if (isCheckconsistency()) {
        ModuleVersionSelector requested = dependency.getRequested();
        ModuleVersionIdentifier requestedId = DefaultModuleVersionIdentifier.newId(requested.getGroup(), requested.getName(), requested.getVersion());
        checkMetadataConsistency(requestedId, moduleVersionMetaData);
    }
    return moduleVersionMetaData;
}
/**
 * Downloads and caches {@code resource}, then parses it into module metadata.
 * An IOException during download is rethrown as UncheckedIOException.
 */
protected MutableModuleVersionMetaData getArtifactMetadata(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
    ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
    LocallyAvailableExternalResource cachedResource;
    try {
        cachedResource = downloadAndCacheResource(artifact, resource);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
    return processMetaData(metaData);
}

/**
 * Fallback for modules published without a descriptor: returns default
 * metadata when at least one of its artifacts exists, otherwise null.
 */
private MutableModuleVersionMetaData findDefaultArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
    MutableModuleVersionMetaData metaData = getDefaultMetaData(dependency);
    if (hasArtifacts(metaData, artifactResolver)) {
        LOGGER.debug("No meta-data file found for module '{}' in repository '{}', using default data instead.", dependency.getRequested(), getName());
        return metaData;
    }
    return null;
}

/** Builds descriptor-less default metadata for the dependency and post-processes it. */
protected MutableModuleVersionMetaData getDefaultMetaData(DependencyMetaData dependency) {
    MutableModuleVersionMetaData metaData = ModuleDescriptorAdapter.defaultForDependency(dependency);
    return processMetaData(metaData);
}

/** Post-processing applied to all metadata: marks the version as changing when it matches the changing pattern. */
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
    metaData.setChanging(isChanging(metaData.getId().getVersion()));
    return metaData;
}
/**
 * Verifies that the id parsed from a descriptor matches the requested id,
 * collecting all mismatches (group, name, version) before failing.
 *
 * @throws MetaDataParseException listing every mismatch, when any exists
 */
private void checkMetadataConsistency(ModuleVersionIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    ModuleVersionIdentifier actualId = metadata.getId();
    List<String> problems = new ArrayList<String>();
    if (!expectedId.getGroup().equals(actualId.getGroup())) {
        problems.add("bad group: expected='" + expectedId.getGroup() + "' found='" + actualId.getGroup() + "'");
    }
    if (!expectedId.getName().equals(actualId.getName())) {
        problems.add("bad module name: expected='" + expectedId.getName() + "' found='" + actualId.getName() + "'");
    }
    if (!expectedId.getVersion().equals(actualId.getVersion())) {
        problems.add("bad version: expected='" + expectedId.getVersion() + "' found='" + actualId.getVersion() + "'");
    }
    if (!problems.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                actualId, Joiner.on(SystemProperties.getLineSeparator()).join(problems)));
    }
}
@Nullable
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(DependencyMetaData dependency) {
    return getMetaDataArtifactFor(DefaultModuleVersionIdentifier.newId(dependency.getDescriptor().getDependencyRevisionId()));
}

/** Local-only variant: optional lookups that probe the repository are skipped (see resolveJavadocArtifacts). */
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
    doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}

public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
    doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}

// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
/**
 * Dispatches artifact resolution on the context type: a configuration name, or
 * a requested artifact class (metadata/javadoc/sources). Any failure is
 * reported through result.failed() rather than thrown.
 */
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
    try {
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            ConfigurationMetaData configuration = component.getConfiguration(configurationName);
            resolveConfigurationArtifacts(component, configuration, result, localOnly);
        } else {
            Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            if (artifactType == ComponentMetaDataArtifact.class) {
                resolveMetaDataArtifacts(component, result, localOnly);
            } else if (artifactType == JvmLibraryJavadocArtifact.class) {
                resolveJavadocArtifacts(component, result, localOnly);
            } else if (artifactType == JvmLibrarySourcesArtifact.class) {
                resolveSourceArtifacts(component, result, localOnly);
            } else {
                throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
            }
        }
    } catch (Exception e) {
        result.failed(new ArtifactResolveException(component.getComponentId(), e));
    }
}
/** Configuration artifacts come straight from the module metadata; no repository access needed. */
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
    result.resolved(configuration.getArtifacts());
}

/** Resolves to the descriptor artifact, when this resolver type has one. */
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
    if (artifact != null) {
        result.resolved(ImmutableSet.of(artifact));
    }
}

/** Probes the repository for a javadoc jar; skipped entirely in local-only mode. */
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (!localOnly) {
        result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
    }
}

/** Probes the repository for a sources jar; skipped entirely in local-only mode. */
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (!localOnly) {
        result.resolved(findOptionalArtifacts(module, "source", "sources"));
    }
}

/** @return the single type/classifier jar artifact if it exists in the repository, otherwise an empty set. */
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
    ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
    if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
        return ImmutableSet.of(artifact);
    }
    return Collections.emptySet();
}
/** @return the descriptor artifact for the given id, or null when this resolver type has none. */
@Nullable
protected abstract ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleVersionIdentifier moduleVersionIdentifier);

/** @return true when at least one of the module's declared artifacts exists in the repository. */
protected boolean hasArtifacts(ModuleVersionMetaData metaData, ArtifactResolver artifactResolver) {
    for (ModuleVersionArtifactMetaData artifactMetaData : metaData.getArtifacts()) {
        if (artifactResolver.artifactExists(artifactMetaData)) {
            return true;
        }
    }
    return false;
}

public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
    return createArtifactResolver().artifactExists(artifact);
}

// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
    return createArtifactResolver();
}

/** Downloads the resource through the cache manager, verifying the configured checksums on the way. */
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
    final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
    return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads a single artifact and reports resolved/notFound/failed on
 * {@code result}. Every throwable (not just IOException) becomes a failure.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
    File localFile;
    try {
        localFile = download(artifact, moduleSource);
    } catch (Throwable e) {
        result.failed(new ArtifactResolveException(artifact.getId(), e));
        return;
    }
    if (localFile != null) {
        result.resolved(localFile);
    } else {
        result.notFound(artifact.getId());
    }
}

protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
    return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}

/** @return the locally cached file for the artifact, or null when the artifact does not exist remotely. */
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
    ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
    if (artifactResource == null) {
        return null;
    }
    return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}

/** Builds a resolver over the current pattern lists. */
protected ArtifactResolver createArtifactResolver() {
    return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
/** No-op: Ivy settings are not used by this resolver. */
public void setSettings(IvySettings settings) {
}

/** Publishes every artifact of the module, one upload per artifact. */
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
    for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
        publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
    }
}

/**
 * Uploads one artifact: "ivy"-typed artifacts go to the first ivy pattern,
 * everything else to the first artifact pattern. Fails when no suitable
 * pattern is configured.
 */
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
    String destinationPattern;
    if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
        destinationPattern = getIvyPatterns().get(0);
    } else if (!getArtifactPatterns().isEmpty()) {
        destinationPattern = getArtifactPatterns().get(0);
    } else {
        throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
    }
    String destination = toResourcePattern(destinationPattern).toPath(artifact);
    put(src, destination);
    LOGGER.info("Published {} to {}", artifact, destination);
}

/** Uploads the file; checksum generation is not supported on the publish path. */
private void put(File src, String destination) throws IOException {
    String[] checksums = getChecksumAlgorithms();
    if (checksums.length != 0) {
        // Should not be reachable for publishing
        throw new UnsupportedOperationException();
    }
    repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
    ivyPatterns.add(pattern);
}

public void addArtifactPattern(String pattern) {
    artifactPatterns.add(pattern);
}

/** @return read-only view of the ivy (descriptor) patterns. */
public List<String> getIvyPatterns() {
    return Collections.unmodifiableList(ivyPatterns);
}

/** @return read-only view of the artifact patterns. */
public List<String> getArtifactPatterns() {
    return Collections.unmodifiableList(artifactPatterns);
}

// NOTE(review): the two setters below keep a reference to the caller's list
// rather than copying it -- later external mutation would be visible here.
protected void setIvyPatterns(List<String> patterns) {
    ivyPatterns = patterns;
}

protected void setArtifactPatterns(List<String> patterns) {
    artifactPatterns = patterns;
}

public boolean isM2compatible() {
    return m2Compatible;
}

public void setM2compatible(boolean compatible) {
    m2Compatible = compatible;
}

public boolean isCheckconsistency() {
    return checkConsistency;
}

public void setCheckconsistency(boolean checkConsistency) {
    this.checkConsistency = checkConsistency;
}

public void setForce(boolean force) {
    this.force = force;
}

public boolean isForce() {
    return force;
}

/** @return whether modules published without a descriptor file may still be resolved. */
public boolean isAllownomd() {
    return allowMissingDescriptor;
}

public void setAllownomd(boolean allowMissingDescriptor) {
    this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured {@code checksums} setting — a comma-separated list of
 * checksum algorithm names, e.g. "sha1,md5" — into individual names. Blank
 * entries and the literal "none" are dropped.
 *
 * @return the configured algorithms; an empty array when none are configured.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Use a non-shadowing local name: the original "String[] checksums" local
    // shadowed the field of the same name, which was confusing and bug-prone.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!"".equals(algorithm) && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Raw comma-separated checksum configuration; parsed by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
// Name of the PatternMatcher used to detect "changing" versions (see isChanging()).
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
// Pattern applied by the changing matcher to version strings.
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
// Wraps a raw pattern string in the ResourcePattern implementation matching
// the configured repository layout (Maven-compatible vs. plain ivy).
protected ResourcePattern toResourcePattern(String pattern) {
    if (isM2compatible()) {
        return new M2ResourcePattern(pattern);
    }
    return new IvyResourcePattern(pattern);
}
// True when both a changing matcher and a changing pattern are configured and
// the pattern matches the given version string. Throws when the configured
// matcher name is unknown to the resolver strategy.
private boolean isChanging(String version) {
    boolean changingDetectionConfigured = changingMatcherName != null && changingPattern != null;
    if (!changingDetectionConfigured) {
        return false;
    }
    PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
    if (matcher == null) {
        throw new IllegalStateException("unknown matcher '" + changingMatcherName
                + "'. It is set as changing matcher in " + this);
    }
    return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Looks up metadata and artifact files by trying each configured pattern in
// order; relies on the enclosing resolver's repository, pattern conversion
// and locally-available-resource finder.
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the module's metadata (ivy) file; null when no pattern matches.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates an artifact file; null when no pattern matches.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only: fetches resource metadata, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first existing resource;
// candidates that do not exist are closed and discarded. Returns null when
// no pattern yields an existing resource.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches the resource itself (seeding from locally
// available candidates); forDownload=false fetches only resource metadata.
// Either way, a MissingExternalResource is returned instead of null, and
// IOException is wrapped in a RuntimeException.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes a resource we are not going to use; failures are logged, not thrown.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.ModuleVersionIdentifier;
import org.gradle.api.artifacts.ModuleVersionSelector;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.DefaultModuleVersionIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.resolution.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
// All collaborators are injected; the constructor only stores references.
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
// Identifier derived from this resolver's configuration.
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
// NOTE(review): missing @Override; toString overrides Object.toString().
public String toString() {
return String.format("Repository '%s'", getName());
}
// The chain is used as parse context when reading metadata (see getArtifactMetadata()).
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
// Delegates to the cache manager; requires setRepositoryCacheManager() to
// have been called first, otherwise this throws a NullPointerException.
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
// Lists all candidate versions of the requested module: versions discovered
// via the metadata (ivy) patterns, plus — when descriptors are optional —
// versions discovered via the artifact patterns.
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files
ModuleVersionArtifactMetaData metaDataArtifact = getMetaDataArtifactFor(dependency);
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (ModuleVersionArtifactMetaData otherArtifact : getDefaultMetaData(dependency).getArtifacts()) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
// Feeds every configured pattern into the version list for the given artifact.
private void listVersionsForAllPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, artifact);
}
}
public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, result, createArtifactResolver());
}
// Resolution order: prefer a real metadata file; fall back to synthesized
// ("default") metadata when descriptors are optional and an artifact exists;
// otherwise report the module as missing.
protected final void resolveStaticDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = findMetaDataArtifact(dependency, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData defaultArtifactMetaData = findDefaultArtifact(dependency, artifactResolver);
if (defaultArtifactMetaData != null) {
LOGGER.debug("Artifact file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(defaultArtifactMetaData, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.missing();
}
// Resolves and parses the module's metadata file. Returns null when the
// module has no metadata artifact or the file does not exist in the
// repository. When consistency checking is enabled, verifies the parsed
// descriptor matches the requested module id.
protected MutableModuleVersionMetaData findMetaDataArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(dependency);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = getArtifactMetadata(artifact, metaDataResource);
if (isCheckconsistency()) {
ModuleVersionSelector requested = dependency.getRequested();
ModuleVersionIdentifier requestedId = DefaultModuleVersionIdentifier.newId(requested.getGroup(), requested.getName(), requested.getVersion());
checkMetadataConsistency(requestedId, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
// Downloads (and caches) the metadata resource, then parses it into module
// metadata. IOException during download is wrapped as UncheckedIOException.
protected MutableModuleVersionMetaData getArtifactMetadata(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
// Synthesized metadata is only returned when at least one of its artifacts
// actually exists in the repository; otherwise null.
private MutableModuleVersionMetaData findDefaultArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaData = getDefaultMetaData(dependency);
if (hasArtifacts(metaData, artifactResolver)) {
LOGGER.debug("No meta-data file found for module '{}' in repository '{}', using default data instead.", dependency.getRequested(), getName());
return metaData;
}
return null;
}
// Builds default metadata for a module that has no descriptor file.
protected MutableModuleVersionMetaData getDefaultMetaData(DependencyMetaData dependency) {
MutableModuleVersionMetaData metaData = ModuleDescriptorAdapter.defaultForDependency(dependency);
return processMetaData(metaData);
}
// Common post-processing: flags the version as "changing" when it matches
// the configured changing pattern (see isChanging()).
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that the parsed descriptor identifies the same module version that
 * was requested. All mismatches (group, name, version) are collected and
 * reported together in a single MetaDataParseException.
 */
private void checkMetadataConsistency(ModuleVersionIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    ModuleVersionIdentifier foundId = metadata.getId();
    List<String> errors = new ArrayList<String>();
    if (!expectedId.getGroup().equals(foundId.getGroup())) {
        errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + foundId.getGroup() + "'");
    }
    if (!expectedId.getName().equals(foundId.getName())) {
        errors.add("bad module name: expected='" + expectedId.getName() + "' found='" + foundId.getName() + "'");
    }
    if (!expectedId.getVersion().equals(foundId.getVersion())) {
        errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + foundId.getVersion() + "'");
    }
    if (!errors.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                foundId, Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
    }
}
@Nullable
// Maps the dependency's descriptor revision id to this repository's metadata
// artifact (null when the repository has none, e.g. descriptor-less layouts).
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(DependencyMetaData dependency) {
return getMetaDataArtifactFor(DefaultModuleVersionIdentifier.newId(dependency.getDescriptor().getDependencyRevisionId()));
}
// "Local" variant: passes localOnly=true so only artifacts resolvable
// without remote access are reported (see doResolveModuleArtifacts()).
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
// Dispatches on the context type: a configuration's artifacts vs. a typed
// artifact query (metadata / javadoc / sources). Any failure — including
// the IllegalArgumentException for unknown types — is reported through the
// result rather than thrown to the caller.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else {
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
// A configuration's artifacts come straight from the parsed metadata; no
// repository access is needed, so localOnly is irrelevant here.
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
// Reports the repository's metadata artifact for the module, when it has one.
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
// Probing for javadoc/sources requires a remote existence check, so nothing
// is reported in local-only mode.
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
// Probes for a classified "jar" artifact of the given type; returns a
// singleton set when it exists in the repository, otherwise an empty set.
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
@Nullable
// Subclasses define what (if any) metadata artifact this repository layout
// uses for a given module version.
protected abstract ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleVersionIdentifier moduleVersionIdentifier);
// True when at least one of the metadata's artifacts exists in the repository.
protected boolean hasArtifacts(ModuleVersionMetaData metaData, ArtifactResolver artifactResolver) {
for (ModuleVersionArtifactMetaData artifactMetaData : metaData.getArtifacts()) {
if (artifactResolver.artifactExists(artifactMetaData)) {
return true;
}
}
return false;
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
// Base implementation ignores the module source; subclasses may use it.
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads via a checksum-verifying downloader and stores the result in the
// repository cache, returning the locally available copy.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
// Downloads the artifact file and reports it through the result:
// resolved(file) on success, notFound when no pattern matched, failed on any
// error (note: catches Throwable, so even Errors are converted to failures).
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
// Resolves the remote resource, then downloads and caches it; null when the
// artifact cannot be located via any pattern.
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
// Fresh resolver over the currently configured patterns.
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
// Deliberate no-op: this resolver does not use Ivy settings.
public void setSettings(IvySettings settings) {
}
// Publishes every artifact of the module, one file at a time.
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
// Chooses the destination pattern (first ivy pattern for "ivy" descriptors,
// first artifact pattern otherwise) and uploads the file to the resulting path.
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
// Checksum generation is not implemented for publishing; a configured
// checksum algorithm on this path is a programming error.
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
// Appends a pattern used to locate ivy metadata files.
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
// Appends a pattern used to locate artifact files.
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
// Read-only view; mutate via addIvyPattern()/setIvyPatterns().
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
// Read-only view; mutate via addArtifactPattern()/setArtifactPatterns().
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
// NOTE(review): stores the caller's list directly (no defensive copy).
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
// NOTE(review): stores the caller's list directly (no defensive copy).
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
// Whether patterns are interpreted with a Maven-compatible layout
// (see toResourcePattern()).
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
// Whether resolved metadata must match the requested module id
// (see checkMetadataConsistency()).
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
// "allownomd" = allow modules that have no metadata descriptor file; when
// true, resolution may fall back to artifact-only ("default") metadata.
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured {@code checksums} setting — a comma-separated list of
 * checksum algorithm names, e.g. "sha1,md5" — into individual names. Blank
 * entries and the literal "none" are dropped.
 *
 * @return the configured algorithms; an empty array when none are configured.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Use a non-shadowing local name: the original "String[] checksums" local
    // shadowed the field of the same name, which was confusing and bug-prone.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!"".equals(algorithm) && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Raw comma-separated checksum configuration; parsed by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
// Name of the PatternMatcher used to detect "changing" versions (see isChanging()).
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
// Pattern applied by the changing matcher to version strings.
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
// Wraps a raw pattern string in the ResourcePattern implementation matching
// the configured repository layout (Maven-compatible vs. plain ivy).
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
// True when both a changing matcher and a changing pattern are configured and
// the pattern matches the given version string. Throws when the configured
// matcher name is unknown to the resolver strategy.
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Looks up metadata and artifact files by trying each configured pattern in
// order; relies on the enclosing resolver's repository, pattern conversion
// and locally-available-resource finder.
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the module's metadata (ivy) file; null when no pattern matches.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates an artifact file; null when no pattern matches.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only: fetches resource metadata, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first existing resource;
// candidates that do not exist are closed and discarded. Returns null when
// no pattern yields an existing resource.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches the resource itself (seeding from locally
// available candidates); forDownload=false fetches only resource metadata.
// Either way, a MissingExternalResource is returned instead of null, and
// IOException is wrapped in a RuntimeException.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes a resource we are not going to use; failures are logged, not thrown.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
Left
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.ModuleVersionIdentifier;
import org.gradle.api.artifacts.ModuleVersionSelector;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.DefaultModuleVersionIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
// NOTE(review): this region is a second, identical copy of the class above
// (the file is a diff-review artifact); comments kept brief.
// All collaborators are injected; the constructor only stores references.
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
// Identifier derived from this resolver's configuration.
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
// Delegates to the cache manager; requires setRepositoryCacheManager() first.
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
// Lists candidate versions via metadata patterns, plus artifact patterns
// when descriptors are optional.
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files
ModuleVersionArtifactMetaData metaDataArtifact = getMetaDataArtifactFor(dependency);
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (ModuleVersionArtifactMetaData otherArtifact : getDefaultMetaData(dependency).getArtifacts()) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
// Feeds every configured pattern into the version list for the given artifact.
private void listVersionsForAllPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, artifact);
}
}
public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, result, createArtifactResolver());
}
protected final void resolveStaticDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = findMetaDataArtifact(dependency, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData defaultArtifactMetaData = findDefaultArtifact(dependency, artifactResolver);
if (defaultArtifactMetaData != null) {
LOGGER.debug("Artifact file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(defaultArtifactMetaData, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.missing();
}
protected MutableModuleVersionMetaData findMetaDataArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(dependency);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = getArtifactMetadata(artifact, metaDataResource);
if (isCheckconsistency()) {
ModuleVersionSelector requested = dependency.getRequested();
ModuleVersionIdentifier requestedId = DefaultModuleVersionIdentifier.newId(requested.getGroup(), requested.getName(), requested.getVersion());
checkMetadataConsistency(requestedId, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
protected MutableModuleVersionMetaData getArtifactMetadata(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData findDefaultArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaData = getDefaultMetaData(dependency);
if (hasArtifacts(metaData, artifactResolver)) {
LOGGER.debug("No meta-data file found for module '{}' in repository '{}', using default data instead.", dependency.getRequested(), getName());
return metaData;
}
return null;
}
protected MutableModuleVersionMetaData getDefaultMetaData(DependencyMetaData dependency) {
MutableModuleVersionMetaData metaData = ModuleDescriptorAdapter.defaultForDependency(dependency);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
private void checkMetadataConsistency(ModuleVersionIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getName().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getName() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
@Nullable
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(DependencyMetaData dependency) {
return getMetaDataArtifactFor(DefaultModuleVersionIdentifier.newId(dependency.getDescriptor().getDependencyRevisionId()));
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
@Nullable
protected abstract ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleVersionIdentifier moduleVersionIdentifier);
protected boolean hasArtifacts(ModuleVersionMetaData metaData, ArtifactResolver artifactResolver) {
for (ModuleVersionArtifactMetaData artifactMetaData : metaData.getArtifacts()) {
if (artifactResolver.artifactExists(artifactMetaData)) {
return true;
}
}
return false;
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
public String[] getChecksumAlgorithms() {
if (checksums == null) {
return new String[0];
}
// csDef is a comma separated list of checksum algorithms to use with this resolver
// we parse and return it as a String[]
String[] checksums = this.checksums.split(",");
List<String> algos = new ArrayList<String>();
for (int i = 0; i < checksums.length; i++) {
String cs = checksums[i].trim();
if (!"".equals(cs) && !"none".equals(cs)) {
algos.add(cs);
}
}
return algos.toArray(new String[algos.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.ModuleVersionIdentifier;
import org.gradle.api.artifacts.ModuleVersionSelector;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.DefaultModuleVersionIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files
ModuleVersionArtifactMetaData metaDataArtifact = getMetaDataArtifactFor(dependency);
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (ModuleVersionArtifactMetaData otherArtifact : getDefaultMetaData(dependency).getArtifacts()) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, artifact);
}
}
public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, result, createArtifactResolver());
}
protected final void resolveStaticDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = findMetaDataArtifact(dependency, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData defaultArtifactMetaData = findDefaultArtifact(dependency, artifactResolver);
if (defaultArtifactMetaData != null) {
LOGGER.debug("Artifact file found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.resolved(defaultArtifactMetaData, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", dependency.getRequested(), getName());
result.missing();
}
protected MutableModuleVersionMetaData findMetaDataArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(dependency);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = getArtifactMetadata(artifact, metaDataResource);
if (isCheckconsistency()) {
ModuleVersionSelector requested = dependency.getRequested();
ModuleVersionIdentifier requestedId = DefaultModuleVersionIdentifier.newId(requested.getGroup(), requested.getName(), requested.getVersion());
checkMetadataConsistency(requestedId, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
protected MutableModuleVersionMetaData getArtifactMetadata(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData findDefaultArtifact(DependencyMetaData dependency, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaData = getDefaultMetaData(dependency);
if (hasArtifacts(metaData, artifactResolver)) {
LOGGER.debug("No meta-data file found for module '{}' in repository '{}', using default data instead.", dependency.getRequested(), getName());
return metaData;
}
return null;
}
protected MutableModuleVersionMetaData getDefaultMetaData(DependencyMetaData dependency) {
MutableModuleVersionMetaData metaData = ModuleDescriptorAdapter.defaultForDependency(dependency);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
private void checkMetadataConsistency(ModuleVersionIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getName().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getName() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
@Nullable
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(DependencyMetaData dependency) {
return getMetaDataArtifactFor(DefaultModuleVersionIdentifier.newId(dependency.getDescriptor().getDependencyRevisionId()));
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
@Nullable
protected abstract ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleVersionIdentifier moduleVersionIdentifier);
/**
 * Returns true when at least one artifact declared by the module meta-data
 * actually exists in this repository.
 */
protected boolean hasArtifacts(ModuleVersionMetaData metaData, ArtifactResolver artifactResolver) {
    for (ModuleVersionArtifactMetaData candidate : metaData.getArtifacts()) {
        if (!artifactResolver.artifactExists(candidate)) {
            continue;
        }
        return true;
    }
    return false;
}
// Existence probe against this repository's configured patterns; does not download content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
// Subclasses may override to return a resolver specialized for the module source
// (e.g. timestamped snapshot versions); the default ignores the source.
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads the resource through the repository cache manager, verifying the
// configured checksum algorithms during the download.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads a single artifact into the local cache and reports the outcome:
 * resolved with the local file on success, not-found when the artifact does not
 * exist in this repository, or failed when the download throws.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
    File downloaded;
    try {
        downloaded = download(artifact, moduleSource);
    } catch (Throwable e) {
        // Any failure, including Errors from the transport, is reported against the artifact id.
        result.failed(new ArtifactResolveException(artifact.getId(), e));
        return;
    }
    if (downloaded == null) {
        result.notFound(artifact.getId());
    } else {
        result.resolved(downloaded);
    }
}
// Downloads the artifact using a resolver appropriate for the module's source.
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
/**
 * Locates the artifact via the given resolver and downloads it into the local cache.
 * Returns the cached file, or null when the artifact cannot be located.
 */
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
// Builds a resolver over the currently configured ivy and artifact patterns.
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
// Intentionally a no-op: Ivy settings are not used by this resolver implementation.
public void setSettings(IvySettings settings) {
}
/**
 * Publishes every artifact of the given module version to this repository.
 */
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
    for (ModuleVersionArtifactPublishMetaData publishArtifact : moduleVersion.getArtifacts()) {
        File artifactFile = publishArtifact.getFile();
        publish(new DefaultModuleVersionArtifactMetaData(publishArtifact.getId()), artifactFile);
    }
}
/**
 * Publishes a single artifact file to this repository. Ivy descriptors are written
 * using the first ivy pattern when one is configured; all other artifacts use the
 * first artifact pattern.
 *
 * @throws IllegalStateException when no pattern is available to build a destination path
 */
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
    boolean isIvyDescriptor = "ivy".equals(artifact.getName().getType());
    String destinationPattern;
    if (isIvyDescriptor && !getIvyPatterns().isEmpty()) {
        destinationPattern = getIvyPatterns().get(0);
    } else if (!getArtifactPatterns().isEmpty()) {
        destinationPattern = getArtifactPatterns().get(0);
    } else {
        throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
    }
    String destination = toResourcePattern(destinationPattern).toPath(artifact);
    put(src, destination);
    LOGGER.info("Published {} to {}", artifact, destination);
}
// Uploads the file to the given destination path in the repository.
// NOTE(review): publishing with checksum generation is not implemented; the
// UnsupportedOperationException guards against a configuration that requests it.
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
// Adds an ivy (descriptor) pattern to this resolver's layout.
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
// Adds an artifact pattern to this resolver's layout.
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
// Read-only view of the configured ivy patterns.
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
// Read-only view of the configured artifact patterns.
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
// Replaces the ivy pattern list wholesale; no defensive copy is taken.
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
// Replaces the artifact pattern list wholesale; no defensive copy is taken.
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
// Whether repository paths are built maven-style (see toResourcePattern()).
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
// Whether parsed meta-data is checked against the requested module id.
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
// "force" flag; Ivy resolver naming is preserved for compatibility.
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
// Ivy naming: "allownomd" = allow modules that have no meta-data descriptor.
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum setting (e.g. "sha1,md5") into
 * the algorithm names to verify. Blank entries and the special value "none" are
 * skipped. Returns an empty array when no checksums are configured.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Use a local name distinct from the 'checksums' field: the original code
    // declared a local String[] that shadowed the field it was derived from.
    List<String> algorithms = new ArrayList<String>();
    for (String entry : checksums.split(",")) {
        String algorithm = entry.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Comma-separated list of checksum algorithms; parsed by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
// Name of the pattern matcher used to decide whether a version is "changing".
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
// Pattern applied by the changing matcher (see isChanging()).
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
/**
 * Builds the pattern implementation matching this repository's layout:
 * maven-style when m2compatible, otherwise plain ivy-style.
 */
protected ResourcePattern toResourcePattern(String pattern) {
    if (isM2compatible()) {
        return new M2ResourcePattern(pattern);
    }
    return new IvyResourcePattern(pattern);
}
/**
 * Decides whether the given version should be treated as changing, by applying
 * the configured changing pattern with the configured matcher. Returns false
 * when either the matcher name or the pattern is unset.
 *
 * @throws IllegalStateException when the configured matcher name is not known
 *         to the resolver strategy
 */
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
/**
 * Locates artifacts in the repository by expanding each configured pattern and
 * probing the resulting path. Non-static: relies on the enclosing resolver for
 * pattern expansion, the resource repository and the local-candidate finder.
 */
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the module descriptor (meta-data) file using the ivy patterns.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates a regular artifact using the artifact patterns.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only: fetches resource meta-data, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first existing resource, or null.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
// Close the non-matching resource so any transport connection is released.
discardResource(resource);
}
}
return null;
}
// For downloads, locally cached candidate copies are offered to the transport;
// for existence checks, only the resource meta-data is fetched.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes a resource we decided not to use, logging (not propagating) close failures.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
Right
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
/**
 * @param name display name of the repository, used in log and error messages
 * @param repository transport used to fetch and publish external resources
 * @param versionLister lists candidate versions of a module
 * @param locallyAvailableResourceFinder locates locally cached candidate files
 * @param metaDataParser parses downloaded module descriptors
 * @param resolverStrategy supplies pattern matchers (used by the changing-version check)
 */
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
/**
 * Lists the versions of the requested module available in this repository.
 * Versions are discovered from meta-data (descriptor) files first; when missing
 * descriptors are allowed, additional versions are discovered from the artifact
 * files themselves.
 */
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
/**
 * Visits the version list once per configured pattern, so versions are discovered
 * from every possible repository layout.
 */
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
    for (String pattern : patternList) {
        versionList.visit(toResourcePattern(pattern), ivyArtifactName);
    }
}
// Entry point for resolving the meta-data of a fixed (non-dynamic) component version.
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
/**
 * Resolves a fixed component version: first from a parsed descriptor file; failing
 * that (and when missing descriptors are allowed) from default meta-data synthesized
 * for an existing artifact; otherwise the result is marked missing.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
/**
 * Locates, downloads and parses the module's descriptor file. Returns null when
 * this repository defines no descriptor artifact or the descriptor does not exist.
 * When consistency checking is enabled, the parsed id must match the requested id.
 */
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
/**
 * Downloads the descriptor resource into the cache and parses it into module
 * meta-data, then applies post-processing (changing-version flag).
 * IOExceptions during download are rethrown unchecked.
 */
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
/**
 * When the repository has no descriptor for the module, checks whether any of the
 * dependency's declared (or default "jar") artifacts exist; if so, synthesizes
 * default meta-data for the module. Returns null when no artifact could be found.
 */
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
    for (IvyArtifactName candidate : getDependencyArtifactNames(dependency)) {
        ModuleVersionArtifactMetaData candidateMetaData = new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, candidate);
        if (!artifactResolver.artifactExists(candidateMetaData)) {
            continue;
        }
        return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
    }
    return null;
}
/**
 * Collects the artifact names declared by the dependency descriptor; when none
 * are declared, falls back to the conventional single "jar" artifact named after
 * the module.
 */
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Post-processes parsed meta-data: marks the module as changing when its version
// matches the configured changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that the parsed descriptor identifies the same module that was requested,
 * collecting all mismatches (group, name, version) into a single error.
 *
 * @throws MetaDataParseException when any of group, module name or version differ
 *         from the expected identifier
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> errors = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    // Idiomatic emptiness check (was: errors.size() > 0).
    if (!errors.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
    }
}
// Resolves artifacts using only locally available information (no remote access).
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
// Resolves artifacts, allowing remote repository access.
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
/**
 * Dispatches artifact resolution by context: configuration-based contexts resolve
 * the configuration's declared artifacts; artifact-type contexts resolve meta-data,
 * javadoc or sources artifacts. Unknown artifact types fail the result.
 */
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
// Class identity comparison is intentional: each artifact type is a distinct class literal.
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else {
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
/**
 * Builds the meta-data (descriptor) artifact reference for the given component id,
 * or returns null when this repository format has no descriptor artifact.
 */
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
    IvyArtifactName descriptorName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
    return descriptorName == null ? null : new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, descriptorName);
}
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum setting (e.g. "sha1,md5") into
 * the algorithm names to verify. Blank entries and the special value "none" are
 * skipped. Returns an empty array when no checksums are configured.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Use a local name distinct from the 'checksums' field: the original code
    // declared a local String[] that shadowed the field it was derived from.
    List<String> algorithms = new ArrayList<String>();
    for (String entry : checksums.split(",")) {
        String algorithm = entry.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
/**
 * Closes a resource that will not be used, logging (but not propagating) close failures.
 */
protected void discardResource(ExternalResource resource) {
    try {
        resource.close();
    } catch (IOException closeFailure) {
        LOGGER.warn("Exception closing resource " + resource.getName(), closeFailure);
    }
}
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
/**
 * Base implementation for resolvers that locate module metadata and artifacts in an
 * external repository (HTTP, file, SFTP, ...) via configurable ivy/artifact resource
 * patterns. Subclasses supply the metadata artifact naming scheme
 * ({@link #getMetaDataArtifactName(String)}); this class handles version listing,
 * metadata parsing, artifact download/caching, and publishing.
 */
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
// Resource patterns for descriptor (ivy) files and for artifact files, tried in order.
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
// When true, parsed metadata must match the requested group/module/version.
private boolean checkConsistency = true;
// When true, a module may be resolved from its artifacts alone, without a descriptor.
private boolean allowMissingDescriptor = true;
private boolean force;
// Comma-separated list of checksum algorithm names; parsed in getChecksumAlgorithms().
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
// Matcher name + pattern used to classify versions as "changing" (e.g. snapshots).
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
/**
 * Lists all versions available for the requested module: versions advertised by
 * metadata files, plus (when descriptors may be missing) versions found via artifacts.
 */
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
/**
 * Resolves a fixed (non-dynamic) module version: prefer a parsed metadata file,
 * fall back to default metadata derived from an existing artifact when allowed,
 * otherwise report the module as missing.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
// Returns parsed (and optionally consistency-checked) metadata, or null when no
// metadata artifact applies or none could be located.
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
// Synthesizes default metadata when any declared (or implied "jar") artifact exists.
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
// Artifact names declared on the dependency; falls back to a single "jar" artifact
// named after the module when none are declared.
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Post-processes parsed metadata: flags the version as changing when it matches
// the configured changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that parsed metadata identifies the group/module/version that was requested.
 *
 * @throws MetaDataParseException listing every mismatching coordinate
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getModule().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
// Dispatches on the resolve context: configuration artifacts vs. typed artifact
// queries (metadata/javadoc/sources). Any failure is reported via the result.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else {
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
// Javadoc/source lookups probe the repository, so they are skipped in local-only mode.
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
// Returns the classified "jar" artifact when it exists in the repository, else empty.
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
/**
 * Names the metadata artifact for a module, or {@code null} when this repository
 * type has no metadata artifact.
 */
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads an artifact to a local file, reporting resolved/notFound/failed on the result.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
// Publishes every artifact of the module; descriptors use the first ivy pattern,
// other artifacts the first artifact pattern.
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum list into algorithm names,
 * ignoring blanks and the literal "none". Returns an empty array when unset.
 */
public String[] getChecksumAlgorithms() {
if (checksums == null) {
return new String[0];
}
// csDef is a comma separated list of checksum algorithms to use with this resolver
// we parse and return it as a String[]
String[] checksums = this.checksums.split(",");
List<String> algos = new ArrayList<String>();
for (int i = 0; i < checksums.length; i++) {
String cs = checksums[i].trim();
if (!"".equals(cs) && !"none".equals(cs)) {
algos.add(cs);
}
}
return algos.toArray(new String[algos.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
// A version is "changing" when both a matcher name and pattern are configured and
// the pattern matches the version. An unknown matcher name is a configuration error.
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
/**
 * Locates descriptor and artifact resources by trying each configured pattern in order.
 */
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Returns the first existing resource among the patterns, or null; non-existing
// candidates are closed before the next pattern is tried.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches the full resource (considering local candidates);
// forDownload=false performs a metadata-only probe. Never returns null.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes an unused resource, logging (not propagating) close failures.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
// MergeMethods -- stray merge-artifact token; commented out so the file remains parseable (review: remove entirely?)
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
// Resource patterns for descriptor (ivy) files and artifact files, tried in order.
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
// When true, parsed metadata must match the requested group/module/version.
private boolean checkConsistency = true;
// When true, a module may be resolved from its artifacts alone, without a descriptor.
private boolean allowMissingDescriptor = true;
private boolean force;
// Comma-separated checksum algorithm names; parsed elsewhere into a String[].
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
// Matcher name + pattern used to classify versions as "changing" (e.g. snapshots).
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
/**
 * Creates a resolver over the given external repository.
 *
 * @param name display name of the repository
 * @param repository transport used to read/write resources
 * @param versionLister lists candidate versions for a module
 * @param locallyAvailableResourceFinder locates already-downloaded copies of resources
 * @param metaDataParser parses downloaded module descriptors
 * @param resolverStrategy supplies pattern matchers (e.g. for changing-version detection)
 */
public ExternalResourceResolver(String name,
                                ExternalResourceRepository repository,
                                VersionLister versionLister,
                                LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
                                MetaDataParser metaDataParser,
                                ResolverStrategy resolverStrategy) {
    this.resolverStrategy = resolverStrategy;
    this.metaDataParser = metaDataParser;
    this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
    this.repository = repository;
    this.versionLister = versionLister;
    this.name = name;
}
/** Stable identifier for this resolver, derived from its configuration. */
public String getId() {
    String identifier = DependencyResolverIdentifier.forExternalResourceResolver(this);
    return identifier;
}
/** Returns the display name of this repository. */
public String getName() {
    return this.name;
}
/** Sets the display name of this repository. */
public void setName(String name) {
    this.name = name;
}
/** External-resource resolvers never operate in dynamic resolve mode. */
public boolean isDynamicResolveMode() {
    return false;
}
/** Human-readable description, e.g. {@code Repository 'maven-central'}. */
public String toString() {
    return "Repository '" + getName() + "'";
}
/** Injects the repository chain used as parse context when reading descriptors. */
public void setRepositoryChain(RepositoryChain resolver) {
    this.repositoryChain = resolver;
}
/** Returns the transport-level repository backing this resolver. */
protected ExternalResourceRepository getRepository() {
    return this.repository;
}
/** Delegates to the cache manager; NPEs if no cache manager has been set yet. */
public boolean isLocal() {
    return this.repositoryCacheManager.isLocal();
}
/**
 * Lists every available version of the requested module: versions advertised by
 * metadata (descriptor) files, plus -- when descriptors may be missing -- versions
 * discovered from the artifact files themselves.
 */
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
    ModuleIdentifier moduleId = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
    VersionList candidates = versionLister.getVersionList(moduleId);
    // Versions advertised by metadata files (artifact version is not considered here).
    IvyArtifactName descriptorArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
    listVersionsForAllPatterns(getIvyPatterns(), descriptorArtifact, candidates);
    // Versions for modules that have artifacts but no metadata file.
    if (isAllownomd()) {
        for (IvyArtifactName dependencyArtifact : getDependencyArtifactNames(dependency)) {
            listVersionsForAllPatterns(getArtifactPatterns(), dependencyArtifact, candidates);
        }
    }
    DefaultModuleVersionListing listing = new DefaultModuleVersionListing();
    for (VersionList.ListedVersion listed : candidates.getVersions()) {
        listing.add(listed.getVersion());
    }
    result.listed(listing);
}
/** Visits the version list once per configured pattern for the given artifact name. */
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
    for (String candidate : patternList) {
        versionList.visit(toResourcePattern(candidate), ivyArtifactName);
    }
}
/** Resolves metadata for a fixed component version using a fresh artifact resolver. */
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
    ArtifactResolver artifactResolver = createArtifactResolver();
    resolveStaticDependency(dependency, moduleComponentIdentifier, result, artifactResolver);
}
/**
 * Resolves a fixed (non-dynamic) module version. Preference order:
 * parsed metadata file; then, when descriptors are optional, default metadata
 * derived from an existing artifact; otherwise the module is reported missing.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
    MutableModuleVersionMetaData fromDescriptor = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
    if (fromDescriptor != null) {
        LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
        result.resolved(fromDescriptor, null);
        return;
    }
    if (isAllownomd()) {
        MutableModuleVersionMetaData fromArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
        if (fromArtifact != null) {
            LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
            result.resolved(fromArtifact, null);
            return;
        }
    }
    LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
    result.missing();
}
// Locates, downloads and parses this repository's metadata file (e.g. an Ivy/Maven
// descriptor) for the given id. Returns null when the repository defines no metadata
// artifact or the resource does not exist. Optionally validates the parsed id.
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
// Downloads the metadata resource into the cache, parses it, and post-processes the
// result (applies the changing-version flag). IOExceptions are rethrown unchecked.
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
// Synthesizes default metadata when no descriptor exists, but only if at least one of
// the dependency's expected artifacts actually exists in the repository. Returns null otherwise.
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
// Returns the artifact names declared on the dependency descriptor, or the conventional
// single "jar" artifact when none are declared. Order is preserved (LinkedHashSet).
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Applies repository-level post-processing: marks the metadata as changing when its
// version matches the configured changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that the group, module name and version recorded in the parsed metadata
 * match the identifier the metadata was requested for. Collects every mismatch and
 * throws a single MetaDataParseException describing all of them.
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> mismatches = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        mismatches.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        mismatches.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        mismatches.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    if (!mismatches.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(mismatches)));
    }
}
// Resolves artifacts using only locally-answerable information; absence is not reported as a failure.
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
// Resolves artifacts with remote access allowed; a missing result is reported as a failure.
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
// Reports the artifacts declared by the given configuration. Declared artifacts are known
// from metadata, so this is answerable regardless of localOnly.
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
/**
 * Returns true when the given artifact type represents this repository's module metadata
 * (e.g. an Ivy or Maven descriptor).
 *
 * <p>NOTE(review): the parameter previously used {@code SoftwareArtifact}, a type this
 * file does not import; aligned with the {@code Artifact} type used elsewhere in this
 * file (see the imports and {@code doResolveModuleArtifacts}). Subclass overrides must
 * be updated to match — confirm against the rest of the codebase.
 */
protected abstract boolean isMetaDataArtifact(Class<? extends Artifact> artifactType);
// Reports the repository's metadata artifact for the module, when one is defined.
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
// Probes for a "javadoc"-classified jar; requires remote access, so does nothing when localOnly.
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
// Probes for a "sources"-classified jar; requires remote access, so does nothing when localOnly.
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
// Returns the singleton set for the given optional jar artifact if it exists in this
// repository, or an empty set otherwise. Existence is checked without downloading.
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
// Builds the metadata-artifact descriptor for the given id, or null when this repository
// type defines no metadata artifact (see getMetaDataArtifactName).
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
// Subclasses name their metadata artifact (e.g. ivy.xml / pom); null means "none".
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
// Checks whether the artifact exists in the repository without downloading it.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads the resource through the cache manager, verifying it with the configured checksums.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads the given artifact (via the module-source-aware resolver) and reports the
 * local file, a not-found outcome, or a failure wrapping any thrown error to {@code result}.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData moduleArtifact = (ModuleVersionArtifactMetaData) componentArtifact;
    File downloaded;
    try {
        downloaded = download(moduleArtifact, moduleSource);
    } catch (Throwable e) {
        result.failed(new ArtifactResolveException(moduleArtifact.getId(), e));
        return;
    }
    if (downloaded == null) {
        result.notFound(moduleArtifact.getId());
    } else {
        result.resolved(downloaded);
    }
}
// Downloads the artifact, honouring any module-source-specific resolver (maven snapshots).
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
// Locates the artifact via the resolver and caches it locally; null when not found.
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
// Creates a resolver over the currently configured ivy and artifact patterns.
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
// Ivy compatibility hook; intentionally a no-op for this resolver.
public void setSettings(IvySettings settings) {
}
// Publishes every artifact of the module to this repository.
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
// Uploads a single file: "ivy"-typed artifacts go to the first ivy pattern, everything
// else to the first artifact pattern. Fails when no suitable pattern is configured.
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
// Uploads the file; publishing with checksum generation is deliberately unsupported here.
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
// Adds a pattern used to locate Ivy metadata files in this repository.
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
// Adds a pattern used to locate artifact files in this repository.
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
// Unmodifiable view of the configured ivy metadata patterns.
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
// Unmodifiable view of the configured artifact patterns.
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
// Replaces the ivy patterns. NOTE(review): stores the caller's list without copying.
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
// Replaces the artifact patterns. NOTE(review): stores the caller's list without copying.
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
// When true, patterns are interpreted Maven-style; see toResourcePattern().
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
// When true, parsed metadata is validated against the requested id; see checkMetadataConsistency().
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
// Ivy-style "force" flag; not read within this chunk — behaviour defined by callers.
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
// When true, modules without a metadata file may be resolved from their default artifact.
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the comma-separated {@code checksums} setting into the array of checksum
 * algorithm names to verify downloads with. Blank entries and the literal value
 * {@code "none"} are ignored; an unset setting yields an empty array.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Fixed: the local previously shadowed the 'checksums' field with the same name,
    // and the old comment referred to a nonexistent 'csDef' variable.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Comma-separated checksum algorithm list; parsed lazily by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
// Name of the Ivy pattern matcher used to decide whether a version is "changing".
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
// Pattern matched against versions to mark them as changing; see isChanging().
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
// Wraps a raw pattern string in the pattern implementation selected by the m2compatible flag.
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
/**
 * Returns true when the given version string matches the configured changing pattern.
 * Always false when no changing matcher or pattern is configured; fails fast when the
 * configured matcher name is unknown to the resolver strategy.
 */
private boolean isChanging(String version) {
    if (changingMatcherName == null || changingPattern == null) {
        return false;
    }
    PatternMatcher patternMatcher = resolverStrategy.getPatternMatcher(changingMatcherName);
    if (patternMatcher == null) {
        throw new IllegalStateException("unknown matcher '" + changingMatcherName + "'. It is set as changing matcher in " + this);
    }
    return patternMatcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Locates metadata files and artifacts by trying each configured pattern in order.
// Non-static: relies on the enclosing resolver's repository, local-candidate finder
// and toResourcePattern().
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the metadata file using the ivy patterns; prepared for download.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates the artifact using the artifact patterns; prepared for download.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence check only: fetches resource metadata, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first existing resource, discarding
// non-existing ones; null when no pattern yields an existing resource.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// For download: fetch the resource (with locally-available candidates for cache reuse).
// Otherwise: fetch only metadata. Never returns null — a MissingExternalResource stands
// in for an absent resource. IOExceptions are wrapped unchecked.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes the resource; close failures are logged, not propagated.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
// NOTE(review): this chunk appears to contain two concatenated copies of this class;
// the declarations below belong to the second copy — confirm against the repository.
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
// Patterns used to locate ivy metadata files and artifacts, respectively.
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
// When true, patterns are interpreted Maven-style (see toResourcePattern()).
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
// Comma-separated checksum algorithm names; parsed by getChecksumAlgorithms().
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
// Changing-version detection configuration; see isChanging().
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
// Wires in the collaborators used for listing, locating, caching and parsing resources.
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
// Stable identifier derived from this resolver's configuration.
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
// This resolver always resolves fixed (static) revisions.
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
// The chain is used as parse context when resolving parent descriptors.
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
// Delegates locality to the cache manager backing this resolver.
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
// Lists candidate versions for the module: from metadata-file locations first, then
// (when allownomd is on) from artifact locations for modules without metadata.
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
// Visits each configured pattern so the version list can collect available versions.
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
// Resolves metadata for a specific component version using the default artifact resolver.
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
// Metadata file first; then default-artifact probing (when allownomd); otherwise missing.
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
// Locates, downloads and parses the metadata file for the id; null when absent.
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
// Caches the metadata resource, parses it, and applies the changing-version flag.
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
// Synthesizes default metadata only when one of the expected artifacts exists.
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
// Declared artifact names, or the conventional single "jar" when none are declared.
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Marks the metadata as changing when its version matches the changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
// Validates parsed group/name/version against the requested id; throws listing all mismatches.
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getModule().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
// Local-information-only artifact resolution; absence is not a failure.
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
// Full artifact resolution; a missing result is reported as a failure.
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
// Reports the artifacts declared by the configuration; known from metadata, so
// answerable regardless of localOnly.
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
/**
 * Returns true when the given artifact type represents this repository's module metadata.
 *
 * <p>NOTE(review): previously declared with {@code SoftwareArtifact}, a type this file
 * does not import; aligned with the {@code Artifact} type used elsewhere in this file.
 * Subclass overrides must be updated to match — confirm against the rest of the codebase.
 */
protected abstract boolean isMetaDataArtifact(Class<? extends Artifact> artifactType);
// Reports the repository's metadata artifact for the module, when one is defined.
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
// Probes for a "javadoc"-classified jar; remote access needed, so skipped when localOnly.
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
// Probes for a "sources"-classified jar; remote access needed, so skipped when localOnly.
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
// Singleton set if the optional jar exists in this repository, else empty set.
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
// Builds the metadata-artifact descriptor for the id, or null when the repository
// type defines no metadata artifact.
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
// Subclasses name their metadata artifact (e.g. ivy.xml / pom); null means "none".
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
// Existence check without downloading.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads through the cache manager, verifying with the configured checksums.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Resolves the file for the given artifact, reporting the outcome on {@code result}.
 *
 * <p>Any failure during the download — deliberately including {@link Error}s — is
 * reported as an {@link ArtifactResolveException} rather than propagated, so the
 * caller always receives exactly one of: resolved, not found, or failed.</p>
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData moduleArtifact = (ModuleVersionArtifactMetaData) componentArtifact;
    final File downloaded;
    try {
        downloaded = download(moduleArtifact, moduleSource);
    } catch (Throwable failure) {
        result.failed(new ArtifactResolveException(moduleArtifact.getId(), failure));
        return;
    }
    if (downloaded == null) {
        result.notFound(moduleArtifact.getId());
    } else {
        result.resolved(downloaded);
    }
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured checksum setting into the checksum algorithms to use
 * when verifying downloads.
 *
 * <p>The {@code checksums} field holds a comma-separated list of algorithm names;
 * blank entries and the literal value "none" are skipped.</p>
 *
 * @return the algorithm names, or an empty array when no checksums are configured
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    List<String> algorithms = new ArrayList<String>();
    // Local renamed so it no longer shadows the 'checksums' field it is derived from.
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
/**
 * Returns true when the given version should be treated as changing, i.e. when both
 * a changing matcher and a changing pattern are configured and the pattern matches
 * the version.
 *
 * @throws IllegalStateException if the configured matcher name is unknown to the resolver strategy
 */
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
// Not configured: no version is ever considered changing.
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
/**
 * Locates artifacts in this repository by trying each configured pattern in order.
 * Uses the enclosing resolver's repository, resource finder and pattern conversion,
 * which is why it is (for now) a non-static inner class.
 */
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Looks up the module descriptor (ivy.xml / pom) using the ivy patterns; opens the
// resource for download.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Looks up a regular artifact using the artifact patterns; opens the resource for download.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence check only: fetches resource metadata (forDownload=false), never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first resource that exists, or null.
// Non-matching resources are closed via discardResource() to avoid leaking connections.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// For downloads, offers locally cached candidates to the repository transport; otherwise
// performs a metadata-only lookup. IOExceptions are wrapped as unchecked failures.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Best-effort close; a failure to close is logged, not propagated.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
KeepBothMethods
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
/**
 * Resolves metadata for a fixed (non-dynamic) module version.
 *
 * <p>Tries the repository's metadata file first. When none is found and missing
 * descriptors are allowed ({@code isAllownomd()}), falls back to synthesizing
 * default metadata from an existing artifact; otherwise reports the module missing.</p>
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getModule().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
/**
 * Resolves the repository's metadata artifact (ivy.xml / pom) for the module, when
 * this repository type defines one; resolves nothing otherwise.
 *
 * <p>Fix: pass the component identifier — {@code getMetaDataArtifactFor} takes a
 * {@link ModuleComponentIdentifier}, and the {@code localOnly} overload of this
 * method already uses {@code getComponentId()}.</p>
 */
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
    ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
    if (artifact != null) {
        result.resolved(ImmutableSet.of(artifact));
    }
}
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
/**
 * Locates an optional companion artifact (such as a sources or javadoc jar) for the module.
 *
 * @param type the ivy artifact type, e.g. "source" or "javadoc"
 * @param classifier the maven-style classifier, e.g. "sources" or "javadoc"
 * @return a singleton set when the artifact exists in this repository, otherwise an empty set
 */
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
// Existence probe only: ArtifactResolver.artifactExists() fetches resource metadata,
// it does not download the artifact itself.
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Resolves the file for the given artifact, reporting the outcome on {@code result}.
 *
 * <p>Any failure during the download — deliberately including {@link Error}s — is
 * reported as an {@link ArtifactResolveException} rather than propagated, so the
 * caller always receives exactly one of: resolved, not found, or failed.</p>
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData moduleArtifact = (ModuleVersionArtifactMetaData) componentArtifact;
    final File downloaded;
    try {
        downloaded = download(moduleArtifact, moduleSource);
    } catch (Throwable failure) {
        result.failed(new ArtifactResolveException(moduleArtifact.getId(), failure));
        return;
    }
    if (downloaded == null) {
        result.notFound(moduleArtifact.getId());
    } else {
        result.resolved(downloaded);
    }
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured checksum setting into the checksum algorithms to use
 * when verifying downloads.
 *
 * <p>The {@code checksums} field holds a comma-separated list of algorithm names;
 * blank entries and the literal value "none" are skipped.</p>
 *
 * @return the algorithm names, or an empty array when no checksums are configured
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    List<String> algorithms = new ArrayList<String>();
    // Local renamed so it no longer shadows the 'checksums' field it is derived from.
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
/**
 * Returns true when the given version should be treated as changing, i.e. when both
 * a changing matcher and a changing pattern are configured and the pattern matches
 * the version.
 *
 * @throws IllegalStateException if the configured matcher name is unknown to the resolver strategy
 */
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
// Not configured: no version is ever considered changing.
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Locates metadata and artifact files by expanding the resolver's configured
// ivy/artifact patterns. Uses the enclosing resolver's repository, pattern
// conversion and locally-available-resource finder.
protected class ArtifactResolver {
    private final List<String> ivyPatterns;
    private final List<String> artifactPatterns;

    public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
        this.ivyPatterns = ivyPatterns;
        this.artifactPatterns = artifactPatterns;
    }

    // The module descriptor is searched for via the ivy patterns.
    public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
        return locateResource(ivyPatterns, artifact, true);
    }

    // Regular artifacts are searched for via the artifact patterns.
    public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
        return locateResource(artifactPatterns, artifact, true);
    }

    // Existence probe only: does not prepare the resource for download.
    public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
        return locateResource(artifactPatterns, artifact, false) != null;
    }

    // Tries each pattern in order; answers the first resource that exists, or null
    // when none is reachable. Unreachable candidates are discarded (closed).
    private ExternalResource locateResource(List<String> candidatePatterns, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
        for (String candidate : candidatePatterns) {
            String path = toResourcePattern(candidate).toPath(artifact);
            LOGGER.debug("Loading {}", path);
            ExternalResource located = fetchResource(path, artifact, forDownload);
            if (located.exists()) {
                return located;
            }
            LOGGER.debug("Resource not reachable for {}: res={}", artifact, located);
            discardResource(located);
        }
        return null;
    }

    // forDownload=true fetches the resource itself (offering locally available
    // candidates for cache reuse); forDownload=false fetches only its metadata.
    // Never returns null: absence is modelled as MissingExternalResource.
    private ExternalResource fetchResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
        try {
            if (!forDownload) {
                // TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
                ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
                return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
            }
            LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
            ExternalResource resource = repository.getResource(source, localCandidates);
            return resource == null ? new MissingExternalResource(source) : resource;
        } catch (IOException e) {
            throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
        }
    }

    // Closes a resource that will not be consumed; close failures are logged, not thrown.
    protected void discardResource(ExternalResource resource) {
        try {
            resource.close();
        } catch (IOException e) {
            LOGGER.warn("Exception closing resource " + resource.getName(), e);
        }
    }
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
// Determines which artifact names to probe when a module has no metadata file:
// the artifacts explicitly declared on the dependency descriptor or, when none
// are declared, a default "jar" artifact named after the requested module.
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
// Verifies that the parsed descriptor identifies the same group/module/version that
// was requested. All mismatches are collected so a single exception reports them all.
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> problems = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        problems.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        problems.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        problems.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    if (!problems.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(problems)));
    }
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
// Publishes a single artifact file to the repository. Ivy descriptor artifacts
// (type "ivy") go to the first configured ivy pattern; all other artifacts go to
// the first configured artifact pattern.
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
// No pattern configured at all: publishing has nowhere to write to.
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
// Parses the configured checksum setting into the list of algorithm names to use.
// The setting is a comma separated list; blank entries and the literal "none" are
// ignored. Returns an empty array when nothing is configured.
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Renamed the local so it no longer shadows the 'checksums' field it is parsed from.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!"".equals(algorithm) && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Resolves metadata and artifact files against the resolver's configured ivy/artifact
// patterns. Non-static: it relies on the enclosing resolver's repository, pattern
// conversion and locally-available-resource finder.
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the module descriptor (metadata) file via the ivy patterns.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates a regular artifact file via the artifact patterns.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence check only: probes resource metadata, does not prepare a download.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order and returns the first existing resource, or null when
// no pattern yields a reachable resource; unreachable candidates are discarded.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches the resource itself (offering locally available candidates
// for cache reuse); forDownload=false fetches only its metadata. Never returns null:
// absence is modelled as MissingExternalResource.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Closes a resource that will not be consumed; close failures are logged, not thrown.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
Safe
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
// Lists all candidate versions of the requested module and reports them on the result:
// first versions discovered from metadata (descriptor) file locations, then — when
// modules without descriptors are allowed — versions discovered from artifact locations.
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
/**
 * Resolves metadata for a fixed (non-dynamic) module version.
 * Tries the descriptor artifact first; when descriptors may be absent, falls back to
 * synthesizing default metadata from a discovered artifact; otherwise reports missing.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
/**
 * Locates and parses the descriptor artifact (e.g. ivy.xml/pom) for the given id.
 * Returns null when this resolver has no descriptor artifact for the module or the
 * resource cannot be found; optionally validates parsed coordinates against the id.
 */
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
/**
 * Downloads the descriptor resource into the cache, parses it, and applies
 * post-processing (changing-module detection). IO failures surface as UncheckedIOException.
 */
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
/**
 * Fallback for modules published without a descriptor: if any expected artifact
 * exists, synthesizes default metadata for the dependency; returns null otherwise.
 */
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
/**
 * Collects the artifact names declared on the dependency; when none are declared,
 * defaults to a single "jar" artifact named after the module.
 */
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
// LinkedHashSet preserves declaration order while de-duplicating.
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Post-processing applied to every parsed/synthesized metadata instance:
// marks the module as changing when its version matches the configured changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that the coordinates declared inside a parsed descriptor match the
 * identifier the descriptor was requested for.
 *
 * @throws MetaDataParseException listing every mismatching attribute (all mismatches
 *         are collected first so the error reports the complete picture)
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> errors = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    // Idiomatic emptiness check (was errors.size() > 0).
    if (!errors.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
    }
}
// Local-only variant: answers only from already-available information, never touching the remote.
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
// Full resolution variant: may probe the repository for optional artifacts.
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
// Default behaviour: a configuration's artifacts are exactly those its metadata declares.
// Subclasses may override (e.g. to handle localOnly differently).
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
<<<<<<< MINE
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
=======
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
>>>>>>> YOURS
// Probes for the optional javadoc jar; skipped in local-only mode because the
// existence check goes through the repository (see findOptionalArtifacts).
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
/**
 * Probes for the optional sources jar. Skipped entirely in local-only mode, since
 * findOptionalArtifacts checks existence through the repository.
 */
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (localOnly) {
        return;
    }
    result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
/**
 * Builds the candidate artifact (type/"jar"/classifier) and returns it as a singleton
 * set if it exists in the repository, or an empty set otherwise.
 */
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
/**
 * Wraps the resolver-specific descriptor artifact name (ivy.xml, pom, ...) for the
 * given component id, or returns null when this resolver has no descriptor artifact.
 */
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
    IvyArtifactName metaDataArtifact = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
    return metaDataArtifact == null
            ? null
            : new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, metaDataArtifact);
}
// Resolver-specific name of the descriptor artifact for a module (e.g. "ivy.xml");
// null when the resolver type has no descriptor artifact.
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
// Existence probe against the configured artifact patterns (HEAD-style check, no download).
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
// Hook for subclasses to build a source-aware resolver; default ignores the module source.
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads through a checksum-verifying downloader and stores the file via the cache manager.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads a single artifact and reports resolved/notFound/failed on the result.
 * Note the deliberately broad catch: any Throwable during download becomes a
 * failed result instead of propagating.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
// Template method: downloads using a resolver possibly specialized for the module source.
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
// Locates the artifact resource via the patterns, caches it, and returns the local file;
// null when the artifact cannot be found.
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
// Builds a resolver over the currently configured ivy and artifact patterns.
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
// Intentionally a no-op: kept only to satisfy the Ivy resolver contract; this
// implementation takes no configuration from IvySettings.
public void setSettings(IvySettings settings) {
}
// Publishes every artifact of the module, one at a time, via the pattern-based publish().
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
/**
 * Publishes one file: descriptor artifacts ("ivy" type) go to the first ivy pattern,
 * everything else to the first artifact pattern. Fails when no pattern is configured.
 */
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
// Uploads a single file to the repository.
// NOTE(review): publishing with checksum algorithms configured is rejected outright —
// the comment below suggests checksums are expected to be empty on the publish path;
// confirm against the repository configuration that feeds setChecksums().
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
// Appends a pattern used to locate descriptor (ivy) files.
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
// Appends a pattern used to locate artifact files.
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
// Read-only view; mutate via addIvyPattern()/setIvyPatterns().
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
// Read-only view; mutate via addArtifactPattern()/setArtifactPatterns().
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
// Replaces the pattern list wholesale; the given list is stored directly (not copied).
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
// Replaces the pattern list wholesale; the given list is stored directly (not copied).
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
// When true, patterns are interpreted with Maven2 layout conventions (see toResourcePattern()).
public boolean isM2compatible() {
return m2Compatible;
}
// Toggles Maven2-compatible pattern interpretation.
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
// When true, parsed descriptors are validated against the requested coordinates.
public boolean isCheckconsistency() {
return checkConsistency;
}
// Enables/disables descriptor-vs-identifier consistency checking.
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
// Ivy "force" flag; stored here but not read by this class's visible code.
public void setForce(boolean force) {
this.force = force;
}
// Ivy "force" flag; stored here but not read by this class's visible code.
public boolean isForce() {
return force;
}
// Ivy naming ("allow no meta-data"): whether modules published without a descriptor are usable.
public boolean isAllownomd() {
return allowMissingDescriptor;
}
// Enables/disables the descriptor-less module fallback.
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum setting into an array of algorithm
 * names, ignoring blank entries and the special value "none". Returns an empty array
 * when no checksums are configured.
 *
 * Fix: the local String[] previously named "checksums" shadowed the String field of
 * the same name (with a different type); renamed for clarity.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // The setting is a comma-separated list of checksum algorithm names.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!algorithm.isEmpty() && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Raw comma-separated checksum configuration; parsed lazily by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
// Name of the pattern matcher used for changing-module detection (see isChanging()).
public String getChangingMatcherName() {
return changingMatcherName;
}
// Configures which pattern matcher interprets the changing pattern.
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
// Version pattern that marks a module as changing (e.g. snapshot-style versions).
public String getChangingPattern() {
return changingPattern;
}
// Configures the version pattern for changing-module detection.
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
// Injects the cache manager that stores downloaded files and decides isLocal().
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
// Chooses the pattern dialect: Maven2 layout when m2compatible, plain Ivy otherwise.
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
/**
 * Returns true when the version matches the configured changing pattern, interpreted
 * by the configured matcher. Both settings must be present; an unknown matcher name
 * is a configuration error.
 */
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
/**
 * Locates artifact/descriptor resources by trying each configured pattern in order.
 * Non-static inner class: relies on the enclosing resolver for toResourcePattern(),
 * the repository, and the locally-available-resource finder.
 */
protected class ArtifactResolver {
// Patterns for descriptor (ivy) files.
private final List<String> ivyPatterns;
// Patterns for regular artifact files.
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Descriptor lookup: searches ivy patterns; resource is fetched for download.
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Artifact lookup: searches artifact patterns; resource is fetched for download.
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only (metadata fetch, no download).
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order; returns the first existing resource, or null.
// Non-existing candidate resources are closed via discardResource().
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
discardResource(resource);
}
}
return null;
}
// forDownload=true: full resource with locally-available candidates for cache reuse.
// forDownload=false: metadata-only wrapper — cheaper, but see the TODO about isLocal.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Close quietly: failure to close a probe resource is logged, never propagated.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
// Parses downloaded descriptor files into module metadata.
private final MetaDataParser metaDataParser;
// Patterns for locating descriptor files and artifact files respectively.
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
// Whether patterns use Maven2 layout conventions.
private boolean m2Compatible;
// Validate descriptor coordinates against the requested id (on by default).
private boolean checkConsistency = true;
// Allow modules published without a descriptor file (on by default).
private boolean allowMissingDescriptor = true;
// Ivy "force" flag; not read within this class.
private boolean force;
// Raw comma-separated checksum algorithm configuration.
private String checksums;
private String name;
// Stores downloaded files; also decides whether this repository counts as local.
private RepositoryArtifactCache repositoryCacheManager;
// Changing-module detection configuration (matcher name + version pattern).
private String changingMatcherName;
private String changingPattern;
// The chain this resolver participates in; used as descriptor parse context.
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
// Lists available versions for a module from the repository layout.
protected VersionLister versionLister;
/**
 * @param name display name used in logging and error messages
 * @param repository transport-level access to the backing store
 * @param versionLister discovers available versions from the repository layout
 * @param locallyAvailableResourceFinder supplies local candidates for cache reuse
 * @param metaDataParser parses descriptor files into metadata
 * @param resolverStrategy supplies pattern matchers for changing-module detection
 */
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
// Stable identifier derived from this resolver's configuration, used for caching.
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
// Display name of this repository, as shown in logs and error messages.
public String getName() {
return name;
}
// Updates the display name.
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
List<String> errors = new ArrayList<String>();
if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
}
if (!expectedId.getModule().equals(metadata.getId().getName())) {
errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
}
if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
}
if (errors.size() > 0) {
throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
<<<<<<< MINE
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
=======
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
>>>>>>> YOURS
/**
 * Resolves the optional javadoc jar of the given module.
 * Nothing is resolved during a local-only pass, because locating the
 * artifact requires probing the repository.
 */
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (localOnly) {
        return;
    }
    result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
/**
 * Resolves the optional sources jar of the given module.
 * Nothing is resolved during a local-only pass, because locating the
 * artifact requires probing the repository.
 */
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (localOnly) {
        return;
    }
    result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
/**
 * Looks for an optional secondary jar (e.g. sources or javadoc) of the module.
 *
 * @param type       the ivy artifact type (e.g. "source" or "javadoc")
 * @param classifier the classifier used in the artifact name (e.g. "sources")
 * @return a singleton set when the artifact exists in this repository, otherwise an empty set
 */
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
    ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
    // Existence is checked without downloading (see ArtifactResolver.artifactExists).
    if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
        return ImmutableSet.of(artifact);
    }
    return Collections.emptySet();
}
/**
 * Builds the meta-data (descriptor) artifact reference for the given component.
 * The artifact name is supplied by the subclass via getMetaDataArtifactName().
 *
 * @return the descriptor artifact meta-data, or null when this repository type
 *         defines no meta-data artifact for the module
 */
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
    IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
    if (ivyArtifactName == null) {
        return null;
    }
    return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
/**
 * Returns true when the given artifact is present in this repository.
 * Probes resource meta-data only; the artifact itself is not downloaded.
 */
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
    return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
/**
 * Downloads a single artifact and reports the outcome on the result:
 * resolved with the local file, notFound when the artifact is absent,
 * or failed when the download throws.
 */
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
    final File localFile;
    try {
        localFile = download(artifact, moduleSource);
    } catch (Throwable e) {
        // Any failure during download is reported on the result rather than rethrown.
        result.failed(new ArtifactResolveException(artifact.getId(), e));
        return;
    }
    if (localFile == null) {
        result.notFound(artifact.getId());
    } else {
        result.resolved(localFile);
    }
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
/**
 * Locates and downloads the given artifact using the supplied resolver.
 *
 * @return the local file of the downloaded-and-cached artifact, or null when
 *         the artifact cannot be located in this repository
 * @throws IOException on download failure
 */
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
    ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
    if (artifactResource == null) {
        return null;
    }
    return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
/**
 * Publishes a single artifact file to this repository.
 * Ivy descriptors go to the first ivy pattern; all other artifacts go to the
 * first artifact pattern.
 *
 * @throws IllegalStateException when no suitable destination pattern is configured
 * @throws IOException on upload failure
 */
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
    String destinationPattern;
    if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
        destinationPattern = getIvyPatterns().get(0);
    } else if (!getArtifactPatterns().isEmpty()) {
        destinationPattern = getArtifactPatterns().get(0);
    } else {
        throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
    }
    String destination = toResourcePattern(destinationPattern).toPath(artifact);
    put(src, destination);
    LOGGER.info("Published {} to {}", artifact, destination);
}
/**
 * Uploads the file to the given repository path.
 * Publishing checksum files is not supported: when any checksum algorithm is
 * configured this throws rather than silently publishing without checksums.
 */
private void put(File src, String destination) throws IOException {
    String[] checksums = getChecksumAlgorithms();
    if (checksums.length != 0) {
        // Should not be reachable for publishing
        throw new UnsupportedOperationException();
    }
    repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum definition into an array of
 * algorithm names. Blank entries and the special value "none" are dropped;
 * a null configuration yields an empty array.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : this.checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!"".equals(algorithm) && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
/**
 * Returns true when the given version string matches the configured
 * changing-pattern under the configured matcher. When either the matcher name
 * or the pattern is unset, versions are never considered changing.
 *
 * @throws IllegalStateException when the configured matcher name is unknown
 */
private boolean isChanging(String version) {
    if (changingMatcherName == null || changingPattern == null) {
        return false;
    }
    PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
    if (matcher == null) {
        throw new IllegalStateException("unknown matcher '" + changingMatcherName
                + "'. It is set as changing matcher in " + this);
    }
    return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
/**
 * Tries each pattern in order and returns the first resource that exists,
 * or null when no pattern yields a reachable resource.
 * Missing resources are closed via discardResource() before moving on.
 *
 * @param forDownload when true the resource is fetched for download; when false
 *                    only its meta-data is probed (see getResource)
 */
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
    for (String pattern : patternList) {
        ResourcePattern resourcePattern = toResourcePattern(pattern);
        String resourceName = resourcePattern.toPath(artifact);
        LOGGER.debug("Loading {}", resourceName);
        ExternalResource resource = getResource(resourceName, artifact, forDownload);
        if (resource.exists()) {
            return resource;
        } else {
            LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
            discardResource(resource);
        }
    }
    return null;
}
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
Unstructured
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
private boolean checkConsistency = true;
private boolean allowMissingDescriptor = true;
private boolean force;
private String checksums;
private String name;
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
/**
 * Lists the candidate versions of the requested module available in this repository.
 * Versions are discovered from meta-data files first; when missing descriptors are
 * allowed (isAllownomd()), the artifact patterns are scanned as well so that modules
 * published without a descriptor are still found.
 */
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
    ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
    VersionList versionList = versionLister.getVersionList(module);
    // List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
    IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
    listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
    // List modules with missing metadata files
    if (isAllownomd()) {
        for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
            listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
        }
    }
    DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
    for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
        moduleVersions.add(listedVersion.getVersion());
    }
    result.listed(moduleVersions);
}
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
/**
 * Resolves the meta-data for a fixed (static) module version.
 * First attempts to parse a meta-data (descriptor) file; failing that, and when
 * missing descriptors are allowed (isAllownomd()), falls back to default meta-data
 * derived from an existing artifact. Reports missing otherwise.
 */
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
    MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
    if (metaDataArtifactMetaData != null) {
        LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
        result.resolved(metaDataArtifactMetaData, null);
        return;
    }
    if (isAllownomd()) {
        MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
        if (metaDataFromDefaultArtifact != null) {
            LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
            result.resolved(metaDataFromDefaultArtifact, null);
            return;
        }
    }
    LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
    result.missing();
}
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
metaData.setChanging(isChanging(metaData.getId().getVersion()));
return metaData;
}
/**
 * Verifies that the parsed descriptor identifies the module version that was
 * actually requested: group, module name and version must all match.
 * All mismatches are collected and reported together.
 *
 * @throws MetaDataParseException listing every mismatched attribute
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> errors = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    if (errors.size() > 0) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
    }
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
<<<<<<< MINE
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
=======
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
>>>>>>> YOURS
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
if (ivyArtifactName == null) {
return null;
}
return new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
public void setSettings(IvySettings settings) {
}
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the configured comma-separated checksum definition into an array of
 * algorithm names. Blank entries and the special value "none" are dropped;
 * a null configuration yields an empty array.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // csDef is a comma separated list of checksum algorithms to use with this resolver
    // we parse and return it as a String[]
    String[] checksums = this.checksums.split(",");
    List<String> algos = new ArrayList<String>();
    for (int i = 0; i < checksums.length; i++) {
        String cs = checksums[i].trim();
        if (!"".equals(cs) && !"none".equals(cs)) {
            algos.add(cs);
        }
    }
    return algos.toArray(new String[algos.size()]);
}
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
protected ResourcePattern toResourcePattern(String pattern) {
return isM2compatible() ? new M2ResourcePattern(pattern) : new IvyResourcePattern(pattern);
}
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Resolves metadata and artifact files against the configured URL patterns.
// NOTE(review): non-static inner class — it reads the enclosing resolver's
// LOGGER, repository and locallyAvailableResourceFinder; see TODO above.
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the metadata descriptor resource using the ivy patterns (downloadable).
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates the artifact resource itself using the artifact patterns (downloadable).
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only: fetches resource metadata, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order; returns the first existing resource, or null.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
// Close probe resources we are not going to use.
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches content (considering locally available copies);
// forDownload=false fetches only resource metadata. Never returns null —
// absent resources come back as MissingExternalResource.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Best-effort close; a failure to close is logged, not propagated.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.resolver;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.ivy.core.module.descriptor.DependencyArtifactDescriptor;
import org.apache.ivy.core.module.descriptor.DependencyDescriptor;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.matcher.PatternMatcher;
import org.gradle.api.Nullable;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.artifacts.ModuleIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.DefaultModuleIdentifier;
import org.gradle.api.internal.artifacts.ModuleVersionPublisher;
import org.gradle.api.internal.artifacts.component.DefaultModuleComponentIdentifier;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParseException;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.parser.MetaDataParser;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.strategy.ResolverStrategy;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.api.internal.externalresource.ExternalResource;
import org.gradle.api.internal.externalresource.LocallyAvailableExternalResource;
import org.gradle.api.internal.externalresource.MetaDataOnlyExternalResource;
import org.gradle.api.internal.externalresource.MissingExternalResource;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceCandidates;
import org.gradle.api.internal.externalresource.local.LocallyAvailableResourceFinder;
import org.gradle.api.internal.externalresource.metadata.ExternalResourceMetaData;
import org.gradle.api.internal.externalresource.transport.ExternalResourceRepository;
import org.gradle.internal.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.gradle.api.internal.artifacts.repositories.cachemanager.RepositoryArtifactCache.ExternalResourceDownloader;
public abstract class ExternalResourceResolver implements ModuleVersionPublisher, ConfiguredModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(ExternalResourceResolver.class);
private final MetaDataParser metaDataParser;
// URL patterns tried in order when locating metadata descriptors / artifacts.
private List<String> ivyPatterns = new ArrayList<String>();
private List<String> artifactPatterns = new ArrayList<String>();
private boolean m2Compatible;
// Metadata id consistency checking is on by default.
private boolean checkConsistency = true;
// Modules without a metadata descriptor are accepted by default ("allownomd").
private boolean allowMissingDescriptor = true;
private boolean force;
// Comma-separated checksum algorithm list; null means "no verification configured".
private String checksums;
private String name;
// Injected via setter, not the constructor — see setRepositoryCacheManager().
private RepositoryArtifactCache repositoryCacheManager;
private String changingMatcherName;
private String changingPattern;
// Injected via setRepositoryChain(); used when parsing descriptors that reference other modules.
private RepositoryChain repositoryChain;
private final ExternalResourceRepository repository;
private final LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder;
private final ResolverStrategy resolverStrategy;
protected VersionLister versionLister;
public ExternalResourceResolver(String name,
ExternalResourceRepository repository,
VersionLister versionLister,
LocallyAvailableResourceFinder<ModuleVersionArtifactMetaData> locallyAvailableResourceFinder,
MetaDataParser metaDataParser,
ResolverStrategy resolverStrategy) {
this.name = name;
this.versionLister = versionLister;
this.repository = repository;
this.locallyAvailableResourceFinder = locallyAvailableResourceFinder;
this.metaDataParser = metaDataParser;
this.resolverStrategy = resolverStrategy;
}
// Stable identifier derived from this resolver's configuration.
public String getId() {
return DependencyResolverIdentifier.forExternalResourceResolver(this);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public boolean isDynamicResolveMode() {
return false;
}
// NOTE(review): missing @Override annotation.
public String toString() {
return String.format("Repository '%s'", getName());
}
public void setRepositoryChain(RepositoryChain resolver) {
this.repositoryChain = resolver;
}
protected ExternalResourceRepository getRepository() {
return repository;
}
// NOTE(review): throws NPE if setRepositoryCacheManager() was never called — presumably guaranteed by wiring; confirm.
public boolean isLocal() {
return repositoryCacheManager.isLocal();
}
// Lists all candidate versions for the requested module: first from metadata
// descriptor files, then (when descriptors may be missing) from artifact files.
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
ModuleIdentifier module = new DefaultModuleIdentifier(dependency.getRequested().getGroup(), dependency.getRequested().getName());
VersionList versionList = versionLister.getVersionList(module);
// List modules based on metadata files (artifact version is not considered in listVersionsForAllPatterns())
IvyArtifactName metaDataArtifact = getMetaDataArtifactName(dependency.getRequested().getName());
listVersionsForAllPatterns(getIvyPatterns(), metaDataArtifact, versionList);
// List modules with missing metadata files
if (isAllownomd()) {
for (IvyArtifactName otherArtifact : getDependencyArtifactNames(dependency)) {
listVersionsForAllPatterns(getArtifactPatterns(), otherArtifact, versionList);
}
}
DefaultModuleVersionListing moduleVersions = new DefaultModuleVersionListing();
for (VersionList.ListedVersion listedVersion : versionList.getVersions()) {
moduleVersions.add(listedVersion.getVersion());
}
result.listed(moduleVersions);
}
// Visits the version list once per configured pattern.
private void listVersionsForAllPatterns(List<String> patternList, IvyArtifactName ivyArtifactName, VersionList versionList) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
versionList.visit(resourcePattern, ivyArtifactName);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponentIdentifier, BuildableModuleVersionMetaDataResolveResult result) {
resolveStaticDependency(dependency, moduleComponentIdentifier, result, createArtifactResolver());
}
// Resolves metadata for a fixed (non-dynamic) version: prefer a parsed
// descriptor file; fall back to default metadata derived from an existing
// artifact when descriptors are optional; otherwise report missing.
protected final void resolveStaticDependency(DependencyMetaData dependency, ModuleComponentIdentifier moduleVersionIdentifier, BuildableModuleVersionMetaDataResolveResult result, ArtifactResolver artifactResolver) {
MutableModuleVersionMetaData metaDataArtifactMetaData = parseMetaDataFromArtifact(moduleVersionIdentifier, artifactResolver);
if (metaDataArtifactMetaData != null) {
LOGGER.debug("Metadata file found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.resolved(metaDataArtifactMetaData, null);
return;
}
if (isAllownomd()) {
MutableModuleVersionMetaData metaDataFromDefaultArtifact = createMetaDataFromDefaultArtifact(moduleVersionIdentifier, dependency, artifactResolver);
if (metaDataFromDefaultArtifact != null) {
LOGGER.debug("Found artifact but no meta-data for module '{}' in repository '{}', using default meta-data.", moduleVersionIdentifier, getName());
result.resolved(metaDataFromDefaultArtifact, null);
return;
}
}
LOGGER.debug("No meta-data file or artifact found for module '{}' in repository '{}'.", moduleVersionIdentifier, getName());
result.missing();
}
// Locates, downloads and parses the metadata descriptor for the module.
// Returns null when this repository has no descriptor artifact or the
// resource cannot be found.
protected MutableModuleVersionMetaData parseMetaDataFromArtifact(ModuleComponentIdentifier moduleVersionIdentifier, ArtifactResolver artifactResolver) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(moduleVersionIdentifier);
if (artifact == null) {
return null;
}
ExternalResource metaDataResource = artifactResolver.resolveMetaDataArtifact(artifact);
if (metaDataResource == null) {
return null;
}
MutableModuleVersionMetaData moduleVersionMetaData = downloadAndParseMetaDataArtifact(artifact, metaDataResource);
// Optionally verify the descriptor identifies the module we asked for.
if (isCheckconsistency()) {
checkMetadataConsistency(moduleVersionIdentifier, moduleVersionMetaData);
}
return moduleVersionMetaData;
}
// Caches the descriptor resource locally, parses it, and applies common
// post-processing (changing-version detection).
private MutableModuleVersionMetaData downloadAndParseMetaDataArtifact(ModuleVersionArtifactMetaData artifact, ExternalResource resource) {
ExternalResourceResolverDescriptorParseContext context = new ExternalResourceResolverDescriptorParseContext(repositoryChain, this);
LocallyAvailableExternalResource cachedResource;
try {
cachedResource = downloadAndCacheResource(artifact, resource);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
MutableModuleVersionMetaData metaData = metaDataParser.parseMetaData(context, cachedResource);
return processMetaData(metaData);
}
// When no descriptor exists, synthesizes default metadata if at least one of
// the dependency's declared (or default "jar") artifacts exists in the repo.
private MutableModuleVersionMetaData createMetaDataFromDefaultArtifact(ModuleComponentIdentifier moduleVersionIdentifier, DependencyMetaData dependency, ArtifactResolver artifactResolver) {
for (IvyArtifactName artifact : getDependencyArtifactNames(dependency)) {
if (artifactResolver.artifactExists(new DefaultModuleVersionArtifactMetaData(moduleVersionIdentifier, artifact))) {
return processMetaData(ModuleDescriptorAdapter.defaultForDependency(dependency));
}
}
return null;
}
// The artifacts explicitly declared on the dependency, or a single default
// "jar" artifact when none are declared.
private Set<IvyArtifactName> getDependencyArtifactNames(DependencyMetaData dependency) {
ModuleComponentIdentifier componentIdentifier = DefaultModuleComponentIdentifier.newId(dependency.getRequested().getGroup(), dependency.getRequested().getName(), dependency.getRequested().getVersion());
Set<IvyArtifactName> artifactSet = Sets.newLinkedHashSet();
DependencyDescriptor dependencyDescriptor = dependency.getDescriptor();
for (DependencyArtifactDescriptor artifact : dependencyDescriptor.getAllDependencyArtifacts()) {
artifactSet.add(new DefaultIvyArtifactName(dependency.getRequested().getName(), artifact.getType(), artifact.getExt(), artifact.getExtraAttributes()));
}
// TODO:DAZ This logic should be within the DependencyMetaData
if (artifactSet.isEmpty()) {
artifactSet.add(new DefaultIvyArtifactName(componentIdentifier.getModule(), "jar", "jar", Collections.<String, String>emptyMap()));
}
return artifactSet;
}
// Common post-processing applied to every piece of resolved metadata:
// flags the module as "changing" when its version matches the changing pattern.
private MutableModuleVersionMetaData processMetaData(MutableModuleVersionMetaData metaData) {
    String version = metaData.getId().getVersion();
    metaData.setChanging(isChanging(version));
    return metaData;
}
/**
 * Verifies that the parsed descriptor identifies the same module (group,
 * name, version) that was requested, reporting all mismatches at once.
 *
 * @throws MetaDataParseException if any component of the id disagrees
 */
private void checkMetadataConsistency(ModuleComponentIdentifier expectedId, ModuleVersionMetaData metadata) throws MetaDataParseException {
    List<String> errors = new ArrayList<String>();
    if (!expectedId.getGroup().equals(metadata.getId().getGroup())) {
        errors.add("bad group: expected='" + expectedId.getGroup() + "' found='" + metadata.getId().getGroup() + "'");
    }
    if (!expectedId.getModule().equals(metadata.getId().getName())) {
        errors.add("bad module name: expected='" + expectedId.getModule() + "' found='" + metadata.getId().getName() + "'");
    }
    if (!expectedId.getVersion().equals(metadata.getId().getVersion())) {
        errors.add("bad version: expected='" + expectedId.getVersion() + "' found='" + metadata.getId().getVersion() + "'");
    }
    // Idiomatic emptiness check (was errors.size() > 0).
    if (!errors.isEmpty()) {
        throw new MetaDataParseException(String.format("inconsistent module metadata found. Descriptor: %s Errors: %s",
                metadata.getId(), Joiner.on(SystemProperties.getLineSeparator()).join(errors)));
    }
}
// Local-only variant: probes without remote downloads (localOnly=true).
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, true);
}
// Full remote resolution (localOnly=false); absence is reported as a failure.
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts((ModuleVersionMetaData) component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ModuleVersionMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
try {
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
ConfigurationMetaData configuration = component.getConfiguration(configurationName);
resolveConfigurationArtifacts(component, configuration, result, localOnly);
} else {
<<<<<<< MINE
Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == JvmLibraryJavadocArtifact.class) {
=======
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
if (artifactType == ComponentMetaDataArtifact.class) {
resolveMetaDataArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibraryJavadocArtifact.class) {
>>>>>>> YOURS
resolveJavadocArtifacts(component, result, localOnly);
} else if (artifactType == JvmLibrarySourcesArtifact.class) {
resolveSourceArtifacts(component, result, localOnly);
} else if (isMetaDataArtifact(artifactType)) {
resolveMetaDataArtifacts(component, result);
}
if (!localOnly && !result.hasResult()) {
result.failed(new ArtifactResolveException(component.getComponentId(),
String.format("Cannot locate artifacts of type %s for '%s' in repository '%s'", artifactType.getSimpleName(), component, name)));
}
}
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
// Default implementation: the artifacts come straight from the parsed
// configuration metadata; subclasses may override. localOnly is unused here.
protected void resolveConfigurationArtifacts(ModuleVersionMetaData module, ConfigurationMetaData configuration, BuildableArtifactSetResolveResult result, boolean localOnly) {
result.resolved(configuration.getArtifacts());
}
<<<<<<< MINE
protected abstract boolean isMetaDataArtifact(Class<? extends SoftwareArtifact> artifactType);
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getId());
=======
protected void resolveMetaDataArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionArtifactMetaData artifact = getMetaDataArtifactFor(module.getComponentId());
>>>>>>> YOURS
if (artifact != null) {
result.resolved(ImmutableSet.of(artifact));
}
}
// Javadoc/source artifacts require a remote existence probe, so they are
// only resolved when not in local-only mode.
protected void resolveJavadocArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "javadoc", "javadoc"));
}
}
protected void resolveSourceArtifacts(ModuleVersionMetaData module, BuildableArtifactSetResolveResult result, boolean localOnly) {
if (!localOnly) {
result.resolved(findOptionalArtifacts(module, "source", "sources"));
}
}
// Probes for an optional classified jar (e.g. -sources, -javadoc); returns a
// singleton set when present, empty set otherwise.
protected Set<ModuleVersionArtifactMetaData> findOptionalArtifacts(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (createArtifactResolver(module.getSource()).artifactExists(artifact)) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
// Builds the metadata-descriptor artifact for the component, or null when this
// repository type does not define one (getMetaDataArtifactName returned null).
private ModuleVersionArtifactMetaData getMetaDataArtifactFor(ModuleComponentIdentifier moduleComponentIdentifier) {
    IvyArtifactName ivyArtifactName = getMetaDataArtifactName(moduleComponentIdentifier.getModule());
    return ivyArtifactName == null
            ? null
            : new DefaultModuleVersionArtifactMetaData(moduleComponentIdentifier, ivyArtifactName);
}
// The name of this repository type's metadata descriptor artifact (e.g.
// "ivy.xml" or the pom), or null when descriptors are not used.
@Nullable
protected abstract IvyArtifactName getMetaDataArtifactName(String moduleName);
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return createArtifactResolver().artifactExists(artifact);
}
// TODO:DAZ This is currently required to handle maven snapshots: if the timestamp was part of the identifier this wouldn't be required
protected ArtifactResolver createArtifactResolver(ModuleSource moduleSource) {
return createArtifactResolver();
}
// Downloads the resource via the cache manager, verifying configured checksums.
private LocallyAvailableExternalResource downloadAndCacheResource(ModuleVersionArtifactMetaData artifact, ExternalResource resource) throws IOException {
final ExternalResourceDownloader resourceDownloader = new VerifyingExternalResourceDownloader(getChecksumAlgorithms(), getRepository());
return repositoryCacheManager.downloadAndCacheArtifactFile(artifact, resourceDownloader, resource);
}
// Downloads a single artifact; download failures are captured on the result
// rather than thrown (hence the broad Throwable catch).
public void resolveArtifact(ComponentArtifactMetaData componentArtifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
ModuleVersionArtifactMetaData artifact = (ModuleVersionArtifactMetaData) componentArtifact;
File localFile;
try {
localFile = download(artifact, moduleSource);
} catch (Throwable e) {
result.failed(new ArtifactResolveException(artifact.getId(), e));
return;
}
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
protected File download(ModuleVersionArtifactMetaData artifact, ModuleSource moduleSource) throws IOException {
return downloadArtifact(artifact, createArtifactResolver(moduleSource));
}
// Returns the locally cached file for the artifact, or null when no pattern matched.
protected File downloadArtifact(ModuleVersionArtifactMetaData artifact, ArtifactResolver artifactResolver) throws IOException {
ExternalResource artifactResource = artifactResolver.resolveArtifact(artifact);
if (artifactResource == null) {
return null;
}
return downloadAndCacheResource(artifact, artifactResource).getLocalResource().getFile();
}
protected ArtifactResolver createArtifactResolver() {
return new ArtifactResolver(getIvyPatterns(), getArtifactPatterns());
}
// Deliberate no-op: this resolver does not register itself with Ivy settings.
public void setSettings(IvySettings settings) {
}
// Publishes every artifact of the module to this repository.
public void publish(ModuleVersionPublishMetaData moduleVersion) throws IOException {
for (ModuleVersionArtifactPublishMetaData artifact : moduleVersion.getArtifacts()) {
publish(new DefaultModuleVersionArtifactMetaData(artifact.getId()), artifact.getFile());
}
}
// Chooses the destination pattern: ivy files go to the first ivy pattern,
// everything else to the first artifact pattern.
private void publish(ModuleVersionArtifactMetaData artifact, File src) throws IOException {
String destinationPattern;
if ("ivy".equals(artifact.getName().getType()) && !getIvyPatterns().isEmpty()) {
destinationPattern = getIvyPatterns().get(0);
} else if (!getArtifactPatterns().isEmpty()) {
destinationPattern = getArtifactPatterns().get(0);
} else {
throw new IllegalStateException("impossible to publish " + artifact + " using " + this + ": no artifact pattern defined");
}
String destination = toResourcePattern(destinationPattern).toPath(artifact);
put(src, destination);
LOGGER.info("Published {} to {}", artifact, destination);
}
// Publishing with checksum generation is not implemented; fail loudly rather
// than silently skipping checksums.
private void put(File src, String destination) throws IOException {
String[] checksums = getChecksumAlgorithms();
if (checksums.length != 0) {
// Should not be reachable for publishing
throw new UnsupportedOperationException();
}
repository.put(src, destination);
}
public void addIvyPattern(String pattern) {
ivyPatterns.add(pattern);
}
public void addArtifactPattern(String pattern) {
artifactPatterns.add(pattern);
}
// Read-only views; mutate via addIvyPattern/addArtifactPattern or the setters.
public List<String> getIvyPatterns() {
return Collections.unmodifiableList(ivyPatterns);
}
public List<String> getArtifactPatterns() {
return Collections.unmodifiableList(artifactPatterns);
}
// NOTE(review): the setters adopt the caller's list without copying.
protected void setIvyPatterns(List<String> patterns) {
ivyPatterns = patterns;
}
protected void setArtifactPatterns(List<String> patterns) {
artifactPatterns = patterns;
}
// "m2compatible" selects Maven2-style pattern handling in toResourcePattern().
public boolean isM2compatible() {
return m2Compatible;
}
public void setM2compatible(boolean compatible) {
m2Compatible = compatible;
}
public boolean isCheckconsistency() {
return checkConsistency;
}
// Enables/disables verification that parsed metadata matches the requested module id.
public void setCheckconsistency(boolean checkConsistency) {
this.checkConsistency = checkConsistency;
}
// "Force" flag carried over from Ivy resolver configuration; only stored here — semantics depend on callers. TODO confirm.
public void setForce(boolean force) {
this.force = force;
}
public boolean isForce() {
return force;
}
// Ivy naming: "allownomd" = allow modules with no metadata descriptor (ivy.xml/pom).
public boolean isAllownomd() {
return allowMissingDescriptor;
}
public void setAllownomd(boolean allowMissingDescriptor) {
this.allowMissingDescriptor = allowMissingDescriptor;
}
/**
 * Parses the comma-separated {@code checksums} setting into the checksum
 * algorithms to apply when verifying downloads. Blank entries and the
 * literal "none" are skipped; returns an empty array when unset.
 */
public String[] getChecksumAlgorithms() {
    if (checksums == null) {
        return new String[0];
    }
    // Renamed the local so it no longer shadows the field it is parsed from.
    List<String> algorithms = new ArrayList<String>();
    for (String candidate : checksums.split(",")) {
        String algorithm = candidate.trim();
        if (!"".equals(algorithm) && !"none".equals(algorithm)) {
            algorithms.add(algorithm);
        }
    }
    return algorithms.toArray(new String[algorithms.size()]);
}
// Raw comma-separated checksum algorithm list; parsed by getChecksumAlgorithms().
public void setChecksums(String checksums) {
this.checksums = checksums;
}
public String getChangingMatcherName() {
return changingMatcherName;
}
// Name of the Ivy pattern matcher used by isChanging() to detect changing versions.
public void setChangingMatcher(String changingMatcherName) {
this.changingMatcherName = changingMatcherName;
}
public String getChangingPattern() {
return changingPattern;
}
// Pattern (e.g. ".*-SNAPSHOT") matched against versions to mark them changing.
public void setChangingPattern(String changingPattern) {
this.changingPattern = changingPattern;
}
// Must be set before use: isLocal() and download caching dereference this without a null check.
public void setRepositoryCacheManager(RepositoryArtifactCache repositoryCacheManager) {
this.repositoryCacheManager = repositoryCacheManager;
}
// Wraps a raw URL pattern in the pattern flavour matching this resolver's
// layout: Maven2-style when m2compatible, plain Ivy-style otherwise.
protected ResourcePattern toResourcePattern(String pattern) {
    if (isM2compatible()) {
        return new M2ResourcePattern(pattern);
    }
    return new IvyResourcePattern(pattern);
}
// Returns true when the given version string matches the configured
// "changing" pattern (e.g. snapshots). False when no matcher/pattern is set.
private boolean isChanging(String version) {
if (changingMatcherName == null || changingPattern == null) {
return false;
}
PatternMatcher matcher = resolverStrategy.getPatternMatcher(changingMatcherName);
if (matcher == null) {
// Misconfiguration: the matcher name does not resolve to a known matcher.
throw new IllegalStateException("unknown matcher '" + changingMatcherName
+ "'. It is set as changing matcher in " + this);
}
return matcher.getMatcher(changingPattern).matches(version);
}
// TODO:DAZ Extract this properly: make this static
// Resolves metadata and artifact files against the configured URL patterns.
// NOTE(review): non-static inner class — it reads the enclosing resolver's
// LOGGER, repository and locallyAvailableResourceFinder; see TODO above.
protected class ArtifactResolver {
private final List<String> ivyPatterns;
private final List<String> artifactPatterns;
public ArtifactResolver(List<String> ivyPatterns, List<String> artifactPatterns) {
this.ivyPatterns = ivyPatterns;
this.artifactPatterns = artifactPatterns;
}
// Locates the metadata descriptor resource using the ivy patterns (downloadable).
public ExternalResource resolveMetaDataArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(ivyPatterns, artifact, true);
}
// Locates the artifact resource itself using the artifact patterns (downloadable).
public ExternalResource resolveArtifact(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, true);
}
// Existence probe only: fetches resource metadata, never the content.
public boolean artifactExists(ModuleVersionArtifactMetaData artifact) {
return findStaticResourceUsingPatterns(artifactPatterns, artifact, false) != null;
}
// Tries each pattern in order; returns the first existing resource, or null.
private ExternalResource findStaticResourceUsingPatterns(List<String> patternList, ModuleVersionArtifactMetaData artifact, boolean forDownload) {
for (String pattern : patternList) {
ResourcePattern resourcePattern = toResourcePattern(pattern);
String resourceName = resourcePattern.toPath(artifact);
LOGGER.debug("Loading {}", resourceName);
ExternalResource resource = getResource(resourceName, artifact, forDownload);
if (resource.exists()) {
return resource;
} else {
LOGGER.debug("Resource not reachable for {}: res={}", artifact, resource);
// Close probe resources we are not going to use.
discardResource(resource);
}
}
return null;
}
// forDownload=true fetches content (considering locally available copies);
// forDownload=false fetches only resource metadata. Never returns null —
// absent resources come back as MissingExternalResource.
private ExternalResource getResource(String source, ModuleVersionArtifactMetaData target, boolean forDownload) {
try {
if (forDownload) {
LocallyAvailableResourceCandidates localCandidates = locallyAvailableResourceFinder.findCandidates(target);
ExternalResource resource = repository.getResource(source, localCandidates);
return resource == null ? new MissingExternalResource(source) : resource;
} else {
// TODO - there's a potential problem here in that we don't carry correct isLocal data in MetaDataOnlyExternalResource
ExternalResourceMetaData metaData = repository.getResourceMetaData(source);
return metaData == null ? new MissingExternalResource(source) : new MetaDataOnlyExternalResource(source, metaData);
}
} catch (IOException e) {
throw new RuntimeException(String.format("Could not get resource '%s'.", source), e);
}
}
// Best-effort close; a failure to close is logged, not propagated.
protected void discardResource(ExternalResource resource) {
try {
resource.close();
} catch (IOException e) {
LOGGER.warn("Exception closing resource " + resource.getName(), e);
}
}
}
}
Diff Result
No diff
Case 18 - java_gradle.rev_16c71_dcc4b..IvyDependencyResolverAdapter.java
Base
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.module.descriptor.Artifact;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
// The wrapped Ivy resolver all operations delegate to.
private final DependencyResolver resolver;
// Injected per-resolve via setResolveData(); required by the Ivy APIs below.
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
// Stable identifier derived from the wrapped resolver's configuration.
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
// "Local" is inferred from the cache manager type the Ivy resolver uses.
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
// Registers the wrapped resolver with the Ivy settings.
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
// Ivy resolvers resolve a single concrete revision, so the listing contains
// at most one version (empty when the module is not found).
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
// Ivy's resolvers read resolve state from the thread-bound IvyContext.
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
// Resolves the module descriptor via the wrapped Ivy resolver, adapting it to
// Gradle metadata and flagging changing modules.
public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", dependency.getRequested(), getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", dependency.getRequested(), getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
/**
 * Downloads a single artifact through the wrapped resolver. Failures are
 * reported on the result (with the underlying cause when Ivy supplies one);
 * a FAILED/MISSING_ARTIFACT report is treated as not-found.
 */
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
    Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
    ArtifactDownloadReport report =
            resolver.download(new Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
    if (downloadFailed(report)) {
        if (report instanceof EnhancedArtifactDownloadReport) {
            // Enhanced reports carry the actual failure cause; prefer it over the text.
            result.failed(new ArtifactResolveException(artifact.getId(), ((EnhancedArtifactDownloadReport) report).getFailure()));
        } else {
            result.failed(new ArtifactResolveException(artifact.getId(), report.getDownloadDetails()));
        }
        return;
    }
    File downloaded = report.getLocalFile();
    if (downloaded == null) {
        result.notFound(artifact.getId());
    } else {
        result.resolved(downloaded);
    }
}
// Resolves only artifacts that are available without remote access (localOnly=true).
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
// Full resolution, including candidate lookups that may hit the repository (localOnly=false).
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
/**
 * Shared implementation behind the local/remote artifact-set entry points.
 *
 * <p>Fix: the cast to {@code ModuleVersionMetaData} was performed unconditionally,
 * even on the configuration path that never uses it; it now happens only on the
 * artifact-type path, so configuration-based resolution cannot fail with a
 * spurious ClassCastException.
 */
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
    if (context instanceof ConfigurationResolveContext) {
        String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
        result.resolved(component.getConfiguration(configurationName).getArtifacts());
        return;
    }
    if (!localOnly && context instanceof ArtifactTypeResolveContext) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
        try {
            result.resolved(doGetCandidateArtifacts(moduleVersion, artifactType));
        } catch (Exception e) {
            result.failed(new ArtifactResolveException(component.getComponentId(), e));
        }
    }
    // When localOnly is true and the context is type-based, the result is deliberately
    // left unfulfilled so a remote lookup can complete it later (see TODO above).
}
/**
 * Maps a requested artifact type to concrete candidate artifacts for the module.
 * Sources/javadoc candidates are probed against the repository; the metadata
 * artifact comes straight from the module descriptor. Unknown types are rejected.
 */
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
    if (artifactType == JvmLibrarySourcesArtifact.class) {
        return createArtifactMetaData(module, "source", "sources");
    }
    if (artifactType == JvmLibraryJavadocArtifact.class) {
        return createArtifactMetaData(module, "javadoc", "javadoc");
    }
    if (artifactType == ComponentMetaDataArtifact.class) {
        return ImmutableSet.of(module.artifact(module.getDescriptor().getMetadataArtifact()));
    }
    throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
/**
 * Builds metadata for a jar-packaged artifact of the given type/classifier and
 * returns it as a singleton set only if the resolver reports it exists;
 * otherwise returns an empty set.
 */
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
    ModuleVersionArtifactMetaData candidate = module.artifact(type, "jar", classifier);
    if (!resolver.exists(candidate.toIvyArtifact())) {
        return Collections.emptySet();
    }
    return ImmutableSet.of(candidate);
}
// True only for genuine download failures: Ivy also reports FAILED with the
// MISSING_ARTIFACT message when the artifact simply doesn't exist, which the
// caller treats as not-found rather than as an error.
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
&& !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
// Delegates to ChangingModuleDetector to decide whether the resolved module
// should be treated as a changing module (re-checked on later resolves).
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.module.descriptor.Artifact;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
 * A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
 *
 * <p>Not thread-safe: {@link #setResolveData} mutates shared state that the
 * resolve methods read; confine an instance to a single resolve.
 *
 * <p>Fix applied: the debug messages in {@link #getDependency} read
 * "Performed resolved of module" — corrected to "Performed resolve of module".
 */
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    // Per-resolve state; set by the caller before each resolve.
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager means artifacts are served straight from disk.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /** Lists the versions the resolver can supply; empty listing when nothing is found. */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read the current resolve data from thread-local context.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Resolves module metadata, marking the result missing when not found. */
    public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", dependency.getRequested(), getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", dependency.getRequested(), getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                // Changing modules must be re-checked on subsequent resolves.
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Downloads a single artifact; FAILED/MISSING_ARTIFACT is treated as not-found. */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Enhanced reports carry the actual failure cause; prefer it over the text.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(doGetCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
        // When localOnly is true and the context is type-based, the result is
        // deliberately left unfulfilled so a remote lookup can complete it later.
    }

    /** Maps a requested artifact type to concrete candidates; unknown types are rejected. */
    private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
        if (artifactType == ComponentMetaDataArtifact.class) {
            Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
    }

    /** Returns the artifact as a singleton set only if the resolver reports it exists. */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
                && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
Left
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.module.descriptor.Artifact;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
 * A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
 *
 * <p>Not thread-safe: {@link #setResolveData} mutates shared state that the
 * resolve methods read; confine an instance to a single resolve.
 *
 * <p>Fix applied: the debug messages in {@link #getDependency} read
 * "Performed resolved of module" — corrected to "Performed resolve of module".
 */
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    // Per-resolve state; set by the caller before each resolve.
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager means artifacts are served straight from disk.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /** Lists the versions the resolver can supply; empty listing when nothing is found. */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read the current resolve data from thread-local context.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Resolves module metadata, marking the result missing when not found. */
    public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", dependency.getRequested(), getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", dependency.getRequested(), getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                // Changing modules must be re-checked on subsequent resolves.
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Downloads a single artifact; FAILED/MISSING_ARTIFACT is treated as not-found. */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Enhanced reports carry the actual failure cause; prefer it over the text.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(doGetCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
        // When localOnly is true and the context is type-based, the result is
        // deliberately left unfulfilled so a remote lookup can complete it later.
    }

    /** Maps a requested artifact type to concrete candidates; unknown types are rejected. */
    private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
        if (artifactType == IvyDescriptorArtifact.class) {
            Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
    }

    /** Returns the artifact as a singleton set only if the resolver reports it exists. */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
                && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.module.descriptor.Artifact;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.resolution.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.resolution.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.resolution.SoftwareArtifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
 * A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
 *
 * <p>Not thread-safe: {@link #setResolveData} mutates shared state that the
 * resolve methods read; confine an instance to a single resolve.
 *
 * <p>Fix applied: the debug messages in {@link #getDependency} read
 * "Performed resolved of module" — corrected to "Performed resolve of module".
 */
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    // Per-resolve state; set by the caller before each resolve.
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager means artifacts are served straight from disk.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /** Lists the versions the resolver can supply; empty listing when nothing is found. */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read the current resolve data from thread-local context.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Resolves module metadata, marking the result missing when not found. */
    public void getDependency(DependencyMetaData dependency, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", dependency.getRequested(), getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", dependency.getRequested(), getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                // Changing modules must be re-checked on subsequent resolves.
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /** Downloads a single artifact; FAILED/MISSING_ARTIFACT is treated as not-found. */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Enhanced reports carry the actual failure cause; prefer it over the text.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends SoftwareArtifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(doGetCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
        // When localOnly is true and the context is type-based, the result is
        // deliberately left unfulfilled so a remote lookup can complete it later.
    }

    /** Maps a requested artifact type to concrete candidates; unknown types are rejected. */
    private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
        if (artifactType == IvyDescriptorArtifact.class) {
            Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
    }

    /** Returns the artifact as a singleton set only if the resolver reports it exists. */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
                && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
Right
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager indicates the resolver serves content from the local filesystem.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /**
     * Lists the versions of the requested module. An Ivy {@link DependencyResolver} resolves at
     * most one revision, so the listing contains zero or one entries.
     */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read resolution state from the thread-local IvyContext.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Resolves the metadata for the given module component, marking the result missing when the
     * resolver cannot locate the module.
     */
    public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                // Fixed log wording: was "Performed resolved of module".
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", moduleComponent, getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", moduleComponent, getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Downloads a single artifact via the Ivy resolver and translates the download report into the
     * buildable result: resolved local file, not-found, or failure (with the underlying exception
     * when the report captured one).
     */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Prefer the captured failure exception over the plain failure message.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    // NOTE(review): when localOnly is true and the context is an ArtifactTypeResolveContext, the
    // result is left incomplete; presumably the caller then falls back to a remote resolve — confirm.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
    }

    /**
     * Returns the candidate artifacts of the requested type for the given module.
     *
     * @throws IllegalArgumentException for unsupported artifact types
     */
    private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
        if (artifactType == ComponentMetaDataArtifact.class) {
            org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
    }

    /**
     * Builds metadata for a classified "jar" artifact, returning an empty set when the resolver
     * reports that the artifact does not exist in the repository.
     */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
                && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager indicates the resolver serves content from the local filesystem.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /**
     * Lists the versions of the requested module. An Ivy {@link DependencyResolver} resolves at
     * most one revision, so the listing contains zero or one entries.
     */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read resolution state from the thread-local IvyContext.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Resolves the metadata for the given module component, marking the result missing when the
     * resolver cannot locate the module.
     */
    public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                // Fixed log wording: was "Performed resolved of module".
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", moduleComponent, getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", moduleComponent, getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Downloads a single artifact via the Ivy resolver and translates the download report into the
     * buildable result: resolved local file, not-found, or failure (with the underlying exception
     * when the report captured one).
     */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Prefer the captured failure exception over the plain failure message.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    // NOTE(review): when localOnly is true and the context is an ArtifactTypeResolveContext, the
    // result is left incomplete; presumably the caller then falls back to a remote resolve — confirm.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
    }

    /**
     * Returns the candidate artifacts of the requested type for the given module.
     *
     * @throws IllegalArgumentException for unsupported artifact types
     */
    private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
        if (artifactType == ComponentMetaDataArtifact.class) {
            org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
    }

    /**
     * Builds metadata for a classified "jar" artifact, returning an empty set when the resolver
     * reports that the artifact does not exist in the repository.
     */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
                && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
MergeMethods
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager indicates the resolver serves content from the local filesystem.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /**
     * Lists the versions of the requested module. An Ivy {@link DependencyResolver} resolves at
     * most one revision, so the listing contains zero or one entries.
     */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read resolution state from the thread-local IvyContext.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Resolves the metadata for the given module component, marking the result missing when the
     * resolver cannot locate the module.
     */
    public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                // Fixed log wording: was "Performed resolved of module".
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", moduleComponent, getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", moduleComponent, getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Downloads a single artifact via the Ivy resolver and translates the download report into the
     * buildable result: resolved local file, not-found, or failure (with the underlying exception
     * when the report captured one).
     */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[] { ivyArtifact }, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Prefer the captured failure exception over the plain failure message.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    // NOTE(review): when localOnly is true and the context is an ArtifactTypeResolveContext, the
    // result is left incomplete; presumably the caller then falls back to a remote resolve — confirm.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
    }

    /**
     * Returns the candidate artifacts of the requested type for the given module.
     *
     * @throws IllegalArgumentException for unsupported artifact types
     */
    private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
        if (artifactType == IvyDescriptorArtifact.class) {
            // Fully qualified: an unqualified 'Artifact' binds to the imported Gradle result
            // Artifact, while getMetadataArtifact() yields an Ivy descriptor artifact (see the
            // qualified usage elsewhere in this file) — the unqualified declaration was a type mismatch.
            org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
    }

    /**
     * Builds metadata for a classified "jar" artifact, returning an empty set when the resolver
     * reports that the artifact does not exist in the repository.
     */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
    private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);

    private final DownloadOptions downloadOptions = new DownloadOptions();
    private final String identifier;
    private final DependencyResolver resolver;
    private ResolveData resolveData;

    public IvyDependencyResolverAdapter(DependencyResolver resolver) {
        this.resolver = resolver;
        identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
    }

    public String getId() {
        return identifier;
    }

    public String getName() {
        return resolver.getName();
    }

    @Override
    public String toString() {
        return String.format("Repository '%s'", resolver.getName());
    }

    public boolean isLocal() {
        // A local-file cache manager indicates the resolver serves content from the local filesystem.
        return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
    }

    public void setSettings(IvySettings settings) {
        settings.addResolver(resolver);
    }

    public void setResolveData(ResolveData resolveData) {
        this.resolveData = resolveData;
    }

    public boolean isDynamicResolveMode() {
        return false;
    }

    /**
     * Lists the versions of the requested module. An Ivy {@link DependencyResolver} resolves at
     * most one revision, so the listing contains zero or one entries.
     */
    public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
        // Ivy resolvers read resolution state from the thread-local IvyContext.
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                result.listed(new DefaultModuleVersionListing());
            } else {
                result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Resolves the metadata for the given module component, marking the result missing when the
     * resolver cannot locate the module.
     */
    public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
        IvyContext.getContext().setResolveData(resolveData);
        try {
            ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
            if (revision == null) {
                // Fixed log wording: was "Performed resolved of module".
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': not found", moduleComponent, getName());
                result.missing();
            } else {
                LOGGER.debug("Performed resolve of module '{}' in repository '{}': found", moduleComponent, getName());
                ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
                metaData.setChanging(isChanging(revision));
                result.resolved(metaData, null);
            }
        } catch (ParseException e) {
            throw UncheckedException.throwAsUncheckedException(e);
        }
    }

    /**
     * Downloads a single artifact via the Ivy resolver and translates the download report into the
     * buildable result: resolved local file, not-found, or failure (with the underlying exception
     * when the report captured one).
     */
    public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
        org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
        ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[] { ivyArtifact }, downloadOptions).getArtifactReport(ivyArtifact);
        if (downloadFailed(artifactDownloadReport)) {
            if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
                // Prefer the captured failure exception over the plain failure message.
                EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
                result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
            } else {
                result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
            }
            return;
        }
        File localFile = artifactDownloadReport.getLocalFile();
        if (localFile != null) {
            result.resolved(localFile);
        } else {
            result.notFound(artifact.getId());
        }
    }

    public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, true);
    }

    public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
        doResolveModuleArtifacts(component, context, result, false);
    }

    // TODO:DAZ This "local-only" pattern is quite ugly: improve it.
    // NOTE(review): when localOnly is true and the context is an ArtifactTypeResolveContext, the
    // result is left incomplete; presumably the caller then falls back to a remote resolve — confirm.
    private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
        ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
        if (context instanceof ConfigurationResolveContext) {
            String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
            result.resolved(component.getConfiguration(configurationName).getArtifacts());
            return;
        }
        if (!localOnly && context instanceof ArtifactTypeResolveContext) {
            Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
            try {
                result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
            } catch (Exception e) {
                result.failed(new ArtifactResolveException(component.getComponentId(), e));
            }
        }
    }

    /**
     * Returns the candidate artifacts of the requested type for the given module.
     *
     * @throws IllegalArgumentException for unsupported artifact types
     */
    private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
        if (artifactType == IvyDescriptorArtifact.class) {
            // Fully qualified: an unqualified 'Artifact' binds to the imported Gradle result
            // Artifact, while getMetadataArtifact() yields an Ivy descriptor artifact (see the
            // qualified usage elsewhere in this file) — the unqualified declaration was a type mismatch.
            org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
            return ImmutableSet.of(module.artifact(metadataArtifact));
        }
        if (artifactType == JvmLibraryJavadocArtifact.class) {
            return createArtifactMetaData(module, "javadoc", "javadoc");
        }
        if (artifactType == JvmLibrarySourcesArtifact.class) {
            return createArtifactMetaData(module, "source", "sources");
        }
        throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
    }

    /**
     * Builds metadata for a classified "jar" artifact, returning an empty set when the resolver
     * reports that the artifact does not exist in the repository.
     */
    private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
        ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
        if (resolver.exists(artifact.toIvyArtifact())) {
            return ImmutableSet.of(artifact);
        }
        return Collections.emptySet();
    }

    private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
        // Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
        return artifactReport.getDownloadStatus() == DownloadStatus.FAILED && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
    }

    private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
        return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
    }
}
KeepBothMethods
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[] { ivyArtifact }, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
result.resolved(component.getConfiguration(configurationName).getArtifacts());
return;
}
if (!localOnly && context instanceof ArtifactTypeResolveContext) {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
try {
result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
}
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (resolver.exists(artifact.toIvyArtifact())) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[] { ivyArtifact }, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
result.resolved(component.getConfiguration(configurationName).getArtifacts());
return;
}
if (!localOnly && context instanceof ArtifactTypeResolveContext) {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
try {
result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
}
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (resolver.exists(artifact.toIvyArtifact())) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED && !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
Safe
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
result.resolved(component.getConfiguration(configurationName).getArtifacts());
return;
}
if (!localOnly && context instanceof ArtifactTypeResolveContext) {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
try {
result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
}
<<<<<<< MINE
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
=======
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
>>>>>>> YOURS
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (resolver.exists(artifact.toIvyArtifact())) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
&& !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.resolution.IvyDescriptorArtifact;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
    ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
    if (context instanceof ConfigurationResolveContext) {
        // Configuration contexts are answered from metadata alone, local or not.
        String configuration = ((ConfigurationResolveContext) context).getConfigurationName();
        result.resolved(component.getConfiguration(configuration).getArtifacts());
    } else if (context instanceof ArtifactTypeResolveContext && !localOnly) {
        Class<? extends Artifact> requestedType = ((ArtifactTypeResolveContext) context).getArtifactType();
        try {
            result.resolved(getCandidateArtifacts(moduleVersion, requestedType));
        } catch (Exception e) {
            result.failed(new ArtifactResolveException(component.getComponentId(), e));
        }
    }
    // NOTE(review): when localOnly is true and a typed artifact set is requested, the
    // result is deliberately left incomplete — presumably so a non-local pass can run later; confirm with callers.
}
<<<<<<< MINE
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Don't know how to get candidate artifacts of type %s", artifactType.getName()));
}
=======
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
>>>>>>> YOURS
/**
 * Builds the candidate jar artifact (e.g. sources/javadoc) for the module and returns
 * it only when the backing resolver can actually locate it; otherwise an empty set.
 */
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
    ModuleVersionArtifactMetaData candidate = module.artifact(type, "jar", classifier);
    return resolver.exists(candidate.toIvyArtifact())
            ? ImmutableSet.<ModuleVersionArtifactMetaData>of(candidate)
            : Collections.<ModuleVersionArtifactMetaData>emptySet();
}
/**
 * True when the download genuinely failed. Ivy reports FAILED with the
 * MISSING_ARTIFACT message when the artifact simply doesn't exist; that case is
 * excluded so callers can report "not found" instead of an error.
 */
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
    if (artifactReport.getDownloadStatus() != DownloadStatus.FAILED) {
        return false;
    }
    return !ArtifactDownloadReport.MISSING_ARTIFACT.equals(artifactReport.getDownloadDetails());
}
/** Delegates "changing module" detection to a detector bound to this adapter's resolver. */
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
    ChangingModuleDetector detector = new ChangingModuleDetector(resolver);
    return detector.isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
Unstructured
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
result.resolved(component.getConfiguration(configurationName).getArtifacts());
return;
}
if (!localOnly && context instanceof ArtifactTypeResolveContext) {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
try {
result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
}
<<<<<<< MINE
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
=======
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
>>>>>>> YOURS
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (resolver.exists(artifact.toIvyArtifact())) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
&& !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.legacy;
import com.google.common.collect.ImmutableSet;
import org.apache.ivy.core.IvyContext;
import org.apache.ivy.core.report.ArtifactDownloadReport;
import org.apache.ivy.core.report.DownloadStatus;
import org.apache.ivy.core.resolve.DownloadOptions;
import org.apache.ivy.core.resolve.ResolveData;
import org.apache.ivy.core.resolve.ResolvedModuleRevision;
import org.apache.ivy.core.settings.IvySettings;
import org.apache.ivy.plugins.resolver.DependencyResolver;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.result.jvm.JvmLibraryJavadocArtifact;
import org.gradle.api.artifacts.result.jvm.JvmLibrarySourcesArtifact;
import org.gradle.api.artifacts.result.Artifact;
import org.gradle.api.internal.artifacts.ivyservice.*;
import org.gradle.api.internal.artifacts.ivyservice.ivyresolve.*;
import org.gradle.api.internal.artifacts.metadata.*;
import org.gradle.api.internal.artifacts.result.jvm.ComponentMetaDataArtifact;
import org.gradle.internal.UncheckedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.text.ParseException;
import java.util.Collections;
import java.util.Set;
/**
* A {@link org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ModuleVersionRepository} wrapper around an Ivy {@link DependencyResolver}.
*/
public class IvyDependencyResolverAdapter implements ConfiguredModuleVersionRepository, IvyAwareModuleVersionRepository, LocalArtifactsModuleVersionRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(IvyDependencyResolverAdapter.class);
private final DownloadOptions downloadOptions = new DownloadOptions();
private final String identifier;
private final DependencyResolver resolver;
private ResolveData resolveData;
public IvyDependencyResolverAdapter(DependencyResolver resolver) {
this.resolver = resolver;
identifier = DependencyResolverIdentifier.forIvyResolver(resolver);
}
public String getId() {
return identifier;
}
public String getName() {
return resolver.getName();
}
@Override
public String toString() {
return String.format("Repository '%s'", resolver.getName());
}
public boolean isLocal() {
return resolver.getRepositoryCacheManager() instanceof LocalFileRepositoryCacheManager;
}
public void setSettings(IvySettings settings) {
settings.addResolver(resolver);
}
public void setResolveData(ResolveData resolveData) {
this.resolveData = resolveData;
}
public boolean isDynamicResolveMode() {
return false;
}
public void listModuleVersions(DependencyMetaData dependency, BuildableModuleVersionSelectionResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
result.listed(new DefaultModuleVersionListing());
} else {
result.listed(new DefaultModuleVersionListing(revision.getId().getRevision()));
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void getComponentMetaData(DependencyMetaData dependency, ModuleComponentIdentifier moduleComponent, BuildableModuleVersionMetaDataResolveResult result) {
IvyContext.getContext().setResolveData(resolveData);
try {
ResolvedModuleRevision revision = resolver.getDependency(dependency.getDescriptor(), resolveData);
if (revision == null) {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': not found", moduleComponent, getName());
result.missing();
} else {
LOGGER.debug("Performed resolved of module '{}' in repository '{}': found", moduleComponent, getName());
ModuleDescriptorAdapter metaData = new ModuleDescriptorAdapter(revision.getDescriptor());
metaData.setChanging(isChanging(revision));
result.resolved(metaData, null);
}
} catch (ParseException e) {
throw UncheckedException.throwAsUncheckedException(e);
}
}
public void resolveArtifact(ComponentArtifactMetaData artifact, ModuleSource moduleSource, BuildableArtifactResolveResult result) {
org.apache.ivy.core.module.descriptor.Artifact ivyArtifact = ((ModuleVersionArtifactMetaData) artifact).toIvyArtifact();
ArtifactDownloadReport artifactDownloadReport = resolver.download(new org.apache.ivy.core.module.descriptor.Artifact[]{ivyArtifact}, downloadOptions).getArtifactReport(ivyArtifact);
if (downloadFailed(artifactDownloadReport)) {
if (artifactDownloadReport instanceof EnhancedArtifactDownloadReport) {
EnhancedArtifactDownloadReport enhancedReport = (EnhancedArtifactDownloadReport) artifactDownloadReport;
result.failed(new ArtifactResolveException(artifact.getId(), enhancedReport.getFailure()));
} else {
result.failed(new ArtifactResolveException(artifact.getId(), artifactDownloadReport.getDownloadDetails()));
}
return;
}
File localFile = artifactDownloadReport.getLocalFile();
if (localFile != null) {
result.resolved(localFile);
} else {
result.notFound(artifact.getId());
}
}
public void localResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, true);
}
public void resolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result) {
doResolveModuleArtifacts(component, context, result, false);
}
// TODO:DAZ This "local-only" pattern is quite ugly: improve it.
private void doResolveModuleArtifacts(ComponentMetaData component, ArtifactResolveContext context, BuildableArtifactSetResolveResult result, boolean localOnly) {
ModuleVersionMetaData moduleVersion = (ModuleVersionMetaData) component;
if (context instanceof ConfigurationResolveContext) {
String configurationName = ((ConfigurationResolveContext) context).getConfigurationName();
result.resolved(component.getConfiguration(configurationName).getArtifacts());
return;
}
if (!localOnly && context instanceof ArtifactTypeResolveContext) {
Class<? extends Artifact> artifactType = ((ArtifactTypeResolveContext) context).getArtifactType();
try {
result.resolved(getCandidateArtifacts(moduleVersion, artifactType));
} catch (Exception e) {
result.failed(new ArtifactResolveException(component.getComponentId(), e));
}
}
}
<<<<<<< MINE
private Set<ModuleVersionArtifactMetaData> doGetCandidateArtifacts(ModuleVersionMetaData module, Class<? extends SoftwareArtifact> artifactType) {
if (artifactType == IvyDescriptorArtifact.class) {
Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
=======
private Set<ModuleVersionArtifactMetaData> getCandidateArtifacts(ModuleVersionMetaData module, Class<? extends Artifact> artifactType) {
if (artifactType == ComponentMetaDataArtifact.class) {
org.apache.ivy.core.module.descriptor.Artifact metadataArtifact = module.getDescriptor().getMetadataArtifact();
>>>>>>> YOURS
return ImmutableSet.of(module.artifact(metadataArtifact));
}
if (artifactType == JvmLibraryJavadocArtifact.class) {
return createArtifactMetaData(module, "javadoc", "javadoc");
}
if (artifactType == JvmLibrarySourcesArtifact.class) {
return createArtifactMetaData(module, "source", "sources");
}
throw new IllegalArgumentException(String.format("Cannot find artifacts of type %s in %s", artifactType.getName(), module));
}
private Set<ModuleVersionArtifactMetaData> createArtifactMetaData(ModuleVersionMetaData module, String type, String classifier) {
ModuleVersionArtifactMetaData artifact = module.artifact(type, "jar", classifier);
if (resolver.exists(artifact.toIvyArtifact())) {
return ImmutableSet.of(artifact);
}
return Collections.emptySet();
}
private boolean downloadFailed(ArtifactDownloadReport artifactReport) {
// Ivy reports FAILED with MISSING_ARTIFACT message when the artifact doesn't exist.
return artifactReport.getDownloadStatus() == DownloadStatus.FAILED
&& !artifactReport.getDownloadDetails().equals(ArtifactDownloadReport.MISSING_ARTIFACT);
}
private boolean isChanging(ResolvedModuleRevision resolvedModuleRevision) {
return new ChangingModuleDetector(resolver).isChangingModule(resolvedModuleRevision.getDescriptor());
}
}
Diff Result
No diff
Case 19 - java_gradle.rev_914f9_a8133..Configuration.java
Base
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolution();
/**
* The states a configuration can be into. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
// Names a configuration by delegating to Configuration.getName().
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visibile outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets a ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependences to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that upload the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()}) but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the version conflict strategy used by this configuration.
*
* @return strategy
*/
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration.
*
* @return strategy
*/
ResolutionStrategy getResolution();
/**
* The states a configuration can be in. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} implementation for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configurations which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the set of all configurations in the hierarchy
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()}) but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
// ---- diff marker: "Left" variant of org.gradle.api.artifacts.Configuration follows ----
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the resolution strategy used by this configuration.
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy();
/**
* The states a configuration can be in. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} implementation for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configurations which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the set of all configurations in the hierarchy
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy();
/**
* The states a configuration can be in. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the set of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()}) but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
Right
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
*
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
@Deprecated
ResolutionStrategy getResolution();
/**
* The states a configuration can be into. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visibile outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets a ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependences to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that upload the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
 * Adds an artifact to be published to this configuration.
 *
 * @param artifact The artifact.
 * @return this configuration
 * @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
 */
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
 * Removes an artifact from the artifacts to be published to this configuration.
 *
 * @param artifact The artifact.
 * @return this configuration
 * @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
 */
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
 * Returns the incoming dependencies of this configuration.
 *
 * @return The incoming dependencies of this configuration. Never returns null.
 */
ResolvableDependencies getIncoming();
/**
 * Creates a copy of this configuration that only contains the dependencies directly in this configuration
 * (without contributions from superconfigurations). The new configuration will be in the
 * UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
 * {@link #getHierarchy()} for the copy will not include any superconfigurations.
 * @return copy of this configuration
 */
Configuration copy();
/**
 * Creates a copy of this configuration that contains the dependencies directly in this configuration
 * and those derived from superconfigurations. The new configuration will be in the
 * UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
 * {@link #getHierarchy()} for the copy will not include any superconfigurations.
 * @return copy of this configuration
 * @see #copy()
 */
Configuration copyRecursive();
/**
 * Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()}) but filtering
 * the dependencies using the specified dependency spec.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copy(Spec<Dependency> dependencySpec);
/**
 * Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
 * but filtering the dependencies using the dependencySpec.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copyRecursive(Spec<Dependency> dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copy(Closure dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
 * <strong>experimental</strong>
 * This part of the api will change for sure!
 * You can use it if you like but this part of the api will change without notice.
 * <p>
 *
 * Returns the version conflict strategy used by this configuration
 *
 * @return strategy
 * @deprecated This experimental API may change or be removed without notice.
 */
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
 * <strong>experimental</strong>
 * This part of the api will change for sure!
 * You can use it if you like but this part of the api will change without notice.
 * <p>
 * Returns the resolution strategy used by this configuration
 *
 * @return strategy
 * @deprecated This experimental API may change or be removed without notice.
 */
@Deprecated
ResolutionStrategy getResolution();
/**
 * The states a configuration can be in. A configuration is only mutable as long as it is
 * in the unresolved state.
 */
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
 * Returns the state of the configuration.
 *
 * @return the current {@link State} of this configuration.
 * @see org.gradle.api.artifacts.Configuration.State
 */
State getState();
/**
 * Returns the name of this configuration.
 *
 * @return The configuration name, never null.
 */
String getName();
/**
 * A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
 */
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
 * Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
 * to. The default value is true.
 *
 * @return true if this is a visible configuration.
 */
boolean isVisible();
/**
 * Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
 * the project it belongs to. The default value is true.
 *
 * @param visible true if this is a visible configuration
 * @return this configuration
 */
Configuration setVisible(boolean visible);
/**
 * Returns the configurations which this configuration extends from. The artifacts of the super
 * configurations are also available in this configuration.
 *
 * @return The super configurations. Returns an empty set when this configuration does not extend any others.
 */
Set<Configuration> getExtendsFrom();
/**
 * Sets the configurations which this configuration extends from.
 *
 * @param superConfigs The super configurations. Should not be null.
 * @return this configuration
 */
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
 * Adds the given configurations to the set of configurations which this configuration extends from.
 *
 * @param superConfigs The super configurations.
 * @return this configuration
 */
Configuration extendsFrom(Configuration... superConfigs);
/**
 * Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
 * direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
 * dependencies. The default value is true.
 *
 * @return true if this is a transitive configuration, false otherwise.
 */
boolean isTransitive();
/**
 * Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
 * closure of its dependencies and their dependencies. The default value is true.
 *
 * @param t true if this is a transitive configuration.
 * @return this configuration
 */
Configuration setTransitive(boolean t);
/**
 * Returns the description for this configuration.
 *
 * @return the description. May be null.
 */
String getDescription();
/**
 * Sets the description for this configuration.
 *
 * @param description the description. May be null.
 * @return this configuration
 */
Configuration setDescription(String description);
/**
 * Gets an ordered set including this configuration and all superconfigurations
 * recursively.
 * @return the set of all configurations in the hierarchy
 */
Set<Configuration> getHierarchy();
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
 * the resulting set of files.
 *
 * @return The files of this configuration.
 */
Set<File> resolve();
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
 * {@link #files(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Closure dependencySpecClosure);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
 * is returned.
 *
 * @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Spec<Dependency> dependencySpec);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the specified dependencies
 * is returned.
 *
 * @param dependencies The dependencies to be resolved
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Dependency... dependencies);
/**
 * Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
 * This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
 * of dependencies specified by the dependencySpec is contained in the FileCollection.
 *
 * @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Spec<Dependency> dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
 * {@link #fileCollection(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Closure dependencySpecClosure);
/**
 * Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
 * This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the specified
 * dependencies is contained in the FileCollection.
 *
 * @param dependencies The dependencies for which the FileCollection should contain the files.
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Dependency... dependencies);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
 * a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
 *
 * @return The ResolvedConfiguration object.
 */
ResolvedConfiguration getResolvedConfiguration();
/**
 * Returns the name of the task that uploads the artifacts of this configuration to repositories
 * declared by the user.
 *
 * @see org.gradle.api.tasks.Upload
 */
String getUploadTaskName();
/**
 * Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
 * (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
 *
 * @return a TaskDependency object
 */
TaskDependency getBuildDependencies();
/**
 * Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
 * dependencies related to this configuration or one of its super configurations. These other projects may be
 * projects this configuration depends on or projects with a similarly named configuration that depend on this one
 * based on the useDependedOn argument.
 *
 * @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
 * from configurations with the same name that depend on this one.
 * @param taskName name of task to depend on
 * @return the populated TaskDependency object
 */
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
 * Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
 * belonging to this configuration or to one of its super configurations.
 *
 * @return a task dependency object
 * @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
 */
@Deprecated
TaskDependency getBuildArtifacts();
/**
 * Gets the set of dependencies directly contained in this configuration
 * (ignoring superconfigurations).
 *
 * @return the set of dependencies declared directly on this configuration
 */
DependencySet getDependencies();
/**
 * <p>Gets the complete set of dependencies including those contributed by
 * superconfigurations.</p>
 *
 * @return the (read-only) set of all dependencies, including inherited ones
 */
DependencySet getAllDependencies();
/**
 * <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
 *
 * <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
 *
 * @param type the dependency type
 * @param <T> the dependency type
 * @return The (read-only) live set.
 * @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
 */
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
 * Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
 *
 * <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
 *
 * @param type the dependency type
 * @param <T> the dependency type
 * @return The (read-only) live set.
 * @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
 */
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
 * Adds a dependency to this configuration.
 *
 * @param dependency The dependency to be added.
 * @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
 */
@Deprecated
void addDependency(Dependency dependency);
/**
 * Returns the artifacts of this configuration excluding the artifacts of extended configurations.
 *
 * @return The set of artifacts declared directly on this configuration.
 */
PublishArtifactSet getArtifacts();
/**
 * Returns the artifacts of this configuration including the artifacts of extended configurations.
 *
 * @return The (read-only) set of all artifacts, including those inherited from extended configurations.
 */
PublishArtifactSet getAllArtifacts();
/**
 * Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
 * configurations.
 *
 * @return the files of all artifacts, including those of extended configurations.
 * @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
 */
@Deprecated
FileCollection getAllArtifactFiles();
/**
 * Returns the exclude rules applied for resolving any dependency of this configuration.
 *
 * @return The set of exclude rules.
 * @see #exclude(java.util.Map)
 */
Set<ExcludeRule> getExcludeRules();
/**
 * Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
 * You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
 *
 * @param excludeProperties the properties to define the exclude rule.
 * @return this configuration
 */
Configuration exclude(Map<String, String> excludeProperties);
/**
 * Returns all the configurations belonging to the same configuration container as this
 * configuration (including this configuration).
 *
 * @return the complete set of configurations in this configuration's container.
 */
Set<Configuration> getAll();
/**
 * Adds an artifact to be published to this configuration.
 *
 * @param artifact The artifact.
 * @return this configuration
 * @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
 */
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
 * Removes an artifact from the artifacts to be published to this configuration.
 *
 * @param artifact The artifact.
 * @return this configuration
 * @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
 */
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
 * Returns the incoming dependencies of this configuration.
 *
 * @return The incoming dependencies of this configuration. Never returns null.
 */
ResolvableDependencies getIncoming();
/**
 * Creates a copy of this configuration that only contains the dependencies directly in this configuration
 * (without contributions from superconfigurations). The new configuration will be in the
 * UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
 * {@link #getHierarchy()} for the copy will not include any superconfigurations.
 * @return copy of this configuration
 */
Configuration copy();
/**
 * Creates a copy of this configuration that contains the dependencies directly in this configuration
 * and those derived from superconfigurations. The new configuration will be in the
 * UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
 * {@link #getHierarchy()} for the copy will not include any superconfigurations.
 * @return copy of this configuration
 */
Configuration copyRecursive();
/**
 * Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()}) but filtering
 * the dependencies using the specified dependency spec.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copy(Spec<Dependency> dependencySpec);
/**
 * Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
 * but filtering the dependencies using the dependencySpec.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copyRecursive(Spec<Dependency> dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copy(Closure dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpec filtering requirements
 * @return copy of this configuration
 */
Configuration copyRecursive(Closure dependencySpec);
}
MergeMethods
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
 * Returns the version conflict strategy used by this configuration.
 *
 * @return strategy
 */
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy()
/**
 * The states a configuration can be in. A configuration is only mutable as long as it is
 * in the unresolved state.
 */
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
 * Returns the state of the configuration.
 *
 * @return the current {@link State} of this configuration.
 * @see org.gradle.api.artifacts.Configuration.State
 */
State getState();
/**
 * Returns the name of this configuration.
 *
 * @return The configuration name, never null.
 * @see Namer
 */
String getName();
/**
 * A {@link org.gradle.api.Namer} implementation for configurations that names each
 * configuration by its {@link #getName()} value.
 */
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration configuration) {
return configuration.getName();
}
}
/**
 * Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
 * to. The default value is true.
 *
 * @return true if this configuration is visible outside its project.
 */
boolean isVisible();
/**
 * Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
 * the project it belongs to. The default value is true.
 *
 * @param visible true if this is a visible configuration
 * @return this configuration
 */
Configuration setVisible(boolean visible);
/**
 * Returns the configurations which this configuration extends from. The artifacts of the super
 * configurations are also available in this configuration.
 *
 * @return The super configurations. Returns an empty set when this configuration does not extend any others.
 */
Set<Configuration> getExtendsFrom();
/**
 * Sets the configurations which this configuration extends from.
 *
 * @param superConfigs The super configurations. Should not be null.
 * @return this configuration
 */
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
 * Adds the given configurations to the set of configurations which this configuration extends from.
 *
 * @param superConfigs The super configurations.
 * @return this configuration
 */
Configuration extendsFrom(Configuration... superConfigs);
/**
 * Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
 * direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
 * dependencies. The default value is true.
 *
 * @return true if this is a transitive configuration, false otherwise.
 * @see #setTransitive(boolean)
 */
boolean isTransitive();
/**
 * Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
 * closure of its dependencies and their dependencies. The default value is true.
 *
 * @param t true if this is a transitive configuration.
 * @return this configuration
 * @see #isTransitive()
 */
Configuration setTransitive(boolean t);
/**
 * Returns the description for this configuration.
 *
 * @return the description. May be null.
 * @see #setDescription(String)
 */
String getDescription();
/**
 * Sets the description for this configuration.
 *
 * @param description the description. May be null.
 * @return this configuration
 */
Configuration setDescription(String description);
/**
 * Gets an ordered set including this configuration and all superconfigurations
 * recursively.
 * @return the set of all configurations in the hierarchy
 */
Set<Configuration> getHierarchy();
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
 * the resulting set of files.
 *
 * @return The files of this configuration.
 * @see #getResolvedConfiguration()
 */
Set<File> resolve();
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
 * {@link #files(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Closure dependencySpecClosure);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
 * is returned.
 *
 * @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the specified dependencies
 * is returned.
 *
 * @param dependencies The dependencies to be resolved
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Dependency... dependencies);
/**
 * Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
 * This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
 * of dependencies specified by the dependencySpec is contained in the FileCollection.
 *
 * @param dependencySpec The spec describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
 * {@link #fileCollection(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpecClosure The closure describing a filter applied to all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Closure dependencySpecClosure);
/**
 * Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
 * This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the specified
 * dependencies is contained in the FileCollection.
 *
 * @param dependencies The dependencies for which the FileCollection should contain the files.
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Dependency... dependencies);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
 * a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
 *
 * @return The ResolvedConfiguration object.
 */
ResolvedConfiguration getResolvedConfiguration();
/**
 * Returns the name of the task that uploads the artifacts of this configuration to repositories
 * declared by the user.
 *
 * @see org.gradle.api.tasks.Upload
 */
String getUploadTaskName();
/**
 * Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
 * (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
 *
 * @return a TaskDependency object.
 */
TaskDependency getBuildDependencies();
/**
 * Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
 * dependencies related to this configuration or one of its super configurations. These other projects may be
 * projects this configuration depends on or projects with a similarly named configuration that depend on this one
 * based on the useDependedOn argument.
 *
 * @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
 * from configurations with the same name that depend on this one.
 * @param taskName name of task to depend on
 * @return the populated TaskDependency object
 */
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
 * Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
 * belonging to this configuration or to one of its super configurations.
 *
 * @return a task dependency object.
 * @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
 */
@Deprecated
TaskDependency getBuildArtifacts();
/**
 * Gets the set of dependencies directly contained in this configuration
 * (ignoring superconfigurations).
 *
 * @return the set of dependencies declared directly on this configuration
 */
DependencySet getDependencies();
/**
 * <p>Gets the complete set of dependencies including those contributed by
 * superconfigurations.</p>
 *
 * @return the (read-only) set of all dependencies, including inherited ones
 */
DependencySet getAllDependencies();
/**
 * <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
 *
 * <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
 *
 * @param type the dependency type
 * @param <T> the dependency type
 * @return The (read-only) live set.
 * @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
 */
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
 * Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
 *
 * <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
 *
 * @param type the dependency type
 * @param <T> the dependency type
 * @return The (read-only) live set.
 * @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
 */
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
 * Adds a dependency to this configuration.
 *
 * @param dependency The dependency to be added.
 * @see DependencySet#add(Object)
 * @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
 */
@Deprecated
void addDependency(Dependency dependency);
/**
 * Returns the artifacts of this configuration excluding the artifacts of extended configurations.
 *
 * @return The set of artifacts declared directly on this configuration.
 */
PublishArtifactSet getArtifacts();
/**
 * Returns the artifacts of this configuration including the artifacts of extended configurations.
 *
 * @return The (read-only) set of all artifacts, including those inherited from extended configurations.
 */
PublishArtifactSet getAllArtifacts();
/**
 * Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
 * configurations.
 *
 * @return the files of all artifacts, including those of extended configurations.
 * @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
 */
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy()
/**
* The states a configuration can be in. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
 * A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
 */
static class Namer implements org.gradle.api.Namer<Configuration> {
// Delegates straight to Configuration.getName(); lets configurations be organized by name.
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
// KeepBothMethods — merge-resolution marker separating file variants; not valid Java, kept as a comment.
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy();
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
@Deprecated
ResolutionStrategy getResolution();
/**
* The states a configuration can be in. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State {
    // Empty constructor-argument lists on the constants are redundant and have been dropped.
    UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES
}
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
 * A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
 */
static class Namer implements org.gradle.api.Namer<Configuration> {
// Delegates straight to Configuration.getName(); lets configurations be organized by name.
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
ResolutionStrategy getResolutionStrategy();
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
@Deprecated
ResolutionStrategy getResolution();
/**
* The states a configuration can be into. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State {
    // No trailing "()" — enum constants without constructor arguments take no parentheses.
    UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES
}
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
/** Returns the configuration's name; lets a {@code Namer<Configuration>} be used wherever configurations are indexed by name. */
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
Safe
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
=======
>>>>>>> YOURS
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
@Deprecated
ResolutionStrategy getResolution();
=======
ResolutionStrategy getResolutionStrategy();
>>>>>>> YOURS
enum State { UNRESOLVED , RESOLVED , RESOLVED_WITH_FAILURES}
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
/** Returns the configuration's name; lets a {@code Namer<Configuration>} be used wherever configurations are indexed by name. */
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
 * is returned.
 *
 * @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
 * Resolves this configuration. This locates and downloads the files which make up this configuration.
 * But only the resulting set of files belonging to the specified dependencies
 * is returned.
 *
 * @param dependencies The dependencies to be resolved
 * @return The files of a subset of dependencies of this configuration.
 */
Set<File> files(Dependency... dependencies);
/**
 * Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
 * This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
 * of dependencies specified by the dependencySpec is contained in the FileCollection.
 *
 * @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
 * Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
 * {@link #fileCollection(org.gradle.api.specs.Spec)}.
 *
 * @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
 * @return The FileCollection with a subset of dependencies of this configuration.
 */
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
=======
>>>>>>> YOURS
/**
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
@Deprecated
ResolutionStrategy getResolution();
=======
ResolutionStrategy getResolutionStrategy();
>>>>>>> YOURS
/**
 * The states a configuration can be in. A configuration is only mutable as long as it is
 * in the {@code UNRESOLVED} state.
 */
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
Unstructured
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
<<<<<<< MINE
=======
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
*
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
>>>>>>> YOURS
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
ResolutionStrategy getResolutionStrategy();
=======
@Deprecated
ResolutionStrategy getResolution();
>>>>>>> YOURS
/**
 * The states a configuration can be in. A configuration is only mutable as long as it is
 * in the {@code UNRESOLVED} state.
 */
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visible outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets an ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependencies to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that uploads the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependedOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.artifacts;
import groovy.lang.Closure;
import org.gradle.api.DomainObjectSet;
import org.gradle.api.file.FileCollection;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Map;
import java.util.Set;
/**
* <p>A {@code Configuration} represents a group of artifacts and their dependencies.</p>
*/
public interface Configuration extends FileCollection {
/**
<<<<<<< MINE
=======
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
*
* Returns the version conflict strategy used by this configuration
*
* @return strategy
*/
@Deprecated
VersionConflictStrategy getVersionConflictStrategy();
/**
* <strong>experimental</strong>
* This part of the api will change for sure!
* You can use it if you like but this part of the api will change without notice.
* <p>
>>>>>>> YOURS
* Returns the resolution strategy used by this configuration
*
* @return strategy
*/
<<<<<<< MINE
ResolutionStrategy getResolutionStrategy();
=======
@Deprecated
ResolutionStrategy getResolution();
>>>>>>> YOURS
/**
* The states a configuration can be into. A configuration is only mutable as long as it is
* in the unresolved state.
*/
enum State { UNRESOLVED, RESOLVED, RESOLVED_WITH_FAILURES }
/**
* Returns the state of the configuration.
*
* @see org.gradle.api.artifacts.Configuration.State
*/
State getState();
/**
* Returns the name of this configuration.
*
* @return The configuration name, never null.
*/
String getName();
/**
* A {@link org.gradle.api.Namer} namer for configurations that returns {@link #getName()}.
*/
static class Namer implements org.gradle.api.Namer<Configuration> {
public String determineName(Configuration c) {
return c.getName();
}
}
/**
* Returns true if this is a visible configuration. A visible configuration is usable outside the project it belongs
* to. The default value is true.
*
* @return true if this is a visible configuration.
*/
boolean isVisible();
/**
* Sets the visibility of this configuration. When visible is set to true, this configuration is visibile outside
* the project it belongs to. The default value is true.
*
* @param visible true if this is a visible configuration
* @return this configuration
*/
Configuration setVisible(boolean visible);
/**
* Returns the names of the configurations which this configuration extends from. The artifacts of the super
* configurations are also available in this configuration.
*
* @return The super configurations. Returns an empty set when this configuration does not extend any others.
*/
Set<Configuration> getExtendsFrom();
/**
* Sets the configurations which this configuration extends from.
*
* @param superConfigs The super configuration. Should not be null.
* @return this configuration
*/
Configuration setExtendsFrom(Set<Configuration> superConfigs);
/**
* Adds the given configurations to the set of configuration which this configuration extends from.
*
* @param superConfigs The super configurations.
* @return this configuration
*/
Configuration extendsFrom(Configuration... superConfigs);
/**
* Returns the transitivity of this configuration. A transitive configuration contains the transitive closure of its
* direct dependencies, and all their dependencies. An intransitive configuration contains only the direct
* dependencies. The default value is true.
*
* @return true if this is a transitive configuration, false otherwise.
*/
boolean isTransitive();
/**
* Sets the transitivity of this configuration. When set to true, this configuration will contain the transitive
* closure of its dependencies and their dependencies. The default value is true.
*
* @param t true if this is a transitive configuration.
* @return this configuration
*/
Configuration setTransitive(boolean t);
/**
* Returns the description for this configuration.
*
* @return the description. May be null.
*/
String getDescription();
/**
* Sets the description for this configuration.
*
* @param description the description. May be null
* @return this configuration
*/
Configuration setDescription(String description);
/**
* Gets a ordered set including this configuration and all superconfigurations
* recursively.
* @return the list of all configurations
*/
Set<Configuration> getHierarchy();
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* the resulting set of files.
*
* @return The files of this configuration.
*/
Set<File> resolve();
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #files(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Closure dependencySpecClosure);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the subset of dependencies specified by the dependencySpec
* is returned.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Spec<? super Dependency> dependencySpec);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration.
* But only the resulting set of files belonging to the specified dependencies
* is returned.
*
* @param dependencies The dependences to be resolved
* @return The files of a subset of dependencies of this configuration.
*/
Set<File> files(Dependency... dependencies);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to the subset
* of dependencies specified by the dependencySpec is contained in the FileCollection.
*
* @param dependencySpec The spec describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as
* {@link #fileCollection(org.gradle.api.specs.Spec)}.
*
* @param dependencySpecClosure The closure describing a filter applied to the all the dependencies of this configuration (including dependencies from extended configurations).
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Closure dependencySpecClosure);
/**
* Resolves this configuration lazily. The resolve happens when the elements of the returned FileCollection get accessed the first time.
* This locates and downloads the files which make up this configuration. Only the resulting set of files belonging to specified
* dependencies is contained in the FileCollection.
*
* @param dependencies The dependencies for which the FileCollection should contain the files.
* @return The FileCollection with a subset of dependencies of this configuration.
*/
FileCollection fileCollection(Dependency... dependencies);
/**
* Resolves this configuration. This locates and downloads the files which make up this configuration, and returns
* a ResolvedConfiguration that may be used to determine information about the resolve (including errors).
*
* @return The ResolvedConfiguration object
*/
ResolvedConfiguration getResolvedConfiguration();
/**
* Returns the name of the task that upload the artifacts of this configuration to repositories
* declared by the user.
*
* @see org.gradle.api.tasks.Upload
*/
String getUploadTaskName();
/**
* Returns a {@code TaskDependency} object containing all required dependencies to build the internal dependencies
* (e.g. project dependencies) belonging to this configuration or to one of its super configurations.
*
* @return a TaskDependency object
*/
TaskDependency getBuildDependencies();
/**
* Returns a TaskDependency object containing dependencies on all tasks with the specified name from project
* dependencies related to this configuration or one of its super configurations. These other projects may be
* projects this configuration depends on or projects with a similarly named configuration that depend on this one
* based on the useDependOn argument.
*
* @param useDependedOn if true, add tasks from project dependencies in this configuration, otherwise use projects
* from configurations with the same name that depend on this one.
* @param taskName name of task to depend on
* @return the populated TaskDependency object
*/
TaskDependency getTaskDependencyFromProjectDependency(boolean useDependedOn, final String taskName);
/**
* Returns a {@code TaskDependency} object containing all required tasks to build the artifacts
* belonging to this configuration or to one of its super configurations.
*
* @return a task dependency object
* @deprecated Use {@link PublishArtifactSet#getBuildDependencies()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
TaskDependency getBuildArtifacts();
/**
* Gets the set of dependencies directly contained in this configuration
* (ignoring superconfigurations).
*
* @return the set of dependencies
*/
DependencySet getDependencies();
/**
* <p>Gets the complete set of dependencies including those contributed by
* superconfigurations.</p>
*
* @return the (read-only) set of dependencies
*/
DependencySet getAllDependencies();
/**
* <p>Gets the set of dependencies of type T directly contained in this configuration (ignoring superconfigurations).</p>
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getDependencies(Class<T> type);
/**
* Gets the set of dependencies of type T for this configuration including those contributed by superconfigurations.
*
* <p>The returned set is live, in that any future dependencies added to this configuration that match the type will appear in the returned set.</p>
*
* @param type the dependency type
* @param <T> the dependency type
* @return The (read-only) set.
* @deprecated Use {@link DependencySet#withType(Class)} on {@link #getAllDependencies()} instead.
*/
@Deprecated
<T extends Dependency> DomainObjectSet<T> getAllDependencies(Class<T> type);
/**
* Adds a dependency to this configuration.
*
* @param dependency The dependency to be added.
* @deprecated Use {@link DependencySet#add(Object)} on {@link #getDependencies()} instead.
*/
@Deprecated
void addDependency(Dependency dependency);
/**
* Returns the artifacts of this configuration excluding the artifacts of extended configurations.
*
* @return The set.
*/
PublishArtifactSet getArtifacts();
/**
* Returns the artifacts of this configuration including the artifacts of extended configurations.
*
* @return The (read-only) set.
*/
PublishArtifactSet getAllArtifacts();
/**
* Returns the artifacts of this configuration as a {@link FileCollection}, including artifacts of extended
* configurations.
*
* @return the artifact files.
* @deprecated Use {@link PublishArtifactSet#getFiles()} on {@link #getAllArtifacts()} instead.
*/
@Deprecated
FileCollection getAllArtifactFiles();
/**
* Returns the exclude rules applied for resolving any dependency of this configuration.
*
* @see #exclude(java.util.Map)
*/
Set<ExcludeRule> getExcludeRules();
/**
* Adds an exclude rule to exclude transitive dependencies for all dependencies of this configuration.
* You can also add exclude rules per-dependency. See {@link ModuleDependency#exclude(java.util.Map)}.
*
* @param excludeProperties the properties to define the exclude rule.
* @return this
*/
Configuration exclude(Map<String, String> excludeProperties);
/**
* Returns all the configurations belonging to the same configuration container as this
* configuration (including this configuration).
*/
Set<Configuration> getAll();
/**
* Adds an artifact to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#add(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration addArtifact(PublishArtifact artifact);
/**
* Removes an artifact from the artifacts to be published to this configuration.
*
* @param artifact The artifact.
* @return this
* @deprecated Use {@link PublishArtifactSet#remove(Object)} on {@link #getArtifacts()} instead.
*/
@Deprecated
Configuration removeArtifact(PublishArtifact artifact);
/**
* Returns the incoming dependencies of this configuration.
*
* @return The incoming dependencies of this configuration. Never null.
*/
ResolvableDependencies getIncoming();
/**
* Creates a copy of this configuration that only contains the dependencies directly in this configuration
* (without contributions from superconfigurations). The new configuation will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copy();
/**
* Creates a copy of this configuration that contains the dependencies directly in this configuration
* and those derived from superconfigurations. The new configuration will be in the
* UNRESOLVED state, but will retain all other attributes of this configuration except superconfigurations.
* {@link #getHierarchy()} for the copy will not include any superconfigurations.
* @return copy of this configuration
*/
Configuration copyRecursive();
/**
* Creates a copy of this configuration ignoring superconfigurations (see {@link #copy()} but filtering
* the dependencies using the specified dependency spec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Spec<? super Dependency> dependencySpec);
/**
* Creates a copy of this configuration with dependencies from superconfigurations (see {@link #copyRecursive()})
* but filtering the dependencies using the dependencySpec.
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Spec<? super Dependency> dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copy(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copy(Closure dependencySpec);
/**
* Takes a closure which gets coerced into a Spec. Behaves otherwise in the same way as {@link #copyRecursive(org.gradle.api.specs.Spec)}
*
* @param dependencySpec filtering requirements
* @return copy of this configuration
*/
Configuration copyRecursive(Closure dependencySpec);
}
Diff Result
No diff
Case 20 - java_gradle.rev_cb507_bf1e6..DaemonClientServices.java
Base
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
// Idle timeout handed to the daemon starter; boxed Integer -- presumably null is never
// passed by these constructors (default comes from DaemonIdleTimeout); TODO confirm units.
private final Integer idleTimeout;
// Registry of daemon-related services (DaemonDir, DaemonRegistry) rooted at daemonBaseDir.
private final ServiceRegistry registryServices;
// Convenience constructor: wires the client services with the library default idle timeout.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
// Builds the registry services for the given base dir and registers them with this registry
// so lookups on this object delegate to them.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
// Returns the Runnable used to spawn a new daemon process when none is available.
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
// Points the daemon context at the same registry directory this client uses.
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
// Idle timeout handed to the daemon starter; boxed Integer -- presumably null is never
// passed by these constructors (default comes from DaemonIdleTimeout); TODO confirm units.
private final Integer idleTimeout;
// Registry of daemon-related services (DaemonDir, DaemonRegistry) rooted at daemonBaseDir.
private final ServiceRegistry registryServices;
// Convenience constructor: wires the client services with the library default idle timeout.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
// Builds the registry services for the given base dir and registers them with this registry
// so lookups on this object delegate to them.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
// Returns the Runnable used to spawn a new daemon process when none is available.
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
// Points the daemon context at the same registry directory this client uses.
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
Left
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
// Idle timeout handed to the daemon starter.
private final Integer idleTimeout;
// Registry of daemon-related services (DaemonDir, DaemonRegistry).
private final ServiceRegistry registryServices;
// NOTE(review): the parameter is typed DaemonRegistryServices but named daemonBaseDir --
// looks like a leftover from the File-based signature below; confirm the intended name.
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
// Wraps the base directory in a DaemonRegistryServices and delegates to the private ctor.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
// Common wiring: registers the registry services so lookups on this object delegate to them.
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
// Returns the Runnable used to spawn a new daemon process when none is available.
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
// Points the daemon context at the same registry directory this client uses.
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
// Idle timeout handed to the daemon starter.
private final Integer idleTimeout;
// Registry of daemon-related services (DaemonDir, DaemonRegistry).
private final ServiceRegistry registryServices;
// NOTE(review): the parameter is typed DaemonRegistryServices but named daemonBaseDir --
// looks like a leftover from the File-based signature below; confirm the intended name.
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
// Wraps the base directory in a DaemonRegistryServices and delegates to the private ctor.
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
// Common wiring: registers the registry services so lookups on this object delegate to them.
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
// Returns the Runnable used to spawn a new daemon process when none is available.
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
// Points the daemon context at the same registry directory this client uses.
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
Right
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
MergeMethods
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
KeepBothMethods
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
Safe
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
<<<<<<< MINE
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
=======
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
>>>>>>> YOURS
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
<<<<<<< MINE
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = new DaemonRegistryServices(daemonBaseDir);
add(registryServices);
}
=======
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
>>>>>>> YOURS
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
Unstructured
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
<<<<<<< MINE
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
=======
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
>>>>>>> YOURS
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.daemon.client;
import org.gradle.api.internal.project.ServiceRegistry;
import org.gradle.launcher.daemon.context.DaemonContextBuilder;
import org.gradle.launcher.daemon.registry.DaemonDir;
import org.gradle.launcher.daemon.registry.DaemonRegistry;
import org.gradle.launcher.daemon.registry.DaemonRegistryServices;
import org.gradle.launcher.daemon.server.DaemonIdleTimeout;
import java.io.File;
/**
* Takes care of instantiating and wiring together the services required by the daemon client.
*/
public class DaemonClientServices extends DaemonClientServicesSupport {
private final int idleTimeout;
private final ServiceRegistry registryServices;
public DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices daemonBaseDir) {
this(loggingServices, daemonBaseDir, DaemonIdleTimeout.DEFAULT_IDLE_TIMEOUT);
}
<<<<<<< MINE
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, Integer idleTimeout) {
this(loggingServices, new DaemonRegistryServices(daemonBaseDir), idleTimeout);
}
private DaemonClientServices(ServiceRegistry loggingServices, DaemonRegistryServices registryServices, Integer idleTimeout) {
=======
public DaemonClientServices(ServiceRegistry loggingServices, File daemonBaseDir, int idleTimeout) {
>>>>>>> YOURS
super(loggingServices);
this.idleTimeout = idleTimeout;
this.registryServices = registryServices;
add(registryServices);
}
// here to satisfy DaemonClientServicesSupport contract
protected DaemonRegistry createDaemonRegistry() {
return registryServices.get(DaemonRegistry.class);
}
public Runnable makeDaemonStarter() {
return new DaemonStarter(registryServices.get(DaemonDir.class), idleTimeout);
}
protected void configureDaemonContextBuilder(DaemonContextBuilder builder) {
builder.setDaemonRegistryDir(registryServices.get(DaemonDir.class).getBaseDir());
}
}
Diff Result
No diff
Case 21 - java_infinispan.rev_a2154_744e9..EntryFactory.java
Base
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
}
Left
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
}
Right
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public interface EntryFactory {
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
*
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public interface EntryFactory {
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
*
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
MergeMethods
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
=======
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
>>>>>>> YOURS
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
=======
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
>>>>>>> YOURS
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
KeepBothMethods
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
Safe
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
=======
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
>>>>>>> YOURS
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
=======
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
>>>>>>> YOURS
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
Unstructured
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
=======
>>>>>>> YOURS
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
*
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.container;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.TimeoutException;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}, optionally acquiring the necessary locks via the
* {@link org.infinispan.util.concurrent.locks.LockManager}.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public interface EntryFactory {
<<<<<<< MINE
void releaseLock(InvocationContext ctx, Object key);
/**
* Attempts to lock an entry if the lock isn't already held in the current scope, and records the lock in the
* context.
*
* @param ctx context
* @param key Key to lock
* @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
* already held)
* @throws InterruptedException if interrupted
* @throws org.infinispan.util.concurrent.TimeoutException
* if we are unable to acquire the lock after a specified timeout.
*/
boolean acquireLock(InvocationContext ctx, Object key) throws InterruptedException, TimeoutException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted.
* @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, Object key, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
/**
* Wraps an entry for writing. This would typically acquire write locks if necessary, and place the wrapped
* entry in the invocation context.
*
* @param ctx current invocation context
* @param entry an internal entry to wrap
* @param createIfAbsent if true, an entry is created if it does not exist in the data container.
* @param forceLockIfAbsent forces a lock even if the entry is absent
* @param alreadyLocked if true, this hint prevents the method from acquiring any locks and the existence and ownership of the lock is presumed.
* @param forRemoval if true, this hint informs this method that the lock is being acquired for removal.
* @param undeleteIfNeeded if true, if the entry is found in the current scope (perhaps a transaction) and is deleted, it will be undeleted. If false, it will be considered deleted. * @return an MVCCEntry instance
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
MVCCEntry wrapEntryForWriting(InvocationContext ctx, InternalCacheEntry entry, boolean createIfAbsent, boolean forceLockIfAbsent, boolean alreadyLocked, boolean forRemoval, boolean undeleteIfNeeded) throws InterruptedException;
=======
>>>>>>> YOURS
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
*
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @return an entry for reading
* @throws InterruptedException when things go wrong, usually trying to acquire a lock
*/
CacheEntry wrapEntryForReading(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForClear(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForReplace(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForRemove(InvocationContext ctx, Object key) throws InterruptedException;
MVCCEntry wrapEntryForPut(InvocationContext ctx, Object key, InternalCacheEntry ice, boolean undeleteIfNeeded) throws InterruptedException;
}
Diff Result
No diff
Case 22 - java_infinispan.rev_a2154_744e9..TestCacheManagerFactory.java
Base
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
   // Counter used to generate a unique JMX domain per created manager, so managers
   // started by tests running in parallel do not collide on JMX domain names.
   private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
   // Marshaller class name override read from system properties; the legacy key
   // "infinispan.marshaller.class" is supported as a fallback. May be null if neither is set.
   public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
   private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
   // Per-thread registry of the cache managers created during the currently running test.
   // testFinished() uses it to verify that the test shut down everything it started.
   private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
      @Override
      protected PerThreadCacheManagers initialValue() {
         return new PerThreadCacheManagers();
      }
   };
   /**
    * Creates a manager, first replacing the configured JMX domain with a freshly
    * generated unique one unless {@code keepJmxDomain} is set.
    */
   private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
      if (!keepJmxDomain) {
         gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
      }
      return newDefaultCacheManager(start, gc, c);
   }
   /**
    * Builds a cache manager from an XML configuration file on the classpath/filesystem.
    *
    * @param allowDupeDomains if true, duplicate JMX domains are tolerated
    */
   public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
      InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
            xmlFile,
            InfinispanConfiguration.resolveSchemaPath(),
            Thread.currentThread().getContextClassLoader());
      return fromConfigFileParser(parser, allowDupeDomains);
   }
   /** Builds a cache manager from an XML file, rejecting duplicate JMX domains. */
   public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
      return fromXml(xmlFile, false);
   }
   /** Builds a cache manager from an XML configuration stream, rejecting duplicate JMX domains. */
   public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
      return fromStream(is, false);
   }
   /** Builds a cache manager from an XML configuration stream. */
   public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
      InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
            is, InfinispanConfiguration.findSchemaInputStream());
      return fromConfigFileParser(parser, allowDupeDomains);
   }
   // Shared tail of the fromXml/fromStream paths: parse global/default/named configs,
   // amend them for parallel-test isolation, then build and start the manager.
   private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
      GlobalConfiguration gc = parser.parseGlobalConfiguration();
      if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
      Map<String, Configuration> named = parser.parseNamedConfigurations();
      Configuration c = parser.parseDefaultConfiguration();
      minimizeThreads(gc);
      amendTransport(gc);
      EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
      for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
      cm.start();
      return cm;
   }
   /**
    * Creates an cache manager that does not support clustering or transactions.
    */
   public static EmbeddedCacheManager createLocalCacheManager() {
      return createLocalCacheManager(false);
   }
   /**
    * Creates an cache manager that does not support clustering.
    *
    * @param transactional if true, the cache manager will support transactions by default.
    */
   public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
      // -1 means "keep the default lock acquisition timeout" (see overload below).
      return createLocalCacheManager(transactional, -1);
   }
   /**
    * Creates a non-clustered cache manager.
    *
    * @param lockAcquisitionTimeout applied only when &gt; -1; otherwise the default is kept
    */
   public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      Configuration c = new Configuration();
      if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
      if (transactional) amendJTA(c);
      return newDefaultCacheManager(true, globalConfiguration, c, false);
   }
   // Enables JTA on the configuration using the test-suite's transaction manager lookup.
   private static void amendJTA(Configuration c) {
      c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
   }
   /**
    * Creates an cache manager that does support clustering.
    */
   public static EmbeddedCacheManager createClusteredCacheManager() {
      return createClusteredCacheManager(false);
   }
   /** Creates a clustered cache manager; {@code withFD} presumably enables JGroups failure detection — confirm in getJGroupsConfig. */
   public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
      return createClusteredCacheManager(withFD, new Configuration(), false);
   }
   /**
    * Creates an cache manager that does support clustering with a given default cache configuration.
    */
   public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
      return createClusteredCacheManager(defaultCacheConfig, false);
   }
   /** Creates a clustered cache manager, optionally making the default cache transactional. */
   public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
      return createClusteredCacheManager(false, defaultCacheConfig, transactional);
   }
   /** Most general clustered factory: amends marshaller, thread pools and transport before building. */
   public static EmbeddedCacheManager createClusteredCacheManager(
         boolean withFD, Configuration defaultCacheConfig, boolean transactional) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      amendTransport(globalConfiguration, withFD);
      if (transactional) amendJTA(defaultCacheConfig);
      return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
   }
   /**
    * Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
    * during running tests in parallel.
    */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
      return internalCreateJmxDomain(true, configuration, false);
   }
   /** Same as {@link #createCacheManager(GlobalConfiguration)} but lets the caller defer starting the manager. */
   public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
      return internalCreateJmxDomain(start, configuration, false);
   }
   /**
    * Creates a cache manager that won't try to modify the configured jmx domain name: {@link
    * org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
    * make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
    * to set the domain name to the name of the test class that instantiates the CacheManager.
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
      return internalCreateJmxDomain(true, configuration, true);
   }
   // Common path for the two JMX-domain variants above: amend the global config for
   // parallel-test isolation, then build with an empty default Configuration.
   private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
      amendMarshaller(configuration);
      minimizeThreads(configuration);
      amendTransport(configuration);
      return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
   }
   /** Creates a manager for the given cache mode, optionally enabling indexing on the default cache. */
   public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
      GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
      Configuration c = new Configuration();
      FluentConfiguration fluentConfiguration = c.fluent();
      if (indexing) {
         //The property is not really needed as it defaults to the same value,
         //but since it's recommended we set it explicitly to avoid logging a noisy warning.
         fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
      }
      fluentConfiguration.mode(mode);
      return createCacheManager(gc, fluentConfiguration.build());
   }
   /**
    * Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
    * tests in parallel. This is a non-transactional cache manager.
    */
   public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
      // Guard against callers accidentally passing a transactional config into the
      // non-transactional factory: warn loudly, then strip the transactional settings.
      if (defaultCacheConfig.getTransactionManagerLookup() != null || defaultCacheConfig.getTransactionManagerLookupClass() != null) {
         log.error("You have passed in a default configuration which has transactional elements set. If you wish to use transactions, use the TestCacheManagerFactory.createCacheManager(Configuration defaultCacheConfig, boolean transactional) method.");
      }
      defaultCacheConfig.setTransactionManagerLookup(null);
      defaultCacheConfig.setTransactionManagerLookupClass(null);
      return createCacheManager(defaultCacheConfig, false);
   }
   /**
    * Creates a manager for the supplied default cache configuration, deriving the global
    * configuration (clustered vs. local) from the config's cache mode.
    */
   public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig, boolean transactional) {
      GlobalConfiguration globalConfiguration;
      if (defaultCacheConfig.getCacheMode().isClustered()) {
         globalConfiguration = GlobalConfiguration.getClusteredDefault();
         amendTransport(globalConfiguration);
      }
      else {
         globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      }
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      if (transactional) amendJTA(defaultCacheConfig);
      // we stop caches during transactions all the time
      // so wait at most 1 second for ongoing transactions when stopping
      defaultCacheConfig.fluent().cacheStopTimeout(1000);
      return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
   }
   /** Creates a non-transactional manager from explicit global and default configurations. */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
      return createCacheManager(configuration, defaultCfg, false, false);
   }
   /** Creates a manager from explicit configurations, optionally transactional. */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional) {
      minimizeThreads(configuration);
      amendMarshaller(configuration);
      amendTransport(configuration);
      if (transactional) amendJTA(defaultCfg);
      return newDefaultCacheManager(true, configuration, defaultCfg, false);
   }
   /** As below, with the transport always amended for test isolation. */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName) {
      return createCacheManager(configuration, defaultCfg, transactional, keepJmxDomainName, false);
   }
   /**
    * Fully-parameterized factory.
    *
    * @param keepJmxDomainName if true, the JMX domain configured by the caller is preserved
    * @param dontFixTransport  if true, the transport settings are left untouched
    */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName, boolean dontFixTransport) {
      minimizeThreads(configuration);
      amendMarshaller(configuration);
      if (!dontFixTransport) amendTransport(configuration);
      if (transactional) amendJTA(defaultCfg);
      return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
   }
   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
      return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
   }
   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
      return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
   }
   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      globalConfiguration.setJmxDomain(jmxDomain);
      if (cacheManagerName != null)
         globalConfiguration.setCacheManagerName(cacheManagerName);
      // Use a per-thread MBean server so parallel tests do not share JMX state.
      globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
      globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
      Configuration configuration = new Configuration();
      configuration.setExposeJmxStatistics(exposeCacheJmx);
      return createCacheManager(globalConfiguration, configuration, false, true);
   }
   /** Returns a fresh default Configuration, optionally amended for JTA. */
   public static Configuration getDefaultConfiguration(boolean transactional) {
      Configuration c = new Configuration();
      if (transactional) amendJTA(c);
      return c;
   }
   /** Returns a fresh Configuration for the given cache mode; clustered modes get synchronous 2PC phases. */
   public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
      Configuration c = new Configuration();
      if (transactional) amendJTA(c);
      c.setCacheMode(cacheMode);
      if (cacheMode.isClustered()) {
         c.setSyncRollbackPhase(true);
         c.setSyncCommitPhase(true);
      }
      return c;
   }
   private static void amendTransport(GlobalConfiguration cfg) {
      amendTransport(cfg, false);
   }
   // Rewrites the JGroups transport properties so each test gets an isolated cluster
   // (unique configuration string and node name). No-op for local (transport-less) configs.
   private static void amendTransport(GlobalConfiguration configuration, boolean withFD) {
      if (configuration.getTransportClass() != null) { // transport configured, i.e. clustered; skip for local configs
         Properties newTransportProps = new Properties();
         Properties previousSettings = configuration.getTransportProperties();
         if (previousSettings != null) {
            newTransportProps.putAll(previousSettings);
         }
         String fullTestName = perThreadCacheManagers.get().fullTestName;
         String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
         if (fullTestName == null) {
            // Either we're running from within the IDE or it's a
            // @Test(timeOut=nnn) test. We rely here on some specific TestNG
            // thread naming convention which can break, but TestNG offers no
            // other alternative. It does not offer any callbacks within the
            // thread that runs the test that can timeout.
            String threadName = Thread.currentThread().getName();
            String pattern = "TestNGInvoker-";
            if (threadName.startsWith(pattern)) {
               // This is a timeout test, so for the moment rely on the test
               // method name that comes in the thread name.
               fullTestName = threadName;
               // NOTE(review): assumes the thread name contains '(' after the method
               // name; a name without it would throw StringIndexOutOfBoundsException.
               nextCacheName = threadName.substring(
                     threadName.indexOf("-") + 1, threadName.indexOf('('));
            } // else, test is being run from IDE
         }
         newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
               getJGroupsConfig(fullTestName, withFD));
         configuration.setTransportProperties(newTransportProps);
         configuration.setTransportNodeName(nextCacheName);
      }
   }
   /** Caps the async transport executor at a single thread to keep parallel tests lightweight. */
   public static void minimizeThreads(GlobalConfiguration gc) {
      Properties p = new Properties();
      p.setProperty("maxThreads", "1");
      gc.setAsyncTransportExecutorProperties(p);
   }
   /** Applies the marshaller override from {@link #MARSHALLER}, if set and loadable. */
   public static void amendMarshaller(GlobalConfiguration configuration) {
      if (MARSHALLER != null) {
         try {
            Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
            configuration.setMarshallerClass(MARSHALLER);
         } catch (ClassNotFoundException e) {
            // No-op, stick to GlobalConfiguration default.
         }
      }
   }
   // Final construction step: builds the manager and registers it in the per-thread
   // registry (keyed by the creating test method) for leak checking in testFinished().
   private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
      DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
      PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
      String methodName = extractMethodName();
      log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
      threadCacheManagers.add(methodName, defaultCacheManager);
      return defaultCacheManager;
   }
   // Walks the stack from the bottom up to find the outermost org.infinispan frame
   // that is not part of the test framework — i.e. the test method that triggered creation.
   private static String extractMethodName() {
      StackTraceElement[] stack = Thread.currentThread().getStackTrace();
      if (stack.length == 0) return null;
      for (int i = stack.length - 1; i > 0; i--) {
         StackTraceElement e = stack[i];
         String className = e.getClassName();
         if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
            return e.toString();
      }
      return null;
   }
   // Called by the test framework when a test begins on this thread.
   static void testStarted(String testName, String fullName) {
      perThreadCacheManagers.get().setTestName(testName, fullName);
   }
   // Called by the test framework when a test ends; fails hard if managers were leaked.
   static void testFinished(String testName) {
      perThreadCacheManagers.get().checkManagersClosed(testName);
      perThreadCacheManagers.get().unsetTestName();
   }
   /**
    * Per-thread bookkeeping: tracks the managers a test created, the test's name, and the
    * thread's original name (restored when the test finishes).
    */
   private static class PerThreadCacheManagers {
      String testName = null;
      private String oldThreadName;
      // Maps each created manager to the name of the test method that created it.
      HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
      String fullTestName;
      /**
       * Verifies every manager created by the test has been stopped. A still-running
       * manager aborts the whole JVM (exit code 9) so the leak cannot poison later tests.
       */
      public void checkManagersClosed(String testName) {
         for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
            if (cmEntry.getKey().getStatus().allowInvocations()) {
               String thName = Thread.currentThread().getName();
               String errorMessage = '\n' +
                     "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
                     "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
                     "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
                     "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
               log.error(errorMessage);
               System.err.println(errorMessage);
               System.exit(9);
            }
         }
         cacheManagers.clear();
      }
      /**
       * Generates the next node name for this test: "&lt;testName&gt;-NodeA", "-NodeB", …
       * (letter derived from how many managers the test has created so far).
       */
      public String getNextCacheName() {
         int index = cacheManagers.size();
         char name = (char) ((int) 'A' + index);
         return (testName != null ? testName + "-" : "") + "Node" + name;
      }
      public void add(String methodName, DefaultCacheManager cm) {
         cacheManagers.put(cm, methodName);
      }
      // Records the test's names and renames the thread so logs identify the running test.
      public void setTestName(String testName, String fullTestName) {
         this.testName = testName;
         this.fullTestName = fullTestName;
         this.oldThreadName = Thread.currentThread().getName();
         Thread.currentThread().setName("testng-" + testName);
      }
      // Clears the test name and restores the thread's original name.
      public void unsetTestName() {
         this.testName = null;
         Thread.currentThread().setName(oldThreadName);
         this.oldThreadName = null;
      }
   }
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
cm.start();
return cm;
}
/**
* Creates an cache manager that does not support clustering or transactions.
*/
public static EmbeddedCacheManager createLocalCacheManager() {
return createLocalCacheManager(false);
}
/**
* Creates an cache manager that does not support clustering.
*
* @param transactional if true, the cache manager will support transactions by default.
*/
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
if (transactional) amendJTA(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
private static void amendJTA(Configuration c) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
/**
* Creates an cache manager that does support clustering.
*/
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(false);
}
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration(), false);
}
/**
* Creates an cache manager that does support clustering with a given default cache configuration.
*/
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(defaultCacheConfig, false);
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(false, defaultCacheConfig, transactional);
}
public static EmbeddedCacheManager createClusteredCacheManager(
boolean withFD, Configuration defaultCacheConfig, boolean transactional) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, withFD);
if (transactional) amendJTA(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
* Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
* tests in parallel. This is a non-transactional cache manager.
*/
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
if (defaultCacheConfig.getTransactionManagerLookup() != null || defaultCacheConfig.getTransactionManagerLookupClass() != null) {
log.error("You have passed in a default configuration which has transactional elements set. If you wish to use transactions, use the TestCacheManagerFactory.createCacheManager(Configuration defaultCacheConfig, boolean transactional) method.");
}
defaultCacheConfig.setTransactionManagerLookup(null);
defaultCacheConfig.setTransactionManagerLookupClass(null);
return createCacheManager(defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig, boolean transactional) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
if (transactional) amendJTA(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
return createCacheManager(configuration, defaultCfg, false, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
if (transactional) amendJTA(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, transactional, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
if (transactional) amendJTA(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, false, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
if (transactional) amendJTA(c);
return c;
}
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
if (transactional) amendJTA(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
/** Amends the transport without failure detection. */
private static void amendTransport(GlobalConfiguration cfg) {
   amendTransport(cfg, false);
}

/**
 * Replaces the JGroups configuration with a per-test one so that tests running in
 * parallel do not join each other's clusters, and names the transport node after the
 * current test. No-op when the configuration has no transport class set.
 */
private static void amendTransport(GlobalConfiguration configuration, boolean withFD) {
   if (configuration.getTransportClass() != null) { // a transport is configured, i.e. a clustered config
      Properties newTransportProps = new Properties();
      Properties previousSettings = configuration.getTransportProperties();
      if (previousSettings != null) {
         // Preserve any properties the caller already configured.
         newTransportProps.putAll(previousSettings);
      }
      String fullTestName = perThreadCacheManagers.get().fullTestName;
      String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
      if (fullTestName == null) {
         // Either we're running from within the IDE or it's a
         // @Test(timeOut=nnn) test. We rely here on some specific TestNG
         // thread naming convention which can break, but TestNG offers no
         // other alternative. It does not offer any callbacks within the
         // thread that runs the test that can timeout.
         String threadName = Thread.currentThread().getName();
         String pattern = "TestNGInvoker-";
         if (threadName.startsWith(pattern)) {
            // This is a timeout test, so for the moment rely on the test
            // method name that comes in the thread name.
            // NOTE(review): assumes the thread name contains '(' after the method
            // name; indexOf('(') returning -1 would throw — confirm TestNG's format.
            fullTestName = threadName;
            nextCacheName = threadName.substring(
                  threadName.indexOf("-") + 1, threadName.indexOf('('));
         } // else, test is being run from IDE
      }
      newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
            getJGroupsConfig(fullTestName, withFD));
      configuration.setTransportProperties(newTransportProps);
      configuration.setTransportNodeName(nextCacheName);
   }
}
/** Caps the async transport executor at a single thread to keep per-test resource usage low. */
public static void minimizeThreads(GlobalConfiguration gc) {
   Properties threadProps = new Properties();
   threadProps.setProperty("maxThreads", "1");
   gc.setAsyncTransportExecutorProperties(threadProps);
}
/**
 * Applies the marshaller class named by the {@code MARSHALLER} system property,
 * but only if that class actually resolves on the context classloader; otherwise
 * the configuration's default marshaller is left untouched.
 */
public static void amendMarshaller(GlobalConfiguration configuration) {
   if (MARSHALLER != null) {
      try {
         // Probe that the class is loadable before configuring it.
         Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
         configuration.setMarshallerClass(MARSHALLER);
      } catch (ClassNotFoundException e) {
         // No-op, stick to GlobalConfiguration default.
      }
   }
}
/**
 * Instantiates the {@code DefaultCacheManager} and registers it in the per-thread
 * registry against the creating test method, so managers a test fails to shut down
 * can be reported when the test finishes.
 */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
   DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
   PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
   // Best-effort attribution: may be null when no Infinispan frame is found on the stack.
   String methodName = extractMethodName();
   log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
   threadCacheManagers.add(methodName, defaultCacheManager);
   return defaultCacheManager;
}
/**
 * Walks the current thread's stack from the outermost frame inwards and returns the
 * string form of the first Infinispan frame that is not in the test framework
 * ({@code org.infinispan.test}), or {@code null} when no such frame exists.
 */
private static String extractMethodName() {
   StackTraceElement[] frames = Thread.currentThread().getStackTrace();
   if (frames.length == 0) {
      return null;
   }
   // Skip frames[0] (Thread.getStackTrace itself); scan bottom-up.
   for (int idx = frames.length - 1; idx > 0; idx--) {
      String owner = frames[idx].getClassName();
      if (owner.contains("org.infinispan") && !owner.contains("org.infinispan.test")) {
         return frames[idx].toString();
      }
   }
   return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
/**
 * Per-thread registry of the cache managers created by the currently running test,
 * keyed by manager with the creating method name as value. Used to detect (and fail
 * hard on) managers a test neglected to shut down.
 */
private static class PerThreadCacheManagers {
   String testName = null;
   private String oldThreadName; // thread name to restore once the test ends
   HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
   String fullTestName;

   // Aborts the entire JVM (exit code 9) if any manager created by this test still accepts invocations.
   public void checkManagersClosed(String testName) {
      for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
         if (cmEntry.getKey().getStatus().allowInvocations()) {
            String thName = Thread.currentThread().getName();
            String errorMessage = '\n' +
                  "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
                  "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
                  "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
                  "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
            log.error(errorMessage);
            System.err.println(errorMessage);
            System.exit(9);
         }
      }
      cacheManagers.clear();
   }

   // Next node name: "<testName>-NodeA", then "NodeB", ...
   // NOTE(review): beyond 26 managers the suffix walks past 'Z' — confirm tests never do that.
   public String getNextCacheName() {
      int index = cacheManagers.size();
      char name = (char) ((int) 'A' + index);
      return (testName != null ? testName + "-" : "") + "Node" + name;
   }

   public void add(String methodName, DefaultCacheManager cm) {
      cacheManagers.put(cm, methodName);
   }

   // Renames the current thread to "testng-<testName>" so log output identifies the test.
   public void setTestName(String testName, String fullTestName) {
      this.testName = testName;
      this.fullTestName = fullTestName;
      this.oldThreadName = Thread.currentThread().getName();
      Thread.currentThread().setName("testng-" + testName);
   }

   // NOTE(review): fullTestName is intentionally(?) not cleared here — confirm.
   public void unsetTestName() {
      this.testName = null;
      Thread.currentThread().setName(oldThreadName);
      this.oldThreadName = null;
   }
}
}
Left
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
   // Monotonic suffix keeping jmx domains unique across managers created by parallel tests.
   private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
   public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
   private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
   // Registry of managers created by the test running on the current thread.
   private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
      @Override
      protected PerThreadCacheManagers initialValue() {
         return new PerThreadCacheManagers();
      }
   };

   // Unless the caller insists on keeping its jmx domain, assign a fresh unique one.
   private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
      if (!keepJmxDomain) {
         gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
      }
      return newDefaultCacheManager(start, gc, c);
   }

   /** Builds a cache manager from the given XML configuration file. */
   public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
      InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
            xmlFile,
            InfinispanConfiguration.resolveSchemaPath(),
            Thread.currentThread().getContextClassLoader());
      return fromConfigFileParser(parser, allowDupeDomains);
   }

   public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
      return fromXml(xmlFile, false);
   }

   public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
      return fromStream(is, false);
   }

   /** Builds a cache manager from an XML configuration stream. */
   public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
      InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
            is, InfinispanConfiguration.findSchemaInputStream());
      return fromConfigFileParser(parser, allowDupeDomains);
   }

   // Common path for fromXml/fromStream: parse, amend for parallel-suite safety,
   // create the manager, register the named configurations and start it.
   private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
      GlobalConfiguration gc = parser.parseGlobalConfiguration();
      if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
      Map<String, Configuration> named = parser.parseNamedConfigurations();
      Configuration c = parser.parseDefaultConfiguration();
      minimizeThreads(gc);
      amendTransport(gc);
      EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
      for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
      cm.start();
      return cm;
   }

   /**
    * Creates a cache manager that does not support clustering or transactions.
    */
   public static EmbeddedCacheManager createLocalCacheManager() {
      return createLocalCacheManager(false);
   }

   /**
    * Creates a cache manager that does not support clustering.
    *
    * @param transactional if true, the cache manager will support transactions by default.
    */
   public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
      return createLocalCacheManager(transactional, -1);
   }

   /** Local cache manager; a non-negative lockAcquisitionTimeout overrides the default. */
   public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      Configuration c = new Configuration();
      if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
      if (transactional) amendJTA(c);
      return newDefaultCacheManager(true, globalConfiguration, c, false);
   }

   // Wires the environment-appropriate transaction manager lookup into the configuration.
   private static void amendJTA(Configuration c) {
      c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
   }

   /**
    * Creates a cache manager that does support clustering.
    */
   public static EmbeddedCacheManager createClusteredCacheManager() {
      return createClusteredCacheManager(new TransportFlags());
   }

   public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
      return createClusteredCacheManager(new Configuration(), false, flags);
   }

   /**
    * Creates a cache manager that does support clustering with a given default cache configuration.
    */
   public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
      return createClusteredCacheManager(defaultCacheConfig, false);
   }

   public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
      return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
   }

   public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      amendTransport(globalConfiguration, flags);
      if (transactional) amendJTA(defaultCacheConfig);
      return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
   }

   /**
    * Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
    * during running tests in parallel.
    */
   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
      return internalCreateJmxDomain(true, configuration, false);
   }

   public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
      return internalCreateJmxDomain(start, configuration, false);
   }

   /**
    * Creates a cache manager that won't try to modify the configured jmx domain name: {@link
    * org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
    * make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
    * to set the domain name to the name of the test class that instantiates the CacheManager.
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
      return internalCreateJmxDomain(true, configuration, true);
   }

   private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
      amendMarshaller(configuration);
      minimizeThreads(configuration);
      amendTransport(configuration);
      return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
   }

   /** Creates a cache manager for the given cache mode, optionally with indexing enabled. */
   public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
      GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
      Configuration c = new Configuration();
      FluentConfiguration fluentConfiguration = c.fluent();
      if (indexing) {
         //The property is not really needed as it defaults to the same value,
         //but since it's recommended we set it explicitly to avoid logging a noisy warning.
         fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
      }
      fluentConfiguration.mode(mode);
      return createCacheManager(gc, fluentConfiguration.build());
   }

   /**
    * Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
    * tests in parallel. This is a non-transactional cache manager.
    */
   public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
      if (defaultCacheConfig.getTransactionManagerLookup() != null || defaultCacheConfig.getTransactionManagerLookupClass() != null) {
         log.error("You have passed in a default configuration which has transactional elements set. If you wish to use transactions, use the TestCacheManagerFactory.createCacheManager(Configuration defaultCacheConfig, boolean transactional) method.");
      }
      // Force the configuration to be non-transactional.
      defaultCacheConfig.setTransactionManagerLookup(null);
      defaultCacheConfig.setTransactionManagerLookupClass(null);
      return createCacheManager(defaultCacheConfig, false);
   }

   public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig, boolean transactional) {
      GlobalConfiguration globalConfiguration;
      if (defaultCacheConfig.getCacheMode().isClustered()) {
         globalConfiguration = GlobalConfiguration.getClusteredDefault();
         amendTransport(globalConfiguration);
      }
      else {
         globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      }
      amendMarshaller(globalConfiguration);
      minimizeThreads(globalConfiguration);
      if (transactional) amendJTA(defaultCacheConfig);
      // we stop caches during transactions all the time
      // so wait at most 1 second for ongoing transactions when stopping
      defaultCacheConfig.fluent().cacheStopTimeout(1000);
      return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
   }

   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
      return createCacheManager(configuration, defaultCfg, false, false);
   }

   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional) {
      minimizeThreads(configuration);
      amendMarshaller(configuration);
      amendTransport(configuration);
      if (transactional) amendJTA(defaultCfg);
      return newDefaultCacheManager(true, configuration, defaultCfg, false);
   }

   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName) {
      return createCacheManager(configuration, defaultCfg, transactional, keepJmxDomainName, false);
   }

   public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName, boolean dontFixTransport) {
      minimizeThreads(configuration);
      amendMarshaller(configuration);
      if (!dontFixTransport) amendTransport(configuration);
      if (transactional) amendJTA(defaultCfg);
      return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
   }

   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
      return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
   }

   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
      return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
   }

   /**
    * @see #createCacheManagerEnforceJmxDomain(String)
    */
   public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
      GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
      globalConfiguration.setJmxDomain(jmxDomain);
      if (cacheManagerName != null)
         globalConfiguration.setCacheManagerName(cacheManagerName);
      globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
      globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
      Configuration configuration = new Configuration();
      configuration.setExposeJmxStatistics(exposeCacheJmx);
      // keepJmxDomainName=true so the domain set above survives.
      return createCacheManager(globalConfiguration, configuration, false, true);
   }

   /** Returns a fresh default Configuration, optionally amended for JTA transactions. */
   public static Configuration getDefaultConfiguration(boolean transactional) {
      Configuration c = new Configuration();
      if (transactional) amendJTA(c);
      return c;
   }

   /** Returns a fresh Configuration with the given cache mode; clustered modes get sync tx phases. */
   public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
      Configuration c = new Configuration();
      if (transactional) amendJTA(c);
      c.setCacheMode(cacheMode);
      if (cacheMode.isClustered()) {
         c.setSyncRollbackPhase(true);
         c.setSyncCommitPhase(true);
      }
      return c;
   }

   /** Amends the transport with default (empty) transport flags. */
   private static void amendTransport(GlobalConfiguration cfg) {
      amendTransport(cfg, new TransportFlags());
   }

   // Gives each test its own JGroups configuration so parallel tests don't cluster together.
   private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
      if (configuration.getTransportClass() != null) { // a transport is configured, i.e. a clustered config
         Properties newTransportProps = new Properties();
         Properties previousSettings = configuration.getTransportProperties();
         if (previousSettings != null) {
            newTransportProps.putAll(previousSettings);
         }
         String fullTestName = perThreadCacheManagers.get().fullTestName;
         String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
         if (fullTestName == null) {
            // Either we're running from within the IDE or it's a
            // @Test(timeOut=nnn) test. We rely here on some specific TestNG
            // thread naming convention which can break, but TestNG offers no
            // other alternative. It does not offer any callbacks within the
            // thread that runs the test that can timeout.
            String threadName = Thread.currentThread().getName();
            String pattern = "TestNGInvoker-";
            if (threadName.startsWith(pattern)) {
               // This is a timeout test, so for the moment rely on the test
               // method name that comes in the thread name.
               fullTestName = threadName;
               nextCacheName = threadName.substring(
                     threadName.indexOf("-") + 1, threadName.indexOf('('));
            } // else, test is being run from IDE
         }
         newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
               getJGroupsConfig(fullTestName, flags));
         configuration.setTransportProperties(newTransportProps);
         configuration.setTransportNodeName(nextCacheName);
      }
   }

   /** Caps the async transport executor at a single thread. */
   public static void minimizeThreads(GlobalConfiguration gc) {
      Properties p = new Properties();
      p.setProperty("maxThreads", "1");
      gc.setAsyncTransportExecutorProperties(p);
   }

   /** Applies the MARSHALLER system-property class when it resolves on the classpath. */
   public static void amendMarshaller(GlobalConfiguration configuration) {
      if (MARSHALLER != null) {
         try {
            Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
            configuration.setMarshallerClass(MARSHALLER);
         } catch (ClassNotFoundException e) {
            // No-op, stick to GlobalConfiguration default.
         }
      }
   }

   // Instantiates the manager and registers it against the creating test method.
   private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
      DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
      PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
      String methodName = extractMethodName();
      log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
      threadCacheManagers.add(methodName, defaultCacheManager);
      return defaultCacheManager;
   }

   // Returns the outermost Infinispan (non test-framework) stack frame, or null.
   private static String extractMethodName() {
      StackTraceElement[] stack = Thread.currentThread().getStackTrace();
      if (stack.length == 0) return null;
      for (int i = stack.length - 1; i > 0; i--) {
         StackTraceElement e = stack[i];
         String className = e.getClassName();
         if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
            return e.toString();
      }
      return null;
   }

   /** Framework hook: records the starting test's name for the current thread. */
   static void testStarted(String testName, String fullName) {
      perThreadCacheManagers.get().setTestName(testName, fullName);
   }

   /** Framework hook: checks for leaked cache managers, then clears the recorded test name. */
   static void testFinished(String testName) {
      perThreadCacheManagers.get().checkManagersClosed(testName);
      perThreadCacheManagers.get().unsetTestName();
   }

   // Per-thread registry used to detect cache managers a test failed to shut down.
   private static class PerThreadCacheManagers {
      String testName = null;
      private String oldThreadName; // thread name to restore once the test ends
      HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
      String fullTestName;

      // Aborts the whole JVM (exit code 9) if any manager created by this test is still running.
      public void checkManagersClosed(String testName) {
         for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
            if (cmEntry.getKey().getStatus().allowInvocations()) {
               String thName = Thread.currentThread().getName();
               String errorMessage = '\n' +
                     "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
                     "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
                     "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
                     "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
               log.error(errorMessage);
               System.err.println(errorMessage);
               System.exit(9);
            }
         }
         cacheManagers.clear();
      }

      // Next node name: "<testName>-NodeA", "NodeB", ...
      public String getNextCacheName() {
         int index = cacheManagers.size();
         char name = (char) ((int) 'A' + index);
         return (testName != null ? testName + "-" : "") + "Node" + name;
      }

      public void add(String methodName, DefaultCacheManager cm) {
         cacheManagers.put(cm, methodName);
      }

      // Renames the current thread to "testng-<testName>" so log output identifies the test.
      public void setTestName(String testName, String fullTestName) {
         this.testName = testName;
         this.fullTestName = fullTestName;
         this.oldThreadName = Thread.currentThread().getName();
         Thread.currentThread().setName("testng-" + testName);
      }

      public void unsetTestName() {
         this.testName = null;
         Thread.currentThread().setName(oldThreadName);
         this.oldThreadName = null;
      }
   }
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Monotonic suffix keeping jmx domains unique across managers created by parallel tests.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Registry of managers created by the test running on the current thread.
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
   @Override
   protected PerThreadCacheManagers initialValue() {
      return new PerThreadCacheManagers();
   }
};

// Unless the caller insists on keeping its jmx domain, assign a fresh unique one.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
   if (!keepJmxDomain) {
      gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
   }
   return newDefaultCacheManager(start, gc, c);
}

/** Builds a cache manager from the given XML configuration file. */
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
         xmlFile,
         InfinispanConfiguration.resolveSchemaPath(),
         Thread.currentThread().getContextClassLoader());
   return fromConfigFileParser(parser, allowDupeDomains);
}

public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
   return fromXml(xmlFile, false);
}

public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
   return fromStream(is, false);
}

/** Builds a cache manager from an XML configuration stream. */
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
         is, InfinispanConfiguration.findSchemaInputStream());
   return fromConfigFileParser(parser, allowDupeDomains);
}

// Common path for fromXml/fromStream: parse, amend for parallel-suite safety,
// create the manager, register the named configurations and start it.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
   GlobalConfiguration gc = parser.parseGlobalConfiguration();
   if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
   Map<String, Configuration> named = parser.parseNamedConfigurations();
   Configuration c = parser.parseDefaultConfiguration();
   minimizeThreads(gc);
   amendTransport(gc);
   EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
   for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
   cm.start();
   return cm;
}

/**
 * Creates a cache manager that does not support clustering or transactions.
 */
public static EmbeddedCacheManager createLocalCacheManager() {
   return createLocalCacheManager(false);
}

/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
   return createLocalCacheManager(transactional, -1);
}

/** Local cache manager; a non-negative lockAcquisitionTimeout overrides the default. */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   Configuration c = new Configuration();
   if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
   if (transactional) amendJTA(c);
   return newDefaultCacheManager(true, globalConfiguration, c, false);
}

// Wires the environment-appropriate transaction manager lookup into the configuration.
private static void amendJTA(Configuration c) {
   c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}

/**
 * Creates a cache manager that does support clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
   return createClusteredCacheManager(new TransportFlags());
}

public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
   return createClusteredCacheManager(new Configuration(), false, flags);
}

/**
 * Creates a cache manager that does support clustering with a given default cache configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
   return createClusteredCacheManager(defaultCacheConfig, false);
}

public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
   return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}

public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   amendTransport(globalConfiguration, flags);
   if (transactional) amendJTA(defaultCacheConfig);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
 * Creates a local, non-transactional cache manager amended so that it won't conflict (e.g. jmx)
 * with other managers whilst running tests in parallel. Any transactional elements on the
 * supplied configuration are logged as an error and then cleared.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
   boolean hasTxElements = defaultCacheConfig.getTransactionManagerLookup() != null
         || defaultCacheConfig.getTransactionManagerLookupClass() != null;
   if (hasTxElements) {
      log.error("You have passed in a default configuration which has transactional elements set. If you wish to use transactions, use the TestCacheManagerFactory.createCacheManager(Configuration defaultCacheConfig, boolean transactional) method.");
   }
   defaultCacheConfig.setTransactionManagerLookup(null);
   defaultCacheConfig.setTransactionManagerLookupClass(null);
   return createCacheManager(defaultCacheConfig, false);
}
/**
 * Creates a cache manager for the given default cache configuration — clustered or local
 * depending on the configured cache mode — optionally amended for JTA transactions.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig, boolean transactional) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
if (transactional) amendJTA(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/** Starts a non-transactional cache manager with the given global and default cache configuration. */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
   return createCacheManager(configuration, defaultCfg, false, false);
}
/**
 * Starts a cache manager, optionally amending the default cache configuration for JTA
 * transactions.
 */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional) {
   // Delegate to the most general overload instead of duplicating its exact
   // amendment sequence (minimizeThreads, amendMarshaller, amendTransport, amendJTA).
   return createCacheManager(configuration, defaultCfg, transactional, false, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName) {
   return createCacheManager(configuration, defaultCfg, transactional, keepJmxDomainName, false);
}
/**
 * Most general overload: amends the configuration for parallel-safe test execution and starts
 * the manager.
 *
 * @param keepJmxDomainName when true, the configured jmx domain is preserved instead of being
 *                          replaced by a generated unique one
 * @param dontFixTransport  when true, the transport properties are left untouched
 */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean transactional, boolean keepJmxDomainName, boolean dontFixTransport) {
   minimizeThreads(configuration);
   amendMarshaller(configuration);
   if (!dontFixTransport) amendTransport(configuration);
   if (transactional) amendJTA(defaultCfg);
   return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
 * Creates a local cache manager whose jmx domain is exactly {@code jmxDomain}, exposing both
 * global and cache-level jmx statistics.
 *
 * @see #createCacheManagerEnforceJmxDomain(String, String, boolean, boolean)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
 * Like {@link #createCacheManagerEnforceJmxDomain(String)}, with explicit control over which
 * jmx statistics are exposed.
 *
 * @see #createCacheManagerEnforceJmxDomain(String, String, boolean, boolean)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
 * Creates a local cache manager with the exact jmx domain {@code jmxDomain}, an optional cache
 * manager name, a per-thread MBean server lookup, and the requested jmx statistics exposure.
 * The final {@code true} argument preserves the supplied domain name (keepJmxDomainName).
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, false, true);
}
/** Returns a default local Configuration, optionally amended for JTA transactions. */
public static Configuration getDefaultConfiguration(boolean transactional) {
   Configuration c = new Configuration();
   if (transactional) {
      amendJTA(c);
   }
   return c;
}
/** Returns a default Configuration for the given cache mode; clustered modes get synchronous commit and rollback. */
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
   Configuration c = new Configuration();
   if (transactional) {
      amendJTA(c);
   }
   c.setCacheMode(cacheMode);
   if (cacheMode.isClustered()) {
      c.setSyncCommitPhase(true);
      c.setSyncRollbackPhase(true);
   }
   return c;
}
// Convenience overload: amend the transport using default (empty) transport flags.
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
/**
 * Rewrites the transport properties so each test gets an isolated JGroups configuration and a
 * per-test node name, keeping parallel tests from forming clusters with each other.
 */
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) { // a transport class is configured — NOTE(review): original comment said "this is local", which appears inverted; confirm that a non-null transport class means a clustered config
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
/** Caps the async transport executor at a single thread to keep test resource usage low. */
public static void minimizeThreads(GlobalConfiguration gc) {
   Properties props = new Properties();
   props.setProperty("maxThreads", "1");
   gc.setAsyncTransportExecutorProperties(props);
}
/** Applies the marshaller class from the MARSHALLER system property, if present and loadable. */
public static void amendMarshaller(GlobalConfiguration configuration) {
   if (MARSHALLER == null) {
      return;
   }
   try {
      Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
      configuration.setMarshallerClass(MARSHALLER);
   } catch (ClassNotFoundException ignored) {
      // Marshaller class not on the classpath; keep the GlobalConfiguration default.
   }
}
/**
 * Builds the DefaultCacheManager and registers it with the current thread's bookkeeping so that
 * testFinished() can verify it was shut down.
 */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
/**
 * Scans the current stack from the outermost frame inwards (index 0 — the getStackTrace frame —
 * is intentionally skipped) and returns the first org.infinispan frame outside the
 * org.infinispan.test package, or null if no such frame exists.
 */
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0) return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
return e.toString();
}
return null;
}
/** Test-lifecycle hook: records the running test's name on the current thread's bookkeeping. */
static void testStarted(String testName, String fullName) {
   perThreadCacheManagers.get().setTestName(testName, fullName);
}
/** Test-lifecycle hook: verifies all managers were stopped, then clears the recorded test name. */
static void testFinished(String testName) {
   PerThreadCacheManagers managers = perThreadCacheManagers.get();
   managers.checkManagersClosed(testName);
   managers.unsetTestName();
}
// Per-thread registry of created cache managers plus the name of the currently running test.
private static class PerThreadCacheManagers {
// Short name of the running test; null between tests.
String testName = null;
// Thread name before setTestName() renamed it; restored by unsetTestName().
private String oldThreadName;
// Maps each created manager to the name of the method that created it.
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
// Kills the JVM (exit code 9) if the finished test left any cache manager running.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
// Derives the next node name from the number of managers created so far, e.g. "test-NodeA".
// NOTE(review): after 26 managers the letter walks past 'Z' into other characters — confirm
// tests never create that many managers per thread.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the test name and renames the current thread for easier log correlation.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
// Clears the test name and restores the original thread name.
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
Right
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Counter used to generate unique jmx domains so parallel tests don't collide.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
// Optional marshaller class name from "infinispan.test.marshaller.class" (legacy key "infinispan.marshaller.class").
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Per-thread bookkeeping of created managers; used to verify each test shuts its managers down.
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
/**
 * Builds the manager, first replacing the jmx domain with a unique generated one unless the
 * caller explicitly asked to keep the configured domain.
 */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
/** Builds a started cache manager from an XML configuration file. */
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
/** Builds a started cache manager from an XML configuration stream. */
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
// Shared tail of the fromXml/fromStream paths: parse, amend for parallel runs, create and start.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
// NOTE(review): the manager was already created with start=true; this extra start() is
// presumably an idempotent no-op — confirm against EmbeddedCacheManager semantics.
cm.start();
return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
/** Local cache manager with an optional lock acquisition timeout (ignored when negative). */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
// Flags the configuration as transactional or non-transactional via the fluent API.
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
// Wires in a transaction manager lookup when the cache is transactional.
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
// Sets the test-suite transaction manager lookup unless the caller already configured one.
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
 * Creates a cache manager that does support clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(false);
}
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
// withFD enables failure detection in the JGroups stack used for the test.
public static EmbeddedCacheManager createClusteredCacheManager( boolean withFD, Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, withFD);
updateTransactionSupport(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
 * while running tests in parallel.
 */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
 * Creates a cache manager that won't try to modify the configured jmx domain name: {@link
 * org.infinispan.config.GlobalConfiguration#getJmxDomain()}. This method must be used with care, and one should
 * make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
 * to set the domain name to the name of the test class that instantiates the CacheManager.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
// Common path: amend the configuration for parallel-safe execution, then build the manager.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
// Cache manager for the given cache mode, optionally with indexing enabled.
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
 * Creates a cache manager for the given default cache configuration (clustered or local
 * depending on its cache mode) and amends it so it won't conflict (e.g. jmx) with other
 * managers whilst running tests in parallel.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
// dontFixTransport skips the transport amendment, for tests that manage their own transport.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
 * Local cache manager with the exact jmx domain {@code jmxDomain}, exposing both global and
 * cache-level jmx statistics.
 *
 * @see #createCacheManagerEnforceJmxDomain(String, String, boolean, boolean)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
 * @see #createCacheManagerEnforceJmxDomain(String, String, boolean, boolean)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
 * Local cache manager with the exact jmx domain {@code jmxDomain}, an optional cache manager
 * name, a per-thread MBean server lookup, and the requested jmx statistics exposure. The
 * keepJmxDomainName flag passed below preserves the supplied domain name.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, true);
}
/** Default local Configuration, optionally amended for JTA transactions. */
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
/** Default Configuration for the given cache mode; clustered modes use synchronous commit/rollback. */
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, false);
}
/**
 * Rewrites the transport properties so each test gets an isolated JGroups configuration and a
 * per-test node name, keeping parallel tests from forming clusters with each other.
 */
private static void amendTransport(GlobalConfiguration configuration, boolean withFD) {
if (configuration.getTransportClass() != null) { // a transport class is configured — NOTE(review): original comment said "this is local", which appears inverted; confirm a non-null transport class means clustered
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, withFD));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
/** Caps the async transport executor at one thread to keep test resource usage low. */
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
/** Applies the marshaller class from the MARSHALLER system property, if set and loadable. */
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
/** Builds the DefaultCacheManager and registers it with the current thread's bookkeeping. */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
/**
 * Scans the current stack from the outermost frame inwards (index 0 — the getStackTrace frame —
 * is intentionally skipped) and returns the first org.infinispan frame outside the
 * org.infinispan.test package, or null if no such frame exists.
 */
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0) return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
return e.toString();
}
return null;
}
// Test-lifecycle hook: records the running test's name for this thread.
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
// Test-lifecycle hook: verifies all managers were stopped, then clears the test name.
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
// Per-thread registry of created cache managers plus the name of the currently running test.
private static class PerThreadCacheManagers {
// Short name of the running test; null between tests.
String testName = null;
// Thread name before setTestName() renamed it; restored by unsetTestName().
private String oldThreadName;
// Maps each created manager to the name of the method that created it.
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
// Kills the JVM (exit code 9) if the finished test left any cache manager running.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
// Derives the next node name from the number of managers created so far, e.g. "test-NodeA".
// NOTE(review): after 26 managers the letter walks past 'Z' — confirm tests never create that many per thread.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the test name and renames the current thread for easier log correlation.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
// Clears the test name and restores the original thread name.
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Counter used to generate unique jmx domains so parallel tests don't collide.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
// Optional marshaller class name from "infinispan.test.marshaller.class" (legacy key "infinispan.marshaller.class").
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Per-thread bookkeeping of created managers; used to verify each test shuts its managers down.
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
/**
 * Builds the manager, first replacing the jmx domain with a unique generated one unless the
 * caller explicitly asked to keep the configured domain.
 */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
/** Builds a started cache manager from an XML configuration file. */
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
         xmlFile,
         InfinispanConfiguration.resolveSchemaPath(),
         Thread.currentThread().getContextClassLoader());
   return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
   return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
   return fromStream(is, false);
}
/** Builds a started cache manager from an XML configuration stream. */
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser =
         InfinispanConfiguration.newInfinispanConfiguration(is, InfinispanConfiguration.findSchemaInputStream());
   return fromConfigFileParser(parser, allowDupeDomains);
}
// Shared tail of the fromXml/fromStream paths: parse, amend for parallel runs, create and start.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
// NOTE(review): the manager was already created with start=true; this extra start() is
// presumably an idempotent no-op — confirm against EmbeddedCacheManager semantics.
cm.start();
return cm;
}
/**
* Creates an cache manager that does not support clustering.
*
* @param transactional if true, the cache manager will support transactions by default.
*/
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
// Installs the test suite's TransactionManager lookup, but only when the
// configuration has not already chosen one (by class name or instance) —
// explicit choices made by a test take precedence.
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
* Creates an cache manager that does support clustering.
*/
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(false);
}
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
/**
 * Creates a clustered cache manager with the given default cache configuration.
 *
 * @param withFD             if true, the JGroups stack includes failure detection
 * @param defaultCacheConfig default configuration for caches created by the manager
 */
public static EmbeddedCacheManager createClusteredCacheManager( boolean withFD, Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, withFD);
updateTransactionSupport(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
// Shared implementation for the createCacheManager(GlobalConfiguration...)
// entry points: amends the supplied global configuration for parallel-test
// safety and builds a manager with an empty default Configuration.
// enforceJmxDomain=true keeps the caller's JMX domain name instead of
// generating a unique one.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
 * Creates a cache manager for the given default cache configuration, picking a
 * clustered or non-clustered global configuration to match the cache mode and
 * amending it (jmx, jgroups, threads) to avoid clashes in parallel test runs.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
/**
 * Creates a local cache manager whose JMX domain is kept exactly as supplied
 * (no unique suffix is generated). The caller is responsible for avoiding
 * domain-name collisions across the parallel suite.
 *
 * @param jmxDomain        exact JMX domain to use
 * @param cacheManagerName optional manager name; ignored when null
 * @param exposeGlobalJmx  whether global (manager-level) JMX statistics are exposed
 * @param exposeCacheJmx   whether per-cache JMX statistics are exposed
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
// Use a per-thread MBean server so concurrently running tests don't share one.
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, false);
}
// Rewrites the transport properties so each test gets its own isolated
// JGroups stack, and names the transport node after the current test.
// Does nothing for local (transport-less) configurations.
private static void amendTransport(GlobalConfiguration configuration, boolean withFD) {
if (configuration.getTransportClass() != null) { // a transport class is set, i.e. clustered config
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, withFD));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
// Caps the async transport executor at a single thread so that many cache
// managers running in one JVM (parallel test suite) don't exhaust threads.
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
// Instantiates the DefaultCacheManager and records it against the creating
// test method so testFinished() can later verify it was shut down.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
// Identifies the test-side caller that created a cache manager: walks the
// current stack from the outermost frame inwards and returns the first
// Infinispan frame that does not belong to the test framework itself, or
// null when no such frame exists.
private static String extractMethodName() {
    StackTraceElement[] frames = Thread.currentThread().getStackTrace();
    if (frames.length == 0) return null;
    for (int idx = frames.length - 1; idx > 0; idx--) {
        String cls = frames[idx].getClassName();
        boolean inInfinispan = cls.contains("org.infinispan");
        boolean inTestFwk = cls.contains("org.infinispan.test");
        if (inInfinispan && !inTestFwk) {
            return frames[idx].toString();
        }
    }
    return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
// Per-thread bookkeeping of cache managers created during a test, used to
// enforce that every test shuts down everything it started.
private static class PerThreadCacheManagers {
// Short test name; also used to rename the executing thread for log readability.
String testName = null;
// Thread name saved in setTestName() so unsetTestName() can restore it.
private String oldThreadName;
// Maps each created manager to a description of the method that created it.
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
// Fully qualified test name, consumed by amendTransport().
String fullTestName;
// Fails the whole JVM (exit code 9) if the finished test left any manager running.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
// Deliberately kill the JVM: a leaked manager would poison later tests.
System.exit(9);
}
}
cacheManagers.clear();
}
// Generates "testName-NodeA", "testName-NodeB", ... based on how many
// managers this thread has already created.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
// MergeMethods — merge-tool artifact separating two revisions of TestCacheManagerFactory; not valid Java source.
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
// Unless the caller explicitly wants to keep its JMX domain, generate a
// unique one ("infinispanN") so parallel tests never collide on JMX names.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(xmlFile, InfinispanConfiguration.resolveSchemaPath(), Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains)
gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
cm.start();
return cm;
}
/**
* Creates a cache manager that does not support clustering.
*
* @param transactional if true, the cache manager will support transactions by default.
*/
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1)
c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache())
amendJTA(c);
}
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
* Creates an cache manager that does support clustering.
*/
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(new TransportFlags());
}
/**
 * Creates a clustered cache manager with a default (non-transactional) cache
 * configuration and the given transport flags.
 *
 * @param flags transport flags controlling the JGroups stack (e.g. failure detection)
 */
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
// Bug fix: the previous body referenced 'withFD', which is not defined in
// this method (bad merge) and did not compile. Forward the supplied flags
// to the TransportFlags-aware overload instead.
return createClusteredCacheManager(new Configuration(), false, flags);
}
/**
* Creates an cache manager that does support clustering with a given default cache configuration.
*/
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, flags);
if (transactional)
amendJTA(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a clustered cache manager with the given default cache configuration.
 *
 * @param withFD             if true, the JGroups stack includes failure detection
 * @param defaultCacheConfig default configuration for caches created by the manager
 */
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD, Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
// Bug fix: this revision of the class only declares
// amendTransport(GlobalConfiguration, TransportFlags); passing the raw
// boolean (merge leftover from the older boolean overload) did not
// compile. Translate the legacy flag into a TransportFlags instance.
amendTransport(globalConfiguration, new TransportFlags().withFD(withFD));
updateTransactionSupport(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
* Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
* tests in parallel. This is a non-transactional cache manager.
*/
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
} else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport)
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
// Rewrites the transport properties so each test gets its own isolated
// JGroups stack, and names the transport node after the current test.
// Does nothing for local (transport-less) configurations.
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) {
// a transport class is configured, i.e. this is a clustered configuration
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(threadName.indexOf("-") + 1, threadName.indexOf('('));
}
// else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING, getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
// Overrides the marshaller implementation when the suite was launched with
// -Dinfinispan.test.marshaller.class (or the legacy infinispan.marshaller.class).
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// Intentionally ignored: fall back to the GlobalConfiguration default marshaller.
}
}
}
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0)
return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
return e.toString();
}
return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
private static class PerThreadCacheManagers {
String testName = null;
private String oldThreadName;
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" + "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Counter used to generate unique JMX domain names across parallel tests.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
// Optional marshaller implementation class name, read from system properties (legacy key supported).
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Per-thread registry of created cache managers, used to detect leaked managers after each test.
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
   @Override
   protected PerThreadCacheManagers initialValue() {
      return new PerThreadCacheManagers();
   }
};
// Replaces the JMX domain with a unique one unless the caller explicitly wants to keep it,
// then delegates to the tracking factory method.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
   if (!keepJmxDomain) {
      gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
   }
   return newDefaultCacheManager(start, gc, c);
}
// Builds a cache manager from an XML configuration file, optionally allowing duplicate JMX domains.
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(xmlFile, InfinispanConfiguration.resolveSchemaPath(), Thread.currentThread().getContextClassLoader());
   return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
   return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
   return fromStream(is, false);
}
// Stream variant of fromXml; the caller is responsible for closing the stream.
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(is, InfinispanConfiguration.findSchemaInputStream());
   return fromConfigFileParser(parser, allowDupeDomains);
}
// Parses global/default/named configurations, amends them for parallel test runs
// (thread pools, transport), defines the named caches, and starts the manager.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
   GlobalConfiguration gc = parser.parseGlobalConfiguration();
   if (allowDupeDomains)
      gc.setAllowDuplicateDomains(true);
   Map<String, Configuration> named = parser.parseNamedConfigurations();
   Configuration c = parser.parseDefaultConfiguration();
   minimizeThreads(gc);
   amendTransport(gc);
   EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
   for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
   cm.start();
   return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
   return createLocalCacheManager(transactional, -1);
}
/**
 * Creates a non-clustered cache manager, optionally transactional, with an optional
 * lock acquisition timeout (any value above -1 overrides the default).
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
   GlobalConfiguration gc = GlobalConfiguration.getNonClusteredDefault();
   amendMarshaller(gc);
   minimizeThreads(gc);
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   boolean hasCustomTimeout = lockAcquisitionTimeout > -1;
   if (hasCustomTimeout) {
      cfg.setLockAcquisitionTimeout(lockAcquisitionTimeout);
   }
   updateTransactionSupport(cfg);
   return newDefaultCacheManager(true, gc, cfg, false);
}
// Sets the cache's transaction mode via the fluent API.
private static void markAsTransactional(boolean transactional, Configuration c) {
   c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
// If the cache is transactional, makes sure a transaction manager lookup is configured.
private static void updateTransactionSupport(Configuration c) {
   if (c.isTransactionalCache())
      amendJTA(c);
}
// Installs the test-suite transaction manager lookup unless one is already set.
private static void amendJTA(Configuration c) {
   if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
      c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
   }
}
/**
 * Creates a cache manager that supports clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
   return createClusteredCacheManager(new TransportFlags());
}
/**
 * Creates a clustered cache manager with a default configuration and the given transport flags.
 */
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
   // BUG FIX: the previous body referenced an undefined variable 'withFD' (compile error)
   // and silently dropped the 'flags' argument. Delegate with the default configuration,
   // non-transactional, and the supplied flags.
   return createClusteredCacheManager(new Configuration(), false, flags);
}
/**
 * Creates a clustered cache manager, optionally with failure detection enabled
 * in the JGroups stack (restores the overload the botched merge removed).
 */
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
   return createClusteredCacheManager(withFD, new Configuration());
}
/**
 * Creates an cache manager that does support clustering with a given default cache configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
   return createClusteredCacheManager(false, defaultCacheConfig);
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
   return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
// Fully-parameterized variant: amends marshaller, thread pools and transport for parallel runs.
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   amendTransport(globalConfiguration, flags);
   if (transactional)
      amendJTA(defaultCacheConfig);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Variant whose flag controls JGroups failure detection.
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD, Configuration defaultCacheConfig) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   // NOTE(review): no amendTransport(GlobalConfiguration, boolean) overload is visible in this
   // file - confirm such an overload exists elsewhere, otherwise this call will not compile.
   amendTransport(globalConfiguration, withFD);
   updateTransactionSupport(defaultCacheConfig);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
 * during running tests in parallel.
 */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
   return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
   return internalCreateJmxDomain(start, configuration, false);
}
/**
 * Creates a cache manager that won't try to modify the configured jmx domain name: {@link
 * org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
 * make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
 * to set the domain name to the name of the test class that instantiates the CacheManager.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
   return internalCreateJmxDomain(true, configuration, true);
}
// Common implementation: amends marshaller, thread pools and transport, then builds the manager.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
   amendMarshaller(configuration);
   minimizeThreads(configuration);
   amendTransport(configuration);
   return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
// Builds a cache manager for the given cache mode; optionally enables indexing with a
// pinned Lucene version to avoid a noisy warning.
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
   GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
   Configuration c = new Configuration();
   FluentConfiguration fluentConfiguration = c.fluent();
   if (indexing) {
      //The property is not really needed as it defaults to the same value,
      //but since it's recommended we set it explicitly to avoid logging a noisy warning.
      fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
   }
   fluentConfiguration.mode(mode);
   return createCacheManager(gc, fluentConfiguration.build());
}
/**
 * Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
 * tests in parallel. This is a non-transactional cache manager.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
   GlobalConfiguration globalConfiguration;
   // Pick clustered vs. non-clustered global defaults based on the cache mode.
   if (defaultCacheConfig.getCacheMode().isClustered()) {
      globalConfiguration = GlobalConfiguration.getClusteredDefault();
      amendTransport(globalConfiguration);
   } else {
      globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
   }
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   updateTransactionSupport(defaultCacheConfig);
   // we stop caches during transactions all the time
   // so wait at most 1 second for ongoing transactions when stopping
   defaultCacheConfig.fluent().cacheStopTimeout(1000);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Builds a manager from explicit global and default configurations, amended for parallel runs.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
   minimizeThreads(configuration);
   amendMarshaller(configuration);
   amendTransport(configuration);
   updateTransactionSupport(defaultCfg);
   return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
   return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
// 'dontFixTransport' skips the transport amendment, for tests that manage transport themselves.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
   minimizeThreads(configuration);
   amendMarshaller(configuration);
   if (!dontFixTransport)
      amendTransport(configuration);
   updateTransactionSupport(defaultCfg);
   return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
 * @see #createCacheManagerEnforceJmxDomain(String)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
   return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
 * @see #createCacheManagerEnforceJmxDomain(String)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
   return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
 * Creates a local cache manager with the given JMX domain kept as-is, registered against a
 * per-thread MBean server, with the requested global/cache JMX statistics exposure.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
   globalConfiguration.setJmxDomain(jmxDomain);
   if (cacheManagerName != null)
      globalConfiguration.setCacheManagerName(cacheManagerName);
   globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
   globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
   Configuration configuration = new Configuration();
   configuration.setExposeJmxStatistics(exposeCacheJmx);
   return createCacheManager(globalConfiguration, configuration, true);
}
/**
 * Returns a fresh default cache configuration, optionally marked transactional.
 */
public static Configuration getDefaultConfiguration(boolean transactional) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   updateTransactionSupport(cfg);
   return cfg;
}
/**
 * Returns a fresh default configuration for the given cache mode; clustered modes
 * additionally get synchronous commit and rollback phases.
 */
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   updateTransactionSupport(cfg);
   cfg.setCacheMode(cacheMode);
   if (cacheMode.isClustered()) {
      cfg.setSyncRollbackPhase(true);
      cfg.setSyncCommitPhase(true);
   }
   return cfg;
}
// Delegates with default transport flags.
private static void amendTransport(GlobalConfiguration cfg) {
   amendTransport(cfg, new TransportFlags());
}
// Rewrites the JGroups transport properties so each test run gets an isolated channel
// configuration and a node name derived from the current test; no-op for configurations
// without a transport class.
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
   if (configuration.getTransportClass() != null) {
      // A transport class is configured, i.e. this is a clustered (not local) configuration.
      Properties newTransportProps = new Properties();
      Properties previousSettings = configuration.getTransportProperties();
      if (previousSettings != null) {
         newTransportProps.putAll(previousSettings);
      }
      String fullTestName = perThreadCacheManagers.get().fullTestName;
      String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
      if (fullTestName == null) {
         // Either we're running from within the IDE or it's a
         // @Test(timeOut=nnn) test. We rely here on some specific TestNG
         // thread naming convention which can break, but TestNG offers no
         // other alternative. It does not offer any callbacks within the
         // thread that runs the test that can timeout.
         String threadName = Thread.currentThread().getName();
         String pattern = "TestNGInvoker-";
         if (threadName.startsWith(pattern)) {
            // This is a timeout test, so for the moment rely on the test
            // method name that comes in the thread name.
            fullTestName = threadName;
            nextCacheName = threadName.substring(threadName.indexOf("-") + 1, threadName.indexOf('('));
         }
         // else, test is being run from IDE
      }
      newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING, getJGroupsConfig(fullTestName, flags));
      configuration.setTransportProperties(newTransportProps);
      configuration.setTransportNodeName(nextCacheName);
   }
}
/**
 * Caps the async transport executor at a single thread, keeping test resource usage low.
 */
public static void minimizeThreads(GlobalConfiguration gc) {
   Properties executorProps = new Properties();
   executorProps.setProperty("maxThreads", "1");
   gc.setAsyncTransportExecutorProperties(executorProps);
}
/**
 * If a test marshaller class is configured via system property, verifies it is loadable
 * and installs it on the given global configuration; otherwise leaves the default.
 */
public static void amendMarshaller(GlobalConfiguration configuration) {
   if (MARSHALLER != null) {
      try {
         Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
         configuration.setMarshallerClass(MARSHALLER);
      } catch (ClassNotFoundException e) {
         // Deliberately best-effort: fall back to the default marshaller. Previously the
         // exception was silently swallowed; record it so misconfiguration is diagnosable.
         log.debug("Test marshaller class '" + MARSHALLER + "' not found; using default marshaller", e);
      }
   }
}
// Creates the DefaultCacheManager and registers it with the per-thread tracker so
// tests that leak managers can be identified at test end.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
   DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
   PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
   String methodName = extractMethodName();
   log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
   threadCacheManagers.add(methodName, defaultCacheManager);
   return defaultCacheManager;
}
// Walks the stack from the oldest frame upward and returns the first Infinispan frame
// that is not part of the test framework package - i.e. the call site that triggered
// cache manager creation - or null if none is found.
private static String extractMethodName() {
   StackTraceElement[] stack = Thread.currentThread().getStackTrace();
   if (stack.length == 0)
      return null;
   for (int i = stack.length - 1; i > 0; i--) {
      StackTraceElement frame = stack[i];
      String cls = frame.getClassName();
      boolean infinispanFrame = cls.contains("org.infinispan");
      boolean testFrameworkFrame = cls.contains("org.infinispan.test");
      if (infinispanFrame && !testFrameworkFrame)
         return frame.toString();
   }
   return null;
}
// Called by the test framework when a test starts: records names and renames the thread.
static void testStarted(String testName, String fullName) {
   perThreadCacheManagers.get().setTestName(testName, fullName);
}
// Called when a test finishes: fails fast if managers were leaked, then clears state.
static void testFinished(String testName) {
   perThreadCacheManagers.get().checkManagersClosed(testName);
   perThreadCacheManagers.get().unsetTestName();
}
// Per-thread registry of the cache managers created during a test, used to detect
// tests that fail to shut down the managers they started.
private static class PerThreadCacheManagers {
   String testName = null;
   private String oldThreadName;
   // Maps each created manager to the test method that created it (for the error report).
   HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
   String fullTestName;
   // Exits the JVM if any registered manager is still accepting invocations - a leaked
   // manager would poison subsequent tests in the parallel suite.
   public void checkManagersClosed(String testName) {
      for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
         if (cmEntry.getKey().getStatus().allowInvocations()) {
            String thName = Thread.currentThread().getName();
            String errorMessage = '\n' + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" + "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
            log.error(errorMessage);
            System.err.println(errorMessage);
            System.exit(9);
         }
      }
      cacheManagers.clear();
   }
   // Derives the next node name ("<test>-NodeA", "-NodeB", ...) from how many managers
   // this thread has created so far.
   public String getNextCacheName() {
      int index = cacheManagers.size();
      char name = (char) ((int) 'A' + index);
      return (testName != null ? testName + "-" : "") + "Node" + name;
   }
   // Records the cache manager against the test method that created it.
   public void add(String methodName, DefaultCacheManager cm) {
      cacheManagers.put(cm, methodName);
   }
   // Stores the test's names and renames the thread so log lines identify the test.
   public void setTestName(String testName, String fullTestName) {
      this.testName = testName;
      this.fullTestName = fullTestName;
      this.oldThreadName = Thread.currentThread().getName();
      Thread.currentThread().setName("testng-" + testName);
   }
   // Restores the thread's original name and clears the recorded test name.
   public void unsetTestName() {
      this.testName = null;
      Thread.currentThread().setName(oldThreadName);
      this.oldThreadName = null;
   }
}
}
// KeepBothMethods - merge-resolution marker: the copy below keeps both createClusteredCacheManager overloads.
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Counter used to generate unique JMX domain names across parallel tests.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
// Optional marshaller implementation class name, read from system properties (legacy key supported).
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Per-thread registry of created cache managers, used to detect leaked managers after each test.
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
   @Override
   protected PerThreadCacheManagers initialValue() {
      return new PerThreadCacheManagers();
   }
};
// Replaces the JMX domain with a unique one unless the caller explicitly wants to keep it,
// then delegates to the tracking factory method.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
   if (!keepJmxDomain) {
      gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
   }
   return newDefaultCacheManager(start, gc, c);
}
// Builds a cache manager from an XML configuration file, optionally allowing duplicate JMX domains.
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(xmlFile, InfinispanConfiguration.resolveSchemaPath(), Thread.currentThread().getContextClassLoader());
   return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
   return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
   return fromStream(is, false);
}
// Stream variant of fromXml; the caller is responsible for closing the stream.
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
   InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(is, InfinispanConfiguration.findSchemaInputStream());
   return fromConfigFileParser(parser, allowDupeDomains);
}
// Parses global/default/named configurations, amends them for parallel test runs
// (thread pools, transport), defines the named caches, and starts the manager.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
   GlobalConfiguration gc = parser.parseGlobalConfiguration();
   if (allowDupeDomains)
      gc.setAllowDuplicateDomains(true);
   Map<String, Configuration> named = parser.parseNamedConfigurations();
   Configuration c = parser.parseDefaultConfiguration();
   minimizeThreads(gc);
   amendTransport(gc);
   EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
   for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
   cm.start();
   return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
   return createLocalCacheManager(transactional, -1);
}
/**
 * Creates a non-clustered cache manager, optionally transactional, with an optional
 * lock acquisition timeout (any value above -1 overrides the default).
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
   GlobalConfiguration gc = GlobalConfiguration.getNonClusteredDefault();
   amendMarshaller(gc);
   minimizeThreads(gc);
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   boolean hasCustomTimeout = lockAcquisitionTimeout > -1;
   if (hasCustomTimeout) {
      cfg.setLockAcquisitionTimeout(lockAcquisitionTimeout);
   }
   updateTransactionSupport(cfg);
   return newDefaultCacheManager(true, gc, cfg, false);
}
// Sets the cache's transaction mode via the fluent API.
private static void markAsTransactional(boolean transactional, Configuration c) {
   c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
// If the cache is transactional, makes sure a transaction manager lookup is configured.
private static void updateTransactionSupport(Configuration c) {
   if (c.isTransactionalCache())
      amendJTA(c);
}
// Installs the test-suite transaction manager lookup unless one is already set.
private static void amendJTA(Configuration c) {
   if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
      c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
   }
}
/**
 * Creates an cache manager that does support clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
   return createClusteredCacheManager(new TransportFlags());
}
// Clustered manager with a default configuration and the supplied transport flags.
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
   return createClusteredCacheManager(new Configuration(), false, flags);
}
// Clustered manager, optionally enabling failure detection in the JGroups stack.
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
   return createClusteredCacheManager(withFD, new Configuration());
}
/**
 * Creates an cache manager that does support clustering with a given default cache configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
   return createClusteredCacheManager(false, defaultCacheConfig);
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
   return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
// Fully-parameterized variant: amends marshaller, thread pools and transport for parallel runs.
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   amendTransport(globalConfiguration, flags);
   if (transactional)
      amendJTA(defaultCacheConfig);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Variant whose flag controls JGroups failure detection.
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD, Configuration defaultCacheConfig) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   // NOTE(review): no amendTransport(GlobalConfiguration, boolean) overload is visible in this
   // file - confirm such an overload exists elsewhere, otherwise this call will not compile.
   amendTransport(globalConfiguration, withFD);
   updateTransactionSupport(defaultCacheConfig);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
 * during running tests in parallel.
 */
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
   return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
   return internalCreateJmxDomain(start, configuration, false);
}
/**
 * Creates a cache manager that won't try to modify the configured jmx domain name: {@link
 * org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
 * make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
 * to set the domain name to the name of the test class that instantiates the CacheManager.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
   return internalCreateJmxDomain(true, configuration, true);
}
// Common implementation: amends marshaller, thread pools and transport, then builds the manager.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
   amendMarshaller(configuration);
   minimizeThreads(configuration);
   amendTransport(configuration);
   return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
// Builds a cache manager for the given cache mode; optionally enables indexing with a
// pinned Lucene version to avoid a noisy warning.
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
   GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
   Configuration c = new Configuration();
   FluentConfiguration fluentConfiguration = c.fluent();
   if (indexing) {
      //The property is not really needed as it defaults to the same value,
      //but since it's recommended we set it explicitly to avoid logging a noisy warning.
      fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
   }
   fluentConfiguration.mode(mode);
   return createCacheManager(gc, fluentConfiguration.build());
}
/**
 * Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
 * tests in parallel. This is a non-transactional cache manager.
 */
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
   GlobalConfiguration globalConfiguration;
   // Pick clustered vs. non-clustered global defaults based on the cache mode.
   if (defaultCacheConfig.getCacheMode().isClustered()) {
      globalConfiguration = GlobalConfiguration.getClusteredDefault();
      amendTransport(globalConfiguration);
   } else {
      globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
   }
   amendMarshaller(globalConfiguration);
   minimizeThreads(globalConfiguration);
   updateTransactionSupport(defaultCacheConfig);
   // we stop caches during transactions all the time
   // so wait at most 1 second for ongoing transactions when stopping
   defaultCacheConfig.fluent().cacheStopTimeout(1000);
   return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Builds a manager from explicit global and default configurations, amended for parallel runs.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
   minimizeThreads(configuration);
   amendMarshaller(configuration);
   amendTransport(configuration);
   updateTransactionSupport(defaultCfg);
   return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
   return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
// 'dontFixTransport' skips the transport amendment, for tests that manage transport themselves.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
   minimizeThreads(configuration);
   amendMarshaller(configuration);
   if (!dontFixTransport)
      amendTransport(configuration);
   updateTransactionSupport(defaultCfg);
   return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
 * @see #createCacheManagerEnforceJmxDomain(String)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
   return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
 * @see #createCacheManagerEnforceJmxDomain(String)
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
   return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
 * Creates a local cache manager with the given JMX domain kept as-is, registered against a
 * per-thread MBean server, with the requested global/cache JMX statistics exposure.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
   GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
   globalConfiguration.setJmxDomain(jmxDomain);
   if (cacheManagerName != null)
      globalConfiguration.setCacheManagerName(cacheManagerName);
   globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
   globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
   Configuration configuration = new Configuration();
   configuration.setExposeJmxStatistics(exposeCacheJmx);
   return createCacheManager(globalConfiguration, configuration, true);
}
/**
 * Returns a fresh default cache configuration, optionally marked transactional.
 */
public static Configuration getDefaultConfiguration(boolean transactional) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   updateTransactionSupport(cfg);
   return cfg;
}
/**
 * Returns a fresh default configuration for the given cache mode; clustered modes
 * additionally get synchronous commit and rollback phases.
 */
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   updateTransactionSupport(cfg);
   cfg.setCacheMode(cacheMode);
   if (cacheMode.isClustered()) {
      cfg.setSyncRollbackPhase(true);
      cfg.setSyncCommitPhase(true);
   }
   return cfg;
}
// Delegates with default transport flags.
private static void amendTransport(GlobalConfiguration cfg) {
   amendTransport(cfg, new TransportFlags());
}
// Rewrites the JGroups transport properties so each test run gets an isolated channel
// configuration and a node name derived from the current test; no-op for configurations
// without a transport class.
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
   if (configuration.getTransportClass() != null) {
      // A transport class is configured, i.e. this is a clustered (not local) configuration.
      Properties newTransportProps = new Properties();
      Properties previousSettings = configuration.getTransportProperties();
      if (previousSettings != null) {
         newTransportProps.putAll(previousSettings);
      }
      String fullTestName = perThreadCacheManagers.get().fullTestName;
      String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
      if (fullTestName == null) {
         // Either we're running from within the IDE or it's a
         // @Test(timeOut=nnn) test. We rely here on some specific TestNG
         // thread naming convention which can break, but TestNG offers no
         // other alternative. It does not offer any callbacks within the
         // thread that runs the test that can timeout.
         String threadName = Thread.currentThread().getName();
         String pattern = "TestNGInvoker-";
         if (threadName.startsWith(pattern)) {
            // This is a timeout test, so for the moment rely on the test
            // method name that comes in the thread name.
            fullTestName = threadName;
            nextCacheName = threadName.substring(threadName.indexOf("-") + 1, threadName.indexOf('('));
         }
         // else, test is being run from IDE
      }
      newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING, getJGroupsConfig(fullTestName, flags));
      configuration.setTransportProperties(newTransportProps);
      configuration.setTransportNodeName(nextCacheName);
   }
}
/**
 * Caps the async transport executor at a single thread, keeping parallel test runs light.
 */
public static void minimizeThreads(GlobalConfiguration gc) {
    Properties executorProps = new Properties();
    executorProps.setProperty("maxThreads", "1");
    gc.setAsyncTransportExecutorProperties(executorProps);
}
/**
 * Applies the marshaller class configured via the "infinispan.test.marshaller.class"
 * (or legacy "infinispan.marshaller.class") system property, if any. If the configured
 * class cannot be loaded, the setting is skipped on purpose and the default marshaller
 * is used instead.
 */
public static void amendMarshaller(GlobalConfiguration configuration) {
    if (MARSHALLER != null) {
        try {
            // Only configure the marshaller when it is actually loadable from the test classpath.
            Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
            configuration.setMarshallerClass(MARSHALLER);
        } catch (ClassNotFoundException ignored) {
            // Deliberately ignored: fall back to the default marshaller when the
            // requested class is absent. Was a bare empty catch before; the intent
            // is now explicit.
        }
    }
}
/**
 * Creates the {@code DefaultCacheManager} and records it in the per-thread registry
 * together with a description of the (test) method that created it.
 */
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
    DefaultCacheManager cm = new DefaultCacheManager(gc, c, start);
    String creator = extractMethodName();
    log.trace("Adding DCM (" + cm.getAddress() + ") for method: '" + creator + "'");
    perThreadCacheManagers.get().add(creator, cm);
    return cm;
}
/**
 * Walks the current stack (outermost frame first) and returns a description of the
 * first org.infinispan frame that is not part of the test framework packages
 * (org.infinispan.test), i.e. the test code that triggered cache-manager creation.
 *
 * @return a {@code StackTraceElement#toString()} description, or {@code null} when
 *         no matching frame exists.
 */
private static String extractMethodName() {
    StackTraceElement[] stack = Thread.currentThread().getStackTrace();
    if (stack.length == 0)
        return null;
    // Scan bottom-up; index 0 (the getStackTrace frame itself) is intentionally skipped.
    for (int i = stack.length - 1; i > 0; i--) {
        StackTraceElement e = stack[i];
        String className = e.getClassName();
        // contains()/!contains() replaces the old indexOf(...) != -1 / < 0 idiom.
        if (className.contains("org.infinispan") && !className.contains("org.infinispan.test"))
            return e.toString();
    }
    return null;
}
// Called by the test framework when a test begins: registers its short and full names
// with the per-thread state (also renames the current thread; see setTestName).
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
// Called by the test framework when a test ends: verifies every cache manager the test
// created was shut down (exits the JVM otherwise), then clears the per-thread test name.
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
// Per-thread registry of the cache managers a test has created, used to enforce that each
// test shuts down everything it started and to derive unique node/cache names.
private static class PerThreadCacheManagers {
// Short test name; null when no test is currently registered on this thread.
String testName = null;
// The thread's name before setTestName() renamed it; restored by unsetTestName().
private String oldThreadName;
// Maps each created manager to a description of the method that created it.
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
// Fully-qualified test name; consumed by amendTransport to build a per-test JGroups config.
String fullTestName;
// Fails the whole JVM (exit code 9) if the finished test left any manager still running.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" + "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
// Deliberately brutal: kill the suite so the resource leak is impossible to miss.
System.exit(9);
}
}
cacheManagers.clear();
}
// Next node name, e.g. "<testName>-NodeA", "<testName>-NodeB", ...
// (assumes fewer than 27 managers per test — letters past 'Z' are not handled).
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
// Registers a newly created manager together with its creating method.
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the running test and renames the current thread for easier log correlation.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
// Clears the test association and restores the original thread name.
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
// Assigns a fresh, unique jmx domain (unless the caller enforces its own) before creation,
// so managers created by concurrently running tests do not collide in JMX.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(xmlFile, InfinispanConfiguration.resolveSchemaPath(), Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
// Builds a cache manager from a parsed configuration file: amends global settings for
// parallel-test isolation, registers all named configurations, and starts the manager.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains)
gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
// NOTE(review): the manager was already created with start=true above; this second
// start() looks redundant — presumably a no-op, but confirm before removing.
cm.start();
return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
/**
 * Creates a non-clustered cache manager, optionally transactional, with an optional
 * lock-acquisition timeout (values of -1 or lower leave the default untouched).
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
    GlobalConfiguration gc = GlobalConfiguration.getNonClusteredDefault();
    amendMarshaller(gc);
    minimizeThreads(gc);
    Configuration cfg = new Configuration();
    markAsTransactional(transactional, cfg);
    if (lockAcquisitionTimeout > -1) {
        cfg.setLockAcquisitionTimeout(lockAcquisitionTimeout);
    }
    updateTransactionSupport(cfg);
    return newDefaultCacheManager(true, gc, cfg, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache())
amendJTA(c);
}
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
 * Creates a cache manager that supports clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(new TransportFlags());
}
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
return createClusteredCacheManager(new Configuration(), false, flags);
}
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
}
/**
 * Creates a cache manager that supports clustering, with a given default cache configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
/**
 * Creates a clustered cache manager with the given default cache configuration and
 * transport flags; when {@code transactional}, the configuration is amended with the
 * test transaction-manager lookup.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, flags);
if (transactional)
amendJTA(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD, Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, withFD);
updateTransactionSupport(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
// Common worker for the createCacheManager / createCacheManagerEnforceJmxDomain variants:
// amends the global configuration for parallel-test isolation and creates a manager with a
// default Configuration. enforceJmxDomain == true keeps the caller-supplied jmx domain
// instead of a generated one.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
/**
 * Creates a cache manager (clustered or local depending on {@code mode}) whose default
 * cache uses the given cache mode, optionally with indexing enabled.
 */
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
    GlobalConfiguration globalCfg;
    if (mode.isClustered()) {
        globalCfg = GlobalConfiguration.getClusteredDefault();
    } else {
        globalCfg = GlobalConfiguration.getNonClusteredDefault();
    }
    Configuration cfg = new Configuration();
    FluentConfiguration fluent = cfg.fluent();
    if (indexing) {
        // Setting the Lucene version explicitly (it already defaults to this value)
        // silences a noisy warning that Hibernate Search logs otherwise.
        fluent.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
    }
    fluent.mode(mode);
    return createCacheManager(globalCfg, fluent.build());
}
/**
* Creates a local cache manager and amends so that it won't conflict (e.g. jmx) with other managers whilst running
* tests in parallel. This is a non-transactional cache manager.
*/
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
} else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport)
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) {
//this is local
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(threadName.indexOf("-") + 1, threadName.indexOf('('));
}
// else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING, getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
}
}
}
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0)
return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
return e.toString();
}
return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
private static class PerThreadCacheManagers {
String testName = null;
private String oldThreadName;
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" + "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
Safe
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
cm.start();
return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
 * Creates a cache manager that supports clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(new TransportFlags());
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
}
=======
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
return createClusteredCacheManager(new Configuration(), false, flags);
}
>>>>>>> YOURS
/**
 * Creates a cache manager that supports clustering, with a given default cache configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
<<<<<<< MINE
=======
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
>>>>>>> YOURS
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, flags);
if (transactional) amendJTA(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a clustered cache manager with the given default cache configuration,
 * optionally enabling failure detection in the transport stack.
 */
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD, Configuration defaultCacheConfig) {
    GlobalConfiguration gc = GlobalConfiguration.getClusteredDefault();
    amendMarshaller(gc);
    minimizeThreads(gc);
    amendTransport(gc, withFD);
    updateTransactionSupport(defaultCacheConfig);
    return newDefaultCacheManager(true, gc, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
// Builds a manager for the given cache mode; when indexing is requested the recommended
// Lucene version property is set explicitly to silence a startup warning.
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
* Creates a cache manager and amends it so that it won't conflict (e.g. jmx) with other managers whilst running
* tests in parallel. Transaction support is amended only when the supplied configuration is transactional.
*/
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
// Transport only exists (and only needs amending) for clustered modes.
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Amends both configurations for parallel-suite safety before building the manager.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
// Convenience overload: the transport is always amended (dontFixTransport=false).
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
// Full-control variant; when dontFixTransport is true the transport properties are
// left exactly as the caller configured them.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
// Exposes both global and cache-level jmx statistics by default.
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
// null cacheManagerName keeps the default cache-manager name.
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
// Per-thread MBean server lookup avoids jmx clashes between parallel test threads.
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
// keepJmxDomainName=true: the caller-chosen domain must not be replaced.
return createCacheManager(globalConfiguration, configuration, true);
}
// Returns a default local Configuration, optionally marked transactional
// (with the JTA transaction-manager lookup amended for the suite).
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
// Same as getDefaultConfiguration(boolean) plus an explicit cache mode; clustered
// modes additionally get synchronous commit/rollback phases.
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
// Convenience overload using default transport flags.
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
// Rewrites the JGroups transport properties so each test gets an isolated stack and a
// readable, unique node name; no-op for managers without a transport.
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) { // a transport is configured, i.e. this manager is not local-only
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
// Install the per-test JGroups configuration; pre-existing properties were copied above.
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
/**
* Caps the async transport executor at a single thread so parallel tests do not
* spawn large thread pools.
*/
public static void minimizeThreads(GlobalConfiguration gc) {
Properties executorProps = new Properties();
executorProps.put("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(executorProps);
}
// Applies the marshaller class named by the "infinispan.test.marshaller.class" system
// property (see MARSHALLER) if that class is loadable; otherwise keeps the default.
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
// Builds the DefaultCacheManager and registers it with the current thread's tracker so
// testFinished() can later verify the test shut it down.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
/**
 * Scans the current thread's stack from the bottom (outermost frame) upwards and
 * returns the first org.infinispan frame outside the org.infinispan.test package,
 * i.e. the earliest non-test-framework caller. Returns null when no such frame exists.
 */
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
// Frame 0 is getStackTrace() itself, so the loop deliberately stops at i == 1;
// the redundant empty-stack guard was removed (the loop simply does not run then).
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
// contains(...) replaces the equivalent but less readable indexOf(...) comparisons.
if (className.contains("org.infinispan") && !className.contains("org.infinispan.test"))
return e.toString();
}
return null;
}
// Framework hook: records the running test's names for node naming and isolation.
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
// Framework hook: fails hard if the finished test leaked cache managers, then clears state.
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
// Per-thread registry of the cache managers created by the currently running test,
// used to generate unique node names and to fail fast when a test leaks managers.
private static class PerThreadCacheManagers {
String testName = null;
private String oldThreadName;
// manager -> description of the code location (method) that created it
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
// Kills the JVM (exit code 9) if any manager registered by this test still accepts
// invocations — a leaked manager would poison subsequent parallel tests.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
// Names nodes "<testName>-NodeA", "-NodeB", ... based on how many managers exist.
// NOTE(review): with more than 26 managers the suffix walks past 'Z'.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
// Registers a newly created manager with the description of its creation site.
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the test's names and renames the thread so logs identify the running test.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
// Restores the original thread name when the test finishes.
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
// Monotonic suffix used to mint unique jmx domains ("infinispanN") across parallel tests.
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
// Optional marshaller class name, read from the "infinispan.test.marshaller.class"
// system property (legacy key: "infinispan.marshaller.class").
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
// Per test-thread bookkeeping of created managers (see PerThreadCacheManagers).
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
// Unless the caller asked to keep it, replaces the jmx domain with a globally unique
// "infinispanN" name so parallel managers never clash.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
// Builds a manager from an XML configuration file (resolved via the context classloader),
// validating against the resolved schema.
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
// Overload that disallows duplicate jmx domains.
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
// Overload that disallows duplicate jmx domains.
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
// Builds a manager from an XML configuration stream, validating against the bundled schema.
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
// Shared tail of the fromXml/fromStream paths: parses global/default/named configs,
// amends them for the parallel suite, and registers all named configurations.
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
// NOTE(review): the manager was already created with start=true; this second start()
// looks redundant — confirm it is a harmless no-op before removing it.
cm.start();
return cm;
}
/**
* Creates a cache manager that does not support clustering.
*
* @param transactional if true, the cache manager will support transactions by default.
*/
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
// -1 keeps the default lock acquisition timeout.
return createLocalCacheManager(transactional, -1);
}
// Local (non-clustered) manager; lockAcquisitionTimeout values > -1 are applied,
// anything else keeps the default timeout.
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
// Flips the configuration between TRANSACTIONAL and NON_TRANSACTIONAL modes.
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
// Amends the JTA transaction-manager lookup, but only for transactional caches.
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
// Installs the test-suite transaction-manager lookup unless the caller configured one.
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
* Creates a cache manager that does support clustering.
*/
public static EmbeddedCacheManager createClusteredCacheManager() {
// Default transport flags; delegates to the TransportFlags overload.
return createClusteredCacheManager(new TransportFlags());
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
}
=======
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
return createClusteredCacheManager(new Configuration(), false, flags);
}
>>>>>>> YOURS
/**
* Creates a cache manager that does support clustering with a given default cache configuration.
*/
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
// withFD=false — presumably no failure-detection protocol is added; see the
// (boolean, Configuration) overload. TODO confirm the flag's exact semantics.
return createClusteredCacheManager(false, defaultCacheConfig);
}
<<<<<<< MINE
=======
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
>>>>>>> YOURS
// Clustered manager with explicit transport flags; JTA is amended only when the caller
// explicitly requests a transactional cache.
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
amendTransport(globalConfiguration, flags);
if (transactional) amendJTA(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
 * Creates a clustered cache manager, optionally enabling the failure-detection (FD)
 * protocol in the transport stack. Transaction support follows the supplied configuration.
 */
public static EmbeddedCacheManager createClusteredCacheManager( boolean withFD, Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
// Fix: this class has no amendTransport(GlobalConfiguration, boolean) overload, so the
// original call did not resolve; translate the flag into TransportFlags instead.
// NOTE(review): assumes TransportFlags exposes a fluent withFD(boolean) — confirm.
amendTransport(globalConfiguration, new TransportFlags().withFD(withFD));
updateTransactionSupport(defaultCacheConfig);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel. The manager is started eagerly.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
// start=true, enforceJmxDomain=false: a unique jmx domain is generated for this manager.
return internalCreateJmxDomain(true, configuration, false);
}
// Variant that lets the caller decide whether the manager is started eagerly.
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
// enforceJmxDomain=true: the caller-supplied jmx domain is kept verbatim.
return internalCreateJmxDomain(true, configuration, true);
}
// Common worker for the GlobalConfiguration-only factory methods: amends marshaller,
// thread pools and transport for parallel-suite safety, then builds the manager with
// an empty default cache Configuration.
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
// Builds a manager for the given cache mode; when indexing is requested the recommended
// Lucene version property is set explicitly to silence a startup warning.
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
/**
* Creates a cache manager and amends it so that it won't conflict (e.g. jmx) with other managers whilst running
* tests in parallel. Transaction support is amended only when the supplied configuration is transactional.
*/
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
// Transport only exists (and only needs amending) for clustered modes.
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
// Amends both configurations for parallel-suite safety before building the manager.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
// Convenience overload: the transport is always amended (dontFixTransport=false).
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
// Full-control variant; when dontFixTransport is true the transport properties are
// left exactly as the caller configured them.
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
// Exposes both global and cache-level jmx statistics by default.
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
// null cacheManagerName keeps the default cache-manager name.
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
// Per-thread MBean server lookup avoids jmx clashes between parallel test threads.
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
// keepJmxDomainName=true: the caller-chosen domain must not be replaced.
return createCacheManager(globalConfiguration, configuration, true);
}
// Returns a default local Configuration, optionally marked transactional
// (with the JTA transaction-manager lookup amended for the suite).
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
// Same as getDefaultConfiguration(boolean) plus an explicit cache mode; clustered
// modes additionally get synchronous commit/rollback phases.
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
// Convenience overload using default transport flags.
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
// Rewrites the JGroups transport properties so each test gets an isolated stack and a
// readable, unique node name; no-op for managers without a transport.
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) { // a transport is configured, i.e. this manager is not local-only
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
// Install the per-test JGroups configuration; pre-existing properties were copied above.
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
/**
* Caps the async transport executor at a single thread so parallel tests do not
* spawn large thread pools.
*/
public static void minimizeThreads(GlobalConfiguration gc) {
Properties executorProps = new Properties();
executorProps.put("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(executorProps);
}
// Applies the marshaller class named by the "infinispan.test.marshaller.class" system
// property (see MARSHALLER) if that class is loadable; otherwise keeps the default.
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
// Builds the DefaultCacheManager and registers it with the current thread's tracker so
// testFinished() can later verify the test shut it down.
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
/**
 * Scans the current thread's stack from the bottom (outermost frame) upwards and
 * returns the first org.infinispan frame outside the org.infinispan.test package,
 * i.e. the earliest non-test-framework caller. Returns null when no such frame exists.
 */
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
// Frame 0 is getStackTrace() itself, so the loop deliberately stops at i == 1;
// the redundant empty-stack guard was removed (the loop simply does not run then).
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
// contains(...) replaces the equivalent but less readable indexOf(...) comparisons.
if (className.contains("org.infinispan") && !className.contains("org.infinispan.test"))
return e.toString();
}
return null;
}
// Framework hook: records the running test's names for node naming and isolation.
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
// Framework hook: fails hard if the finished test leaked cache managers, then clears state.
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
// Per-thread registry of the cache managers created by the currently running test,
// used to generate unique node names and to fail fast when a test leaks managers.
private static class PerThreadCacheManagers {
String testName = null;
private String oldThreadName;
// manager -> description of the code location (method) that created it
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
// Kills the JVM (exit code 9) if any manager registered by this test still accepts
// invocations — a leaked manager would poison subsequent parallel tests.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
System.exit(9);
}
}
cacheManagers.clear();
}
// Names nodes "<testName>-NodeA", "-NodeB", ... based on how many managers exist.
// NOTE(review): with more than 26 managers the suffix walks past 'Z'.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
// Registers a newly created manager with the description of its creation site.
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the test's names and renames the thread so logs identify the running test.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
// Restores the original thread name when the test finishes.
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}
Unstructured
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
/**
 * Builds a cache manager from a parsed XML configuration, amending thread pools and
 * the JGroups transport so the instance is safe for the parallel test suite.
 * Named configurations are defined on the manager after creation and before start.
 */
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
// Manager is created (start=true builds internals) but caches start on cm.start().
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
cm.start();
return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
/**
 * Creates a non-clustered cache manager, optionally transactional, with an optional
 * lock-acquisition timeout override (applied only when the value is greater than -1).
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   if (lockAcquisitionTimeout > -1) {
      cfg.setLockAcquisitionTimeout(lockAcquisitionTimeout);
   }
   updateTransactionSupport(cfg);
   GlobalConfiguration global = GlobalConfiguration.getNonClusteredDefault();
   amendMarshaller(global);
   minimizeThreads(global);
   return newDefaultCacheManager(true, global, cfg, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
 * Creates a cache manager that supports clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(new TransportFlags());
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
return createClusteredCacheManager(new Configuration(), false, flags);
=======
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
>>>>>>> YOURS
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
=======
public static EmbeddedCacheManager createClusteredCacheManager( boolean withFD, Configuration defaultCacheConfig) {
>>>>>>> YOURS
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
<<<<<<< MINE
amendTransport(globalConfiguration, flags);
if (transactional) amendJTA(defaultCacheConfig);
=======
amendTransport(globalConfiguration, withFD);
updateTransactionSupport(defaultCacheConfig);
>>>>>>> YOURS
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
/**
 * Creates a local cache manager bound to the given JMX domain (kept as-is, not made
 * unique), using the per-thread MBean server lookup and the requested JMX exposure
 * flags for the global and default cache scopes.
 */
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
   GlobalConfiguration global = GlobalConfiguration.getNonClusteredDefault();
   global.setJmxDomain(jmxDomain);
   if (cacheManagerName != null) {
      global.setCacheManagerName(cacheManagerName);
   }
   global.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
   global.setExposeGlobalJmxStatistics(exposeGlobalJmx);
   Configuration cacheCfg = new Configuration();
   cacheCfg.setExposeJmxStatistics(exposeCacheJmx);
   return createCacheManager(global, cacheCfg, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
/**
 * Returns a default Configuration with the requested transactional mode and cache
 * mode; clustered modes additionally enable synchronous commit and rollback phases.
 */
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
   Configuration cfg = new Configuration();
   markAsTransactional(transactional, cfg);
   updateTransactionSupport(cfg);
   cfg.setCacheMode(cacheMode);
   if (cacheMode.isClustered()) {
      cfg.setSyncCommitPhase(true);
      cfg.setSyncRollbackPhase(true);
   }
   return cfg;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
/**
 * Rewrites the JGroups transport properties so each clustered test gets an isolated
 * stack (keyed by the running test's name) and a deterministic node name.
 * Does nothing when no transport class is configured.
 */
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) { // non-null transport class => clustered config (original comment said "local", which appears inverted — verify)
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
// Per-test JGroups configuration string keeps clusters from different tests apart.
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
/**
 * Walks the current stack trace from the outermost frame inward and returns the first
 * org.infinispan frame that is not in the org.infinispan.test package — i.e. the call
 * site that triggered cache manager creation. Returns null when no such frame exists.
 */
private static String extractMethodName() {
   StackTraceElement[] frames = Thread.currentThread().getStackTrace();
   if (frames.length == 0) {
      return null;
   }
   for (int idx = frames.length - 1; idx > 0; idx--) {
      String cls = frames[idx].getClassName();
      if (cls.contains("org.infinispan") && !cls.contains("org.infinispan.test")) {
         return frames[idx].toString();
      }
   }
   return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
/**
 * Per-thread registry of the cache managers created by the current test. Tracks the
 * test name (also used to rename the thread for log readability) and, when the test
 * finishes, enforces that every manager it created was stopped — exiting the JVM
 * otherwise so leaks cannot poison the rest of the parallel suite.
 */
private static class PerThreadCacheManagers {
String testName = null;
// Thread name before setTestName renamed it; restored by unsetTestName.
private String oldThreadName;
// Maps each created manager to the test method (stack frame) that created it.
HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
String fullTestName;
// Fails hard (System.exit) if any manager created by this test is still running.
public void checkManagersClosed(String testName) {
for (Map.Entry<EmbeddedCacheManager, String> cmEntry : cacheManagers.entrySet()) {
if (cmEntry.getKey().getStatus().allowInvocations()) {
String thName = Thread.currentThread().getName();
String errorMessage = '\n' +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
"!!!!!! (" + thName + ") The still-running cacheManager was created here: " + cmEntry.getValue() + " !!!!!!!\n" +
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
log.error(errorMessage);
System.err.println(errorMessage);
// Deliberately kills the whole JVM: a leaked manager invalidates later tests.
System.exit(9);
}
}
cacheManagers.clear();
}
// Next node name: "<test>-NodeA", "<test>-NodeB", ... based on managers created so far.
public String getNextCacheName() {
int index = cacheManagers.size();
char name = (char) ((int) 'A' + index);
return (testName != null ? testName + "-" : "") + "Node" + name;
}
public void add(String methodName, DefaultCacheManager cm) {
cacheManagers.put(cm, methodName);
}
// Records the test names and renames the thread so logs identify the running test.
public void setTestName(String testName, String fullTestName) {
this.testName = testName;
this.fullTestName = fullTestName;
this.oldThreadName = Thread.currentThread().getName();
Thread.currentThread().setName("testng-" + testName);
}
public void unsetTestName() {
this.testName = null;
Thread.currentThread().setName(oldThreadName);
this.oldThreadName = null;
}
}
}/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.test.fwk;
import org.infinispan.config.Configuration;
import org.infinispan.config.FluentConfiguration;
import org.infinispan.config.GlobalConfiguration;
import org.infinispan.config.InfinispanConfiguration;
import org.infinispan.jmx.PerThreadMBeanServerLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.LegacyKeySupportSystemProperties;
import org.infinispan.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import static org.infinispan.test.fwk.JGroupsConfigBuilder.getJGroupsConfig;
/**
* CacheManagers in unit tests should be created with this factory, in order to avoid resource clashes. See
* http://community.jboss.org/wiki/ParallelTestSuite for more details.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
*/
public class TestCacheManagerFactory {
private static AtomicInteger jmxDomainPostfix = new AtomicInteger();
public static final String MARSHALLER = LegacyKeySupportSystemProperties.getProperty("infinispan.test.marshaller.class", "infinispan.marshaller.class");
private static final Log log = LogFactory.getLog(TestCacheManagerFactory.class);
private static ThreadLocal<PerThreadCacheManagers> perThreadCacheManagers = new ThreadLocal<PerThreadCacheManagers>() {
@Override
protected PerThreadCacheManagers initialValue() {
return new PerThreadCacheManagers();
}
};
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c, boolean keepJmxDomain) {
if (!keepJmxDomain) {
gc.setJmxDomain("infinispan" + jmxDomainPostfix.incrementAndGet());
}
return newDefaultCacheManager(start, gc, c);
}
public static EmbeddedCacheManager fromXml(String xmlFile, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
xmlFile,
InfinispanConfiguration.resolveSchemaPath(),
Thread.currentThread().getContextClassLoader());
return fromConfigFileParser(parser, allowDupeDomains);
}
public static EmbeddedCacheManager fromXml(String xmlFile) throws IOException {
return fromXml(xmlFile, false);
}
public static EmbeddedCacheManager fromStream(InputStream is) throws IOException {
return fromStream(is, false);
}
public static EmbeddedCacheManager fromStream(InputStream is, boolean allowDupeDomains) throws IOException {
InfinispanConfiguration parser = InfinispanConfiguration.newInfinispanConfiguration(
is, InfinispanConfiguration.findSchemaInputStream());
return fromConfigFileParser(parser, allowDupeDomains);
}
private static EmbeddedCacheManager fromConfigFileParser(InfinispanConfiguration parser, boolean allowDupeDomains) {
GlobalConfiguration gc = parser.parseGlobalConfiguration();
if (allowDupeDomains) gc.setAllowDuplicateDomains(true);
Map<String, Configuration> named = parser.parseNamedConfigurations();
Configuration c = parser.parseDefaultConfiguration();
minimizeThreads(gc);
amendTransport(gc);
EmbeddedCacheManager cm = newDefaultCacheManager(true, gc, c, false);
for (Map.Entry<String, Configuration> e : named.entrySet()) cm.defineConfiguration(e.getKey(), e.getValue());
cm.start();
return cm;
}
/**
 * Creates a cache manager that does not support clustering.
 *
 * @param transactional if true, the cache manager will support transactions by default.
 */
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional) {
return createLocalCacheManager(transactional, -1);
}
public static EmbeddedCacheManager createLocalCacheManager(boolean transactional, long lockAcquisitionTimeout) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
Configuration c = new Configuration();
markAsTransactional(transactional, c);
if (lockAcquisitionTimeout > -1) c.setLockAcquisitionTimeout(lockAcquisitionTimeout);
updateTransactionSupport(c);
return newDefaultCacheManager(true, globalConfiguration, c, false);
}
private static void markAsTransactional(boolean transactional, Configuration c) {
c.fluent().transaction().transactionMode(transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL);
}
private static void updateTransactionSupport(Configuration c) {
if (c.isTransactionalCache()) amendJTA(c);
}
private static void amendJTA(Configuration c) {
if (c.getTransactionManagerLookupClass() == null && c.getTransactionManagerLookup() == null) {
c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
}
}
/**
 * Creates a cache manager that supports clustering.
 */
public static EmbeddedCacheManager createClusteredCacheManager() {
return createClusteredCacheManager(new TransportFlags());
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(TransportFlags flags) {
return createClusteredCacheManager(new Configuration(), false, flags);
=======
public static EmbeddedCacheManager createClusteredCacheManager(boolean withFD) {
return createClusteredCacheManager(withFD, new Configuration());
>>>>>>> YOURS
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig) {
return createClusteredCacheManager(false, defaultCacheConfig);
}
<<<<<<< MINE
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional) {
return createClusteredCacheManager(defaultCacheConfig, transactional, new TransportFlags());
}
public static EmbeddedCacheManager createClusteredCacheManager(Configuration defaultCacheConfig, boolean transactional, TransportFlags flags) {
=======
public static EmbeddedCacheManager createClusteredCacheManager( boolean withFD, Configuration defaultCacheConfig) {
>>>>>>> YOURS
GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
<<<<<<< MINE
amendTransport(globalConfiguration, flags);
if (transactional) amendJTA(defaultCacheConfig);
=======
amendTransport(globalConfiguration, withFD);
updateTransactionSupport(defaultCacheConfig);
>>>>>>> YOURS
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
/**
* Creates a cache manager and amends the supplied configuration in order to avoid conflicts (e.g. jmx, jgroups)
* during running tests in parallel.
*/
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, false);
}
public static EmbeddedCacheManager createCacheManager(boolean start, GlobalConfiguration configuration) {
return internalCreateJmxDomain(start, configuration, false);
}
/**
* Creates a cache manager that won't try to modify the configured jmx domain name: {@link
* org.infinispan.config.GlobalConfiguration#getJmxDomain()}}. This method must be used with care, and one should
* make sure that no domain name collision happens when the parallel suite executes. An approach to ensure this, is
* to set the domain name to the name of the test class that instantiates the CacheManager.
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(GlobalConfiguration configuration) {
return internalCreateJmxDomain(true, configuration, true);
}
private static EmbeddedCacheManager internalCreateJmxDomain(boolean start, GlobalConfiguration configuration, boolean enforceJmxDomain) {
amendMarshaller(configuration);
minimizeThreads(configuration);
amendTransport(configuration);
return newDefaultCacheManager(start, configuration, new Configuration(), enforceJmxDomain);
}
public static EmbeddedCacheManager createCacheManager(Configuration.CacheMode mode, boolean indexing) {
GlobalConfiguration gc = mode.isClustered() ? GlobalConfiguration.getClusteredDefault() : GlobalConfiguration.getNonClusteredDefault();
Configuration c = new Configuration();
FluentConfiguration fluentConfiguration = c.fluent();
if (indexing) {
//The property is not really needed as it defaults to the same value,
//but since it's recommended we set it explicitly to avoid logging a noisy warning.
fluentConfiguration.indexing().addProperty("hibernate.search.lucene_version", "LUCENE_CURRENT");
}
fluentConfiguration.mode(mode);
return createCacheManager(gc, fluentConfiguration.build());
}
public static EmbeddedCacheManager createCacheManager(Configuration defaultCacheConfig) {
GlobalConfiguration globalConfiguration;
if (defaultCacheConfig.getCacheMode().isClustered()) {
globalConfiguration = GlobalConfiguration.getClusteredDefault();
amendTransport(globalConfiguration);
}
else {
globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
}
amendMarshaller(globalConfiguration);
minimizeThreads(globalConfiguration);
updateTransactionSupport(defaultCacheConfig);
// we stop caches during transactions all the time
// so wait at most 1 second for ongoing transactions when stopping
defaultCacheConfig.fluent().cacheStopTimeout(1000);
return newDefaultCacheManager(true, globalConfiguration, defaultCacheConfig, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg) {
minimizeThreads(configuration);
amendMarshaller(configuration);
amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName) {
return createCacheManager(configuration, defaultCfg, keepJmxDomainName, false);
}
public static EmbeddedCacheManager createCacheManager(GlobalConfiguration configuration, Configuration defaultCfg, boolean keepJmxDomainName, boolean dontFixTransport) {
minimizeThreads(configuration);
amendMarshaller(configuration);
if (!dontFixTransport) amendTransport(configuration);
updateTransactionSupport(defaultCfg);
return newDefaultCacheManager(true, configuration, defaultCfg, keepJmxDomainName);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain) {
return createCacheManagerEnforceJmxDomain(jmxDomain, true, true);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
return createCacheManagerEnforceJmxDomain(jmxDomain, null, exposeGlobalJmx, exposeCacheJmx);
}
/**
* @see #createCacheManagerEnforceJmxDomain(String)
*/
public static EmbeddedCacheManager createCacheManagerEnforceJmxDomain(String jmxDomain, String cacheManagerName, boolean exposeGlobalJmx, boolean exposeCacheJmx) {
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
globalConfiguration.setJmxDomain(jmxDomain);
if (cacheManagerName != null)
globalConfiguration.setCacheManagerName(cacheManagerName);
globalConfiguration.setMBeanServerLookup(PerThreadMBeanServerLookup.class.getName());
globalConfiguration.setExposeGlobalJmxStatistics(exposeGlobalJmx);
Configuration configuration = new Configuration();
configuration.setExposeJmxStatistics(exposeCacheJmx);
return createCacheManager(globalConfiguration, configuration, true);
}
public static Configuration getDefaultConfiguration(boolean transactional) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
return c;
}
public static Configuration getDefaultConfiguration(boolean transactional, Configuration.CacheMode cacheMode) {
Configuration c = new Configuration();
markAsTransactional(transactional, c);
updateTransactionSupport(c);
c.setCacheMode(cacheMode);
if (cacheMode.isClustered()) {
c.setSyncRollbackPhase(true);
c.setSyncCommitPhase(true);
}
return c;
}
private static void amendTransport(GlobalConfiguration cfg) {
amendTransport(cfg, new TransportFlags());
}
private static void amendTransport(GlobalConfiguration configuration, TransportFlags flags) {
if (configuration.getTransportClass() != null) { //this is local
Properties newTransportProps = new Properties();
Properties previousSettings = configuration.getTransportProperties();
if (previousSettings != null) {
newTransportProps.putAll(previousSettings);
}
String fullTestName = perThreadCacheManagers.get().fullTestName;
String nextCacheName = perThreadCacheManagers.get().getNextCacheName();
if (fullTestName == null) {
// Either we're running from within the IDE or it's a
// @Test(timeOut=nnn) test. We rely here on some specific TestNG
// thread naming convention which can break, but TestNG offers no
// other alternative. It does not offer any callbacks within the
// thread that runs the test that can timeout.
String threadName = Thread.currentThread().getName();
String pattern = "TestNGInvoker-";
if (threadName.startsWith(pattern)) {
// This is a timeout test, so for the moment rely on the test
// method name that comes in the thread name.
fullTestName = threadName;
nextCacheName = threadName.substring(
threadName.indexOf("-") + 1, threadName.indexOf('('));
} // else, test is being run from IDE
}
newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING,
getJGroupsConfig(fullTestName, flags));
configuration.setTransportProperties(newTransportProps);
configuration.setTransportNodeName(nextCacheName);
}
}
public static void minimizeThreads(GlobalConfiguration gc) {
Properties p = new Properties();
p.setProperty("maxThreads", "1");
gc.setAsyncTransportExecutorProperties(p);
}
public static void amendMarshaller(GlobalConfiguration configuration) {
if (MARSHALLER != null) {
try {
Util.loadClassStrict(MARSHALLER, Thread.currentThread().getContextClassLoader());
configuration.setMarshallerClass(MARSHALLER);
} catch (ClassNotFoundException e) {
// No-op, stick to GlobalConfiguration default.
}
}
}
private static DefaultCacheManager newDefaultCacheManager(boolean start, GlobalConfiguration gc, Configuration c) {
DefaultCacheManager defaultCacheManager = new DefaultCacheManager(gc, c, start);
PerThreadCacheManagers threadCacheManagers = perThreadCacheManagers.get();
String methodName = extractMethodName();
log.trace("Adding DCM (" + defaultCacheManager.getAddress() + ") for method: '" + methodName + "'");
threadCacheManagers.add(methodName, defaultCacheManager);
return defaultCacheManager;
}
private static String extractMethodName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0) return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if ((className.indexOf("org.infinispan") != -1) && className.indexOf("org.infinispan.test") < 0)
return e.toString();
}
return null;
}
static void testStarted(String testName, String fullName) {
perThreadCacheManagers.get().setTestName(testName, fullName);
}
static void testFinished(String testName) {
perThreadCacheManagers.get().checkManagersClosed(testName);
perThreadCacheManagers.get().unsetTestName();
}
/**
 * Per-thread bookkeeping of the cache managers a test creates, used to detect
 * tests that finish without shutting down every manager they started.
 */
private static class PerThreadCacheManagers {
    // Name of the test currently running on this thread (null between tests).
    String testName = null;
    // Thread name to restore once the test finishes.
    private String oldThreadName;
    // Each started manager mapped to a description of where it was created.
    HashMap<EmbeddedCacheManager, String> cacheManagers = new HashMap<EmbeddedCacheManager, String>();
    String fullTestName;

    /**
     * Aborts the JVM if any manager registered on this thread is still running,
     * printing which test leaked it and where it was created; otherwise clears
     * the registry for the next test.
     */
    public void checkManagersClosed(String testName) {
        for (Map.Entry<EmbeddedCacheManager, String> entry : cacheManagers.entrySet()) {
            // A manager that still accepts invocations was never shut down.
            if (!entry.getKey().getStatus().allowInvocations()) {
                continue;
            }
            String thName = Thread.currentThread().getName();
            String bar = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
            String errorMessage = '\n' +
                    bar +
                    "!!!!!! (" + thName + ") Exiting because " + testName + " has NOT shut down all the cache managers it has started !!!!!!!\n" +
                    "!!!!!! (" + thName + ") The still-running cacheManager was created here: " + entry.getValue() + " !!!!!!!\n" +
                    bar;
            log.error(errorMessage);
            System.err.println(errorMessage);
            System.exit(9);
        }
        cacheManagers.clear();
    }

    /** Derives the next node name, e.g. "myTest-NodeA", "myTest-NodeB", ... */
    public String getNextCacheName() {
        char nodeLetter = (char) ('A' + cacheManagers.size());
        String prefix = (testName == null) ? "" : testName + "-";
        return prefix + "Node" + nodeLetter;
    }

    /** Registers a started manager together with the location that created it. */
    public void add(String methodName, DefaultCacheManager cm) {
        cacheManagers.put(cm, methodName);
    }

    /** Remembers the running test and renames the thread after it for log readability. */
    public void setTestName(String testName, String fullTestName) {
        this.fullTestName = fullTestName;
        this.testName = testName;
        this.oldThreadName = Thread.currentThread().getName();
        Thread.currentThread().setName("testng-" + testName);
    }

    /** Clears the test association and restores the thread's original name. */
    public void unsetTestName() {
        this.testName = null;
        Thread.currentThread().setName(oldThreadName);
        this.oldThreadName = null;
    }
}
}
Diff Result
No diff
Case 23 - java_jedis.rev_155af_88e66..BinaryClient.java
Base
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
/**
 * Client for the raw ({@code byte[]}-based) Redis protocol. Every method
 * serializes exactly one Redis command onto the underlying {@link Connection};
 * the matching reply is consumed separately by the caller (e.g. via
 * {@code getStatusCodeReply()}).
 *
 * <p>Session state (password, selected database) is remembered so that
 * {@link #connect()} can restore it transparently after a reconnect.
 * Instances are not thread-safe.
 */
public class BinaryClient extends Connection {
    /** Pivot position keyword for the LINSERT command. */
    public enum LIST_POSITION {
        BEFORE, AFTER;

        /** Raw protocol bytes of the keyword, encoded once per constant. */
        public final byte[] raw;

        private LIST_POSITION() {
            raw = SafeEncoder.encode(name());
        }
    }

    // True while a MULTI transaction is open and commands are being queued.
    private boolean isInMulti;

    // Password replayed via AUTH on every (re)connect; null means no auth.
    private String password;

    // Database index replayed via SELECT on reconnect; reset by quit()/disconnect().
    private long db;

    public boolean isInMulti() {
        return isInMulti;
    }

    public BinaryClient(final String host) {
        super(host);
    }

    public BinaryClient(final String host, final int port) {
        super(host, port);
    }

    public void setPassword(final String password) {
        this.password = password;
    }

    /**
     * Opens the socket if needed and replays AUTH / SELECT so a reconnected
     * session carries the same credentials and database as before.
     */
    @Override
    public void connect() {
        if (!isConnected()) {
            super.connect();
            if (password != null) {
                auth(password);
                getStatusCodeReply();
            }
            if (db > 0) {
                select((int) db);
                getStatusCodeReply();
            }
        }
    }

    /**
     * Builds the argument vector for commands shaped {@code CMD first rest...}
     * by prepending {@code first} to {@code rest}.
     */
    private static byte[][] joinParameters(final byte[] first, final byte[][] rest) {
        final byte[][] params = new byte[rest.length + 1][];
        params[0] = first;
        System.arraycopy(rest, 0, params, 1, rest.length);
        return params;
    }

    // ---------------- connection / generic key commands ----------------

    public void ping() {
        sendCommand(PING);
    }

    public void set(final byte[] key, final byte[] value) {
        sendCommand(Command.SET, key, value);
    }

    public void get(final byte[] key) {
        sendCommand(Command.GET, key);
    }

    /** Sends QUIT; forgets the db index so a later reconnect starts on db 0. */
    public void quit() {
        db = 0;
        sendCommand(QUIT);
    }

    public void exists(final byte[] key) {
        sendCommand(EXISTS, key);
    }

    public void del(final byte[]... keys) {
        sendCommand(DEL, keys);
    }

    public void type(final byte[] key) {
        sendCommand(TYPE, key);
    }

    public void flushDB() {
        sendCommand(FLUSHDB);
    }

    public void keys(final byte[] pattern) {
        sendCommand(KEYS, pattern);
    }

    public void randomKey() {
        sendCommand(RANDOMKEY);
    }

    public void rename(final byte[] oldkey, final byte[] newkey) {
        sendCommand(RENAME, oldkey, newkey);
    }

    public void renamenx(final byte[] oldkey, final byte[] newkey) {
        sendCommand(RENAMENX, oldkey, newkey);
    }

    public void dbSize() {
        sendCommand(DBSIZE);
    }

    public void expire(final byte[] key, final int seconds) {
        sendCommand(EXPIRE, key, toByteArray(seconds));
    }

    public void expireAt(final byte[] key, final long unixTime) {
        sendCommand(EXPIREAT, key, toByteArray(unixTime));
    }

    public void ttl(final byte[] key) {
        sendCommand(TTL, key);
    }

    /** Switches database; the index is remembered so connect() can replay it. */
    public void select(final int index) {
        db = index;
        sendCommand(SELECT, toByteArray(index));
    }

    public void move(final byte[] key, final int dbIndex) {
        sendCommand(MOVE, key, toByteArray(dbIndex));
    }

    public void flushAll() {
        sendCommand(FLUSHALL);
    }

    // ---------------- string commands ----------------

    public void getSet(final byte[] key, final byte[] value) {
        sendCommand(GETSET, key, value);
    }

    public void mget(final byte[]... keys) {
        sendCommand(MGET, keys);
    }

    public void setnx(final byte[] key, final byte[] value) {
        sendCommand(SETNX, key, value);
    }

    public void setex(final byte[] key, final int seconds, final byte[] value) {
        sendCommand(SETEX, key, toByteArray(seconds), value);
    }

    public void mset(final byte[]... keysvalues) {
        sendCommand(MSET, keysvalues);
    }

    public void msetnx(final byte[]... keysvalues) {
        sendCommand(MSETNX, keysvalues);
    }

    public void decrBy(final byte[] key, final long integer) {
        sendCommand(DECRBY, key, toByteArray(integer));
    }

    public void decr(final byte[] key) {
        sendCommand(DECR, key);
    }

    public void incrBy(final byte[] key, final long integer) {
        sendCommand(INCRBY, key, toByteArray(integer));
    }

    public void incr(final byte[] key) {
        sendCommand(INCR, key);
    }

    public void append(final byte[] key, final byte[] value) {
        sendCommand(APPEND, key, value);
    }

    public void substr(final byte[] key, final int start, final int end) {
        sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
    }

    // ---------------- hash commands ----------------

    public void hset(final byte[] key, final byte[] field, final byte[] value) {
        sendCommand(HSET, key, field, value);
    }

    public void hget(final byte[] key, final byte[] field) {
        sendCommand(HGET, key, field);
    }

    public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
        sendCommand(HSETNX, key, field, value);
    }

    public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
        final List<byte[]> params = new ArrayList<byte[]>();
        params.add(key);
        // Iterate entries directly to avoid a second hash lookup per field.
        for (final Map.Entry<byte[], byte[]> entry : hash.entrySet()) {
            params.add(entry.getKey());
            params.add(entry.getValue());
        }
        sendCommand(HMSET, params.toArray(new byte[params.size()][]));
    }

    public void hmget(final byte[] key, final byte[]... fields) {
        sendCommand(HMGET, joinParameters(key, fields));
    }

    public void hincrBy(final byte[] key, final byte[] field, final long value) {
        sendCommand(HINCRBY, key, field, toByteArray(value));
    }

    public void hexists(final byte[] key, final byte[] field) {
        sendCommand(HEXISTS, key, field);
    }

    public void hdel(final byte[] key, final byte[] field) {
        sendCommand(HDEL, key, field);
    }

    public void hlen(final byte[] key) {
        sendCommand(HLEN, key);
    }

    public void hkeys(final byte[] key) {
        sendCommand(HKEYS, key);
    }

    public void hvals(final byte[] key) {
        sendCommand(HVALS, key);
    }

    public void hgetAll(final byte[] key) {
        sendCommand(HGETALL, key);
    }

    // ---------------- list commands ----------------

    public void rpush(final byte[] key, final byte[] string) {
        sendCommand(RPUSH, key, string);
    }

    public void lpush(final byte[] key, final byte[] string) {
        sendCommand(LPUSH, key, string);
    }

    public void llen(final byte[] key) {
        sendCommand(LLEN, key);
    }

    public void lrange(final byte[] key, final long start, final long end) {
        sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
    }

    public void ltrim(final byte[] key, final long start, final long end) {
        sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
    }

    public void lindex(final byte[] key, final long index) {
        sendCommand(LINDEX, key, toByteArray(index));
    }

    public void lset(final byte[] key, final long index, final byte[] value) {
        sendCommand(LSET, key, toByteArray(index), value);
    }

    public void lrem(final byte[] key, long count, final byte[] value) {
        sendCommand(LREM, key, toByteArray(count), value);
    }

    public void lpop(final byte[] key) {
        sendCommand(LPOP, key);
    }

    public void rpop(final byte[] key) {
        sendCommand(RPOP, key);
    }

    public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
        sendCommand(RPOPLPUSH, srckey, dstkey);
    }

    // ---------------- set commands ----------------

    public void sadd(final byte[] key, final byte[] member) {
        sendCommand(SADD, key, member);
    }

    public void smembers(final byte[] key) {
        sendCommand(SMEMBERS, key);
    }

    public void srem(final byte[] key, final byte[] member) {
        sendCommand(SREM, key, member);
    }

    public void spop(final byte[] key) {
        sendCommand(SPOP, key);
    }

    public void smove(final byte[] srckey, final byte[] dstkey,
            final byte[] member) {
        sendCommand(SMOVE, srckey, dstkey, member);
    }

    public void scard(final byte[] key) {
        sendCommand(SCARD, key);
    }

    public void sismember(final byte[] key, final byte[] member) {
        sendCommand(SISMEMBER, key, member);
    }

    public void sinter(final byte[]... keys) {
        sendCommand(SINTER, keys);
    }

    public void sinterstore(final byte[] dstkey, final byte[]... keys) {
        sendCommand(SINTERSTORE, joinParameters(dstkey, keys));
    }

    public void sunion(final byte[]... keys) {
        sendCommand(SUNION, keys);
    }

    public void sunionstore(final byte[] dstkey, final byte[]... keys) {
        sendCommand(SUNIONSTORE, joinParameters(dstkey, keys));
    }

    public void sdiff(final byte[]... keys) {
        sendCommand(SDIFF, keys);
    }

    public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
        sendCommand(SDIFFSTORE, joinParameters(dstkey, keys));
    }

    public void srandmember(final byte[] key) {
        sendCommand(SRANDMEMBER, key);
    }

    // ---------------- sorted-set commands ----------------

    public void zadd(final byte[] key, final double score, final byte[] member) {
        sendCommand(ZADD, key, toByteArray(score), member);
    }

    public void zrange(final byte[] key, final int start, final int end) {
        sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
    }

    public void zrem(final byte[] key, final byte[] member) {
        sendCommand(ZREM, key, member);
    }

    public void zincrby(final byte[] key, final double score,
            final byte[] member) {
        sendCommand(ZINCRBY, key, toByteArray(score), member);
    }

    public void zrank(final byte[] key, final byte[] member) {
        sendCommand(ZRANK, key, member);
    }

    public void zrevrank(final byte[] key, final byte[] member) {
        sendCommand(ZREVRANK, key, member);
    }

    public void zrevrange(final byte[] key, final int start, final int end) {
        sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
    }

    public void zrangeWithScores(final byte[] key, final int start,
            final int end) {
        sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
                WITHSCORES.raw);
    }

    public void zrevrangeWithScores(final byte[] key, final int start,
            final int end) {
        sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
                WITHSCORES.raw);
    }

    public void zcard(final byte[] key) {
        sendCommand(ZCARD, key);
    }

    public void zscore(final byte[] key, final byte[] member) {
        sendCommand(ZSCORE, key, member);
    }

    // ---------------- transactions ----------------

    public void multi() {
        sendCommand(MULTI);
        isInMulti = true;
    }

    public void discard() {
        sendCommand(DISCARD);
        isInMulti = false;
    }

    public void exec() {
        sendCommand(EXEC);
        isInMulti = false;
    }

    public void watch(final byte[]... keys) {
        sendCommand(WATCH, keys);
    }

    public void unwatch() {
        sendCommand(UNWATCH);
    }

    // ---------------- sort / blocking ops ----------------

    public void sort(final byte[] key) {
        sendCommand(SORT, key);
    }

    public void sort(final byte[] key, final SortingParams sortingParameters) {
        final List<byte[]> args = new ArrayList<byte[]>();
        args.add(key);
        args.addAll(sortingParameters.getParams());
        sendCommand(SORT, args.toArray(new byte[args.size()][]));
    }

    public void blpop(final byte[][] args) {
        sendCommand(BLPOP, args);
    }

    public void sort(final byte[] key, final SortingParams sortingParameters,
            final byte[] dstkey) {
        final List<byte[]> args = new ArrayList<byte[]>();
        args.add(key);
        args.addAll(sortingParameters.getParams());
        args.add(STORE.raw);
        args.add(dstkey);
        sendCommand(SORT, args.toArray(new byte[args.size()][]));
    }

    public void sort(final byte[] key, final byte[] dstkey) {
        sendCommand(SORT, key, STORE.raw, dstkey);
    }

    public void brpop(final byte[][] args) {
        sendCommand(BRPOP, args);
    }

    /** Sends AUTH, remembering the password so connect() can replay it. */
    public void auth(final String password) {
        setPassword(password);
        sendCommand(AUTH, password);
    }

    // ---------------- pub/sub ----------------

    public void subscribe(final byte[]... channels) {
        sendCommand(SUBSCRIBE, channels);
    }

    public void publish(final byte[] channel, final byte[] message) {
        sendCommand(PUBLISH, channel, message);
    }

    public void unsubscribe() {
        sendCommand(UNSUBSCRIBE);
    }

    public void unsubscribe(final byte[]... channels) {
        sendCommand(UNSUBSCRIBE, channels);
    }

    public void psubscribe(final byte[]... patterns) {
        sendCommand(PSUBSCRIBE, patterns);
    }

    public void punsubscribe() {
        sendCommand(PUNSUBSCRIBE);
    }

    public void punsubscribe(final byte[]... patterns) {
        sendCommand(PUNSUBSCRIBE, patterns);
    }

    // ---------------- sorted-set range queries ----------------

    public void zcount(final byte[] key, final double min, final double max) {
        sendCommand(ZCOUNT, key, toByteArray(min), toByteArray(max));
    }

    public void zrangeByScore(final byte[] key, final double min,
            final double max) {
        sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max));
    }

    public void zrevrangeByScore(final byte[] key, final double max,
            final double min) {
        sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min));
    }

    public void zrangeByScore(final byte[] key, final byte[] min,
            final byte[] max) {
        sendCommand(ZRANGEBYSCORE, key, min, max);
    }

    public void zrevrangeByScore(final byte[] key, final byte[] max,
            final byte[] min) {
        sendCommand(ZREVRANGEBYSCORE, key, max, min);
    }

    public void zrangeByScore(final byte[] key, final double min,
            final double max, final int offset, int count) {
        sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
                LIMIT.raw, toByteArray(offset), toByteArray(count));
    }

    public void zrevrangeByScore(final byte[] key, final double max,
            final double min, final int offset, int count) {
        sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
                LIMIT.raw, toByteArray(offset), toByteArray(count));
    }

    public void zrangeByScoreWithScores(final byte[] key, final double min,
            final double max) {
        sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
                WITHSCORES.raw);
    }

    public void zrevrangeByScoreWithScores(final byte[] key, final double max,
            final double min) {
        sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
                WITHSCORES.raw);
    }

    public void zrangeByScoreWithScores(final byte[] key, final double min,
            final double max, final int offset, final int count) {
        sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
                LIMIT.raw, toByteArray(offset), toByteArray(count),
                WITHSCORES.raw);
    }

    public void zrevrangeByScoreWithScores(final byte[] key, final double max,
            final double min, final int offset, final int count) {
        sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
                LIMIT.raw, toByteArray(offset), toByteArray(count),
                WITHSCORES.raw);
    }

    public void zremrangeByRank(final byte[] key, final int start, final int end) {
        sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
    }

    public void zremrangeByScore(final byte[] key, final double start,
            final double end) {
        sendCommand(ZREMRANGEBYSCORE, key, toByteArray(start), toByteArray(end));
    }

    // ZUNIONSTORE/ZINTERSTORE take the number of source keys before the keys.

    public void zunionstore(final byte[] dstkey, final byte[]... sets) {
        final byte[][] params = new byte[sets.length + 2][];
        params[0] = dstkey;
        params[1] = toByteArray(sets.length);
        System.arraycopy(sets, 0, params, 2, sets.length);
        sendCommand(ZUNIONSTORE, params);
    }

    public void zunionstore(final byte[] dstkey, final ZParams params,
            final byte[]... sets) {
        final List<byte[]> args = new ArrayList<byte[]>();
        args.add(dstkey);
        args.add(toByteArray(sets.length));
        for (final byte[] set : sets) {
            args.add(set);
        }
        args.addAll(params.getParams());
        sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
    }

    public void zinterstore(final byte[] dstkey, final byte[]... sets) {
        final byte[][] params = new byte[sets.length + 2][];
        params[0] = dstkey;
        params[1] = toByteArray(sets.length);
        System.arraycopy(sets, 0, params, 2, sets.length);
        sendCommand(ZINTERSTORE, params);
    }

    public void zinterstore(final byte[] dstkey, final ZParams params,
            final byte[]... sets) {
        final List<byte[]> args = new ArrayList<byte[]>();
        args.add(dstkey);
        args.add(toByteArray(sets.length));
        for (final byte[] set : sets) {
            args.add(set);
        }
        args.addAll(params.getParams());
        sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
    }

    // ---------------- server administration ----------------

    public void save() {
        sendCommand(SAVE);
    }

    public void bgsave() {
        sendCommand(BGSAVE);
    }

    public void bgrewriteaof() {
        sendCommand(BGREWRITEAOF);
    }

    public void lastsave() {
        sendCommand(LASTSAVE);
    }

    public void shutdown() {
        sendCommand(SHUTDOWN);
    }

    public void info() {
        sendCommand(INFO);
    }

    public void monitor() {
        sendCommand(MONITOR);
    }

    public void slaveof(final String host, final int port) {
        sendCommand(SLAVEOF, host, String.valueOf(port));
    }

    public void slaveofNoOne() {
        sendCommand(SLAVEOF, NO.raw, ONE.raw);
    }

    public void configGet(final String pattern) {
        sendCommand(CONFIG, Keyword.GET.name(), pattern);
    }

    public void configSet(final String parameter, final String value) {
        sendCommand(CONFIG, Keyword.SET.name(), parameter, value);
    }

    public void strlen(final byte[] key) {
        sendCommand(STRLEN, key);
    }

    public void sync() {
        sendCommand(SYNC);
    }

    public void lpushx(final byte[] key, final byte[] string) {
        sendCommand(LPUSHX, key, string);
    }

    public void persist(final byte[] key) {
        sendCommand(PERSIST, key);
    }

    public void rpushx(final byte[] key, final byte[] string) {
        sendCommand(RPUSHX, key, string);
    }

    public void echo(final byte[] string) {
        sendCommand(ECHO, string);
    }

    public void linsert(final byte[] key, final LIST_POSITION where,
            final byte[] pivot, final byte[] value) {
        sendCommand(LINSERT, key, where.raw, pivot, value);
    }

    public void debug(final DebugParams params) {
        sendCommand(DEBUG, params.getCommand());
    }

    public void brpoplpush(final byte[] source, final byte[] destination,
            final int timeout) {
        sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
    }

    public void configResetStat() {
        sendCommand(CONFIG, Keyword.RESETSTAT.name());
    }

    // ---------------- bit / range string commands ----------------

    public void setbit(byte[] key, long offset, byte[] value) {
        sendCommand(SETBIT, key, toByteArray(offset), value);
    }

    public void getbit(byte[] key, long offset) {
        sendCommand(GETBIT, key, toByteArray(offset));
    }

    public void setrange(byte[] key, long offset, byte[] value) {
        sendCommand(SETRANGE, key, toByteArray(offset), value);
    }

    public void getrange(byte[] key, long startOffset, long endOffset) {
        sendCommand(GETRANGE, key, toByteArray(startOffset),
                toByteArray(endOffset));
    }

    /** @return the index of the currently selected database. */
    public Long getDB() {
        return db;
    }

    /** Closes the socket; resets the db index so a reconnect starts on db 0. */
    @Override
    public void disconnect() {
        db = 0;
        super.disconnect();
    }
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final byte[] field : hash.keySet()) {
params.add(field);
params.add(hash.get(field));
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[] field) {
sendCommand(HDEL, key, field);
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[] string) {
sendCommand(RPUSH, key, string);
}
public void lpush(final byte[] key, final byte[] string) {
sendCommand(LPUSH, key, string);
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[] member) {
sendCommand(SADD, key, member);
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[] member) {
sendCommand(SREM, key, member);
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
public void zrange(final byte[] key, final int start, final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[] member) {
sendCommand(ZREM, key, member);
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final int start, final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final double min, final double max) {
sendCommand(ZCOUNT, key, toByteArray(min), toByteArray(max));
}
public void zrangeByScore(final byte[] key, final double min,
final double max) {
sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max));
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min));
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, toByteArray(min), toByteArray(max),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, toByteArray(max), toByteArray(min),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final int start, final int end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final double start,
final double end) {
sendCommand(ZREMRANGEBYSCORE, key, toByteArray(start), toByteArray(end));
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final String pattern) {
sendCommand(CONFIG, Keyword.GET.name(), pattern);
}
public void configSet(final String parameter, final String value) {
sendCommand(CONFIG, Keyword.SET.name(), parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[] string) {
sendCommand(LPUSHX, key, string);
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[] string) {
sendCommand(RPUSHX, key, string);
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
}
Left
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
/**
 * Relative insertion position for the LINSERT command.
 * Each constant caches its UTF-8 wire representation in {@link #raw}
 * so it is encoded once rather than on every call.
 */
public enum LIST_POSITION {
    BEFORE, AFTER;
    public final byte[] raw;

    private LIST_POSITION() {
        raw = SafeEncoder.encode(name());
    }
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Prepends {@code first} to {@code rest}, producing the single argument
 * array expected by variadic commands such as HDEL, RPUSH, and SADD.
 *
 * @param first the key (or leading argument) to place at index 0
 * @param rest  the remaining arguments, copied starting at index 1
 * @return a new array of length {@code rest.length + 1}
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] result = new byte[rest.length + 1][];
    result[0] = first;
    // Bulk copy instead of an element-by-element loop, matching the
    // System.arraycopy style used elsewhere in this class (e.g. hmget).
    System.arraycopy(rest, 0, result, 1, rest.length);
    return result;
}
public void setPassword(final String password) {
this.password = password;
}
/**
 * Opens the socket connection and then replays session state tracked
 * client-side: AUTH when a password has been set, and SELECT when a
 * non-default database index was previously chosen.
 */
@Override
public void connect() {
    if (!isConnected()) {
        super.connect();
        if (password != null) {
            auth(password);
            // consume the status reply so the protocol stream stays in sync
            getStatusCodeReply();
        }
        if (db > 0) {
            // re-select the previously chosen database after a (re)connect
            select(Long.valueOf(db).intValue());
            getStatusCodeReply();
        }
    }
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
/**
 * Sends ZADD with one (score, member) pair per map entry.
 *
 * NOTE(review): keying the map by score means two members that share the
 * same score cannot both be passed in one call — the later entry replaces
 * the earlier one before this method is even reached. Verify callers never
 * need duplicate scores, or migrate to a member-keyed map in a future API.
 */
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
    // pre-sized: one slot for the key plus two per (score, member) pair
    ArrayList<byte[]> args = new ArrayList<byte[]>(
            scoreMembers.size() * 2 + 1);
    args.add(key);
    for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
        args.add(toByteArray(entry.getKey()));
        args.add(entry.getValue());
    }
    byte[][] argsArray = new byte[args.size()][];
    args.toArray(argsArray);
    sendCommand(ZADD, argsArray);
}
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
/**
 * Blocking LPOP over several keys. Per the BLPOP wire format, the
 * arguments are the keys followed by the timeout in seconds.
 */
public void blpop(final int timeout, final byte[]... keys) {
    final byte[][] args = new byte[keys.length + 1][];
    System.arraycopy(keys, 0, args, 0, keys.length);
    args[args.length - 1] = Protocol.toByteArray(timeout);
    blpop(args);
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
/**
 * Blocking RPOP over several keys. Per the BRPOP wire format, the
 * arguments are the keys followed by the timeout in seconds.
 */
public void brpop(final int timeout, final byte[]... keys) {
    final byte[][] args = new byte[keys.length + 1][];
    System.arraycopy(keys, 0, args, 0, keys.length);
    args[args.length - 1] = Protocol.toByteArray(timeout);
    brpop(args);
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start,
final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
/**
 * CONFIG RESETSTAT — resets the statistics reported by INFO.
 * Sends the keyword's raw bytes for consistency with configGet/configSet
 * in this class, which use Keyword.GET.raw / Keyword.SET.raw; the wire
 * payload is the same string either way.
 */
public void configResetStat() {
    sendCommand(CONFIG, Keyword.RESETSTAT.raw);
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Shared dispatch for EVAL/EVALSHA: the wire format is
 * [script-or-sha, key-count, params...].
 *
 * @param command  EVAL or EVALSHA
 * @param script   script body (EVAL) or its SHA1 digest (EVALSHA)
 * @param keyCount number of keys, already serialized to bytes
 * @param params   keys followed by argv values
 */
private void sendEvalCommand(Command command, byte[] script,
        byte[] keyCount, byte[][] params) {
    final byte[][] allArgs = new byte[params.length + 2][];
    allArgs[0] = script;
    allArgs[1] = keyCount;
    // bulk copy, consistent with the arraycopy style used by hmget et al.
    System.arraycopy(params, 0, allArgs, 2, params.length);
    sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
/**
 * SCRIPT EXISTS — asks which of the given SHA1 digests are cached on the
 * server. Arguments are the EXISTS keyword followed by each digest.
 */
public void scriptExists(byte[]... sha1) {
    final byte[][] args = new byte[sha1.length + 1][];
    args[0] = Keyword.EXISTS.raw;
    // bulk copy instead of a manual index loop
    System.arraycopy(sha1, 0, args, 1, sha1.length);
    sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
/**
 * BITOP — bitwise AND/OR/XOR/NOT across source keys into {@code destKey}.
 * NOT is unary in Redis, so at most one source key is sent for it.
 */
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
    final Keyword kw;
    switch (op) {
    case OR:
        kw = Keyword.OR;
        break;
    case XOR:
        kw = Keyword.XOR;
        break;
    case NOT:
        kw = Keyword.NOT;
        break;
    case AND:
    default:
        // AND is also the fallback, mirroring the original default keyword
        kw = Keyword.AND;
        break;
    }
    // NOT takes a single source key; every other op forwards them all.
    final int len = (op == BitOP.NOT) ? Math.min(1, srcKeys.length)
            : srcKeys.length;
    final byte[][] bargs = new byte[len + 2][];
    bargs[0] = kw.raw;
    bargs[1] = destKey;
    System.arraycopy(srcKeys, 0, bargs, 2, len);
    sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Prepends {@code first} to {@code rest}, producing the single argument
 * array expected by variadic commands such as HDEL, RPUSH, and SADD.
 *
 * @param first the key (or leading argument) to place at index 0
 * @param rest  the remaining arguments, copied starting at index 1
 * @return a new array of length {@code rest.length + 1}
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] result = new byte[rest.length + 1][];
    result[0] = first;
    // Bulk copy instead of an element-by-element loop, matching the
    // System.arraycopy style used elsewhere in this class (e.g. hmget).
    System.arraycopy(rest, 0, result, 1, rest.length);
    return result;
}
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
/**
 * HMSET — sets several hash fields in one round trip; the arguments are
 * the key followed by alternating field/value pairs.
 */
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
    // pre-sized: the key plus a field and a value per map entry
    final List<byte[]> args = new ArrayList<byte[]>(hash.size() * 2 + 1);
    args.add(key);
    for (final Entry<byte[], byte[]> field : hash.entrySet()) {
        args.add(field.getKey());
        args.add(field.getValue());
    }
    sendCommand(HMSET, args.toArray(new byte[args.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
// --- Redis list commands. Push operations are variadic (multi-value). ---
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
// --- Redis set commands. *store variants write the result to dstkey. ---
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
// SINTERSTORE: argument vector is [dstkey, key1, key2, ...].
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
// --- Redis sorted-set commands (basic range/rank/score operations). ---
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// Multi-member ZADD. NOTE(review): the map is keyed by score (Double), so two
// members sharing the same score collide and only one is sent — confirm
// callers never pass duplicate scores.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(
scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
// WITHSCORES variants append the WITHSCORES keyword to the same command.
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
// --- Transaction commands. isInMulti tracks whether a MULTI block is open
// so reply parsing elsewhere can adapt. ---
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
// --- SORT and blocking list pops. ---
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
// BLPOP: wire order is [key1, ..., keyN, timeout].
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
// SORT ... STORE dstkey: appends STORE and the destination key.
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
// BRPOP: same wire order as BLPOP — keys first, timeout last.
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
// AUTH: remembers the password so connect() can re-authenticate on reconnect.
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
// --- Pub/Sub commands. The no-arg unsubscribe variants drop all
// channels/patterns. ---
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
// --- Score-range sorted-set commands. min/max are passed as raw bytes so
// callers can supply "-inf"/"+inf" or exclusive "(x" bounds themselves.
// Rev variants take (max, min), matching the Redis argument order. ---
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start,
final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
// --- ZUNIONSTORE/ZINTERSTORE. Wire format requires the number of source
// sets (numkeys) between the destination key and the set names. ---
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
// Variant with WEIGHTS/AGGREGATE options supplied via ZParams (appended last).
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
// --- Server administration commands. ---
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
// SLAVEOF NO ONE: promotes this server to master.
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
// CONFIG RESETSTAT: resets the statistics reported by INFO.
// Consistency fix: the sibling CONFIG subcommands (configGet/configSet) send
// the keyword's raw byte form; the old code used Keyword.RESETSTAT.name(),
// taking the String-varargs sendCommand overload instead. The bytes on the
// wire are equivalent, but the raw form avoids a per-call re-encode and
// matches the rest of the class.
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.raw);
}
// --- Bit and byte-range string commands, plus DB bookkeeping. ---
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
// Currently selected database index (see select()).
public Long getDB() {
return db;
}
// Resets the tracked DB index to 0 before tearing down the socket, so a
// later reconnect starts from database 0.
public void disconnect() {
db = 0;
super.disconnect();
}
// Shared serializer for EVAL/EVALSHA: wire format is
// [script-or-sha1, numkeys, key1..keyN, arg1..argM].
private void sendEvalCommand(Command command, byte[] script,
byte[] keyCount, byte[][] params) {
final byte[][] allArgs = new byte[params.length + 2][];
allArgs[0] = script;
allArgs[1] = keyCount;
// Bulk copy instead of the element-by-element loop.
System.arraycopy(params, 0, allArgs, 2, params.length);
sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
// Convenience overload: encodes the int key count and delegates.
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
// Consistency fix: delegate through the byte[] overload exactly as
// eval(script, int, ...) does, instead of calling sendEvalCommand directly.
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
evalsha(sha1, toByteArray(keyCount), params);
}
// --- SCRIPT and SLOWLOG subcommands. ---
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
// SCRIPT EXISTS sha1 [sha1 ...]: the subcommand keyword goes first.
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++)
args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
// --- OBJECT introspection and BITCOUNT. ---
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// BITOP op destKey srcKey [srcKey ...]: maps the BitOP enum to the protocol
// keyword and builds the argument vector [keyword, destKey, srcKeys...].
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch (op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
// NOT takes exactly one source key; extra keys are silently dropped.
// NOTE(review): with zero srcKeys this still sends an invalid NOT with
// no source — confirm callers always pass at least one key.
len = Math.min(1, len);
break;
}
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
// --- Sentinel, persistence (DUMP/RESTORE), millisecond-TTL, CLIENT and
// migration commands. ---
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
// SET with NX/XX modifier; Command.SET is qualified to avoid the static
// import of other Command members shadowing it.
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
// SET with NX/XX plus EX/PX expiry modifier.
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
Right
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
// Position argument for LINSERT; raw holds the keyword's wire-encoded bytes.
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
// True while a MULTI transaction is open (set by multi(), cleared by
// exec()/discard()).
private boolean isInMulti;
// Remembered so connect() can re-authenticate after a reconnect.
private String password;
// Currently selected database index; replayed on reconnect.
private long db;
public boolean isInMulti() {
return isInMulti;
}
// Connect to the default port on the given host.
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
// Stores the password for use by connect(); does not authenticate by itself.
public void setPassword(final String password) {
this.password = password;
}
// Opens the socket (if needed) and replays session state: AUTH when a
// password was set, SELECT when a non-default DB was chosen. Status replies
// are consumed immediately so the stream stays in sync.
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
// --- Connection, keyspace and string commands. Each serializes one Redis
// command via the inherited sendCommand(). ---
public void ping() {
sendCommand(PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
// QUIT also resets the tracked DB index so a reconnect starts at database 0.
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
// SELECT: records the index so connect() can re-select after a reconnect.
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
// --- Hash field set/get commands. ---
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
// HMSET: flattens the map into [key, field1, value1, field2, value2, ...].
// Iterates entrySet() instead of keySet() + get(field) — the original did a
// second hash lookup per field for no benefit (and byte[] keys rely on
// identity hashing, so get() only works for the exact key instances already
// in the map; entrySet() sidesteps that fragility).
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Map.Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
// --- Remaining hash commands. ---
// HMGET: argument vector is [key, field1, field2, ...].
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
// Single-field HDEL (this older API is not variadic).
public void hdel(final byte[] key, final byte[] field) {
sendCommand(HDEL, key, field);
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
// --- List commands (single-value push API) and set commands. ---
public void rpush(final byte[] key, final byte[] string) {
sendCommand(RPUSH, key, string);
}
public void lpush(final byte[] key, final byte[] string) {
sendCommand(LPUSH, key, string);
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[] member) {
sendCommand(SADD, key, member);
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[] member) {
sendCommand(SREM, key, member);
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
// *store variants: argument vector is [dstkey, key1, key2, ...].
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
// --- Sorted-set commands (int-indexed range API in this older version). ---
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
public void zrange(final byte[] key, final int start, final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[] member) {
sendCommand(ZREM, key, member);
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final int start, final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
// WITHSCORES variants append the WITHSCORES keyword.
public void zrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
// --- Transactions, SORT, blocking pops, AUTH and Pub/Sub. ---
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
// Caller is responsible for building [key1..keyN, timeout] in args.
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
// SORT ... STORE dstkey: appends STORE and the destination key.
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
// AUTH: remembers the password so connect() can re-authenticate on reconnect.
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
// --- Score-range sorted-set commands, in three overload families:
// double (infinities mapped to "-inf"/"+inf"), raw byte[], and String.
// NOTE(review): the "-inf"/"+inf" literals and String.getBytes() use the
// platform default charset; the content is ASCII so this is effectively safe,
// but an explicit charset would be more robust. ---
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
// Rev variants take (max, min), matching the Redis argument order.
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
// LIMIT offset count variants.
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// --- WITHSCORES variants of the score-range commands; same overload
// families as above (double with infinity mapping, byte[], String). ---
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
}
// LIMIT + WITHSCORES variants.
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final int start, final int end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final double start,
final double end) {
sendCommand(ZREMRANGEBYSCORE, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte start[],
final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
/**
 * ZUNIONSTORE dstkey numkeys set [set ...] — stores the union of the
 * given sorted sets into dstkey.
 */
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
// Argument vector layout: [dstkey, numkeys, set1 .. setN].
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
/**
 * ZUNIONSTORE with extra options (weights/aggregate) supplied via
 * ZParams: dstkey numkeys set [set ...] [WEIGHTS ...] [AGGREGATE ...].
 */
public void zunionstore(final byte[] dstkey, final ZParams params,
        final byte[]... sets) {
    final List<byte[]> args = new ArrayList<byte[]>();
    args.add(dstkey);
    args.add(toByteArray(sets.length));
    // Append each source key, then the trailing option tokens.
    for (int i = 0; i < sets.length; i++) {
        args.add(sets[i]);
    }
    args.addAll(params.getParams());
    sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final String pattern) {
sendCommand(CONFIG, Keyword.GET.name(), pattern);
}
public void configSet(final String parameter, final String value) {
sendCommand(CONFIG, Keyword.SET.name(), parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[] string) {
sendCommand(LPUSHX, key, string);
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[] string) {
sendCommand(RPUSHX, key, string);
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
/**
 * Returns the index of the database most recently chosen via
 * select(int); reset to 0 by quit() and disconnect().
 */
public Long getDB() {
return db;
}
/**
 * Closes the underlying connection and forgets the tracked database
 * index, so a subsequent connect() starts back on db 0.
 */
@Override
public void disconnect() {
    db = 0;
    super.disconnect();
}
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
/** Pivot position argument for LINSERT: insert BEFORE or AFTER the pivot. */
public enum LIST_POSITION {
BEFORE, AFTER;
// Raw wire-protocol form of the constant name, precomputed once.
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
public void setPassword(final String password) {
this.password = password;
}
@Override
/**
 * Opens the socket if not already connected, then replays session
 * state: authenticates when a password has been set, and re-selects the
 * previously chosen database when it is non-zero. Order matters: AUTH
 * must precede SELECT on a password-protected server.
 */
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
// Consume the server's +OK so the stream stays in sync.
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
/**
 * SELECT the given database index. The index is remembered so that
 * connect() can restore it after a reconnect.
 */
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
/**
 * HMSET key field value [field value ...] — sets multiple hash fields
 * in one round trip.
 *
 * @param key  hash key
 * @param hash field-to-value pairs to set
 */
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
    // Presize: key plus one field and one value entry per mapping.
    final List<byte[]> params = new ArrayList<byte[]>(hash.size() * 2 + 1);
    params.add(key);
    // Iterate entries directly instead of keySet()+get(field): a single
    // traversal with no per-field lookup (matches the later revision's
    // entrySet() style).
    for (final Map.Entry<byte[], byte[]> entry : hash.entrySet()) {
        params.add(entry.getKey());
        params.add(entry.getValue());
    }
    sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
/**
 * HMGET key field [field ...] — fetches the values of the given hash
 * fields in one round trip.
 */
public void hmget(final byte[] key, final byte[]... fields) {
// Prepend the key to the field list to build the argument vector.
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[] field) {
sendCommand(HDEL, key, field);
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[] string) {
sendCommand(RPUSH, key, string);
}
public void lpush(final byte[] key, final byte[] string) {
sendCommand(LPUSH, key, string);
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[] member) {
sendCommand(SADD, key, member);
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[] member) {
sendCommand(SREM, key, member);
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
public void zrange(final byte[] key, final int start, final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[] member) {
sendCommand(ZREM, key, member);
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final int start, final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final int start,
final int end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
/**
 * AUTH with the given password. The password is also cached via
 * setPassword so connect() can re-authenticate after a reconnect.
 */
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
/**
 * ZCOUNT key min max — counts sorted-set members with scores between
 * the two bounds (inclusive).
 */
public void zcount(final byte[] key, final double min, final double max) {
    // Infinite bounds go over the wire as the textual tokens "-inf"/"+inf".
    final byte[] lowerBound = (min == Double.NEGATIVE_INFINITY)
            ? "-inf".getBytes() : toByteArray(min);
    final byte[] upperBound = (max == Double.POSITIVE_INFINITY)
            ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZCOUNT, key, lowerBound, upperBound);
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final int start, final int end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final double start,
final double end) {
sendCommand(ZREMRANGEBYSCORE, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte start[],
final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
/**
 * ZINTERSTORE dstkey numkeys set [set ...] — stores the intersection of
 * the given sorted sets into dstkey.
 */
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
    // Same wire layout as the array-based sibling: [dstkey, numkeys, sets...].
    final List<byte[]> args = new ArrayList<byte[]>(sets.length + 2);
    args.add(dstkey);
    args.add(Protocol.toByteArray(sets.length));
    for (final byte[] set : sets) {
        args.add(set);
    }
    sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final String pattern) {
sendCommand(CONFIG, Keyword.GET.name(), pattern);
}
public void configSet(final String parameter, final String value) {
sendCommand(CONFIG, Keyword.SET.name(), parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[] string) {
sendCommand(LPUSHX, key, string);
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[] string) {
sendCommand(RPUSHX, key, string);
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
}
// MergeMethods — merge-tool marker separating two concatenated revisions of BinaryClient.java
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE(), AFTER();
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Returns a new array of length {@code rest.length + 1} with
 * {@code first} at index 0 followed by every element of {@code rest}.
 * Neither input array is modified.
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    byte[][] result = new byte[rest.length + 1][];
    result[0] = first;
    // Bulk copy instead of an element-by-element loop.
    System.arraycopy(rest, 0, result, 1, rest.length);
    return result;
}
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
// Sends SELECT to switch the current database, and records the index so
// connect() can restore the same database after a reconnect.
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
/**
 * Sends HMSET key field1 value1 [field2 value2 ...], setting every entry of
 * {@code hash} on the hash stored at {@code key}.
 */
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
// Pre-size for key plus one field/value pair per map entry.
final List<byte[]> args = new ArrayList<byte[]>(hash.size() * 2 + 1);
args.add(key);
for (final Entry<byte[], byte[]> field : hash.entrySet()) {
args.add(field.getKey());
args.add(field.getValue());
}
sendCommand(HMSET, args.toArray(new byte[args.size()][]));
}
/**
 * Sends HMGET key field1 [field2 ...], requesting the given hash fields.
 */
public void hmget(final byte[] key, final byte[]... fields) {
// joinParameters builds the same key-first argument array the old inline
// arraycopy produced.
sendCommand(HMGET, joinParameters(key, fields));
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey, final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// Sends ZADD key score1 member1 [score2 member2 ...] for a batch of entries.
// NOTE(review): the map is keyed by score, so two members sharing the same
// score cannot both be passed through this signature — the map collapses
// them before this method ever runs. Verify callers never need duplicate
// scores, or invert the map to member -> score.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
// Redis expects "score member" pairs, in that order.
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score, final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
/**
 * Sends BLPOP key1 [key2 ...] timeout — blocking left pop across the given
 * keys. The timeout (seconds) is the final argument, per the Redis protocol.
 */
public void blpop(final int timeout, final byte[]... keys) {
final byte[][] args = new byte[keys.length + 1][];
System.arraycopy(keys, 0, args, 0, keys.length);
args[keys.length] = Protocol.toByteArray(timeout);
blpop(args);
}
public void sort(final byte[] key, final SortingParams sortingParameters, final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
/**
 * Sends ZCOUNT, counting sorted-set members at {@code key} whose score lies
 * within [min, max]. Infinite bounds are encoded as the Redis literals
 * "-inf"/"+inf", which have no binary numeric form.
 *
 * (Fix: the declared signature was a corrupted duplicate of zrange(long,long)
 * whose body referenced undeclared double min/max and sent ZCOUNT — restored
 * to the zcount(double, double) overload the body implements.)
 *
 * @param key the sorted-set key
 * @param min lower score bound; may be Double.NEGATIVE_INFINITY
 * @param max upper score bound; may be Double.POSITIVE_INFINITY
 */
public void zcount(final byte[] key, final double min, final double max) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
/**
 * Sends ZRANGEBYSCORE, fetching members whose score lies within [min, max].
 * Infinite bounds are encoded as the Redis literals "-inf"/"+inf".
 *
 * (Fix: the declared signature was a corrupted duplicate of zrange(long,long)
 * whose body referenced undeclared double min/max and sent ZRANGEBYSCORE —
 * restored to the zrangeByScore(double, double) overload the body implements.)
 *
 * @param key the sorted-set key
 * @param min lower score bound; may be Double.NEGATIVE_INFINITY
 * @param max upper score bound; may be Double.POSITIVE_INFINITY
 */
public void zrangeByScore(final byte[] key, final double min, final double max) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
/**
 * Sends ZREVRANGEBYSCORE, fetching members with score in [min, max] in
 * descending order; Redis expects max before min for the reversed form.
 * Infinite bounds are encoded as the Redis literals "-inf"/"+inf".
 *
 * (Fix: the declared signature was a corrupted zremrangeByRank(long,long)
 * whose body referenced undeclared double min/max and sent ZREVRANGEBYSCORE —
 * restored to the zrevrangeByScore(double, double) overload the body implements.)
 *
 * @param key the sorted-set key
 * @param max upper score bound; may be Double.POSITIVE_INFINITY
 * @param min lower score bound; may be Double.NEGATIVE_INFINITY
 */
public void zrevrangeByScore(final byte[] key, final double max, final double min) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zremrangeByScore(final byte[] key, final byte[] start, final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
/**
 * Sends ZRANGEBYSCORE ... LIMIT offset count for paginated score-range reads.
 * Infinite bounds are encoded as the Redis literals "-inf"/"+inf".
 *
 * (Fix: the parameters were corrupted to byte[] while the body compared them
 * against Double infinities — a compile error that also duplicated the
 * byte[]-based overload below; restored to double min/max.)
 *
 * @param key    the sorted-set key
 * @param min    lower score bound; may be Double.NEGATIVE_INFINITY
 * @param max    upper score bound; may be Double.POSITIVE_INFINITY
 * @param offset number of matching elements to skip
 * @param count  maximum number of elements to return
 */
public void zrangeByScore(final byte[] key, final double min, final double max, final int offset, final int count) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[], final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min, final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
/**
 * Sends ZREVRANGEBYSCORE ... LIMIT offset count for paginated descending
 * score-range reads; Redis expects max before min for the reversed form.
 * Infinite bounds are encoded as the Redis literals "-inf"/"+inf".
 *
 * (Fix: the parameters were corrupted to byte[] while the body compared them
 * against Double infinities — a compile error that also duplicated the
 * byte[]-based overload below; restored to double max/min.)
 *
 * @param key    the sorted-set key
 * @param max    upper score bound; may be Double.POSITIVE_INFINITY
 * @param min    lower score bound; may be Double.NEGATIVE_INFINITY
 * @param offset number of matching elements to skip
 * @param count  maximum number of elements to return
 */
public void zrevrangeByScore(final byte[] key, final double max, final double min, final int offset, final int count) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[], final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max, final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
/**
 * Sends ZRANGEBYSCORE ... WITHSCORES, fetching members in [min, max] together
 * with their scores. Infinite bounds are encoded as "-inf"/"+inf".
 *
 * (Fix: the declared signature was a corrupted zrangeWithScores(long,long)
 * whose body referenced undeclared double min/max and sent ZRANGEBYSCORE —
 * restored to the zrangeByScoreWithScores(double, double) overload the body
 * implements.)
 *
 * @param key the sorted-set key
 * @param min lower score bound; may be Double.NEGATIVE_INFINITY
 * @param max upper score bound; may be Double.POSITIVE_INFINITY
 */
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), WITHSCORES.raw);
}
/**
 * Sends ZREVRANGEBYSCORE ... WITHSCORES, fetching members in [min, max] in
 * descending order together with their scores; Redis expects max before min.
 * Infinite bounds are encoded as "-inf"/"+inf".
 *
 * (Fix: the declared signature was a corrupted zrevrangeWithScores(long,long)
 * whose body referenced undeclared double min/max and sent ZREVRANGEBYSCORE —
 * restored to the zrevrangeByScoreWithScores(double, double) overload the
 * body implements.)
 *
 * @param key the sorted-set key
 * @param max upper score bound; may be Double.POSITIVE_INFINITY
 * @param min lower score bound; may be Double.NEGATIVE_INFINITY
 */
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), WITHSCORES.raw);
}
/**
 * Sends ZRANGEBYSCORE ... LIMIT offset count WITHSCORES — paginated
 * score-range read returning members with their scores. Infinite bounds are
 * encoded as "-inf"/"+inf".
 *
 * (Fix: the parameters were corrupted to byte[] while the body compared them
 * against Double infinities — a compile error that also duplicated the
 * byte[]-based overload below; restored to double min/max.)
 *
 * @param key    the sorted-set key
 * @param min    lower score bound; may be Double.NEGATIVE_INFINITY
 * @param max    upper score bound; may be Double.POSITIVE_INFINITY
 * @param offset number of matching elements to skip
 * @param count  maximum number of elements to return
 */
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max, final int offset, final int count) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
/**
 * Sends ZREVRANGEBYSCORE ... LIMIT offset count WITHSCORES — paginated
 * descending score-range read returning members with their scores; Redis
 * expects max before min. Infinite bounds are encoded as "-inf"/"+inf".
 *
 * (Fix: the parameters were corrupted to byte[] while the body compared them
 * against Double infinities — a compile error that also duplicated the
 * byte[]-based overload below; restored to double max/min.)
 *
 * @param key    the sorted-set key
 * @param max    upper score bound; may be Double.POSITIVE_INFINITY
 * @param min    lower score bound; may be Double.NEGATIVE_INFINITY
 * @param offset number of matching elements to skip
 * @param count  maximum number of elements to return
 */
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min, final int offset, final int count) {
final byte[] byteArrayMin = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
final byte[] byteArrayMax = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByScore(final byte[] key, final byte start[], final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start, final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where, final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination, final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset), toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Builds and sends an EVAL/EVALSHA request: script (or sha1), key count,
 * then the key/argument parameters in order.
 */
private void sendEvalCommand(Command command, byte[] script, byte[] keyCount, byte[][] params) {
final List<byte[]> args = new ArrayList<byte[]>(params.length + 2);
args.add(script);
args.add(keyCount);
for (final byte[] param : params) {
args.add(param);
}
sendCommand(command, args.toArray(new byte[args.size()][]));
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
/**
 * Sends SCRIPT EXISTS sha1 [sha1 ...], querying which scripts are cached.
 */
public void scriptExists(byte[]... sha1) {
// joinParameters prepends the EXISTS keyword exactly as the old inline
// copy loop did.
sendCommand(SCRIPT, joinParameters(Keyword.EXISTS.raw, sha1));
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// Sends BITOP <AND|OR|XOR|NOT> destKey srcKey [srcKey ...], storing the
// bitwise combination of the source strings into destKey.
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
// Default keyword; overwritten by the switch below for every enum value.
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch(op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
// NOT is unary: send at most one source key, silently dropping extras.
len = Math.min(1, len);
break;
}
// Layout: [operation keyword, destination key, source keys...].
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
/**
 * Sends PEXPIRE, setting a time-to-live on {@code key} in milliseconds.
 *
 * Widened from int to long: PEXPIRE accepts 64-bit millisecond TTLs, and an
 * int caps out at ~24.8 days. Existing int callers still compile via
 * implicit widening.
 *
 * @param key          the key to expire
 * @param milliseconds TTL in milliseconds
 */
public void pexpire(final byte[] key, final long milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE(), AFTER();
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
private byte[][] joinParameters(byte[] first, byte[][] rest) {
byte[][] result = new byte[rest.length + 1][];
result[0] = first;
for (int i = 0; i < rest.length; i++) {
result[i + 1] = rest[i];
}
return result;
}
public void setPassword(final String password) {
this.password = password;
}
@Override
// Establishes the socket connection, then replays session state that does not
// survive a reconnect: re-authenticates if a password was set, and re-selects
// the last non-zero database chosen via select(). No-op when already connected.
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
// AUTH must precede any other command on a password-protected server.
auth(password);
getStatusCodeReply();
}
if (db > 0) {
// db is remembered by select(); database 0 is the server default, so
// nothing needs to be sent in that case.
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey, final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// Sends ZADD with multiple score/member pairs in one round trip:
// ZADD key score1 member1 score2 member2 ...
// NOTE(review): keying the map by Double means members that share the same
// score collapse to a single entry before this method even runs — confirm
// callers never need two members with an identical score.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
// Pre-size: one slot for the key plus two per pair.
ArrayList<byte[]> args = new ArrayList<byte[]>(scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score, final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
// Sends BLPOP with the raw argument vector: BLPOP key [key ...] timeout.
public void blpop(final byte[][] args) {
    sendCommand(BLPOP, args);
}
// Convenience overload: appends the timeout (as the last argument, per the
// Redis protocol) to the given keys and delegates to blpop(byte[][]).
public void blpop(final int timeout, final byte[]... keys) {
    final List<byte[]> argList = new ArrayList<byte[]>(keys.length + 1);
    for (int i = 0; i < keys.length; i++) {
        argList.add(keys[i]);
    }
    argList.add(Protocol.toByteArray(timeout));
    blpop(argList.toArray(new byte[argList.size()][]));
}
public void sort(final byte[] key, final SortingParams sortingParameters, final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
// Sends ZCOUNT key min max with double bounds, mapping the Java infinities
// to the Redis "-inf"/"+inf" score syntax.
// Bug fix: this overload duplicated zcount(byte[], byte[], byte[]) declared
// below (same erasure -> compile error); the merge dropped the intended
// double-typed signature.
public void zcount(final byte[] key, final double min, final double max) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
// Sends ZRANGE key start end (rank-based range).
// Bug fix: the merged body referenced undefined locals `min`/`max` and sent
// ZCOUNT; restored to the rank-based ZRANGE matching the clean copy of this
// class and the method's callers.
public void zrange(final byte[] key, final long start, final long end) {
    sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
// Sends ZRANGEBYSCORE key min max with double bounds, mapping the Java
// infinities to the Redis "-inf"/"+inf" score syntax.
// Bug fix: this was declared as a second zrange(byte[], long, long)
// (duplicate signature -> compile error) while its body was clearly the
// score-based range; restored the intended zrangeByScore(double, double).
public void zrangeByScore(final byte[] key, final double min, final double max) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
// Sends ZREMRANGEBYRANK key start end (removes members by rank range).
// Bug fix: the merged body referenced undefined locals `min`/`max` and sent
// ZREVRANGEBYSCORE; restored the body to match the method's declared name
// and (long start, long end) rank parameters.
public void zremrangeByRank(final byte[] key, final long start, final long end) {
    sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zremrangeByScore(final byte[] key, final byte[] start, final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
// Sends ZRANGEBYSCORE key min max LIMIT offset count with double bounds,
// mapping Java infinities to "-inf"/"+inf".
// Bug fix: parameters were declared byte[] yet compared to
// Double.NEGATIVE_INFINITY (compile error) and the signature duplicated the
// byte[]-typed overload below; restored the intended double-typed overload.
public void zrangeByScore(final byte[] key, final double min, final double max, final int offset, int count) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[], final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min, final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// Sends ZREVRANGEBYSCORE key max min LIMIT offset count with double bounds
// (note: max before min, per the reversed-range protocol), mapping Java
// infinities to "-inf"/"+inf".
// Bug fix: parameters were declared byte[] yet compared to double infinities
// (compile error) and duplicated the byte[]-typed overload below; restored
// the intended double-typed overload.
public void zrevrangeByScore(final byte[] key, final double max, final double min, final int offset, int count) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[], final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max, final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// Sends ZRANGE key start end WITHSCORES (rank-based range including scores).
// Bug fix: the merged body referenced undefined locals `min`/`max` and sent
// ZRANGEBYSCORE; restored to the rank-based form matching the clean copy of
// this class.
public void zrangeWithScores(final byte[] key, final long start, final long end) {
    sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), WITHSCORES.raw);
}
// Sends ZREVRANGE key start end WITHSCORES (reversed rank-based range
// including scores).
// Bug fix: the merged body referenced undefined locals `min`/`max` and sent
// ZREVRANGEBYSCORE; restored to the rank-based form matching the clean copy
// of this class.
public void zrevrangeWithScores(final byte[] key, final long start, final long end) {
    sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), WITHSCORES.raw);
}
// Sends ZRANGEBYSCORE key min max LIMIT offset count WITHSCORES with double
// bounds, mapping Java infinities to "-inf"/"+inf".
// Bug fix: parameters were declared byte[] yet compared to double infinities
// (compile error) and duplicated the byte[]-typed overload below; restored
// the intended double-typed overload.
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max, final int offset, final int count) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// Sends ZREVRANGEBYSCORE key max min LIMIT offset count WITHSCORES with
// double bounds (max before min, per the reversed-range protocol), mapping
// Java infinities to "-inf"/"+inf".
// Bug fix: parameters were declared byte[] yet compared to double infinities
// (compile error) and duplicated the byte[]-typed overload below; restored
// the intended double-typed overload.
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min, final int offset, final int count) {
    byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
    byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
    sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByScore(final byte[] key, final byte start[], final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start, final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where, final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination, final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset), toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
// Common dispatcher for EVAL/EVALSHA: argument vector is
// [script-or-sha, keyCount, params...].
private void sendEvalCommand(Command command, byte[] script, byte[] keyCount, byte[][] params) {
    final byte[][] allArgs = new byte[params.length + 2][];
    allArgs[0] = script;
    allArgs[1] = keyCount;
    System.arraycopy(params, 0, allArgs, 2, params.length);
    sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++) args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// Sends BITOP <AND|OR|XOR|NOT> destKey srcKey [srcKey ...].
// NOT is unary in Redis, so at most one source key is forwarded for it.
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
    final Keyword kw;
    int len = srcKeys.length;
    switch (op) {
    case OR:
        kw = Keyword.OR;
        break;
    case XOR:
        kw = Keyword.XOR;
        break;
    case NOT:
        kw = Keyword.NOT;
        len = Math.min(1, len);
        break;
    case AND:
    default:
        kw = Keyword.AND;
        break;
    }
    final byte[][] bargs = new byte[len + 2][];
    bargs[0] = kw.raw;
    bargs[1] = destKey;
    System.arraycopy(srcKeys, 0, bargs, 2, len);
    sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
// ===== Merge marker "KeepBothMethods": a second, concatenated copy of BinaryClient follows below =====
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE(), AFTER();
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
// Builds a single argument vector with `first` (usually the key) in front of
// the remaining parameters.
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] joined = new byte[rest.length + 1][];
    joined[0] = first;
    System.arraycopy(rest, 0, joined, 1, rest.length);
    return joined;
}
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
// Lazily (re)establishes the connection; a no-op when already connected.
if (!isConnected()) {
super.connect();
// AUTH must be the first command on a password-protected server, so it is
// replayed before anything else on every (re)connect.
if (password != null) {
auth(password);
getStatusCodeReply();
}
// Restore the previously selected database (tracked in `db` by select())
// so a reconnect is transparent to the caller.
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
// Sends HMSET key field1 value1 [field2 value2 ...], flattening the map into
// the protocol's alternating field/value argument list.
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
    final byte[][] args = new byte[hash.size() * 2 + 1][];
    int i = 0;
    args[i++] = key;
    for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
        args[i++] = entry.getKey();
        args[i++] = entry.getValue();
    }
    sendCommand(HMSET, args);
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey, final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
// --- Sorted-set and transaction commands (thin wrappers over sendCommand) ---
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score, final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
// WITHSCORES variants append the WITHSCORES keyword so replies include scores.
public void zrangeWithScores(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
// MULTI/DISCARD/EXEC also maintain the local isInMulti flag so the client
// tracks whether a transaction is currently open.
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
// --- SORT and blocking list pops ---
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
// SORT with BY/LIMIT/GET/ORDER options supplied by sortingParameters.
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
// Builds [key..., timeout]: BLPOP expects the timeout as the last argument.
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
// SORT ... STORE dstkey: stores the sorted result instead of returning it.
public void sort(final byte[] key, final SortingParams sortingParameters, final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
// Builds [key..., timeout]: BRPOP expects the timeout last.
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
// Records the password so connect() can re-authenticate, then sends AUTH.
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
// --- Pub/Sub commands ---
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
// --- ZCOUNT / ZRANGEBYSCORE family ---
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
// Maps double infinities to Redis' "-inf"/"+inf" textual bounds.
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
// NOTE(review): "byte min[]" is the same type as "byte[] min", so this
// signature duplicates zcount(byte[], byte[], byte[]) above; javac rejects
// duplicate methods — one of the two must be removed for this to compile.
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
// Reverse-order variants take (max, min) but send them as (key, max, min),
// matching the ZREVRANGEBYSCORE argument order.
public void zrevrangeByScore(final byte[] key, final double max, final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
// LIMIT offset count restricts the returned window.
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final long start, final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start, final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
// NOTE(review): several methods below use the "byte min[]" spelling, which is
// the same type as "byte[] min" — they duplicate signatures declared earlier
// in this class (e.g. zrangeByScore(byte[], byte[], byte[], int, int)).
// javac rejects duplicate methods; the duplicates must be reconciled before
// this file can compile.
public void zrangeByScore(final byte[] key, final double min, final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// duplicate signature — see note above
public void zrangeByScore(final byte[] key, final byte min[], final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min, final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final double max, final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// duplicate signature — see note above
public void zrevrangeByScore(final byte[] key, final byte max[], final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max, final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, WITHSCORES.raw);
}
// duplicate signature — see note above
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, WITHSCORES.raw);
}
// duplicate signature — see note above
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// duplicate signature — see note above
public void zrangeByScoreWithScores(final byte[] key, final byte min[], final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// duplicate signature — see note above
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[], final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// duplicate signature — see note above
public void zremrangeByScore(final byte[] key, final byte start[], final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start, final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
/**
 * Sends ZUNIONSTORE dstkey numkeys set [set ...].
 * Fix: use the statically imported toByteArray consistently instead of
 * mixing it with the qualified Protocol.toByteArray (same method).
 */
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
/** Sends ZUNIONSTORE with the WEIGHTS/AGGREGATE options carried by params. */
public void zunionstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
/** Sends ZINTERSTORE dstkey numkeys set [set ...]. */
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
/** Sends ZINTERSTORE with the WEIGHTS/AGGREGATE options carried by params. */
public void zinterstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
// --- Server administration commands ---
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
// INFO restricted to a single section (e.g. "memory").
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
// SLAVEOF NO ONE: detach from any master.
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
// --- Conditional pushes, bit/range operations, misc ---
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where, final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination, final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
// NOTE(review): uses Keyword.RESETSTAT.name() (a String) where sibling
// methods pass the pre-encoded Keyword.*.raw bytes — functionally the same
// token, but inconsistent; confirm intentional.
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset), toByteArray(endOffset));
}
/** @return the index of the database this client last SELECTed (boxed). */
public Long getDB() {
return db;
}
/**
 * Closes the connection and resets the recorded database index so a later
 * reconnect starts from db 0.
 * Fix: added the missing @Override — super.disconnect() shows the
 * superclass declares this method.
 */
@Override
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Builds the [script-or-sha, keyCount, params...] argument vector and sends
 * the given EVAL-family command.
 * Fix: replaced the manual element-copy loop with System.arraycopy, matching
 * the idiom used elsewhere in this class (hmget, sinterstore, ...).
 */
private void sendEvalCommand(Command command, byte[] script, byte[] keyCount, byte[][] params) {
final byte[][] allArgs = new byte[params.length + 2][];
allArgs[0] = script;
allArgs[1] = keyCount;
System.arraycopy(params, 0, allArgs, 2, params.length);
sendCommand(command, allArgs);
}
// EVAL with a pre-encoded key count.
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
// EVAL with an int key count.
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
// EVALSHA with a pre-encoded key count.
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
// EVALSHA with an int key count.
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
// --- SCRIPT / SLOWLOG / OBJECT / BITCOUNT subcommands ---
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
/**
 * SCRIPT EXISTS sha1 [sha1 ...].
 * Fix: replaced the manual element-copy loop with System.arraycopy,
 * matching the idiom used elsewhere in this class.
 */
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
System.arraycopy(sha1, 0, args, 1, sha1.length);
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
// SLOWLOG GET limited to the most recent entries.
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
/**
 * Sends BITOP op destKey srcKey [srcKey ...].
 * NOTE(review): for NOT, len is clamped to 1, silently dropping any extra
 * source keys instead of surfacing an error — confirm callers expect this.
 * Fix: replaced the manual element-copy loop with System.arraycopy,
 * matching the idiom used elsewhere in this class.
 */
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch(op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
// BITOP NOT takes exactly one source key.
len = Math.min(1, len);
break;
}
// args = [operation, destKey, srcKey...]
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
System.arraycopy(srcKeys, 0, bargs, 2, len);
sendCommand(BITOP, bargs);
}
// --- Sentinel, persistence TTLs, CLIENT subcommands, misc ---
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
// NOTE(review): milliseconds is an int, capping the expiry at ~24.8 days —
// confirm callers never need a larger PEXPIRE value.
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
// SET with NX/XX flag; Command.SET qualified to avoid the Keyword.SET clash.
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
// SET with NX/XX flag plus EX/PX expiry.
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
// Where LINSERT places the new element relative to the pivot; raw holds the
// pre-encoded keyword bytes sent on the wire.
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
// True while a MULTI transaction is open on this connection.
private boolean isInMulti;
// Password and db index recorded so connect() can replay AUTH/SELECT
// after a (re)connect.
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
// Constructors delegate host/port handling to Connection.
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Prepends {@code first} to {@code rest}, producing the argument vector used
 * by variadic commands (HDEL, SADD, ZREM, ...).
 * Fix: replaced the manual element-copy loop with System.arraycopy, matching
 * the idiom used elsewhere in this class (hmget, sinterstore, ...).
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
byte[][] result = new byte[rest.length + 1][];
result[0] = first;
System.arraycopy(rest, 0, result, 1, rest.length);
return result;
}
/** Records the password that connect() replays via AUTH. */
public void setPassword(final String password) {
this.password = password;
}
/**
 * Opens the socket if not already connected, then replays AUTH and SELECT
 * so the new connection matches the client's recorded password and db.
 * Fix: replaced Long.valueOf(db).intValue() — a needless box/unbox of the
 * primitive long field — with a plain cast.
 */
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select((int) db);
getStatusCodeReply();
}
}
}
// --- Basic key/string commands (thin wrappers over sendCommand) ---
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
// QUIT also resets the recorded db index so a reconnect starts from db 0.
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
// Records the db index locally so connect() can replay SELECT on reconnect.
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
// --- Hash commands ---
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
// HMSET: flattens the map into [key, field1, value1, field2, value2, ...].
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
// --- List commands ---
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
// --- Set commands ---
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey, final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
// *store variants build [dstkey, srckey...] argument vectors.
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// Sends ZADD with multiple score/member pairs.
// NOTE(review): the Map is keyed by score, so two members sharing the same
// score collapse to a single entry before the command is built — confirm
// callers never pass duplicate scores.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
// --- Sorted-set, transaction, SORT and blocking-pop commands ---
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score, final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
// WITHSCORES variants append the WITHSCORES keyword so replies include scores.
public void zrangeWithScores(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end), WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
// MULTI/DISCARD/EXEC also maintain the local isInMulti flag.
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
// SORT with BY/LIMIT/GET/ORDER options supplied by sortingParameters.
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
// Builds [key..., timeout]: BLPOP expects the timeout as the last argument.
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
// SORT ... STORE dstkey: stores the sorted result instead of returning it.
public void sort(final byte[] key, final SortingParams sortingParameters, final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
// Builds [key..., timeout]: BRPOP expects the timeout last.
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
// Records the password so connect() can re-authenticate, then sends AUTH.
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
// NOTE(review): a zcount(byte[] key, byte min[], byte max[]) overload was
// removed here — `byte min[]` and `byte[] min` are the same signature, so it
// duplicated zcount(byte[], byte[], byte[]) declared above and did not compile.
/** ZCOUNT with min/max given as protocol strings (e.g. "(1.5", "+inf"). */
public void zcount(final byte[] key, final String min, final String max) {
    sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min, final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
public void zrevrangeByScore(final byte[] key, final double max, final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
public void zrangeByScore(final byte[] key, final byte[] min, final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte[] max, final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte[] min, final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max, final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zremrangeByRank(final byte[] key, final long start, final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start, final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max, final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
public void zrangeByScore(final byte[] key, final double min, final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// NOTE(review): a zrangeByScore(byte[] key, byte min[], byte max[], int, int)
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZRANGEBYSCORE with string bounds plus LIMIT offset/count paging. */
public void zrangeByScore(final byte[] key, final String min, final String max, final int offset, int count) {
    sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final double max, final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count));
}
// NOTE(review): a zrevrangeByScore(byte[] key, byte max[], byte min[], int, int)
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZREVRANGEBYSCORE with string bounds plus LIMIT offset/count paging. */
public void zrevrangeByScore(final byte[] key, final String max, final String min, final int offset, int count) {
    sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, WITHSCORES.raw);
}
// NOTE(review): a zrangeByScoreWithScores(byte[] key, byte min[], byte max[])
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZRANGEBYSCORE WITHSCORES with string bounds. */
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max) {
    sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, WITHSCORES.raw);
}
// NOTE(review): a zrevrangeByScoreWithScores(byte[] key, byte max[], byte min[])
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZREVRANGEBYSCORE WITHSCORES with string bounds. */
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min) {
    sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final double min, final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// NOTE(review): a zrangeByScoreWithScores(byte[], byte min[], byte max[], int, int)
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZRANGEBYSCORE WITHSCORES with string bounds plus LIMIT offset/count. */
public void zrangeByScoreWithScores(final byte[] key, final String min, final String max, final int offset, final int count) {
    sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final double max, final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin, LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// NOTE(review): a zrevrangeByScoreWithScores(byte[], byte max[], byte min[], int, int)
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZREVRANGEBYSCORE WITHSCORES with string bounds plus LIMIT offset/count. */
public void zrevrangeByScoreWithScores(final byte[] key, final String max, final String min, final int offset, final int count) {
    sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(), LIMIT.raw, toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
// NOTE(review): a zremrangeByScore(byte[] key, byte start[], byte end[])
// overload was removed here — it duplicated the identical-signature method
// declared earlier in the class (duplicate methods do not compile).
/** ZREMRANGEBYSCORE with string bounds. */
public void zremrangeByScore(final byte[] key, final String start, final String end) {
    sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params, final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where, final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination, final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset), toByteArray(endOffset));
}
public Long getDB() {
return db;
}
// Drops the connection and forgets the selected db, so a later connect()
// starts from db 0 rather than re-selecting the old index.
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Shared driver for EVAL/EVALSHA: argv layout is script (or sha1), key count,
 * then all key/arg parameters in order.
 */
private void sendEvalCommand(Command command, byte[] script, byte[] keyCount, byte[][] params) {
    final byte[][] argv = new byte[params.length + 2][];
    argv[0] = script;
    argv[1] = keyCount;
    System.arraycopy(params, 0, argv, 2, params.length);
    sendCommand(command, argv);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
/** SCRIPT EXISTS for one or more sha1 digests. */
public void scriptExists(byte[]... sha1) {
    final byte[][] argv = new byte[sha1.length + 1][];
    argv[0] = Keyword.EXISTS.raw;
    System.arraycopy(sha1, 0, argv, 1, sha1.length);
    sendCommand(SCRIPT, argv);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// Issues BITOP <AND|OR|XOR|NOT> destKey srcKey [srcKey ...].
// Maps the BitOP enum to its protocol keyword; for NOT the source-key list is
// truncated to at most one key, because BITOP NOT is a unary operation.
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch(op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
// NOT takes exactly one source key; extra keys are silently dropped.
len = Math.min(1, len);
break;
}
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
Safe
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
/** Pivot placement for LINSERT: insert BEFORE or AFTER the pivot element. */
public enum LIST_POSITION {
    BEFORE, AFTER;

    /** Raw protocol bytes of the keyword, precomputed from the constant name. */
    public final byte[] raw;

    private LIST_POSITION() {
        raw = SafeEncoder.encode(name());
    }
}
// True while a MULTI transaction is open (set by multi(), cleared by exec()/discard()).
private boolean isInMulti;
// Password cached by setPassword()/auth() so connect() can re-authenticate; null if none.
private String password;
// Currently selected database index, re-selected by connect() after a reconnect.
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Prepends {@code first} to {@code rest}, producing the argv layout most
 * variadic commands expect (key followed by its values).
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] joined = new byte[rest.length + 1][];
    joined[0] = first;
    System.arraycopy(rest, 0, joined, 1, rest.length);
    return joined;
}
public void setPassword(final String password) {
this.password = password;
}
// Connects if not already connected, then restores session state in order:
// AUTH first (when a password was cached), then re-SELECT the previous db.
@Override
public void connect() {
if (!isConnected()) {
super.connect();
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
// SELECTs the given database; the index is cached in `db` so connect() can
// restore it after a reconnect.
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
/**
 * HMSET: flattens the hash into the argv layout
 * key, field1, value1, field2, value2, ...
 */
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
    final List<byte[]> argv = new ArrayList<byte[]>(hash.size() * 2 + 1);
    argv.add(key);
    for (final Entry<byte[], byte[]> field : hash.entrySet()) {
        argv.add(field.getKey());
        argv.add(field.getValue());
    }
    sendCommand(HMSET, argv.toArray(new byte[argv.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// ZADD of several score/member pairs in one command.
// NOTE(review): the map is keyed by score (Double), so two members sharing the
// same score cannot both be passed through this overload — confirm whether a
// member-keyed map was intended.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(
scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
<<<<<<< MINE
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
=======
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
>>>>>>> YOURS
// NOTE(review): a zcount(byte[] key, byte min[], byte max[]) overload was
// removed here — `byte min[]` and `byte[] min` are the same signature, so it
// duplicated zcount(byte[], byte[], byte[]) declared above and did not compile.
/** ZCOUNT with min/max given as protocol strings (e.g. "(1.5", "+inf"). */
public void zcount(final byte[] key, final String min, final String max) {
    sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
=======
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
>>>>>>> YOURS
// Sorted-set range queries with raw byte[] or String bounds. String bounds
// are encoded with the platform default charset. For the ZREV* variants the
// parameter order is (max, min), matching the order Redis expects.
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
// Numeric bounds: infinity is translated to the literal "-inf"/"+inf" forms.
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
// WITHSCORES variants append the WITHSCORES keyword to the same commands.
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
// Removal by rank interval or by score interval.
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start,
final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
=======
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
>>>>>>> YOURS
// ZRANGEBYSCORE with a LIMIT clause, raw-bytes and String bound variants.
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
<<<<<<< MINE
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
=======
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
>>>>>>> YOURS
// ZREVRANGEBYSCORE with a LIMIT clause, raw-bytes and String bound variants.
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
=======
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
>>>>>>> YOURS
// Removed duplicate overload: zrangeByScoreWithScores(byte[], byte[], byte[])
// is already declared earlier in this class with an identical signature
// ("byte min[]" and "byte[] min" declare the same type), so this copy was a
// compile error.
// ZRANGEBYSCORE ... WITHSCORES with String bounds (default-charset encoded).
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
=======
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
>>>>>>> YOURS
// Removed duplicate overload: zrevrangeByScoreWithScores(byte[], byte[],
// byte[]) is already declared earlier in this class with an identical
// signature ("byte max[]" and "byte[] max" declare the same type), so this
// copy was a compile error.
// ZREVRANGEBYSCORE ... WITHSCORES with String bounds (max before min).
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
=======
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
>>>>>>> YOURS
// ZRANGEBYSCORE with LIMIT and WITHSCORES, raw-bytes and String variants.
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
=======
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
>>>>>>> YOURS
// ZREVRANGEBYSCORE with LIMIT and WITHSCORES, raw-bytes and String variants
// (max before min).
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
// Removed duplicate overload: zremrangeByScore(byte[], byte[], byte[]) is
// already declared earlier in this class with an identical signature
// ("byte start[]" and "byte[] start" declare the same type), so this copy
// was a compile error.
// ZREMRANGEBYSCORE with String bounds (default-charset encoded).
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
// ZUNIONSTORE/ZINTERSTORE: the wire format is <dstkey> <numkeys> <key...>
// [params], so the source-set count is serialized ahead of the keys.
// NOTE(review): toByteArray vs Protocol.toByteArray below are the same
// statically imported method, just written inconsistently.
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
// Variant taking ZParams (e.g. WEIGHTS/AGGREGATE), appended after the keys.
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
// Server administration commands; each is a thin one-command wrapper.
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
// "SLAVEOF NO ONE" promotes this server; NO/ONE are prebuilt keyword bytes.
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
// Miscellaneous list/bit/string commands.
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
// NOTE(review): uses Keyword.RESETSTAT.name() (a String) where sibling
// methods use the .raw byte form — functionally equivalent, just inconsistent.
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
// Returns the database index tracked locally by select()/quit()/disconnect().
public Long getDB() {
return db;
}
// Resets the tracked db index before delegating to the superclass, so a
// reconnect starts from database 0.
public void disconnect() {
db = 0;
super.disconnect();
}
// Shared builder for EVAL/EVALSHA: arguments are <script|sha1> <numkeys>
// followed by the key/arg parameters.
private void sendEvalCommand(Command command, byte[] script,
byte[] keyCount, byte[][] params) {
final byte[][] allArgs = new byte[params.length + 2][];
allArgs[0] = script;
allArgs[1] = keyCount;
for (int i = 0; i < params.length; i++)
allArgs[i + 2] = params[i];
sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
// NOTE(review): calls sendEvalCommand directly where eval(int) delegates to
// its byte[] overload — behaviorally the same, just asymmetric.
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
// SCRIPT, SLOWLOG, OBJECT and BITCOUNT subcommand wrappers.
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
// SCRIPT EXISTS takes a variable number of sha1 digests after the keyword.
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++)
args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// BITOP <AND|OR|XOR|NOT> destkey srckey [srckey ...]. The op enum is mapped
// to the corresponding keyword; for NOT, at most one source key is sent
// (len is clamped to 1), since the remaining keys would be ignored.
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch (op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
len = Math.min(1, len);
break;
}
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
// SENTINEL, DUMP/RESTORE, millisecond-precision TTL and other newer-command
// wrappers.
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
// SET with NX/XX and optional EX/PX expiry arguments passed through raw.
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
// LINSERT pivot position. Each constant caches its wire representation
// (the UTF-8 bytes of its own name) in `raw` at construction time.
public enum LIST_POSITION {
BEFORE , AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}}
// True while inside a MULTI/EXEC transaction (set by multi(), cleared by
// exec()/discard()).
private boolean isInMulti;
// Cached for automatic re-AUTH on reconnect; see connect().
private String password;
// Currently selected database index, replayed on reconnect; see connect().
private long db;
public boolean isInMulti() {
return isInMulti;
}
// Constructors simply delegate connection parameters to Connection.
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Builds the argument vector for variadic commands by prepending
 * {@code first} (usually the key) to {@code rest}.
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] joined = new byte[rest.length + 1][];
    joined[0] = first;
    System.arraycopy(rest, 0, joined, 1, rest.length);
    return joined;
}
// Stores the password used by connect() to re-authenticate after a reconnect.
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
if (!isConnected()) {
super.connect();
// Replay session state lost by the reconnect: AUTH first, then the
// previously selected database (only if it was non-default).
if (password != null) {
auth(password);
getStatusCodeReply();
}
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
// Basic connection/keyspace commands; each wrapper serializes its arguments
// and sends one command.
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
// Resets the tracked db index so a later reconnect starts from database 0.
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
// Remembers the selected index so connect() can restore it on reconnect.
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
// String and hash commands.
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
// HMSET flattens the map into key field1 value1 field2 value2 ...
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
// List and set commands.
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
// *store variants prepend the destination key to the source keys.
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
// Sorted-set commands.
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// NOTE(review): keying the map by score means members that share a score
// collapse to a single entry before the command is even built — confirm this
// is the intended contract for bulk zadd.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
ArrayList<byte[]> args = new ArrayList<byte[]>(
scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
// Transaction, SORT, blocking-pop and pub/sub commands.
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
// BLPOP/BRPOP append the timeout (seconds) after the list of keys.
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
// Caches the password so connect() can re-authenticate after a reconnect.
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
}
<<<<<<< MINE
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
=======
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
>>>>>>> YOURS
// Removed duplicate overload: zcount(byte[], byte[], byte[]) is already
// declared earlier in this class with an identical signature ("byte min[]"
// and "byte[] min" declare the same type), so this copy was a compile error.
// ZCOUNT with String bounds (default-charset encoded).
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
}
=======
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
>>>>>>> YOURS
// Sorted-set range queries with raw byte[] or String bounds; ZREV* variants
// take (max, min), matching the order Redis expects.
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
// Numeric bounds: infinity is translated to the literal "-inf"/"+inf" forms.
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
// WITHSCORES variants append the WITHSCORES keyword to the same commands.
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
}
// Removal by rank interval or by score interval.
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start,
final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
=======
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
>>>>>>> YOURS
// ZRANGEBYSCORE with a LIMIT clause, raw-bytes and String bound variants.
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
<<<<<<< MINE
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
=======
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
}
>>>>>>> YOURS
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
=======
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
>>>>>>> YOURS
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
=======
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
>>>>>>> YOURS
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
=======
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
>>>>>>> YOURS
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
=======
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
}
>>>>>>> YOURS
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zremrangeByScore(final byte[] key, final byte start[],
final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
/**
 * Sends ZUNIONSTORE with the default (no-params) form:
 * ZUNIONSTORE dstkey numkeys set [set ...].
 */
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
    final List<byte[]> args = new ArrayList<byte[]>(sets.length + 2);
    args.add(dstkey);
    args.add(toByteArray(sets.length));
    for (final byte[] set : sets) {
        args.add(set);
    }
    sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Common argument marshalling for EVAL/EVALSHA: the wire layout is
 * [script-or-sha, numkeys, params...].
 */
private void sendEvalCommand(Command command, byte[] script,
        byte[] keyCount, byte[][] params) {
    final byte[][] allArgs = new byte[params.length + 2][];
    allArgs[0] = script;
    allArgs[1] = keyCount;
    System.arraycopy(params, 0, allArgs, 2, params.length);
    sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++)
args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
// Sends BITOP <AND|OR|XOR|NOT> destKey srcKey [srcKey ...].
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
// Default keeps the compiler happy; every enum case overwrites it below.
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch (op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
// BITOP NOT accepts exactly one source key, so silently truncate the
// varargs to the first key. Extra keys passed by the caller are dropped
// here rather than rejected — NOTE(review): consider throwing instead.
len = Math.min(1, len);
break;
}
// Wire layout: [operation, destKey, srcKeys...].
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
Unstructured
package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Returns a new array with {@code first} prepended to {@code rest};
 * neither input array is modified.
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] joined = new byte[rest.length + 1][];
    joined[0] = first;
    System.arraycopy(rest, 0, joined, 1, rest.length);
    return joined;
}
public void setPassword(final String password) {
this.password = password;
}
@Override
public void connect() {
// Re-connecting an already-connected client is a no-op.
if (!isConnected()) {
super.connect();
// AUTH must be the first command after the socket opens; the reply is
// consumed here so later callers see a clean reply stream.
if (password != null) {
auth(password);
getStatusCodeReply();
}
// Re-select the previously chosen database (db is preserved across
// reconnects; quit()/disconnect() reset it to 0, where SELECT is implicit).
if (db > 0) {
select(Long.valueOf(db).intValue());
getStatusCodeReply();
}
}
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
// Sends a variadic ZADD: ZADD key score member [score member ...].
// NOTE(review): the map is keyed by score, so two members that share a score
// cannot both be passed through this overload — the map collapses them before
// this method ever runs. Fixing that requires changing the parameter type
// (e.g. Map<byte[], Double>), which would break callers; confirm no caller
// needs duplicate scores.
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
// Pre-size: one slot for the key plus two per score/member pair.
ArrayList<byte[]> args = new ArrayList<byte[]>(
scoreMembers.size() * 2 + 1);
args.add(key);
for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
args.add(toByteArray(entry.getKey()));
args.add(entry.getValue());
}
byte[][] argsArray = new byte[args.size()][];
args.toArray(argsArray);
sendCommand(ZADD, argsArray);
}
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
/**
 * Sends BLPOP key [key ...] timeout — the timeout goes last on the wire.
 * Delegates to the raw-args overload.
 */
public void blpop(final int timeout, final byte[]... keys) {
    final byte[][] args = new byte[keys.length + 1][];
    System.arraycopy(keys, 0, args, 0, keys.length);
    args[keys.length] = Protocol.toByteArray(timeout);
    blpop(args);
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
/**
 * Sends BRPOP key [key ...] timeout — the timeout goes last on the wire.
 * Delegates to the raw-args overload.
 */
public void brpop(final int timeout, final byte[]... keys) {
    final byte[][] args = new byte[keys.length + 1][];
    System.arraycopy(keys, 0, args, 0, keys.length);
    args[keys.length] = Protocol.toByteArray(timeout);
    brpop(args);
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
<<<<<<< MINE
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
=======
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
>>>>>>> YOURS
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
=======
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
=======
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
=======
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
=======
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
=======
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
=======
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
>>>>>>> YOURS
}
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
/**
 * ZREMRANGEBYSCORE key start end — raw byte[] bounds.
 * Fixed: a second overload declared as {@code byte start[], byte end[]}
 * had the exact same signature ({@code byte[]} and {@code byte x[]} are
 * the same type in Java), which is a duplicate-method compile error; the
 * redundant copy was removed.
 */
public void zremrangeByScore(final byte[] key, final byte[] start,
        final byte[] end) {
    sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
/**
 * Assembles and sends an EVAL/EVALSHA request: the script (or its SHA1),
 * the key count, then every additional parameter, in order.
 */
private void sendEvalCommand(Command command, byte[] script,
        byte[] keyCount, byte[][] params) {
    final byte[][] allArgs = new byte[params.length + 2][];
    allArgs[0] = script;
    allArgs[1] = keyCount;
    // Bulk-copy the remaining parameters after the two fixed slots.
    System.arraycopy(params, 0, allArgs, 2, params.length);
    sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++)
args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
/**
 * BITOP op destKey srcKeys... — bitwise operation across string keys.
 * Maps the {@link BitOP} value to the protocol keyword, then sends
 * keyword, destination key, and source keys as one command.
 */
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
    Keyword kw = Keyword.AND; // default; always overwritten by the switch below
    int len = srcKeys.length;
    switch (op) {
    case AND:
        kw = Keyword.AND;
        break;
    case OR:
        kw = Keyword.OR;
        break;
    case XOR:
        kw = Keyword.XOR;
        break;
    case NOT:
        kw = Keyword.NOT;
        // NOT is unary: silently truncate to at most one source key
        // (extra keys are dropped, not rejected).
        len = Math.min(1, len);
        break;
    }
    // Layout: [keyword, destKey, srcKey...]
    byte[][] bargs = new byte[len + 2][];
    bargs[0] = kw.raw;
    bargs[1] = destKey;
    for (int i = 0; i < len; ++i) {
        bargs[i + 2] = srcKeys[i];
    }
    sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}package redis.clients.jedis;
import static redis.clients.jedis.Protocol.toByteArray;
import static redis.clients.jedis.Protocol.Command.*;
import static redis.clients.jedis.Protocol.Keyword.ENCODING;
import static redis.clients.jedis.Protocol.Keyword.IDLETIME;
import static redis.clients.jedis.Protocol.Keyword.LEN;
import static redis.clients.jedis.Protocol.Keyword.LIMIT;
import static redis.clients.jedis.Protocol.Keyword.NO;
import static redis.clients.jedis.Protocol.Keyword.ONE;
import static redis.clients.jedis.Protocol.Keyword.REFCOUNT;
import static redis.clients.jedis.Protocol.Keyword.RESET;
import static redis.clients.jedis.Protocol.Keyword.STORE;
import static redis.clients.jedis.Protocol.Keyword.WITHSCORES;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import redis.clients.jedis.Protocol.Command;
import redis.clients.jedis.Protocol.Keyword;
import redis.clients.util.SafeEncoder;
public class BinaryClient extends Connection {
public enum LIST_POSITION {
BEFORE, AFTER;
public final byte[] raw;
private LIST_POSITION() {
raw = SafeEncoder.encode(name());
}
}
private boolean isInMulti;
private String password;
private long db;
public boolean isInMulti() {
return isInMulti;
}
public BinaryClient(final String host) {
super(host);
}
public BinaryClient(final String host, final int port) {
super(host, port);
}
/**
 * Returns a new array containing {@code first} followed by every element
 * of {@code rest}, preserving order. Used to prepend a key to a varargs
 * argument list before sending a command.
 */
private byte[][] joinParameters(byte[] first, byte[][] rest) {
    final byte[][] joined = new byte[rest.length + 1][];
    joined[0] = first;
    System.arraycopy(rest, 0, joined, 1, rest.length);
    return joined;
}
public void setPassword(final String password) {
this.password = password;
}
/**
 * Connects the underlying socket if not already connected, then restores
 * session state: re-authenticates with the stored password and re-selects
 * the stored database index, consuming each status reply synchronously.
 */
@Override
public void connect() {
    // No-op when already connected.
    if (!isConnected()) {
        super.connect();
        // Password is cached by setPassword()/auth(); replay it so a
        // reconnect re-establishes the authenticated session.
        if (password != null) {
            auth(password);
            getStatusCodeReply();
        }
        // db is cached by select(); db == 0 needs no SELECT since new
        // connections start on database 0.
        if (db > 0) {
            select(Long.valueOf(db).intValue());
            getStatusCodeReply();
        }
    }
}
public void ping() {
sendCommand(Command.PING);
}
public void set(final byte[] key, final byte[] value) {
sendCommand(Command.SET, key, value);
}
public void get(final byte[] key) {
sendCommand(Command.GET, key);
}
public void quit() {
db = 0;
sendCommand(QUIT);
}
public void exists(final byte[] key) {
sendCommand(EXISTS, key);
}
public void del(final byte[]... keys) {
sendCommand(DEL, keys);
}
public void type(final byte[] key) {
sendCommand(TYPE, key);
}
public void flushDB() {
sendCommand(FLUSHDB);
}
public void keys(final byte[] pattern) {
sendCommand(KEYS, pattern);
}
public void randomKey() {
sendCommand(RANDOMKEY);
}
public void rename(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAME, oldkey, newkey);
}
public void renamenx(final byte[] oldkey, final byte[] newkey) {
sendCommand(RENAMENX, oldkey, newkey);
}
public void dbSize() {
sendCommand(DBSIZE);
}
public void expire(final byte[] key, final int seconds) {
sendCommand(EXPIRE, key, toByteArray(seconds));
}
public void expireAt(final byte[] key, final long unixTime) {
sendCommand(EXPIREAT, key, toByteArray(unixTime));
}
public void ttl(final byte[] key) {
sendCommand(TTL, key);
}
public void select(final int index) {
db = index;
sendCommand(SELECT, toByteArray(index));
}
public void move(final byte[] key, final int dbIndex) {
sendCommand(MOVE, key, toByteArray(dbIndex));
}
public void flushAll() {
sendCommand(FLUSHALL);
}
public void getSet(final byte[] key, final byte[] value) {
sendCommand(GETSET, key, value);
}
public void mget(final byte[]... keys) {
sendCommand(MGET, keys);
}
public void setnx(final byte[] key, final byte[] value) {
sendCommand(SETNX, key, value);
}
public void setex(final byte[] key, final int seconds, final byte[] value) {
sendCommand(SETEX, key, toByteArray(seconds), value);
}
public void mset(final byte[]... keysvalues) {
sendCommand(MSET, keysvalues);
}
public void msetnx(final byte[]... keysvalues) {
sendCommand(MSETNX, keysvalues);
}
public void decrBy(final byte[] key, final long integer) {
sendCommand(DECRBY, key, toByteArray(integer));
}
public void decr(final byte[] key) {
sendCommand(DECR, key);
}
public void incrBy(final byte[] key, final long integer) {
sendCommand(INCRBY, key, toByteArray(integer));
}
public void incr(final byte[] key) {
sendCommand(INCR, key);
}
public void append(final byte[] key, final byte[] value) {
sendCommand(APPEND, key, value);
}
public void substr(final byte[] key, final int start, final int end) {
sendCommand(SUBSTR, key, toByteArray(start), toByteArray(end));
}
public void hset(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSET, key, field, value);
}
public void hget(final byte[] key, final byte[] field) {
sendCommand(HGET, key, field);
}
public void hsetnx(final byte[] key, final byte[] field, final byte[] value) {
sendCommand(HSETNX, key, field, value);
}
public void hmset(final byte[] key, final Map<byte[], byte[]> hash) {
final List<byte[]> params = new ArrayList<byte[]>();
params.add(key);
for (final Entry<byte[], byte[]> entry : hash.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
sendCommand(HMSET, params.toArray(new byte[params.size()][]));
}
public void hmget(final byte[] key, final byte[]... fields) {
final byte[][] params = new byte[fields.length + 1][];
params[0] = key;
System.arraycopy(fields, 0, params, 1, fields.length);
sendCommand(HMGET, params);
}
public void hincrBy(final byte[] key, final byte[] field, final long value) {
sendCommand(HINCRBY, key, field, toByteArray(value));
}
public void hexists(final byte[] key, final byte[] field) {
sendCommand(HEXISTS, key, field);
}
public void hdel(final byte[] key, final byte[]... fields) {
sendCommand(HDEL, joinParameters(key, fields));
}
public void hlen(final byte[] key) {
sendCommand(HLEN, key);
}
public void hkeys(final byte[] key) {
sendCommand(HKEYS, key);
}
public void hvals(final byte[] key) {
sendCommand(HVALS, key);
}
public void hgetAll(final byte[] key) {
sendCommand(HGETALL, key);
}
public void rpush(final byte[] key, final byte[]... strings) {
sendCommand(RPUSH, joinParameters(key, strings));
}
public void lpush(final byte[] key, final byte[]... strings) {
sendCommand(LPUSH, joinParameters(key, strings));
}
public void llen(final byte[] key) {
sendCommand(LLEN, key);
}
public void lrange(final byte[] key, final long start, final long end) {
sendCommand(LRANGE, key, toByteArray(start), toByteArray(end));
}
public void ltrim(final byte[] key, final long start, final long end) {
sendCommand(LTRIM, key, toByteArray(start), toByteArray(end));
}
public void lindex(final byte[] key, final long index) {
sendCommand(LINDEX, key, toByteArray(index));
}
public void lset(final byte[] key, final long index, final byte[] value) {
sendCommand(LSET, key, toByteArray(index), value);
}
public void lrem(final byte[] key, long count, final byte[] value) {
sendCommand(LREM, key, toByteArray(count), value);
}
public void lpop(final byte[] key) {
sendCommand(LPOP, key);
}
public void rpop(final byte[] key) {
sendCommand(RPOP, key);
}
public void rpoplpush(final byte[] srckey, final byte[] dstkey) {
sendCommand(RPOPLPUSH, srckey, dstkey);
}
public void sadd(final byte[] key, final byte[]... members) {
sendCommand(SADD, joinParameters(key, members));
}
public void smembers(final byte[] key) {
sendCommand(SMEMBERS, key);
}
public void srem(final byte[] key, final byte[]... members) {
sendCommand(SREM, joinParameters(key, members));
}
public void spop(final byte[] key) {
sendCommand(SPOP, key);
}
public void smove(final byte[] srckey, final byte[] dstkey,
final byte[] member) {
sendCommand(SMOVE, srckey, dstkey, member);
}
public void scard(final byte[] key) {
sendCommand(SCARD, key);
}
public void sismember(final byte[] key, final byte[] member) {
sendCommand(SISMEMBER, key, member);
}
public void sinter(final byte[]... keys) {
sendCommand(SINTER, keys);
}
public void sinterstore(final byte[] dstkey, final byte[]... keys) {
final byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SINTERSTORE, params);
}
public void sunion(final byte[]... keys) {
sendCommand(SUNION, keys);
}
public void sunionstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SUNIONSTORE, params);
}
public void sdiff(final byte[]... keys) {
sendCommand(SDIFF, keys);
}
public void sdiffstore(final byte[] dstkey, final byte[]... keys) {
byte[][] params = new byte[keys.length + 1][];
params[0] = dstkey;
System.arraycopy(keys, 0, params, 1, keys.length);
sendCommand(SDIFFSTORE, params);
}
public void srandmember(final byte[] key) {
sendCommand(SRANDMEMBER, key);
}
public void zadd(final byte[] key, final double score, final byte[] member) {
sendCommand(ZADD, key, toByteArray(score), member);
}
/**
 * ZADD of multiple score/member pairs in a single command:
 * ZADD key score1 member1 score2 member2 ...
 *
 * NOTE(review): the map is keyed by score, so two members sharing the
 * same score cannot both be supplied through this overload (the map can
 * hold only one entry per score) — confirm with callers whether that
 * limitation is acceptable.
 */
public void zaddBinary(final byte[] key, Map<Double, byte[]> scoreMembers) {
    // Pre-size: one slot for the key plus two per score/member pair.
    ArrayList<byte[]> args = new ArrayList<byte[]>(
            scoreMembers.size() * 2 + 1);
    args.add(key);
    for (Map.Entry<Double, byte[]> entry : scoreMembers.entrySet()) {
        args.add(toByteArray(entry.getKey()));
        args.add(entry.getValue());
    }
    byte[][] argsArray = new byte[args.size()][];
    args.toArray(argsArray);
    sendCommand(ZADD, argsArray);
}
public void zrange(final byte[] key, final long start, final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrem(final byte[] key, final byte[]... members) {
sendCommand(ZREM, joinParameters(key, members));
}
public void zincrby(final byte[] key, final double score,
final byte[] member) {
sendCommand(ZINCRBY, key, toByteArray(score), member);
}
public void zrank(final byte[] key, final byte[] member) {
sendCommand(ZRANK, key, member);
}
public void zrevrank(final byte[] key, final byte[] member) {
sendCommand(ZREVRANK, key, member);
}
public void zrevrange(final byte[] key, final long start, final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end));
}
public void zrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zrevrangeWithScores(final byte[] key, final long start,
final long end) {
sendCommand(ZREVRANGE, key, toByteArray(start), toByteArray(end),
WITHSCORES.raw);
}
public void zcard(final byte[] key) {
sendCommand(ZCARD, key);
}
public void zscore(final byte[] key, final byte[] member) {
sendCommand(ZSCORE, key, member);
}
public void multi() {
sendCommand(MULTI);
isInMulti = true;
}
public void discard() {
sendCommand(DISCARD);
isInMulti = false;
}
public void exec() {
sendCommand(EXEC);
isInMulti = false;
}
public void watch(final byte[]... keys) {
sendCommand(WATCH, keys);
}
public void unwatch() {
sendCommand(UNWATCH);
}
public void sort(final byte[] key) {
sendCommand(SORT, key);
}
public void sort(final byte[] key, final SortingParams sortingParameters) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void blpop(final byte[][] args) {
sendCommand(BLPOP, args);
}
public void blpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
blpop(args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final SortingParams sortingParameters,
final byte[] dstkey) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(key);
args.addAll(sortingParameters.getParams());
args.add(STORE.raw);
args.add(dstkey);
sendCommand(SORT, args.toArray(new byte[args.size()][]));
}
public void sort(final byte[] key, final byte[] dstkey) {
sendCommand(SORT, key, STORE.raw, dstkey);
}
public void brpop(final byte[][] args) {
sendCommand(BRPOP, args);
}
public void brpop(final int timeout, final byte[]... keys) {
final List<byte[]> args = new ArrayList<byte[]>();
for (final byte[] arg : keys) {
args.add(arg);
}
args.add(Protocol.toByteArray(timeout));
brpop(args.toArray(new byte[args.size()][]));
}
public void auth(final String password) {
setPassword(password);
sendCommand(AUTH, password);
}
public void subscribe(final byte[]... channels) {
sendCommand(SUBSCRIBE, channels);
}
public void publish(final byte[] channel, final byte[] message) {
sendCommand(PUBLISH, channel, message);
}
public void unsubscribe() {
sendCommand(UNSUBSCRIBE);
}
public void unsubscribe(final byte[]... channels) {
sendCommand(UNSUBSCRIBE, channels);
}
public void psubscribe(final byte[]... patterns) {
sendCommand(PSUBSCRIBE, patterns);
}
public void punsubscribe() {
sendCommand(PUNSUBSCRIBE);
}
public void punsubscribe(final byte[]... patterns) {
sendCommand(PUNSUBSCRIBE, patterns);
}
<<<<<<< MINE
public void zcount(final byte[] key, final byte[] min, final byte[] max) {
sendCommand(ZCOUNT, key, min, max);
=======
public void zcount(final byte[] key, final double min, final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZCOUNT, key, byteArrayMin, byteArrayMax);
}
public void zcount(final byte[] key, final byte min[], final byte max[]) {
sendCommand(ZCOUNT, key, min, max);
}
public void zcount(final byte[] key, final String min, final String max) {
sendCommand(ZCOUNT, key, min.getBytes(), max.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax);
>>>>>>> YOURS
}
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max);
}
public void zrangeByScore(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes());
}
public void zrevrangeByScore(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin);
}
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min);
}
<<<<<<< MINE
public void zrangeByScore(final byte[] key, final byte[] min,
final byte[] max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count));
=======
public void zrevrangeByScore(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes());
}
public void zrangeByScore(final byte[] key, final double min,
final double max, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final byte min[],
final byte max[], final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrangeByScore(final byte[] key, final String min,
final String max, final int offset, int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScore(final byte[] key, final byte[] max,
final byte[] min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count));
=======
public void zrevrangeByScore(final byte[] key, final double max,
final double min, final int offset, int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final byte max[],
final byte min[], final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count));
}
public void zrevrangeByScore(final byte[] key, final String max,
final String min, final int offset, int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count));
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max) {
sendCommand(ZRANGEBYSCORE, key, min, max, WITHSCORES.raw);
=======
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[]) {
sendCommand(ZRANGEBYSCORE, key, min, max,
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, WITHSCORES.raw);
=======
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[]) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrangeByScoreWithScores(final byte[] key, final byte[] min,
final byte[] max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
=======
public void zrangeByScoreWithScores(final byte[] key, final double min,
final double max, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZRANGEBYSCORE, key, byteArrayMin, byteArrayMax,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final byte min[],
final byte max[], final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min, max,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrangeByScoreWithScores(final byte[] key, final String min,
final String max, final int offset, final int count) {
sendCommand(ZRANGEBYSCORE, key, min.getBytes(), max.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
>>>>>>> YOURS
}
<<<<<<< MINE
public void zrevrangeByScoreWithScores(final byte[] key, final byte[] max,
final byte[] min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min, LIMIT.raw,
toByteArray(offset), toByteArray(count), WITHSCORES.raw);
=======
public void zrevrangeByScoreWithScores(final byte[] key, final double max,
final double min, final int offset, final int count) {
byte byteArrayMin[] = (min == Double.NEGATIVE_INFINITY) ? "-inf".getBytes() : toByteArray(min);
byte byteArrayMax[] = (max == Double.POSITIVE_INFINITY) ? "+inf".getBytes() : toByteArray(max);
sendCommand(ZREVRANGEBYSCORE, key, byteArrayMax, byteArrayMin,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final byte max[],
final byte min[], final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max, min,
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
}
public void zrevrangeByScoreWithScores(final byte[] key, final String max,
final String min, final int offset, final int count) {
sendCommand(ZREVRANGEBYSCORE, key, max.getBytes(), min.getBytes(),
LIMIT.raw, toByteArray(offset), toByteArray(count),
WITHSCORES.raw);
>>>>>>> YOURS
}
public void zremrangeByRank(final byte[] key, final long start,
final long end) {
sendCommand(ZREMRANGEBYRANK, key, toByteArray(start), toByteArray(end));
}
public void zremrangeByScore(final byte[] key, final byte[] start,
final byte[] end) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final byte start[],
final byte end[]) {
sendCommand(ZREMRANGEBYSCORE, key, start, end);
}
public void zremrangeByScore(final byte[] key, final String start,
final String end) {
sendCommand(ZREMRANGEBYSCORE, key, start.getBytes(), end.getBytes());
}
public void zunionstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZUNIONSTORE, params);
}
public void zunionstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZUNIONSTORE, args.toArray(new byte[args.size()][]));
}
public void zinterstore(final byte[] dstkey, final byte[]... sets) {
final byte[][] params = new byte[sets.length + 2][];
params[0] = dstkey;
params[1] = Protocol.toByteArray(sets.length);
System.arraycopy(sets, 0, params, 2, sets.length);
sendCommand(ZINTERSTORE, params);
}
public void zinterstore(final byte[] dstkey, final ZParams params,
final byte[]... sets) {
final List<byte[]> args = new ArrayList<byte[]>();
args.add(dstkey);
args.add(Protocol.toByteArray(sets.length));
for (final byte[] set : sets) {
args.add(set);
}
args.addAll(params.getParams());
sendCommand(ZINTERSTORE, args.toArray(new byte[args.size()][]));
}
public void save() {
sendCommand(SAVE);
}
public void bgsave() {
sendCommand(BGSAVE);
}
public void bgrewriteaof() {
sendCommand(BGREWRITEAOF);
}
public void lastsave() {
sendCommand(LASTSAVE);
}
public void shutdown() {
sendCommand(SHUTDOWN);
}
public void info() {
sendCommand(INFO);
}
public void info(final String section) {
sendCommand(INFO, section);
}
public void monitor() {
sendCommand(MONITOR);
}
public void slaveof(final String host, final int port) {
sendCommand(SLAVEOF, host, String.valueOf(port));
}
public void slaveofNoOne() {
sendCommand(SLAVEOF, NO.raw, ONE.raw);
}
public void configGet(final byte[] pattern) {
sendCommand(CONFIG, Keyword.GET.raw, pattern);
}
public void configSet(final byte[] parameter, final byte[] value) {
sendCommand(CONFIG, Keyword.SET.raw, parameter, value);
}
public void strlen(final byte[] key) {
sendCommand(STRLEN, key);
}
public void sync() {
sendCommand(SYNC);
}
public void lpushx(final byte[] key, final byte[]... string) {
sendCommand(LPUSHX, joinParameters(key, string));
}
public void persist(final byte[] key) {
sendCommand(PERSIST, key);
}
public void rpushx(final byte[] key, final byte[]... string) {
sendCommand(RPUSHX, joinParameters(key, string));
}
public void echo(final byte[] string) {
sendCommand(ECHO, string);
}
public void linsert(final byte[] key, final LIST_POSITION where,
final byte[] pivot, final byte[] value) {
sendCommand(LINSERT, key, where.raw, pivot, value);
}
public void debug(final DebugParams params) {
sendCommand(DEBUG, params.getCommand());
}
public void brpoplpush(final byte[] source, final byte[] destination,
final int timeout) {
sendCommand(BRPOPLPUSH, source, destination, toByteArray(timeout));
}
public void configResetStat() {
sendCommand(CONFIG, Keyword.RESETSTAT.name());
}
public void setbit(byte[] key, long offset, byte[] value) {
sendCommand(SETBIT, key, toByteArray(offset), value);
}
public void setbit(byte[] key, long offset, boolean value) {
sendCommand(SETBIT, key, toByteArray(offset), toByteArray(value));
}
public void getbit(byte[] key, long offset) {
sendCommand(GETBIT, key, toByteArray(offset));
}
public void setrange(byte[] key, long offset, byte[] value) {
sendCommand(SETRANGE, key, toByteArray(offset), value);
}
public void getrange(byte[] key, long startOffset, long endOffset) {
sendCommand(GETRANGE, key, toByteArray(startOffset),
toByteArray(endOffset));
}
public Long getDB() {
return db;
}
public void disconnect() {
db = 0;
super.disconnect();
}
private void sendEvalCommand(Command command, byte[] script,
byte[] keyCount, byte[][] params) {
final byte[][] allArgs = new byte[params.length + 2][];
allArgs[0] = script;
allArgs[1] = keyCount;
for (int i = 0; i < params.length; i++)
allArgs[i + 2] = params[i];
sendCommand(command, allArgs);
}
public void eval(byte[] script, byte[] keyCount, byte[][] params) {
sendEvalCommand(EVAL, script, keyCount, params);
}
public void eval(byte[] script, int keyCount, byte[]... params) {
eval(script, toByteArray(keyCount), params);
}
public void evalsha(byte[] sha1, byte[] keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, keyCount, params);
}
public void evalsha(byte[] sha1, int keyCount, byte[]... params) {
sendEvalCommand(EVALSHA, sha1, toByteArray(keyCount), params);
}
public void scriptFlush() {
sendCommand(SCRIPT, Keyword.FLUSH.raw);
}
public void scriptExists(byte[]... sha1) {
byte[][] args = new byte[sha1.length + 1][];
args[0] = Keyword.EXISTS.raw;
for (int i = 0; i < sha1.length; i++)
args[i + 1] = sha1[i];
sendCommand(SCRIPT, args);
}
public void scriptLoad(byte[] script) {
sendCommand(SCRIPT, Keyword.LOAD.raw, script);
}
public void scriptKill() {
sendCommand(SCRIPT, Keyword.KILL.raw);
}
public void slowlogGet() {
sendCommand(SLOWLOG, Keyword.GET.raw);
}
public void slowlogGet(long entries) {
sendCommand(SLOWLOG, Keyword.GET.raw, toByteArray(entries));
}
public void slowlogReset() {
sendCommand(SLOWLOG, RESET.raw);
}
public void slowlogLen() {
sendCommand(SLOWLOG, LEN.raw);
}
public void objectRefcount(byte[] key) {
sendCommand(OBJECT, REFCOUNT.raw, key);
}
public void objectIdletime(byte[] key) {
sendCommand(OBJECT, IDLETIME.raw, key);
}
public void objectEncoding(byte[] key) {
sendCommand(OBJECT, ENCODING.raw, key);
}
public void bitcount(byte[] key) {
sendCommand(BITCOUNT, key);
}
public void bitcount(byte[] key, long start, long end) {
sendCommand(BITCOUNT, key, toByteArray(start), toByteArray(end));
}
public void bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
Keyword kw = Keyword.AND;
int len = srcKeys.length;
switch (op) {
case AND:
kw = Keyword.AND;
break;
case OR:
kw = Keyword.OR;
break;
case XOR:
kw = Keyword.XOR;
break;
case NOT:
kw = Keyword.NOT;
len = Math.min(1, len);
break;
}
byte[][] bargs = new byte[len + 2][];
bargs[0] = kw.raw;
bargs[1] = destKey;
for (int i = 0; i < len; ++i) {
bargs[i + 2] = srcKeys[i];
}
sendCommand(BITOP, bargs);
}
public void sentinel(final byte[]... args) {
sendCommand(SENTINEL, args);
}
public void dump(final byte[] key) {
sendCommand(DUMP, key);
}
public void restore(final byte[] key, final int ttl, final byte[] serializedValue) {
sendCommand(RESTORE, key, toByteArray(ttl), serializedValue);
}
public void pexpire(final byte[] key, final int milliseconds) {
sendCommand(PEXPIRE, key, toByteArray(milliseconds));
}
public void pexpireAt(final byte[] key, final long millisecondsTimestamp) {
sendCommand(PEXPIREAT, key, toByteArray(millisecondsTimestamp));
}
public void pttl(final byte[] key) {
sendCommand(PTTL, key);
}
public void incrByFloat(final byte[] key, final double increment) {
sendCommand(INCRBYFLOAT, key, toByteArray(increment));
}
public void psetex(final byte[] key, final int milliseconds, final byte[] value) {
sendCommand(PSETEX, key, toByteArray(milliseconds), value);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx) {
sendCommand(Command.SET, key, value, nxxx);
}
public void set(final byte[] key, final byte[] value, final byte[] nxxx, final byte[] expx, final int time) {
sendCommand(Command.SET, key, value, nxxx, expx, toByteArray(time));
}
public void srandmember(final byte[] key, final int count) {
sendCommand(SRANDMEMBER, key, toByteArray(count));
}
public void clientKill(final byte[] client) {
sendCommand(CLIENT, Keyword.KILL.raw, client);
}
public void clientGetname() {
sendCommand(CLIENT, Keyword.GETNAME.raw);
}
public void clientList() {
sendCommand(CLIENT, Keyword.LIST.raw);
}
public void clientSetname(final byte[] name) {
sendCommand(CLIENT, Keyword.SETNAME.raw, name);
}
public void time() {
sendCommand(TIME);
}
public void migrate(final byte[] host, final int port, final byte[] key, final int destinationDb, final int timeout) {
sendCommand(MIGRATE, host, toByteArray(port), key, toByteArray(destinationDb), toByteArray(timeout));
}
public void hincrByFloat(final byte[] key, final byte[] field, double increment) {
sendCommand(HINCRBYFLOAT, key, field, toByteArray(increment));
}
}
Diff Result
No diff
Case 24 - java_jedis.rev_d3362_f6254..Connection.java
Base
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(String host) {
super();
this.host = host;
}
protected Connection sendCommand(String name, String... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(String host, int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public String getBulkReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<String> getMultiBulkReply() {
pipelinedCommands--;
return (List<String>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(String host) {
super();
this.host = host;
}
protected Connection sendCommand(String name, String... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(String host, int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public String getBulkReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<String> getMultiBulkReply() {
pipelinedCommands--;
return (List<String>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
Left
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
Right
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(String host) {
super();
this.host = host;
}
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(String host, int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public String getBulkReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<String> getMultiBulkReply() {
pipelinedCommands--;
return (List<String>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(String host) {
super();
this.host = host;
}
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(String host, int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public String getBulkReply() {
pipelinedCommands--;
return (String) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<String> getMultiBulkReply() {
pipelinedCommands--;
return (List<String>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
MergeMethods
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for (int i = 0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
// Sends a command with raw binary arguments and counts it as pipelined.
// BUGFIX: the merged code passed an undefined variable `name` to
// protocol.sendCommand (a compile error); the Command enum `cmd` is what
// must be sent. Also connects lazily first, matching the sibling
// sendCommand(Command) overload, so a fresh Connection works without an
// explicit connect() call.
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if (null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for (final byte[] barray : bresult) {
if (barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for (int i = 0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
// Sends a command with raw binary arguments and counts it as pipelined.
// BUGFIX: the merged code passed an undefined variable `name` to
// protocol.sendCommand (a compile error); the Command enum `cmd` is what
// must be sent. Also connects lazily first, matching the sibling
// sendCommand(Command) overload.
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if (null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for (final byte[] barray : bresult) {
if (barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
KeepBothMethods
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for (int i = 0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if (null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for (final byte[] barray : bresult) {
if (barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for (int i = 0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if (null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for (final byte[] barray : bresult) {
if (barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
Safe
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
<<<<<<< MINE
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
=======
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
>>>>>>> YOURS
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
<<<<<<< MINE
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
pipelinedCommands++;
return this;
}
=======
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
pipelinedCommands++;
return this;
}
>>>>>>> YOURS
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
Unstructured
package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
<<<<<<< MINE
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
=======
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
>>>>>>> YOURS
pipelinedCommands++;
return this;
}
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
public void disconnect() {
if (isConnected()) {
try {
inputStream.close();
outputStream.close();
if (!socket.isClosed()) {
socket.close();
}
} catch (IOException ex) {
throw new JedisException(ex);
}
}
}
public boolean isConnected() {
return socket != null && socket.isBound() && !socket.isClosed()
&& socket.isConnected() && !socket.isInputShutdown()
&& !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}package redis.clients.jedis;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Protocol.Command;
import redis.clients.util.RedisInputStream;
import redis.clients.util.RedisOutputStream;
public class Connection {
private String host;
private int port = Protocol.DEFAULT_PORT;
private Socket socket;
private Protocol protocol = new Protocol();
private RedisOutputStream outputStream;
private RedisInputStream inputStream;
private int pipelinedCommands = 0;
private int timeout = 2000;
public int getTimeout() {
return timeout;
}
public void setTimeout(final int timeout) {
this.timeout = timeout;
}
/**
 * Disables the socket read timeout so reads block indefinitely.
 *
 * NOTE(review): assumes the socket is already connected — calling this
 * before connect() would throw a NullPointerException; confirm callers.
 *
 * @throws JedisException if the socket option cannot be set
 */
public void setTimeoutInfinite() {
try {
socket.setSoTimeout(0);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
/**
 * Restores the configured read timeout after a setTimeoutInfinite() call.
 *
 * @throws JedisException if the socket option cannot be set
 */
public void rollbackTimeout() {
try {
socket.setSoTimeout(timeout);
} catch (SocketException ex) {
throw new JedisException(ex);
}
}
public Connection(final String host) {
super();
this.host = host;
}
<<<<<<< MINE
protected Connection sendCommand(final Command cmd, final String... args) {
final byte[][] bargs = new byte[args.length][];
for(int i=0; i < args.length; i++) {
bargs[i] = args[i].getBytes(Protocol.UTF8);
}
return sendCommand(cmd, bargs);
}
protected Connection sendCommand(final Command cmd, final byte[]... args) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
protocol.sendCommand(outputStream, cmd, args);
=======
protected Connection sendCommand(String name, String... args) {
protocol.sendCommand(outputStream, name, args);
>>>>>>> YOURS
pipelinedCommands++;
return this;
}
/**
 * Sends a command with no arguments, connecting lazily if needed.
 *
 * @param cmd the Redis command to send
 * @return this connection, for chaining
 * @throws JedisException if the connection cannot be established
 */
protected Connection sendCommand(final Command cmd) {
try {
connect();
} catch (UnknownHostException e) {
throw new JedisException("Could not connect to redis-server", e);
} catch (IOException e) {
throw new JedisException("Could not connect to redis-server", e);
}
// Empty 2-D array: command carries no arguments.
protocol.sendCommand(outputStream, cmd, new byte[0][]);
pipelinedCommands++;
return this;
}
public Connection(final String host, final int port) {
super();
this.host = host;
this.port = port;
}
public String getHost() {
return host;
}
public void setHost(final String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(final int port) {
this.port = port;
}
public Connection() {
}
/**
 * Opens the socket with the configured timeout and wraps its streams,
 * if not already connected.
 *
 * NOTE(review): if stream creation fails after the socket is opened, the
 * socket is not closed here — possible descriptor leak; confirm whether
 * callers handle this.
 *
 * @throws UnknownHostException if the host cannot be resolved
 * @throws IOException if the socket or streams cannot be opened
 */
public void connect() throws UnknownHostException, IOException {
if (!isConnected()) {
socket = new Socket(host, port);
socket.setSoTimeout(timeout);
outputStream = new RedisOutputStream(socket.getOutputStream());
inputStream = new RedisInputStream(socket.getInputStream());
}
}
/**
 * Closes the streams and the underlying socket.
 *
 * Bug fix: previously, if closing a stream threw an IOException the socket
 * was never closed, leaking the file descriptor. The socket is now closed
 * in a finally block so it is released even when a stream close fails.
 *
 * @throws JedisException if any close operation fails
 */
public void disconnect() {
    if (isConnected()) {
        try {
            inputStream.close();
            outputStream.close();
        } catch (IOException ex) {
            throw new JedisException(ex);
        } finally {
            try {
                if (!socket.isClosed()) {
                    socket.close();
                }
            } catch (IOException ex) {
                throw new JedisException(ex);
            }
        }
    }
}
/**
 * Reports whether the socket exists and is fully usable: bound, open,
 * connected, and neither direction shut down.
 */
public boolean isConnected() {
    if (socket == null) {
        return false;
    }
    return socket.isBound()
        && !socket.isClosed()
        && socket.isConnected()
        && !socket.isInputShutdown()
        && !socket.isOutputShutdown();
}
protected String getStatusCodeReply() {
pipelinedCommands--;
final byte[] resp = (byte[]) protocol.read(inputStream);
if (null == resp) {
return null;
} else {
return new String(resp, Protocol.UTF8);
}
}
public String getBulkReply() {
final byte[] result = getBinaryBulkReply();
if (null != result) {
return new String(result, Protocol.UTF8);
} else {
return null;
}
}
public byte[] getBinaryBulkReply() {
pipelinedCommands--;
return (byte[]) protocol.read(inputStream);
}
public Integer getIntegerReply() {
pipelinedCommands--;
return (Integer) protocol.read(inputStream);
}
public List<String> getMultiBulkReply() {
final List<byte[]> bresult = getBinaryMultiBulkReply();
if(null == bresult) {
return null;
}
final ArrayList<String> result = new ArrayList<String>(bresult.size());
for(final byte[] barray : bresult) {
if( barray == null) {
result.add(null);
} else {
result.add(new String(barray, Protocol.UTF8));
}
}
return result;
}
@SuppressWarnings("unchecked")
public List<byte[]> getBinaryMultiBulkReply() {
pipelinedCommands--;
return (List<byte[]>) protocol.read(inputStream);
}
@SuppressWarnings("unchecked")
public List<Object> getObjectMultiBulkReply() {
pipelinedCommands--;
return (List<Object>) protocol.read(inputStream);
}
public List<Object> getAll() {
List<Object> all = new ArrayList<Object>();
while (pipelinedCommands > 0) {
all.add(protocol.read(inputStream));
pipelinedCommands--;
}
return all;
}
public Object getOne() {
pipelinedCommands--;
return protocol.read(inputStream);
}
}
Diff Result
No diff
Case 25 - java_junit.rev_c8657_99a84..BlockJUnit4ClassRunner.java
Base
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Interceptor;
import org.junit.experimental.interceptor.StatementInterceptor;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
private void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
// TODO (May 26, 2009 10:48:26 PM): don't override this
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : interceptorFields())
validateInterceptorField(each.getField(), errors);
}
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!StatementInterceptor.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withInterceptors(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*/
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*/
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*/
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(
Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
 * Wraps {@code statement} with each StatementInterceptor found on the
 * test class's @Interceptor-annotated fields, innermost first.
 *
 * Bug fix: the original rethrow dropped the IllegalAccessException, losing
 * the root cause; it is now attached as the cause of the RuntimeException.
 *
 * @param method    the test method being run
 * @param test      the test fixture whose interceptor fields are read
 * @param statement the statement to wrap
 * @return the fully wrapped statement
 */
protected Statement withInterceptors(FrameworkMethod method, Object test,
        Statement statement) {
    // TODO (May 26, 2009 11:16:13 PM): outsource to a class?
    Statement result= statement;
    for (FrameworkField each : interceptorFields())
        try {
            StatementInterceptor interceptor= (StatementInterceptor) each
                    .get(test);
            result= interceptor.intercept(result, method);
        } catch (IllegalAccessException e) {
            // Preserve the cause so the underlying access failure is visible.
            throw new RuntimeException(
                    "How did getFields return a field we couldn't access?", e);
        }
    return result;
}
private List<FrameworkField> interceptorFields() {
return getTestClass().getAnnotatedFields(Interceptor.class);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*/
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
// TODO (May 11, 2009 11:28:21 PM):
// withBefores/withAfters/withBeforeClass/withAfterClass is a lot of
// duplication.
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(
After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
/**
 * Returns the exception type declared via @Test(expected=...), or null when
 * the annotation is absent or declares the None sentinel.
 */
private Class<? extends Throwable> getExpectedException(Test annotation) {
    if (annotation != null && annotation.expected() != None.class)
        return annotation.expected();
    return null;
}
// True when the @Test annotation declares a non-None expected exception.
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
/**
 * Returns the timeout declared on the @Test annotation, or 0 when the
 * annotation is absent.
 */
private long getTimeout(Test annotation) {
    return annotation == null ? 0 : annotation.timeout();
}
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Interceptor;
import org.junit.experimental.interceptor.StatementInterceptor;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
private void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
// TODO (May 26, 2009 10:48:26 PM): don't override this
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : interceptorFields())
validateInterceptorField(each.getField(), errors);
}
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!StatementInterceptor.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withInterceptors(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*/
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*/
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*/
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(
Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
 * Wraps {@code statement} with each StatementInterceptor found on the
 * test class's @Interceptor-annotated fields, innermost first.
 *
 * Bug fix: the original rethrow dropped the IllegalAccessException, losing
 * the root cause; it is now attached as the cause of the RuntimeException.
 *
 * @param method    the test method being run
 * @param test      the test fixture whose interceptor fields are read
 * @param statement the statement to wrap
 * @return the fully wrapped statement
 */
protected Statement withInterceptors(FrameworkMethod method, Object test,
        Statement statement) {
    // TODO (May 26, 2009 11:16:13 PM): outsource to a class?
    Statement result= statement;
    for (FrameworkField each : interceptorFields())
        try {
            StatementInterceptor interceptor= (StatementInterceptor) each
                    .get(test);
            result= interceptor.intercept(result, method);
        } catch (IllegalAccessException e) {
            // Preserve the cause so the underlying access failure is visible.
            throw new RuntimeException(
                    "How did getFields return a field we couldn't access?", e);
        }
    return result;
}
private List<FrameworkField> interceptorFields() {
return getTestClass().getAnnotatedFields(Interceptor.class);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*/
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
// TODO (May 11, 2009 11:28:21 PM):
// withBefores/withAfters/withBeforeClass/withAfterClass is a lot of
// duplication.
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(
After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
Left
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
    /**
     * Creates a BlockJUnit4ClassRunner to run {@code klass}
     *
     * @throws InitializationError
     *             if the test class is malformed.
     */
    public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
        super(klass);
    }

    //
    // Implementation of ParentRunner
    //

    /**
     * Runs the test described by {@code method}, reporting its lifecycle
     * (ignored, started, assumption failure, failure, finished) to
     * {@code notifier}. {@code @Ignore}'d methods are reported but never run.
     */
    @Override
    protected void runChild(FrameworkMethod method, RunNotifier notifier) {
        EachTestNotifier eachNotifier = makeNotifier(method, notifier);
        if (method.getAnnotation(Ignore.class) != null) {
            eachNotifier.fireTestIgnored();
            return;
        }
        eachNotifier.fireTestStarted();
        try {
            methodBlock(method).evaluate();
        } catch (AssumptionViolatedException e) {
            // A violated assumption is not a failure; it is recorded separately.
            eachNotifier.addFailedAssumption(e);
        } catch (Throwable e) {
            eachNotifier.addFailure(e);
        } finally {
            // Always pair fireTestStarted() with fireTestFinished().
            eachNotifier.fireTestFinished();
        }
    }

    /** Describes {@code method} by test class, test name, and annotations. */
    @Override
    protected Description describeChild(FrameworkMethod method) {
        return Description.createTestDescription(getTestClass().getJavaClass(),
                testName(method), method.getAnnotations());
    }

    @Override
    protected List<FrameworkMethod> getChildren() {
        return computeTestMethods();
    }

    //
    // Override in subclasses
    //

    /**
     * Returns the methods that run tests. Default implementation
     * returns all methods annotated with {@code @Test} on this
     * class and superclasses that are not overridden.
     */
    protected List<FrameworkMethod> computeTestMethods() {
        return getTestClass().getAnnotatedMethods(Test.class);
    }

    @Override
    protected void collectInitializationErrors(List<Throwable> errors) {
        super.collectInitializationErrors(errors);
        validateConstructor(errors);
        validateInstanceMethods(errors);
        validateFields(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor,
     * or if the constructor takes parameters. Override if a subclass requires
     * different validation rules.
     */
    protected void validateConstructor(List<Throwable> errors) {
        validateOnlyOneConstructor(errors);
        validateZeroArgConstructor(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor
     * (do not override)
     */
    protected void validateOnlyOneConstructor(List<Throwable> errors) {
        if (!hasOneConstructor()) {
            String gripe = "Test class should have exactly one public constructor";
            errors.add(new Exception(gripe));
        }
    }

    /**
     * Adds to {@code errors} if the test class's single constructor
     * takes parameters
     * (do not override)
     */
    protected void validateZeroArgConstructor(List<Throwable> errors) {
        if (hasOneConstructor()
                && !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
            String gripe = "Test class should have exactly one public zero-argument constructor";
            errors.add(new Exception(gripe));
        }
    }

    /** Returns true if the test class declares exactly one public constructor. */
    private boolean hasOneConstructor() {
        return getTestClass().getJavaClass().getConstructors().length == 1;
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test},
     * {@code @Before}, or {@code @After} that is not a public, void instance
     * method with no arguments.
     */
    protected void validateInstanceMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(After.class, false, errors);
        validatePublicVoidNoArgMethods(Before.class, false, errors);
        validateTestMethods(errors);
        // A test class with no runnable methods is itself an error.
        if (computeTestMethods().isEmpty())
            errors.add(new Exception("No runnable methods"));
    }

    /** Adds to {@code errors} for each {@code @Rule} field that is not valid. */
    protected void validateFields(List<Throwable> errors) {
        for (FrameworkField each : ruleFields())
            validateRuleField(each.getField(), errors);
    }

    /** A rule field must be public and typed as a {@link MethodRule}. */
    private void validateRuleField(Field field, List<Throwable> errors) {
        // Message matches the actual check: the field's type must implement
        // MethodRule (the old "StatementInterceptor" wording was stale).
        if (!MethodRule.class.isAssignableFrom(field.getType()))
            errors.add(new Exception("Field " + field.getName()
                    + " must implement MethodRule"));
        // Must be public so createRule() can read its value reflectively.
        if (!Modifier.isPublic(field.getModifiers()))
            errors.add(new Exception("Field " + field.getName()
                    + " must be public"));
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test} that
     * is not a public, void instance method with no arguments.
     */
    protected void validateTestMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(Test.class, false, errors);
    }

    /**
     * Returns a new fixture for running a test. Default implementation executes
     * the test class's no-argument constructor (validation should have ensured
     * one exists).
     */
    protected Object createTest() throws Exception {
        return getTestClass().getOnlyConstructor().newInstance();
    }

    /**
     * Returns the name that describes {@code method} for {@link Description}s.
     * Default implementation is the method's name
     */
    protected String testName(FrameworkMethod method) {
        return method.getName();
    }

    /**
     * Returns a Statement that, when executed, either returns normally if
     * {@code method} passes, or throws an exception if {@code method} fails.
     *
     * Here is an outline of the default implementation:
     *
     * <ul>
     * <li>Invoke {@code method} on the result of {@code createTest()}, and
     * throw any exceptions thrown by either operation.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * expecting} attribute, return normally only if the previous step threw an
     * exception of the correct type, and throw an exception otherwise.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * timeout} attribute, throw an exception if the previous step takes more
     * than the specified number of milliseconds.
     * <li>ALWAYS run all non-overridden {@code @Before} methods on this class
     * and superclasses before any of the previous steps; if any throws an
     * Exception, stop execution and pass the exception on.
     * <li>ALWAYS run all non-overridden {@code @After} methods on this class
     * and superclasses after any of the previous steps; all After methods are
     * always executed: exceptions thrown by previous steps are combined, if
     * necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     * </ul>
     *
     * This can be overridden in subclasses, either by overriding this method,
     * or the implementations creating each sub-statement.
     */
    protected Statement methodBlock(FrameworkMethod method) {
        Object test;
        try {
            // Run createTest() through ReflectiveCallable so reflection
            // wrapper exceptions are unwrapped to their true cause.
            test = new ReflectiveCallable() {
                @Override
                protected Object runReflectiveCall() throws Throwable {
                    return createTest();
                }
            }.run();
        } catch (Throwable e) {
            // Fixture creation failed: the failure becomes the test's result.
            return new Fail(e);
        }
        // Wrap the bare invocation inside-out: invoke -> expected exception
        // -> timeout -> rules -> befores -> afters.
        Statement statement = methodInvoker(method, test);
        statement = possiblyExpectingExceptions(method, test, statement);
        statement = withPotentialTimeout(method, test, statement);
        statement = withRules(method, test, statement);
        statement = withBefores(method, test, statement);
        statement = withAfters(method, test, statement);
        return statement;
    }

    //
    // Statement builders
    //

    /**
     * Returns a {@link Statement} that invokes {@code method} on {@code test}
     */
    protected Statement methodInvoker(FrameworkMethod method, Object test) {
        return new InvokeMethod(method, test);
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code expecting} attribute, return normally only if {@code next}
     * throws an exception of the correct type, and throw an exception
     * otherwise.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement possiblyExpectingExceptions(FrameworkMethod method,
            Object test, Statement next) {
        Test annotation = method.getAnnotation(Test.class);
        return expectsException(annotation) ? new ExpectException(next,
                getExpectedException(annotation)) : next;
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code timeout} attribute, throw an exception if {@code next}
     * takes more than the specified number of milliseconds.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withPotentialTimeout(FrameworkMethod method,
            Object test, Statement next) {
        long timeout = getTimeout(method.getAnnotation(Test.class));
        return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @Before}
     * methods on this class and superclasses before running {@code next}; if
     * any throws an Exception, stop execution and pass the exception on.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withBefores(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> befores = getTestClass().getAnnotatedMethods(Before.class);
        return befores.isEmpty() ? statement :
                new RunBefores(statement, befores, target);
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @After}
     * methods on this class and superclasses after running {@code next}; all
     * After methods are always executed: exceptions thrown by previous steps
     * are combined, if necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withAfters(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> afters = getTestClass().getAnnotatedMethods(After.class);
        return afters.isEmpty() ? statement :
                new RunAfters(statement, afters, target);
    }

    /** Wraps {@code statement} with every rule found on {@code target}. */
    private Statement withRules(FrameworkMethod method, Object target,
            Statement statement) {
        Statement result = statement;
        for (MethodRule each : rules(target))
            result = each.apply(result, method, target);
        return result;
    }

    /**
     * @return the MethodRules that can transform the block
     *         that runs each method in the tested class.
     */
    protected List<MethodRule> rules(Object test) {
        List<MethodRule> results = new ArrayList<MethodRule>();
        for (FrameworkField each : ruleFields())
            results.add(createRule(test, each));
        return results;
    }

    /** Fields on the test class annotated with {@code @Rule}. */
    private List<FrameworkField> ruleFields() {
        return getTestClass().getAnnotatedFields(Rule.class);
    }

    /** Reads the rule instance stored in field {@code each} of {@code test}. */
    private MethodRule createRule(Object test, FrameworkField each) {
        try {
            return (MethodRule) each.get(test);
        } catch (IllegalAccessException e) {
            // Should be impossible: validateFields() requires rule fields to
            // be public. Preserve the cause for diagnosis if it happens.
            throw new RuntimeException(
                    "How did getFields return a field we couldn't access?", e);
        }
    }

    /** Pairs {@code notifier} with the {@link Description} of {@code method}. */
    protected EachTestNotifier makeNotifier(FrameworkMethod method,
            RunNotifier notifier) {
        Description description = describeChild(method);
        return new EachTestNotifier(notifier, description);
    }

    /** Exception type demanded by {@code @Test(expected=...)}, or null if none. */
    private Class<? extends Throwable> getExpectedException(Test annotation) {
        if (annotation == null || annotation.expected() == None.class)
            return null;
        else
            return annotation.expected();
    }

    private boolean expectsException(Test annotation) {
        return getExpectedException(annotation) != null;
    }

    /** Timeout in milliseconds from {@code @Test(timeout=...)}, or 0 for none. */
    private long getTimeout(Test annotation) {
        if (annotation == null)
            return 0;
        return annotation.timeout();
    }
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
    /**
     * Creates a BlockJUnit4ClassRunner to run {@code klass}
     *
     * @throws InitializationError
     *             if the test class is malformed.
     */
    public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
        super(klass);
    }

    //
    // Implementation of ParentRunner
    //

    /**
     * Runs the test described by {@code method}, reporting its lifecycle
     * (ignored, started, assumption failure, failure, finished) to
     * {@code notifier}. {@code @Ignore}'d methods are reported but never run.
     */
    @Override
    protected void runChild(FrameworkMethod method, RunNotifier notifier) {
        EachTestNotifier eachNotifier = makeNotifier(method, notifier);
        if (method.getAnnotation(Ignore.class) != null) {
            eachNotifier.fireTestIgnored();
            return;
        }
        eachNotifier.fireTestStarted();
        try {
            methodBlock(method).evaluate();
        } catch (AssumptionViolatedException e) {
            // A violated assumption is not a failure; it is recorded separately.
            eachNotifier.addFailedAssumption(e);
        } catch (Throwable e) {
            eachNotifier.addFailure(e);
        } finally {
            // Always pair fireTestStarted() with fireTestFinished().
            eachNotifier.fireTestFinished();
        }
    }

    /** Describes {@code method} by test class, test name, and annotations. */
    @Override
    protected Description describeChild(FrameworkMethod method) {
        return Description.createTestDescription(getTestClass().getJavaClass(),
                testName(method), method.getAnnotations());
    }

    @Override
    protected List<FrameworkMethod> getChildren() {
        return computeTestMethods();
    }

    //
    // Override in subclasses
    //

    /**
     * Returns the methods that run tests. Default implementation
     * returns all methods annotated with {@code @Test} on this
     * class and superclasses that are not overridden.
     */
    protected List<FrameworkMethod> computeTestMethods() {
        return getTestClass().getAnnotatedMethods(Test.class);
    }

    @Override
    protected void collectInitializationErrors(List<Throwable> errors) {
        super.collectInitializationErrors(errors);
        validateConstructor(errors);
        validateInstanceMethods(errors);
        validateFields(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor,
     * or if the constructor takes parameters. Override if a subclass requires
     * different validation rules.
     */
    protected void validateConstructor(List<Throwable> errors) {
        validateOnlyOneConstructor(errors);
        validateZeroArgConstructor(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor
     * (do not override)
     */
    protected void validateOnlyOneConstructor(List<Throwable> errors) {
        if (!hasOneConstructor()) {
            String gripe = "Test class should have exactly one public constructor";
            errors.add(new Exception(gripe));
        }
    }

    /**
     * Adds to {@code errors} if the test class's single constructor
     * takes parameters
     * (do not override)
     */
    protected void validateZeroArgConstructor(List<Throwable> errors) {
        if (hasOneConstructor()
                && !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
            String gripe = "Test class should have exactly one public zero-argument constructor";
            errors.add(new Exception(gripe));
        }
    }

    /** Returns true if the test class declares exactly one public constructor. */
    private boolean hasOneConstructor() {
        return getTestClass().getJavaClass().getConstructors().length == 1;
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test},
     * {@code @Before}, or {@code @After} that is not a public, void instance
     * method with no arguments.
     */
    protected void validateInstanceMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(After.class, false, errors);
        validatePublicVoidNoArgMethods(Before.class, false, errors);
        validateTestMethods(errors);
        // A test class with no runnable methods is itself an error.
        if (computeTestMethods().isEmpty())
            errors.add(new Exception("No runnable methods"));
    }

    /** Adds to {@code errors} for each {@code @Rule} field that is not valid. */
    protected void validateFields(List<Throwable> errors) {
        for (FrameworkField each : ruleFields())
            validateRuleField(each.getField(), errors);
    }

    /** A rule field must be public and typed as a {@link MethodRule}. */
    private void validateRuleField(Field field, List<Throwable> errors) {
        // Message matches the actual check: the field's type must implement
        // MethodRule (the old "StatementInterceptor" wording was stale).
        if (!MethodRule.class.isAssignableFrom(field.getType()))
            errors.add(new Exception("Field " + field.getName()
                    + " must implement MethodRule"));
        // Must be public so createRule() can read its value reflectively.
        if (!Modifier.isPublic(field.getModifiers()))
            errors.add(new Exception("Field " + field.getName()
                    + " must be public"));
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test} that
     * is not a public, void instance method with no arguments.
     */
    protected void validateTestMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(Test.class, false, errors);
    }

    /**
     * Returns a new fixture for running a test. Default implementation executes
     * the test class's no-argument constructor (validation should have ensured
     * one exists).
     */
    protected Object createTest() throws Exception {
        return getTestClass().getOnlyConstructor().newInstance();
    }

    /**
     * Returns the name that describes {@code method} for {@link Description}s.
     * Default implementation is the method's name
     */
    protected String testName(FrameworkMethod method) {
        return method.getName();
    }

    /**
     * Returns a Statement that, when executed, either returns normally if
     * {@code method} passes, or throws an exception if {@code method} fails.
     *
     * Here is an outline of the default implementation:
     *
     * <ul>
     * <li>Invoke {@code method} on the result of {@code createTest()}, and
     * throw any exceptions thrown by either operation.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * expecting} attribute, return normally only if the previous step threw an
     * exception of the correct type, and throw an exception otherwise.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * timeout} attribute, throw an exception if the previous step takes more
     * than the specified number of milliseconds.
     * <li>ALWAYS run all non-overridden {@code @Before} methods on this class
     * and superclasses before any of the previous steps; if any throws an
     * Exception, stop execution and pass the exception on.
     * <li>ALWAYS run all non-overridden {@code @After} methods on this class
     * and superclasses after any of the previous steps; all After methods are
     * always executed: exceptions thrown by previous steps are combined, if
     * necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     * </ul>
     *
     * This can be overridden in subclasses, either by overriding this method,
     * or the implementations creating each sub-statement.
     */
    protected Statement methodBlock(FrameworkMethod method) {
        Object test;
        try {
            // Run createTest() through ReflectiveCallable so reflection
            // wrapper exceptions are unwrapped to their true cause.
            test = new ReflectiveCallable() {
                @Override
                protected Object runReflectiveCall() throws Throwable {
                    return createTest();
                }
            }.run();
        } catch (Throwable e) {
            // Fixture creation failed: the failure becomes the test's result.
            return new Fail(e);
        }
        // Wrap the bare invocation inside-out: invoke -> expected exception
        // -> timeout -> rules -> befores -> afters.
        Statement statement = methodInvoker(method, test);
        statement = possiblyExpectingExceptions(method, test, statement);
        statement = withPotentialTimeout(method, test, statement);
        statement = withRules(method, test, statement);
        statement = withBefores(method, test, statement);
        statement = withAfters(method, test, statement);
        return statement;
    }

    //
    // Statement builders
    //

    /**
     * Returns a {@link Statement} that invokes {@code method} on {@code test}
     */
    protected Statement methodInvoker(FrameworkMethod method, Object test) {
        return new InvokeMethod(method, test);
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code expecting} attribute, return normally only if {@code next}
     * throws an exception of the correct type, and throw an exception
     * otherwise.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement possiblyExpectingExceptions(FrameworkMethod method,
            Object test, Statement next) {
        Test annotation = method.getAnnotation(Test.class);
        return expectsException(annotation) ? new ExpectException(next,
                getExpectedException(annotation)) : next;
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code timeout} attribute, throw an exception if {@code next}
     * takes more than the specified number of milliseconds.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withPotentialTimeout(FrameworkMethod method,
            Object test, Statement next) {
        long timeout = getTimeout(method.getAnnotation(Test.class));
        return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @Before}
     * methods on this class and superclasses before running {@code next}; if
     * any throws an Exception, stop execution and pass the exception on.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withBefores(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> befores = getTestClass().getAnnotatedMethods(Before.class);
        return befores.isEmpty() ? statement :
                new RunBefores(statement, befores, target);
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @After}
     * methods on this class and superclasses after running {@code next}; all
     * After methods are always executed: exceptions thrown by previous steps
     * are combined, if necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     *
     * @deprecated Will be private soon: use Interceptors instead
     */
    @Deprecated
    protected Statement withAfters(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> afters = getTestClass().getAnnotatedMethods(After.class);
        return afters.isEmpty() ? statement :
                new RunAfters(statement, afters, target);
    }

    /** Wraps {@code statement} with every rule found on {@code target}. */
    private Statement withRules(FrameworkMethod method, Object target,
            Statement statement) {
        Statement result = statement;
        for (MethodRule each : rules(target))
            result = each.apply(result, method, target);
        return result;
    }

    /**
     * @return the MethodRules that can transform the block
     *         that runs each method in the tested class.
     */
    protected List<MethodRule> rules(Object test) {
        List<MethodRule> results = new ArrayList<MethodRule>();
        for (FrameworkField each : ruleFields())
            results.add(createRule(test, each));
        return results;
    }

    /** Fields on the test class annotated with {@code @Rule}. */
    private List<FrameworkField> ruleFields() {
        return getTestClass().getAnnotatedFields(Rule.class);
    }

    /** Reads the rule instance stored in field {@code each} of {@code test}. */
    private MethodRule createRule(Object test, FrameworkField each) {
        try {
            return (MethodRule) each.get(test);
        } catch (IllegalAccessException e) {
            // Should be impossible: validateFields() requires rule fields to
            // be public. Preserve the cause for diagnosis if it happens.
            throw new RuntimeException(
                    "How did getFields return a field we couldn't access?", e);
        }
    }

    /** Pairs {@code notifier} with the {@link Description} of {@code method}. */
    protected EachTestNotifier makeNotifier(FrameworkMethod method,
            RunNotifier notifier) {
        Description description = describeChild(method);
        return new EachTestNotifier(notifier, description);
    }

    /** Exception type demanded by {@code @Test(expected=...)}, or null if none. */
    private Class<? extends Throwable> getExpectedException(Test annotation) {
        if (annotation == null || annotation.expected() == None.class)
            return null;
        else
            return annotation.expected();
    }

    private boolean expectsException(Test annotation) {
        return getExpectedException(annotation) != null;
    }

    /** Timeout in milliseconds from {@code @Test(timeout=...)}, or 0 for none. */
    private long getTimeout(Test annotation) {
        if (annotation == null)
            return 0;
        return annotation.timeout();
    }
}
Right
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
validateRuleField(each.getField(), errors);
}
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS allow {@code @Rule} fields to modify the execution of the above
* steps. A {@code Rule} may prevent all execution of the above steps, or
* add additional behavior before and after, or modify thrown exceptions.
* For more information, see {@link MethodRule}
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Rules instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Rules instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Rules instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @After}
 * methods on this class and superclasses before running {@code next}; all
 * After methods are always executed: exceptions thrown by previous steps
 * are combined, if necessary, with exceptions from After methods into a
 * {@link MultipleFailureException}.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
        Statement statement) {
    List<FrameworkMethod> afterMethods =
            getTestClass().getAnnotatedMethods(After.class);
    if (afterMethods.isEmpty()) {
        return statement;
    }
    return new RunAfters(statement, afterMethods, target);
}
// Wraps the given statement with every @Rule field on the test instance,
// applying rules in the order ruleFields() returns them.
private Statement withRules(FrameworkMethod method, Object target,
        Statement statement) {
    Statement wrapped = statement;
    for (MethodRule rule : rules(target)) {
        wrapped = rule.apply(wrapped, method, target);
    }
    return wrapped;
}
/**
 * @return the MethodRules that can transform the block
 *         that runs each method in the tested class.
 */
protected List<MethodRule> rules(Object test) {
    List<MethodRule> methodRules = new ArrayList<MethodRule>();
    for (FrameworkField ruleField : ruleFields()) {
        methodRules.add(createRule(test, ruleField));
    }
    return methodRules;
}
// Fields annotated with @Rule on the test class and its superclasses.
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
/**
 * Reads the value of a single {@code @Rule} field from {@code test} and
 * casts it to {@link MethodRule}.
 *
 * @throws RuntimeException if the field is unexpectedly inaccessible;
 *         validation should have guaranteed that rule fields are public.
 */
private MethodRule createRule(Object test,
        FrameworkField each) {
    try {
        return (MethodRule) each.get(test);
    } catch (IllegalAccessException e) {
        // Chain the cause so the original failure is not lost.
        throw new RuntimeException(
                "How did getFields return a field we couldn't access?", e);
    }
}
// Builds a per-test notifier bound to this method's Description.
protected EachTestNotifier makeNotifier(FrameworkMethod method,
        RunNotifier notifier) {
    return new EachTestNotifier(notifier, describeChild(method));
}
// Returns the exception type declared via @Test(expected=...), or null when
// the annotation is absent or no expectation was declared (None.class).
private Class<? extends Throwable> getExpectedException(Test annotation) {
    if (annotation == null) {
        return null;
    }
    Class<? extends Throwable> expected = annotation.expected();
    return expected == None.class ? null : expected;
}
// True when @Test(expected=...) names a real exception type.
private boolean expectsException(Test annotation) {
    Class<? extends Throwable> expected = getExpectedException(annotation);
    return expected != null;
}
// Timeout in milliseconds from @Test(timeout=...); 0 when unannotated.
private long getTimeout(Test annotation) {
    return annotation == null ? 0 : annotation.timeout();
}
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
 * Implements the JUnit 4 standard test case class model, as defined by the
 * annotations in the org.junit package. Many users will never notice this
 * class: it is now the default test class runner, but it should have exactly
 * the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
 *
 * BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
 * that are slight changes to the default behavior, however:
 *
 * <ul>
 * <li>It has a much simpler implementation based on {@link Statement}s,
 * allowing new operations to be inserted into the appropriate point in the
 * execution flow.
 *
 * <li>It is published, and extension and reuse are encouraged, whereas {@code
 * JUnit4ClassRunner} was in an internal package, and is now deprecated.
 * </ul>
 */
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
    /**
     * Creates a BlockJUnit4ClassRunner to run {@code klass}
     *
     * @throws InitializationError if the test class is malformed.
     */
    public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
        super(klass);
    }

    //
    // Implementation of ParentRunner
    //

    @Override
    protected void runChild(FrameworkMethod method, RunNotifier notifier) {
        EachTestNotifier eachNotifier= makeNotifier(method, notifier);
        // @Ignore'd methods are reported as ignored and never started.
        if (method.getAnnotation(Ignore.class) != null) {
            eachNotifier.fireTestIgnored();
            return;
        }
        eachNotifier.fireTestStarted();
        try {
            methodBlock(method).evaluate();
        } catch (AssumptionViolatedException e) {
            eachNotifier.addFailedAssumption(e);
        } catch (Throwable e) {
            eachNotifier.addFailure(e);
        } finally {
            // Must fire whether the test passed, failed, or was assumption-skipped.
            eachNotifier.fireTestFinished();
        }
    }

    @Override
    protected Description describeChild(FrameworkMethod method) {
        return Description.createTestDescription(getTestClass().getJavaClass(),
                testName(method), method.getAnnotations());
    }

    @Override
    protected List<FrameworkMethod> getChildren() {
        return computeTestMethods();
    }

    //
    // Override in subclasses
    //

    /**
     * Returns the methods that run tests. Default implementation
     * returns all methods annotated with {@code @Test} on this
     * class and superclasses that are not overridden.
     */
    protected List<FrameworkMethod> computeTestMethods() {
        return getTestClass().getAnnotatedMethods(Test.class);
    }

    @Override
    protected void collectInitializationErrors(List<Throwable> errors) {
        super.collectInitializationErrors(errors);
        validateConstructor(errors);
        validateInstanceMethods(errors);
        validateFields(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor,
     * or if the constructor takes parameters. Override if a subclass requires
     * different validation rules.
     */
    protected void validateConstructor(List<Throwable> errors) {
        validateOnlyOneConstructor(errors);
        validateZeroArgConstructor(errors);
    }

    /**
     * Adds to {@code errors} if the test class has more than one constructor
     * (do not override)
     */
    protected void validateOnlyOneConstructor(List<Throwable> errors) {
        if (!hasOneConstructor()) {
            String gripe= "Test class should have exactly one public constructor";
            errors.add(new Exception(gripe));
        }
    }

    /**
     * Adds to {@code errors} if the test class's single constructor
     * takes parameters
     * (do not override)
     */
    protected void validateZeroArgConstructor(List<Throwable> errors) {
        if (hasOneConstructor()
                && !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
            String gripe= "Test class should have exactly one public zero-argument constructor";
            errors.add(new Exception(gripe));
        }
    }

    // True when the test class declares exactly one public constructor.
    private boolean hasOneConstructor() {
        return getTestClass().getJavaClass().getConstructors().length == 1;
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test},
     * {@code @Before}, or {@code @After} that is not a public, void instance
     * method with no arguments.
     */
    protected void validateInstanceMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(After.class, false, errors);
        validatePublicVoidNoArgMethods(Before.class, false, errors);
        validateTestMethods(errors);
        if (computeTestMethods().size() == 0)
            errors.add(new Exception("No runnable methods"));
    }

    // Validates each @Rule field: must be a public field of a MethodRule type.
    protected void validateFields(List<Throwable> errors) {
        for (FrameworkField each : ruleFields())
            validateRuleField(each.getField(), errors);
    }

    private void validateRuleField(Field field, List<Throwable> errors) {
        if (!MethodRule.class.isAssignableFrom(field.getType()))
            errors.add(new Exception("Field " + field.getName()
                    + " must implement MethodRule"));
        if (!Modifier.isPublic(field.getModifiers()))
            errors.add(new Exception("Field " + field.getName()
                    + " must be public"));
    }

    /**
     * Adds to {@code errors} for each method annotated with {@code @Test} that
     * is not a public, void instance method with no arguments.
     */
    protected void validateTestMethods(List<Throwable> errors) {
        validatePublicVoidNoArgMethods(Test.class, false, errors);
    }

    /**
     * Returns a new fixture for running a test. Default implementation executes
     * the test class's no-argument constructor (validation should have ensured
     * one exists).
     */
    protected Object createTest() throws Exception {
        return getTestClass().getOnlyConstructor().newInstance();
    }

    /**
     * Returns the name that describes {@code method} for {@link Description}s.
     * Default implementation is the method's name
     */
    protected String testName(FrameworkMethod method) {
        return method.getName();
    }

    /**
     * Returns a Statement that, when executed, either returns normally if
     * {@code method} passes, or throws an exception if {@code method} fails.
     *
     * Here is an outline of the default implementation:
     *
     * <ul>
     * <li>Invoke {@code method} on the result of {@code createTest()}, and
     * throw any exceptions thrown by either operation.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * expecting} attribute, return normally only if the previous step threw an
     * exception of the correct type, and throw an exception otherwise.
     * <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
     * timeout} attribute, throw an exception if the previous step takes more
     * than the specified number of milliseconds.
     * <li>ALWAYS allow {@code @Rule} fields to modify the execution of the above
     * steps. A {@code Rule} may prevent all execution of the above steps, or
     * add additional behavior before and after, or modify thrown exceptions.
     * For more information, see {@link MethodRule}
     * <li>ALWAYS run all non-overridden {@code @Before} methods on this class
     * and superclasses before any of the previous steps; if any throws an
     * Exception, stop execution and pass the exception on.
     * <li>ALWAYS run all non-overridden {@code @After} methods on this class
     * and superclasses after any of the previous steps; all After methods are
     * always executed: exceptions thrown by previous steps are combined, if
     * necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     * </ul>
     *
     * This can be overridden in subclasses, either by overriding this method,
     * or the implementations creating each sub-statement.
     */
    protected Statement methodBlock(FrameworkMethod method) {
        Object test;
        try {
            // Reflection failures while building the fixture become test failures.
            test= new ReflectiveCallable() {
                @Override
                protected Object runReflectiveCall() throws Throwable {
                    return createTest();
                }
            }.run();
        } catch (Throwable e) {
            return new Fail(e);
        }
        Statement statement= methodInvoker(method, test);
        statement= possiblyExpectingExceptions(method, test, statement);
        statement= withPotentialTimeout(method, test, statement);
        statement= withRules(method, test, statement);
        statement= withBefores(method, test, statement);
        statement= withAfters(method, test, statement);
        return statement;
    }

    //
    // Statement builders
    //

    /**
     * Returns a {@link Statement} that invokes {@code method} on {@code test}
     */
    protected Statement methodInvoker(FrameworkMethod method, Object test) {
        return new InvokeMethod(method, test);
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code expecting} attribute, return normally only if {@code next}
     * throws an exception of the correct type, and throw an exception
     * otherwise.
     *
     * @deprecated Will be private soon: use Rules instead
     */
    @Deprecated
    protected Statement possiblyExpectingExceptions(FrameworkMethod method,
            Object test, Statement next) {
        Test annotation= method.getAnnotation(Test.class);
        return expectsException(annotation) ? new ExpectException(next,
                getExpectedException(annotation)) : next;
    }

    /**
     * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
     * has the {@code timeout} attribute, throw an exception if {@code next}
     * takes more than the specified number of milliseconds.
     *
     * @deprecated Will be private soon: use Rules instead
     */
    @Deprecated
    protected Statement withPotentialTimeout(FrameworkMethod method,
            Object test, Statement next) {
        long timeout= getTimeout(method.getAnnotation(Test.class));
        return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @Before}
     * methods on this class and superclasses before running {@code next}; if
     * any throws an Exception, stop execution and pass the exception on.
     *
     * @deprecated Will be private soon: use Rules instead
     */
    @Deprecated
    protected Statement withBefores(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
        return befores.isEmpty() ? statement :
                new RunBefores(statement, befores, target);
    }

    /**
     * Returns a {@link Statement}: run all non-overridden {@code @After}
     * methods on this class and superclasses before running {@code next}; all
     * After methods are always executed: exceptions thrown by previous steps
     * are combined, if necessary, with exceptions from After methods into a
     * {@link MultipleFailureException}.
     *
     * @deprecated Will be private soon: use Rules instead
     */
    @Deprecated
    protected Statement withAfters(FrameworkMethod method, Object target,
            Statement statement) {
        List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
        return afters.isEmpty() ? statement :
                new RunAfters(statement, afters, target);
    }

    // Wraps the statement with every @Rule on the test instance, in order.
    private Statement withRules(FrameworkMethod method, Object target,
            Statement statement) {
        Statement result= statement;
        for (MethodRule each : rules(target))
            result= each.apply(result, method, target);
        return result;
    }

    /**
     * @return the MethodRules that can transform the block
     *         that runs each method in the tested class.
     */
    protected List<MethodRule> rules(Object test) {
        List<MethodRule> results= new ArrayList<MethodRule>();
        for (FrameworkField each : ruleFields())
            results.add(createRule(test, each));
        return results;
    }

    // Fields annotated with @Rule on the test class and its superclasses.
    private List<FrameworkField> ruleFields() {
        return getTestClass().getAnnotatedFields(Rule.class);
    }

    // Reads one @Rule field's value; validation guarantees it is accessible.
    private MethodRule createRule(Object test,
            FrameworkField each) {
        try {
            return (MethodRule) each.get(test);
        } catch (IllegalAccessException e) {
            // Chain the cause so the original failure is not lost.
            throw new RuntimeException(
                    "How did getFields return a field we couldn't access?", e);
        }
    }

    // Builds a per-test notifier bound to this method's Description.
    protected EachTestNotifier makeNotifier(FrameworkMethod method,
            RunNotifier notifier) {
        Description description= describeChild(method);
        return new EachTestNotifier(notifier, description);
    }

    // Exception type from @Test(expected=...), or null when none declared.
    private Class<? extends Throwable> getExpectedException(Test annotation) {
        if (annotation == null || annotation.expected() == None.class)
            return null;
        else
            return annotation.expected();
    }

    private boolean expectsException(Test annotation) {
        return getExpectedException(annotation) != null;
    }

    // Timeout in milliseconds from @Test(timeout=...); 0 when unannotated.
    private long getTimeout(Test annotation) {
        if (annotation == null)
            return 0;
        return annotation.timeout();
    }
}
// MergeMethods (merge-scenario section marker; not Java source)
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
private Statement withRules(FrameworkMethod method, Object target,
Statement statement) {
Statement result= statement;
for (MethodRule each : rules(target))
result= each.apply(result, method, target);
return result;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
private MethodRule createRule(Object test,
FrameworkField each) {
try {
return (MethodRule) each.get(test);
} catch (IllegalAccessException e) {
throw new RuntimeException(
"How did getFields return a field we couldn't access?");
}
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}.
* Delegates to {@link ParentRunner}, which builds the TestClass model.
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
    EachTestNotifier testNotifier = makeNotifier(method, notifier);
    // @Ignore'd methods are reported as ignored and never started.
    boolean isIgnored = method.getAnnotation(Ignore.class) != null;
    if (isIgnored) {
        testNotifier.fireTestIgnored();
        return;
    }
    testNotifier.fireTestStarted();
    try {
        methodBlock(method).evaluate();
    } catch (AssumptionViolatedException e) {
        testNotifier.addFailedAssumption(e);
    } catch (Throwable e) {
        testNotifier.addFailure(e);
    } finally {
        // Must fire whether the test passed, failed, or was assumption-skipped.
        testNotifier.fireTestFinished();
    }
}
@Override
protected Description describeChild(FrameworkMethod method) {
    Class<?> javaClass = getTestClass().getJavaClass();
    String name = testName(method);
    return Description.createTestDescription(javaClass, name,
            method.getAnnotations());
}
// The children of this runner are the test methods themselves.
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
// Collects all class-level validation problems: ParentRunner's checks plus
// constructor, instance-method, and @Rule-field validation.
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
 * Adds to {@code errors} if the test class has more than one constructor
 * (do not override)
 */
protected void validateOnlyOneConstructor(List<Throwable> errors) {
    if (hasOneConstructor()) {
        return;
    }
    errors.add(new Exception(
            "Test class should have exactly one public constructor"));
}
/**
 * Adds to {@code errors} if the test class's single constructor
 * takes parameters
 * (do not override)
 */
protected void validateZeroArgConstructor(List<Throwable> errors) {
    if (!hasOneConstructor()) {
        // Wrong constructor count is reported by validateOnlyOneConstructor.
        return;
    }
    int parameterCount =
            getTestClass().getOnlyConstructor().getParameterTypes().length;
    if (parameterCount != 0) {
        errors.add(new Exception(
                "Test class should have exactly one public zero-argument constructor"));
    }
}
// True when the test class declares exactly one public constructor.
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
 * Adds to {@code errors} for each method annotated with {@code @Test},
 * {@code @Before}, or {@code @After} that is not a public, void instance
 * method with no arguments.
 */
protected void validateInstanceMethods(List<Throwable> errors) {
    validatePublicVoidNoArgMethods(After.class, false, errors);
    validatePublicVoidNoArgMethods(Before.class, false, errors);
    validateTestMethods(errors);
    // A test class with nothing to run is itself an error.
    if (computeTestMethods().isEmpty()) {
        errors.add(new Exception("No runnable methods"));
    }
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
/**
 * Adds to {@code errors} if {@code field} is not a public field whose type
 * implements {@link MethodRule}.
 */
private void validateRuleField(Field field, List<Throwable> errors) {
    if (!MethodRule.class.isAssignableFrom(field.getType()))
        // Bug fix: the message named "StatementInterceptor", the obsolete name
        // of the MethodRule interface actually checked here (the sibling copies
        // of this method in this file already say "MethodRule").
        errors.add(new Exception("Field " + field.getName()
                + " must implement MethodRule"));
    if (!Modifier.isPublic(field.getModifiers()))
        errors.add(new Exception("Field " + field.getName()
                + " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
// NOTE(review): the boolean presumably flags static methods — defined in
// ParentRunner, not visible here; confirm before relying on it.
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*
* @return a fresh instance of the class under test
* @throws Exception whatever the constructor throws
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*
* @return the display name for {@code method}
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
// Construct the fixture reflectively; any constructor failure becomes a
// failing Statement rather than aborting the runner.
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
// Wrap inside-out; order matters: invoke -> expected-exception -> timeout
// -> rules -> befores -> afters (so befores/afters surround everything).
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*
* @return the innermost statement of the execution chain
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code expecting} attribute, return normally only if {@code next}
 * throws an exception of the correct type, and throw an exception
 * otherwise.
 *
 * @deprecated Will be private soon: use Interceptors instead
 */
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
        Object test, Statement next) {
    Test annotation= method.getAnnotation(Test.class);
    if (!expectsException(annotation))
        return next;
    return new ExpectException(next, getExpectedException(annotation));
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code timeout} attribute, throw an exception if {@code next}
 * takes more than the specified number of milliseconds.
 *
 * @deprecated Will be private soon: use Interceptors instead
 */
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
        Object test, Statement next) {
    long timeout= getTimeout(method.getAnnotation(Test.class));
    if (timeout <= 0)
        return next;
    return new FailOnTimeout(next, timeout);
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @Before}
 * methods on this class and superclasses before running {@code next}; if
 * any throws an Exception, stop execution and pass the exception on.
 *
 * @deprecated Will be private soon: use Interceptors instead
 */
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
        Statement statement) {
    List<FrameworkMethod> befores= getTestClass()
            .getAnnotatedMethods(Before.class);
    if (befores.isEmpty())
        return statement;
    return new RunBefores(statement, befores, target);
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @After}
 * methods on this class and superclasses before running {@code next}; all
 * After methods are always executed: exceptions thrown by previous steps
 * are combined, if necessary, with exceptions from After methods into a
 * {@link MultipleFailureException}.
 *
 * @deprecated Will be private soon: use Interceptors instead
 */
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
        Statement statement) {
    List<FrameworkMethod> afters= getTestClass()
            .getAnnotatedMethods(After.class);
    if (afters.isEmpty())
        return statement;
    return new RunAfters(statement, afters, target);
}
// Wraps {@code statement} with every MethodRule supplied by the test's
// @Rule fields, innermost-first in the order rules() returns them.
private Statement withRules(FrameworkMethod method, Object target,
        Statement statement) {
    Statement wrapped= statement;
    for (MethodRule rule : rules(target))
        wrapped= rule.apply(wrapped, method, target);
    return wrapped;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
// One rule instance per @Rule field, read from this test's fixture.
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
// All fields of the class under test annotated with @Rule.
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
/**
 * Reads the value of the given {@code @Rule} field from {@code test}.
 * Validation has already required the field to be public, so the access
 * failure below should be unreachable.
 */
private MethodRule createRule(Object test,
        FrameworkField each) {
    try {
        return (MethodRule) each.get(test);
    } catch (IllegalAccessException e) {
        // Preserve the cause instead of swallowing it, so an unexpected
        // access failure is still diagnosable from the stack trace.
        throw new RuntimeException(
                "How did getFields return a field we couldn't access?", e);
    }
}
// Pairs the RunNotifier with this method's Description so per-test
// start/finish/failure events can be fired from runChild.
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
// The expected() exception type from @Test, or null when the annotation is
// missing or uses the None sentinel.
private Class<? extends Throwable> getExpectedException(Test annotation) {
    if (annotation != null && annotation.expected() != None.class)
        return annotation.expected();
    return null;
}
// A test expects an exception exactly when its @Test annotation names one.
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
// Timeout in milliseconds from the @Test annotation; 0 when absent.
private long getTimeout(Test annotation) {
    return annotation == null ? 0 : annotation.timeout();
}
}
KeepBothMethods
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
    validatePublicVoidNoArgMethods(After.class, false, errors);
    validatePublicVoidNoArgMethods(Before.class, false, errors);
    validateTestMethods(errors);
    // isEmpty() states the intent more directly than size() == 0.
    if (computeTestMethods().isEmpty())
        errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
private Statement withRules(FrameworkMethod method, Object target,
Statement statement) {
Statement result= statement;
for (MethodRule each : rules(target))
result= each.apply(result, method, target);
return result;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
/**
 * Reads the value of the given {@code @Rule} field from {@code test}.
 * Validation has already required the field to be public, so the access
 * failure below should be unreachable.
 */
private MethodRule createRule(Object test,
        FrameworkField each) {
    try {
        return (MethodRule) each.get(test);
    } catch (IllegalAccessException e) {
        // Preserve the cause instead of swallowing it, so an unexpected
        // access failure is still diagnosable from the stack trace.
        throw new RuntimeException(
                "How did getFields return a field we couldn't access?", e);
    }
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
    validatePublicVoidNoArgMethods(After.class, false, errors);
    validatePublicVoidNoArgMethods(Before.class, false, errors);
    validateTestMethods(errors);
    // isEmpty() states the intent more directly than size() == 0.
    if (computeTestMethods().isEmpty())
        errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
// Wraps statement with every MethodRule found on the target's @Rule fields;
// rules applied later end up outermost.
private Statement withRules(FrameworkMethod method, Object target,
        Statement statement) {
    Statement wrapped= statement;
    for (MethodRule rule : rules(target)) {
        wrapped= rule.apply(wrapped, method, target);
    }
    return wrapped;
}
/**
 * @return the MethodRules that can transform the block
 *         that runs each method in the tested class.
 */
protected List<MethodRule> rules(Object test) {
    List<MethodRule> collected= new ArrayList<MethodRule>();
    for (FrameworkField ruleField : ruleFields()) {
        collected.add(createRule(test, ruleField));
    }
    return collected;
}
// All fields on the test class (and superclasses) annotated with @Rule;
// validateFields checks each one is a public MethodRule.
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
/**
 * Reads the {@link MethodRule} instance held by the given {@code @Rule}
 * field of {@code test}.
 *
 * @throws RuntimeException if the field is inaccessible — theoretically
 *         impossible, since rule fields are validated to be public.
 */
private MethodRule createRule(Object test,
        FrameworkField each) {
    try {
        return (MethodRule) each.get(test);
    } catch (IllegalAccessException e) {
        // Preserve the original exception as the cause instead of dropping it.
        throw new RuntimeException(
                "How did getFields return a field we couldn't access?", e);
    }
}
// Pairs the per-test Description with the RunNotifier so started/finished/
// failed events for this one test can be fired conveniently.
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
// Expected exception type from @Test(expected=...), or null when the
// annotation is absent or left at its None default.
private Class<? extends Throwable> getExpectedException(Test annotation) {
    if (annotation != null && annotation.expected() != None.class) {
        return annotation.expected();
    }
    return null;
}
// True when the method's @Test annotation declares an expected exception.
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
// Timeout in milliseconds from @Test(timeout=...); 0 when no annotation is
// present (the annotation's own default is also 0).
private long getTimeout(Test annotation) {
    return annotation == null ? 0 : annotation.timeout();
}
}
Safe
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
<<<<<<< MINE
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
=======
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
>>>>>>> YOURS
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
private Statement withRules(FrameworkMethod method, Object target,
Statement statement) {
Statement result= statement;
for (MethodRule each : rules(target))
result= each.apply(result, method, target);
return result;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
private MethodRule createRule(Object test,
FrameworkField each) {
try {
return (MethodRule) each.get(test);
} catch (IllegalAccessException e) {
throw new RuntimeException(
"How did getFields return a field we couldn't access?");
}
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
import org.junit.experimental.interceptor.Rule;
import org.junit.experimental.interceptor.MethodRule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
<<<<<<< MINE
private void validateRuleField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
=======
private void validateInterceptorField(Field field, List<Throwable> errors) {
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement StatementInterceptor"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
>>>>>>> YOURS
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code expecting} attribute, return normally only if {@code next}
* throws an exception of the correct type, and throw an exception
* otherwise.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
Object test, Statement next) {
Test annotation= method.getAnnotation(Test.class);
return expectsException(annotation) ? new ExpectException(next,
getExpectedException(annotation)) : next;
}
/**
* Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
* has the {@code timeout} attribute, throw an exception if {@code next}
* takes more than the specified number of milliseconds.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
Object test, Statement next) {
long timeout= getTimeout(method.getAnnotation(Test.class));
return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @Before}
* methods on this class and superclasses before running {@code next}; if
* any throws an Exception, stop execution and pass the exception on.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
return befores.isEmpty() ? statement :
new RunBefores(statement, befores, target);
}
/**
* Returns a {@link Statement}: run all non-overridden {@code @After}
* methods on this class and superclasses before running {@code next}; all
* After methods are always executed: exceptions thrown by previous steps
* are combined, if necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
*
* @deprecated Will be private soon: use Interceptors instead
*/
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
Statement statement) {
List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
return afters.isEmpty() ? statement :
new RunAfters(statement, afters, target);
}
private Statement withRules(FrameworkMethod method, Object target,
Statement statement) {
Statement result= statement;
for (MethodRule each : rules(target))
result= each.apply(result, method, target);
return result;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
private MethodRule createRule(Object test,
FrameworkField each) {
try {
return (MethodRule) each.get(test);
} catch (IllegalAccessException e) {
throw new RuntimeException(
"How did getFields return a field we couldn't access?");
}
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
Unstructured
package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
// merge conflict resolved: dropped the org.junit.experimental.interceptor
// imports of Rule/MethodRule — they duplicate org.junit.Rule and the
// org.junit.rules.MethodRule import below, which supersede them.
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
// True when the test class declares exactly one public constructor.
private boolean hasOneConstructor() {
	int publicConstructorCount= getTestClass().getJavaClass().getConstructors().length;
	return publicConstructorCount == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
<<<<<<< MINE
private void validateInterceptorField(Field field, List<Throwable> errors) {
=======
private void validateRuleField(Field field, List<Throwable> errors) {
>>>>>>> YOURS
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS allow {@code @Rule} fields to modify the execution of the above
* steps. A {@code Rule} may prevent all execution of the above steps, or
* add additional behavior before and after, or modify thrown exceptions.
* For more information, see {@link MethodRule}
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
	Object test;
	try {
		// Instantiate the fixture reflectively; ReflectiveCallable unwraps
		// InvocationTargetException so the constructor's own exception surfaces.
		test= new ReflectiveCallable() {
			@Override
			protected Object runReflectiveCall() throws Throwable {
				return createTest();
			}
		}.run();
	} catch (Throwable e) {
		// Fixture construction failed: return a Statement that rethrows it.
		return new Fail(e);
	}
	// Wrap innermost-out; order matters: invoke -> expected-exception ->
	// timeout -> @Rule fields -> @Before methods -> @After methods.
	Statement statement= methodInvoker(method, test);
	statement= possiblyExpectingExceptions(method, test, statement);
	statement= withPotentialTimeout(method, test, statement);
	statement= withRules(method, test, statement);
	statement= withBefores(method, test, statement);
	statement= withAfters(method, test, statement);
	return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code expecting} attribute, return normally only if {@code next}
 * throws an exception of the correct type, and throw an exception
 * otherwise.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
		Object test, Statement next) {
	Test annotation= method.getAnnotation(Test.class);
	// Only wrap when @Test(expected=...) was actually specified.
	return expectsException(annotation) ? new ExpectException(next,
			getExpectedException(annotation)) : next;
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code timeout} attribute, throw an exception if {@code next}
 * takes more than the specified number of milliseconds.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
		Object test, Statement next) {
	long timeout= getTimeout(method.getAnnotation(Test.class));
	// timeout == 0 (the annotation default) means "no timeout".
	return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @Before}
 * methods on this class and superclasses before running {@code next}; if
 * any throws an Exception, stop execution and pass the exception on.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
		Statement statement) {
	List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
	return befores.isEmpty() ? statement :
		new RunBefores(statement, befores, target);
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @After}
 * methods on this class and superclasses before running {@code next}; all
 * After methods are always executed: exceptions thrown by previous steps
 * are combined, if necessary, with exceptions from After methods into a
 * {@link MultipleFailureException}.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
		Statement statement) {
	List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
	return afters.isEmpty() ? statement :
		new RunAfters(statement, afters, target);
}
// Wraps the statement with every @Rule on the target, applied in field order.
private Statement withRules(FrameworkMethod method, Object target,
		Statement statement) {
	Statement wrapped= statement;
	for (MethodRule rule : rules(target)) {
		wrapped= rule.apply(wrapped, method, target);
	}
	return wrapped;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
private MethodRule createRule(Object test,
FrameworkField each) {
try {
return (MethodRule) each.get(test);
} catch (IllegalAccessException e) {
throw new RuntimeException(
"How did getFields return a field we couldn't access?");
}
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
// Returns the exception class named by @Test(expected=...), or null when the
// annotation is absent or left at its None default.
private Class<? extends Throwable> getExpectedException(Test annotation) {
	if (annotation != null && annotation.expected() != None.class)
		return annotation.expected();
	return null;
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
// Timeout in milliseconds from @Test(timeout=...); 0 when no annotation.
private long getTimeout(Test annotation) {
	return (annotation == null) ? 0 : annotation.timeout();
}
}package org.junit.runners;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Test.None;
// Merge resolved: the experimental org.junit.experimental.interceptor.{Rule,MethodRule}
// imports were dropped — they would collide with org.junit.Rule and
// org.junit.rules.MethodRule already imported in this file.
import org.junit.internal.AssumptionViolatedException;
import org.junit.internal.runners.model.EachTestNotifier;
import org.junit.internal.runners.model.MultipleFailureException;
import org.junit.internal.runners.model.ReflectiveCallable;
import org.junit.internal.runners.statements.ExpectException;
import org.junit.internal.runners.statements.Fail;
import org.junit.internal.runners.statements.FailOnTimeout;
import org.junit.internal.runners.statements.InvokeMethod;
import org.junit.internal.runners.statements.RunAfters;
import org.junit.internal.runners.statements.RunBefores;
import org.junit.rules.MethodRule;
import org.junit.runner.Description;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.Statement;
/**
* Implements the JUnit 4 standard test case class model, as defined by the
* annotations in the org.junit package. Many users will never notice this
* class: it is now the default test class runner, but it should have exactly
* the same behavior as the old test class runner ({@code JUnit4ClassRunner}).
*
* BlockJUnit4ClassRunner has advantages for writers of custom JUnit runners
* that are slight changes to the default behavior, however:
*
* <ul>
* <li>It has a much simpler implementation based on {@link Statement}s,
* allowing new operations to be inserted into the appropriate point in the
* execution flow.
*
* <li>It is published, and extension and reuse are encouraged, whereas {@code
* JUnit4ClassRunner} was in an internal package, and is now deprecated.
* </ul>
*/
public class BlockJUnit4ClassRunner extends ParentRunner<FrameworkMethod> {
/**
* Creates a BlockJUnit4ClassRunner to run {@code klass}
*
* @throws InitializationError
* if the test class is malformed.
*/
public BlockJUnit4ClassRunner(Class<?> klass) throws InitializationError {
super(klass);
}
//
// Implementation of ParentRunner
//
@Override
protected void runChild(FrameworkMethod method, RunNotifier notifier) {
EachTestNotifier eachNotifier= makeNotifier(method, notifier);
if (method.getAnnotation(Ignore.class) != null) {
eachNotifier.fireTestIgnored();
return;
}
eachNotifier.fireTestStarted();
try {
methodBlock(method).evaluate();
} catch (AssumptionViolatedException e) {
eachNotifier.addFailedAssumption(e);
} catch (Throwable e) {
eachNotifier.addFailure(e);
} finally {
eachNotifier.fireTestFinished();
}
}
@Override
protected Description describeChild(FrameworkMethod method) {
return Description.createTestDescription(getTestClass().getJavaClass(),
testName(method), method.getAnnotations());
}
@Override
protected List<FrameworkMethod> getChildren() {
return computeTestMethods();
}
//
// Override in subclasses
//
/**
* Returns the methods that run tests. Default implementation
* returns all methods annotated with {@code @Test} on this
* class and superclasses that are not overridden.
*/
protected List<FrameworkMethod> computeTestMethods() {
return getTestClass().getAnnotatedMethods(Test.class);
}
@Override
protected void collectInitializationErrors(List<Throwable> errors) {
super.collectInitializationErrors(errors);
validateConstructor(errors);
validateInstanceMethods(errors);
validateFields(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor,
* or if the constructor takes parameters. Override if a subclass requires
* different validation rules.
*/
protected void validateConstructor(List<Throwable> errors) {
validateOnlyOneConstructor(errors);
validateZeroArgConstructor(errors);
}
/**
* Adds to {@code errors} if the test class has more than one constructor
* (do not override)
*/
protected void validateOnlyOneConstructor(List<Throwable> errors) {
if (!hasOneConstructor()) {
String gripe= "Test class should have exactly one public constructor";
errors.add(new Exception(gripe));
}
}
/**
* Adds to {@code errors} if the test class's single constructor
* takes parameters
* (do not override)
*/
protected void validateZeroArgConstructor(List<Throwable> errors) {
if (hasOneConstructor()
&& !(getTestClass().getOnlyConstructor().getParameterTypes().length == 0)) {
String gripe= "Test class should have exactly one public zero-argument constructor";
errors.add(new Exception(gripe));
}
}
private boolean hasOneConstructor() {
return getTestClass().getJavaClass().getConstructors().length == 1;
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test},
* {@code @Before}, or {@code @After} that is not a public, void instance
* method with no arguments.
*/
protected void validateInstanceMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(After.class, false, errors);
validatePublicVoidNoArgMethods(Before.class, false, errors);
validateTestMethods(errors);
if (computeTestMethods().size() == 0)
errors.add(new Exception("No runnable methods"));
}
protected void validateFields(List<Throwable> errors) {
for (FrameworkField each : ruleFields())
<<<<<<< MINE
validateInterceptorField(each.getField(), errors);
=======
validateRuleField(each.getField(), errors);
>>>>>>> YOURS
}
<<<<<<< MINE
private void validateInterceptorField(Field field, List<Throwable> errors) {
=======
private void validateRuleField(Field field, List<Throwable> errors) {
>>>>>>> YOURS
if (!MethodRule.class.isAssignableFrom(field.getType()))
errors.add(new Exception("Field " + field.getName()
+ " must implement MethodRule"));
if (!Modifier.isPublic(field.getModifiers()))
errors.add(new Exception("Field " + field.getName()
+ " must be public"));
}
/**
* Adds to {@code errors} for each method annotated with {@code @Test}that
* is not a public, void instance method with no arguments.
*/
protected void validateTestMethods(List<Throwable> errors) {
validatePublicVoidNoArgMethods(Test.class, false, errors);
}
/**
* Returns a new fixture for running a test. Default implementation executes
* the test class's no-argument constructor (validation should have ensured
* one exists).
*/
protected Object createTest() throws Exception {
return getTestClass().getOnlyConstructor().newInstance();
}
/**
* Returns the name that describes {@code method} for {@link Description}s.
* Default implementation is the method's name
*/
protected String testName(FrameworkMethod method) {
return method.getName();
}
/**
* Returns a Statement that, when executed, either returns normally if
* {@code method} passes, or throws an exception if {@code method} fails.
*
* Here is an outline of the default implementation:
*
* <ul>
* <li>Invoke {@code method} on the result of {@code createTest()}, and
* throw any exceptions thrown by either operation.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* expecting} attribute, return normally only if the previous step threw an
* exception of the correct type, and throw an exception otherwise.
* <li>HOWEVER, if {@code method}'s {@code @Test} annotation has the {@code
* timeout} attribute, throw an exception if the previous step takes more
* than the specified number of milliseconds.
* <li>ALWAYS allow {@code @Rule} fields to modify the execution of the above
* steps. A {@code Rule} may prevent all execution of the above steps, or
* add additional behavior before and after, or modify thrown exceptions.
* For more information, see {@link MethodRule}
* <li>ALWAYS run all non-overridden {@code @Before} methods on this class
* and superclasses before any of the previous steps; if any throws an
* Exception, stop execution and pass the exception on.
* <li>ALWAYS run all non-overridden {@code @After} methods on this class
* and superclasses after any of the previous steps; all After methods are
* always executed: exceptions thrown by previous steps are combined, if
* necessary, with exceptions from After methods into a
* {@link MultipleFailureException}.
* </ul>
*
* This can be overridden in subclasses, either by overriding this method,
* or the implementations creating each sub-statement.
*/
protected Statement methodBlock(FrameworkMethod method) {
Object test;
try {
test= new ReflectiveCallable() {
@Override
protected Object runReflectiveCall() throws Throwable {
return createTest();
}
}.run();
} catch (Throwable e) {
return new Fail(e);
}
Statement statement= methodInvoker(method, test);
statement= possiblyExpectingExceptions(method, test, statement);
statement= withPotentialTimeout(method, test, statement);
statement= withRules(method, test, statement);
statement= withBefores(method, test, statement);
statement= withAfters(method, test, statement);
return statement;
}
//
// Statement builders
//
/**
* Returns a {@link Statement} that invokes {@code method} on {@code test}
*/
protected Statement methodInvoker(FrameworkMethod method, Object test) {
return new InvokeMethod(method, test);
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code expecting} attribute, return normally only if {@code next}
 * throws an exception of the correct type, and throw an exception
 * otherwise.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement possiblyExpectingExceptions(FrameworkMethod method,
		Object test, Statement next) {
	Test annotation= method.getAnnotation(Test.class);
	// Only wrap when @Test(expected=...) was actually specified.
	return expectsException(annotation) ? new ExpectException(next,
			getExpectedException(annotation)) : next;
}
/**
 * Returns a {@link Statement}: if {@code method}'s {@code @Test} annotation
 * has the {@code timeout} attribute, throw an exception if {@code next}
 * takes more than the specified number of milliseconds.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withPotentialTimeout(FrameworkMethod method,
		Object test, Statement next) {
	long timeout= getTimeout(method.getAnnotation(Test.class));
	// timeout == 0 (the annotation default) means "no timeout".
	return timeout > 0 ? new FailOnTimeout(next, timeout) : next;
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @Before}
 * methods on this class and superclasses before running {@code next}; if
 * any throws an Exception, stop execution and pass the exception on.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withBefores(FrameworkMethod method, Object target,
		Statement statement) {
	List<FrameworkMethod> befores= getTestClass().getAnnotatedMethods(Before.class);
	return befores.isEmpty() ? statement :
		new RunBefores(statement, befores, target);
}
/**
 * Returns a {@link Statement}: run all non-overridden {@code @After}
 * methods on this class and superclasses before running {@code next}; all
 * After methods are always executed: exceptions thrown by previous steps
 * are combined, if necessary, with exceptions from After methods into a
 * {@link MultipleFailureException}.
 *
 * @deprecated Will be private soon: use Rules instead
 */
@Deprecated
protected Statement withAfters(FrameworkMethod method, Object target,
		Statement statement) {
	List<FrameworkMethod> afters= getTestClass().getAnnotatedMethods(After.class);
	return afters.isEmpty() ? statement :
		new RunAfters(statement, afters, target);
}
private Statement withRules(FrameworkMethod method, Object target,
Statement statement) {
Statement result= statement;
for (MethodRule each : rules(target))
result= each.apply(result, method, target);
return result;
}
/**
* @return the MethodRules that can transform the block
* that runs each method in the tested class.
*/
protected List<MethodRule> rules(Object test) {
List<MethodRule> results= new ArrayList<MethodRule>();
for (FrameworkField each : ruleFields())
results.add(createRule(test, each));
return results;
}
private List<FrameworkField> ruleFields() {
return getTestClass().getAnnotatedFields(Rule.class);
}
private MethodRule createRule(Object test,
FrameworkField each) {
try {
return (MethodRule) each.get(test);
} catch (IllegalAccessException e) {
throw new RuntimeException(
"How did getFields return a field we couldn't access?");
}
}
protected EachTestNotifier makeNotifier(FrameworkMethod method,
RunNotifier notifier) {
Description description= describeChild(method);
return new EachTestNotifier(notifier, description);
}
private Class<? extends Throwable> getExpectedException(Test annotation) {
if (annotation == null || annotation.expected() == None.class)
return null;
else
return annotation.expected();
}
private boolean expectsException(Test annotation) {
return getExpectedException(annotation) != null;
}
private long getTimeout(Test annotation) {
if (annotation == null)
return 0;
return annotation.timeout();
}
}
Diff Result
No diff
Case 26 - java_kotlin.rev_496d3_6fd6a..JetTypeInferrer.java
Base
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElement;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lang.types.inference.ConstraintSystem;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.STATEMENT;
/**
* @author abreslav
*/
public class JetTypeInferrer {
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
// Creates an inferrer bound to a control-flow information provider; per-trace
// state lives in the Services objects handed out by getServices(trace).
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
private final BindingTrace trace;
private final ClassDescriptorResolver classDescriptorResolver;
private final TypeResolver typeResolver;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final AnnotationResolver annotationResolver;
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
private Services(BindingTrace trace) {
this.trace = trace;
this.annotationResolver = new AnnotationResolver(semanticServices, trace);
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
}
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
// Never-null variant of getType(): substitutes an error type when inference fails.
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, preferBlock, expectedType);
    return (inferred != null) ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the expression's type against expectedType, or returns null on
// failure; namespaces are forbidden as expression results here (FORBIDDEN).
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock) {
return typeInferrerVisitorWithNamespaces.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// Resolves a function named `name` on receiverType by positional argument
// types only; returns null when overload resolution does not succeed.
// reportUnresolved controls whether the tracing wrapper records an error.
@Nullable
private FunctionDescriptor lookupFunction(
@NotNull JetScope scope,
@NotNull JetReferenceExpression reference,
@NotNull String name,
@NotNull JetType receiverType,
@NotNull List<JetType> argumentTypes,
boolean reportUnresolved) {
OverloadDomain overloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
// No generics. Guaranteed
overloadDomain = wrapForTracing(overloadDomain, reference, null, reportUnresolved);
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), argumentTypes);
return resolutionResult.isSuccess() ? resolutionResult.getFunctionDescriptor() : null;
}
@NotNull
private OverloadResolutionResult resolveNoParametersFunction(@NotNull JetType receiverType, @NotNull JetScope scope, @NotNull String name) {
OverloadDomain overloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
// No generics. Guaranteed
return overloadDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), Collections.<JetType>emptyList());
}
/**
 * Builds an OverloadDomain for the callee of a call expression, dispatching on the
 * syntactic shape of the callee. Only simple-name callees are supported here:
 * hash-qualified and predicate callees throw UnsupportedOperationException, qualified
 * callees report an error, and other shapes either throw or report as well.
 *
 * The resulting domain is wrapped (see wrapForTracing) so resolution results are
 * recorded in the trace and failures are reported against the reference expression.
 */
private OverloadDomain getOverloadDomain(
@Nullable final JetType receiverType,
@NotNull final JetScope scope,
@NotNull JetExpression calleeExpression,
@Nullable PsiElement argumentList
) {
// One-element arrays act as mutable cells written from the anonymous visitor below.
final OverloadDomain[] result = new OverloadDomain[1];
final JetSimpleNameExpression[] reference = new JetSimpleNameExpression[1];
calleeExpression.accept(new JetVisitorVoid() {
@Override
public void visitHashQualifiedExpression(JetHashQualifiedExpression expression) {
// a#b -- create a domain for all overloads of b in a
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitPredicateExpression(JetPredicateExpression expression) {
// overload lookup for checking, but the type is receiver's type + nullable
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
trace.getErrorHandler().genericError(expression.getNode(), "Unsupported [JetTypeInferrer]");
// . or ?.
// JetType receiverType = getType(scope, expression.getReceiverExpression(), false);
// checkNullSafety(receiverType, expression.getOperationTokenNode());
//
// JetExpression selectorExpression = expression.getSelectorExpression();
// if (selectorExpression instanceof JetSimpleNameExpression) {
// JetSimpleNameExpression referenceExpression = (JetSimpleNameExpression) selectorExpression;
// String referencedName = referenceExpression.getReferencedName();
//
// if (receiverType != null && referencedName != null) {
// // No generics. Guaranteed
// result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
// reference[0] = referenceExpression;
// }
// } else {
// throw new UnsupportedOperationException(); // TODO
// }
}
@Override
public void visitSimpleNameExpression(JetSimpleNameExpression expression) {
// a -- create a hierarchical lookup domain for this.a
String referencedName = expression.getReferencedName();
if (referencedName != null) {
// No generics. Guaranteed
result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
reference[0] = expression;
}
}
@Override
public void visitExpression(JetExpression expression) {
// <e> create a dummy domain for the type of e
throw new UnsupportedOperationException(expression.getText()); // TODO
}
@Override
public void visitJetElement(JetElement element) {
trace.getErrorHandler().genericError(element.getNode(), "Unsupported in call element"); // TODO : Message
}
});
// NOTE(review): result[0]/reference[0] stay null for unsupported callee shapes, yet
// wrapForTracing declares its parameters @NotNull — presumably unreachable in practice
// because unsupported shapes throw or report above; confirm.
return wrapForTracing(result[0], reference[0], argumentList, true);
}
/**
 * Verifies that the call operator matches the nullability of the receiver:
 * '.' on a possibly-null receiver (called with a null-rejecting callee) is an error,
 * while '?.' on a receiver that cannot be null is either an error (namespaces) or
 * a warning (unnecessary safe call).
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    // Nothing to check unless both the receiver type and the callee are known.
    if (receiverType == null || callee == null) return;
    boolean receiverIsNamespace = receiverType instanceof NamespaceType;
    JetType calleeReceiverType = callee.getReceiverType();
    // Namespace "types" are never null, so they count as non-nullable here.
    boolean receiverMayBeNull = !receiverIsNamespace && receiverType.isNullable();
    boolean calleeRejectsNull = calleeReceiverType == null || !calleeReceiverType.isNullable();
    IElementType sign = operationTokenNode.getElementType();
    if (sign == JetTokens.DOT && receiverMayBeNull && calleeRejectsNull) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if (sign == JetTokens.SAFE_ACCESS && !(receiverMayBeNull && calleeRejectsNull)) {
        if (receiverIsNamespace) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
/**
 * Decorates an OverloadDomain so that every resolution attempt (a) records the resolved
 * function as the target of 'referenceExpression' in the trace when resolution succeeds
 * or pins down a single function, and (b) optionally reports resolution failures
 * (unresolved reference, argument mismatch, ambiguity) against the reference or the
 * argument list.
 */
private OverloadDomain wrapForTracing(
@NotNull final OverloadDomain overloadDomain,
@NotNull final JetReferenceExpression referenceExpression,
@Nullable final PsiElement argumentList,
final boolean reportErrors) {
return new OverloadDomain() {
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForNamedArguments(@NotNull List<JetType> typeArguments, @NotNull Map<String, JetType> valueArgumentTypes, @Nullable JetType functionLiteralArgumentType) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForNamedArguments(typeArguments, valueArgumentTypes, functionLiteralArgumentType);
report(resolutionResult);
return resolutionResult;
}
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForPositionedArguments(@NotNull List<JetType> typeArguments, @NotNull List<JetType> positionedValueArgumentTypes) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, positionedValueArgumentTypes);
report(resolutionResult);
return resolutionResult;
}
// Records the target reference (even for a failed single-candidate match) and,
// when enabled, reports diagnostics appropriate to the failure kind.
private void report(OverloadResolutionResult resolutionResult) {
if (resolutionResult.isSuccess() || resolutionResult.singleFunction()) {
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, resolutionResult.getFunctionDescriptor());
}
if (reportErrors) {
switch (resolutionResult.getResultCode()) {
case NAME_NOT_FOUND:
trace.getErrorHandler().unresolvedReference(referenceExpression);
break;
case SINGLE_FUNCTION_ARGUMENT_MISMATCH:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Arguments do not match " + DescriptorRenderer.TEXT.render(resolutionResult.getFunctionDescriptor()));
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
case AMBIGUITY:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Overload ambiguity [TODO : more helpful message]");
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
default:
// Not a success
}
}
}
@Override
public boolean isEmpty() {
return overloadDomain.isEmpty();
}
};
}
/**
 * Checks the body of 'function' against the return type declared by 'functionDescriptor',
 * starting from empty data-flow information.
 */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks the body of 'function' against its declared return type, threading the given
 * data-flow information through the body.
 *
 * A function with an expression body and no declared return type has its return type
 * inferred from the body, so there is no expected type to check against yet.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    // The body is analyzed in the function's inner scope (parameters, type parameters, ...).
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo);
}
/**
 * Checks the body of 'function' (already scoped to its inner scope) against the given
 * expected return type, starting from empty data-flow information.
 */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty());
}
/**
 * Core return-type check: types the body, reports unreachable code, and verifies the
 * returned expressions against 'expectedReturnType'.
 *
 * Block bodies ('{...}') are typed with the expected type pushed down as the expected
 * RETURN type; expression bodies are typed directly against the expected type, and any
 * 'return' inside them is an error.
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// Block body: no direct expected type, but an expected return type for 'return's.
// Expression body: the body itself is checked against the expected type; returning
// from inside is FORBIDDEN.
final TypeInferenceContext context =
blockBody
? new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, expectedReturnType, FORBIDDEN);
typeInferrerVisitor.getType(bodyExpression, context);
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// A non-Unit function with a block body must actually return a value somewhere
// (unless the flow analysis found no returns at all, i.e. the body never terminates).
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A plain trailing expression in a block body does not return a value; require
// an explicit 'return' unless the expression's type is Nothing (never completes).
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Infers a function's return type as the common supertype of everything its body may
 * return; a body that never returns yields Nothing.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Types the function body, then maps every element the body may return to its type:
 * returned expressions map to their cached expression type (when one was recorded),
 * and elements returning implicitly map to Unit.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
        @NotNull BindingTrace trace,
        JetScope outerScope,
        JetDeclarationWithBody function,
        FunctionDescriptor functionDescriptor) {
    JetExpression body = function.getBodyExpression();
    assert body != null;
    // Type the whole body first so expression types are available in the trace below.
    JetScope innerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    typeInferrerVisitor.getType(body, new TypeInferenceContext(trace, innerScope, function.hasBlockBody(), DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
    Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
    Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
    flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
    Map<JetElement, JetType> typeMap = new HashMap<JetElement, JetType>();
    for (JetExpression returned : returnedExpressions) {
        JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returned);
        // Returned expressions are used as values, not statements.
        trace.record(STATEMENT, returned, false);
        if (cachedType != null) {
            typeMap.put(returned, cachedType);
        }
    }
    for (JetElement unitElement : elementsReturningUnit) {
        typeMap.put(unitElement, JetStandardClasses.getUnitType());
    }
    return typeMap;
}
/**
 * Types a block statement-by-statement in a writable scope, returning the type of the
 * last statement (or Unit for an empty block).
 *
 * The expected type is only pushed onto the LAST statement; earlier statements are
 * typed with no expected type. Data-flow info produced by one statement is threaded
 * into the context used for the next.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = new TypeInferenceContext(trace, scope, true, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
// Only the last statement is checked against the block's expected type.
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
newContext = new TypeInferenceContext(trace, scope, true, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
}
result = blockLevelVisitor.getType(statementExpression, newContext);
// Thread any new data-flow facts (e.g. from smart casts) into the next statement.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = new TypeInferenceContext(trace, scope, true, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Experimental type-argument inference for calls WITHOUT explicit type arguments:
 * collects subtyping constraints from the value arguments (and from the expected type,
 * when present), solves them, and substitutes the solution into the candidate's return
 * type. Returns null (after reporting an error) when the constraint system has no
 * solution. Calls WITH explicit type arguments are not handled here yet.
 *
 * Fix: removed leftover System.out.println debug output that polluted stdout on every
 * inference attempt.
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull JetCall call,
        @NotNull JetType expectedType
) {
    if (call.getTypeArguments().isEmpty()) {
        JetExpression calleeExpression = call.getCalleeExpression();
        Collection<FunctionDescriptor> candidates;
        if (calleeExpression instanceof JetSimpleNameExpression) {
            JetSimpleNameExpression expression = (JetSimpleNameExpression) calleeExpression;
            candidates = scope.getFunctionGroup(expression.getReferencedName()).getFunctionDescriptors();
        }
        else {
            throw new UnsupportedOperationException("Type argument inference not implemented");
        }
        // Only a single candidate is supported so far; real overload resolution is TODO.
        assert candidates.size() == 1;
        FunctionDescriptor candidate = candidates.iterator().next();
        // NOTE(review): call.getTypeArguments() is empty in this branch, so this asserts
        // the candidate has no type parameters — which contradicts registering type
        // variables below. Confirm the intended invariant.
        assert candidate.getTypeParameters().size() == call.getTypeArguments().size();
        ConstraintSystem constraintSystem = new ConstraintSystem();
        for (TypeParameterDescriptor typeParameterDescriptor : candidate.getTypeParameters()) {
            constraintSystem.registerTypeVariable(typeParameterDescriptor, Variance.INVARIANT); // TODO
        }
        // Each argument's type must be a subtype of the corresponding parameter type.
        Iterator<ValueParameterDescriptor> parameters = candidate.getValueParameters().iterator();
        for (JetValueArgument valueArgument : call.getValueArguments()) {
            assert !valueArgument.isNamed();
            ValueParameterDescriptor valueParameterDescriptor = parameters.next();
            JetExpression expression = valueArgument.getArgumentExpression();
            JetType type = getType(scope, expression, false, NO_EXPECTED_TYPE);
            constraintSystem.addSubtypingConstraint(type, valueParameterDescriptor.getOutType());
        }
        if (expectedType != NO_EXPECTED_TYPE) {
            // The call's result must conform to the type expected by the surrounding context.
            constraintSystem.addSubtypingConstraint(candidate.getReturnType(), expectedType);
        }
        ConstraintSystem.Solution solution = constraintSystem.solve();
        if (!solution.isSuccessful()) {
            trace.getErrorHandler().genericError(calleeExpression.getNode(), "Type inference failed");
            return null;
        }
        return solution.getSubstitutor().substitute(candidate.getReturnType(), Variance.INVARIANT); // TODO
    }
    else {
        throw new UnsupportedOperationException("Explicit type arguments not implemented");
    }
}
/**
 * Resolves a call against an OverloadDomain: validates and resolves the explicit type
 * arguments, then delegates to resolveCallWithTypeArguments for value-argument matching.
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull OverloadDomain overloadDomain,
        @NotNull JetCall call) {
    // 1) ends with a name -> (scope, name) to look up
    // 2) ends with something else -> just check types
    List<JetTypeProjection> projections = call.getTypeArguments();
    for (JetTypeProjection projection : projections) {
        // Projections (in/out/*) make no sense on a function's type arguments.
        if (projection.getProjectionKind() != JetProjectionKind.NONE) {
            trace.getErrorHandler().genericError(projection.getNode(), "Projections are not allowed on type parameters for methods"); // TODO : better positioning
        }
    }
    List<JetType> resolvedTypeArguments = new ArrayList<JetType>();
    for (JetTypeProjection projection : projections) {
        // TODO : check that there's no projection
        JetTypeReference typeReference = projection.getTypeReference();
        if (typeReference == null) continue;
        resolvedTypeArguments.add(new TypeResolver(semanticServices, trace, true).resolveType(scope, typeReference));
    }
    return resolveCallWithTypeArguments(scope, overloadDomain, call, resolvedTypeArguments);
}
/**
 * Matches the call's value arguments (positional + at most one trailing function
 * literal) against the overload domain using the already-resolved type arguments.
 * Returns the resolved function's return type on success, null otherwise. Named
 * arguments are not supported yet and are reported as an error.
 */
private JetType resolveCallWithTypeArguments(JetScope scope, OverloadDomain overloadDomain, JetCall call, List<JetType> typeArguments) {
    List<JetValueArgument> valueArguments = call.getValueArguments();
    boolean hasNamedArgument = false;
    for (JetValueArgument argument : valueArguments) {
        if (argument.isNamed()) {
            hasNamedArgument = true;
            break;
        }
    }
    List<JetExpression> functionLiteralArguments = call.getFunctionLiteralArguments();
    // TODO : must be a check
    assert functionLiteralArguments.size() <= 1;
    if (hasNamedArgument) {
        // TODO : check that all are named
        trace.getErrorHandler().genericError(call.asElement().getNode(), "Named arguments are not supported"); // TODO
        return null;
    }
    // Gather all arguments positionally, trailing function literals last.
    List<JetExpression> positionedArguments = new ArrayList<JetExpression>();
    for (JetValueArgument argument : valueArguments) {
        JetExpression argumentExpression = argument.getArgumentExpression();
        if (argumentExpression == null) continue;
        positionedArguments.add(argumentExpression);
    }
    positionedArguments.addAll(functionLiteralArguments);
    List<JetType> argumentTypes = new ArrayList<JetType>();
    for (JetExpression argument : positionedArguments) {
        argumentTypes.add(safeGetType(scope, argument, false, NO_EXPECTED_TYPE)); // TODO
    }
    OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, argumentTypes);
    if (!resolutionResult.isSuccess()) {
        return null;
    }
    FunctionDescriptor descriptor = resolutionResult.getFunctionDescriptor();
    checkGenericBoundsInAFunctionCall(call.getTypeArguments(), typeArguments, descriptor);
    return descriptor.getReturnType();
}
/**
 * Checks each explicit type argument against the bounds of the corresponding type
 * parameter of the called function, with all other type parameters substituted by
 * their supplied arguments (so bounds referring to sibling parameters are checked
 * against the actual instantiation).
 */
private void checkGenericBoundsInAFunctionCall(List<JetTypeProjection> jetTypeArguments, List<JetType> typeArguments, FunctionDescriptor functionDescriptor) {
    List<TypeParameterDescriptor> typeParameters = functionDescriptor.getOriginal().getTypeParameters();
    // Build the substitution {type parameter -> supplied argument} first; the
    // substitutor needs the complete map before any bound can be checked.
    Map<TypeConstructor, TypeProjection> substitutionContext = Maps.newHashMap();
    for (int i = 0; i < typeParameters.size(); i++) {
        substitutionContext.put(typeParameters.get(i).getTypeConstructor(), new TypeProjection(typeArguments.get(i)));
    }
    TypeSubstitutor substitutor = TypeSubstitutor.create(substitutionContext);
    for (int i = 0; i < typeParameters.size(); i++) {
        JetTypeReference typeReference = jetTypeArguments.get(i).getTypeReference();
        assert typeReference != null;
        classDescriptorResolver.checkBounds(typeReference, typeArguments.get(i), typeParameters.get(i), substitutor);
    }
}
/**
 * Checks a constructor call written as a type reference plus arguments (e.g. in a
 * delegation specifier). Only ordinary classes written as user types are supported:
 * the type is resolved, projections in its type arguments are rejected, and the call
 * is then checked against the class's constructors. Returns the constructed type, or
 * null when the call cannot be checked (errors are reported along the way).
 */
@Nullable
public JetType checkTypeInitializerCall(JetScope scope, @NotNull JetTypeReference typeReference, @NotNull JetCall call) {
JetTypeElement typeElement = typeReference.getTypeElement();
if (typeElement instanceof JetUserType) {
JetUserType userType = (JetUserType) typeElement;
// TODO : to infer constructor parameters, one will need to
// 1) resolve a _class_ from the typeReference
// 2) rely on the overload domain of constructors of this class to infer type arguments
// For now we assume that the type arguments are provided, and thus the typeReference can be
// resolved into a valid type
JetType receiverType = typeResolver.resolveType(scope, typeReference);
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
// Projections (in/out/*) are meaningless when instantiating a class.
for (JetTypeProjection typeProjection : userType.getTypeArguments()) {
switch (typeProjection.getProjectionKind()) {
case IN:
case OUT:
case STAR:
// TODO : Bug in the editor
trace.getErrorHandler().genericError(typeProjection.getProjectionNode(), "Projections are not allowed in constructor type arguments");
break;
case NONE:
break;
}
}
JetSimpleNameExpression referenceExpression = userType.getReferenceExpression();
if (referenceExpression != null) {
return checkClassConstructorCall(scope, referenceExpression, classDescriptor, receiverType, call);
}
}
else {
trace.getErrorHandler().genericError(((JetElement) call).getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : review the message
}
return null;
}
else {
if (typeElement != null) {
trace.getErrorHandler().genericError(typeElement.getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : Better message
}
}
return null;
}
/**
 * Checks a call to a constructor of 'classDescriptor' producing 'receiverType'.
 *
 * For 'this(...)' delegation the receiver's type arguments (projections stripped) are
 * reused as the constructor's type arguments; otherwise the call's own type arguments
 * are resolved. When no constructor matches and the receiver type itself is not an
 * error type, a diagnostic is reported and the receiver type is returned as a recovery
 * value, so callers always get a usable type for a well-formed receiver.
 */
@Nullable
public JetType checkClassConstructorCall(
@NotNull JetScope scope,
@NotNull JetReferenceExpression referenceExpression,
@NotNull ClassDescriptor classDescriptor,
@NotNull JetType receiverType,
@NotNull JetCall call) {
// When one writes 'new Array<in T>(...)' this does not make much sense, and an instance
// of 'Array<T>' must be created anyway.
// Thus, we should either prohibit projections in type arguments in such contexts,
// or treat them as an automatic upcast to the desired type, i.e. for the user not
// to be forced to write
// val a : Array<in T> = new Array<T>(...)
// NOTE: Array may be a bad example here, some classes may have substantial functionality
// not involving their type parameters
//
// The code below upcasts the type automatically
FunctionGroup constructors = classDescriptor.getConstructors();
OverloadDomain constructorsOverloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(null, constructors);
JetType constructorReturnedType;
if (call instanceof JetDelegatorToThisCall) {
// 'this(...)': the type arguments are those of the receiver type, projections stripped.
List<TypeProjection> typeArguments = receiverType.getArguments();
List<JetType> projectionsStripped = Lists.newArrayList();
for (TypeProjection typeArgument : typeArguments) {
projectionsStripped.add(typeArgument.getType());
}
constructorReturnedType = resolveCallWithTypeArguments(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call, projectionsStripped);
}
else {
constructorReturnedType = resolveCall(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call);
}
// Resolution failed for a non-error receiver type: report, but still bind the reference
// to the class and recover with the receiver type.
if (constructorReturnedType == null && !ErrorUtils.isErrorType(receiverType)) {
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
assert declarationDescriptor != null;
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, declarationDescriptor);
// TODO : more helpful message
JetValueArgumentList argumentList = call.getValueArgumentList();
final String errorMessage = "Cannot find a constructor overload for class " + classDescriptor.getName() + " with these arguments";
if (argumentList != null) {
trace.getErrorHandler().genericError(argumentList.getNode(), errorMessage);
}
else {
trace.getErrorHandler().genericError(call.asElement().getNode(), errorMessage);
}
constructorReturnedType = receiverType;
}
// If no upcast needed:
return constructorReturnedType;
// Automatic upcast:
// result = receiverType;
}
//TODO
/**
 * When 'expression' is a simple reference to a known variable, returns the data-flow
 * "out" type for that variable; otherwise falls back to 'initialType'.
 */
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when both the actual and the expected type are known and the
 * actual type is not a subtype of the expected one. Always returns the actual type.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    boolean bothTypesKnown = expressionType != null
            && context.expectedType != null
            && context.expectedType != NO_EXPECTED_TYPE;
    if (bothTypesKnown && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but before reporting a mismatch tries to "enrich" the expression's
 * type using data-flow information about the referenced variable (smart casts): the
 * most recent possible type that fits the expectation wins, then the variable's "out"
 * type, then the original type. On success the auto-cast is recorded; on failure the
 * mismatch is reported with the ORIGINAL expression type.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
// Fast path: nothing to check, or the plain type already conforms.
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
// Enrichment only applies to simple references to known variables.
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
if (variableDescriptor == null) return expressionType;
JetType enrichedType = null;
// Reversed so the most recently established possible type is tried first.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
enrichedType = possibleType;
break;
}
}
if (enrichedType == null) {
enrichedType = context.dataFlowInfo.getOutType(variableDescriptor);
}
if (enrichedType == null) {
enrichedType = expressionType;
}
if (!semanticServices.getTypeChecker().isSubtypeOf(enrichedType, context.expectedType)) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
} else {
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
}
return enrichedType;
}
/**
 * Records an automatic cast of 'expression' to 'type', or reports an error when the
 * variable is mutable (a 'var' may change between the check and the use, so the cast
 * would be unsound).
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
        return;
    }
    trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
}
/**
 * Runs checkEnrichedType over each argument against its expected parameter type and
 * returns the (possibly enriched) argument types. When the three lists do not line up
 * (or there are no arguments), the input types are returned unchanged.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    boolean listsAligned = count != 0 && argumentTypes.size() == count && expectedArgumentTypes.size() == count;
    if (!listsAligned) {
        return argumentTypes;
    }
    List<JetType> checkedTypes = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checkedTypes.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checkedTypes;
}
/**
 * Returns the variable a simple-name expression refers to, or null when the expression
 * is not a simple name or does not resolve to a variable. Looks through an 'e : Type'
 * ascription to the underlying expression.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typedExpression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typedExpression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typedExpression.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of everything a single type-inference step needs: the trace to
 * write results/diagnostics to, the resolution scope, data-flow facts, and the
 * expected type of the expression / expected return type of the enclosing function.
 * The replace* methods derive a new context with one component swapped.
 */
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
// Whether '{...}' should be parsed/typed as a block rather than a function literal
// in this position — presumably; confirm against callers.
public final boolean preferBlock;
public final DataFlowInfo dataFlowInfo;
// Expected type of the expression being typed; NO_EXPECTED_TYPE when unconstrained.
public final JetType expectedType;
// Expected type of 'return' expressions; FORBIDDEN when returns are not allowed here.
public final JetType expectedReturnType;
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
boolean preferBlock,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.preferBlock = preferBlock;
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
// Derives a context with different data-flow facts, everything else unchanged.
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return new TypeInferenceContext(trace, scope, preferBlock, newDataFlowInfo, expectedType, expectedReturnType);
}
// Derives a context with a different expected type; null means "no expectation".
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
}
// Derives a context with a different expected return type; null means "no expectation".
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, expectedType, newExpectedReturnType);
}
// Derives a context writing to a different trace (e.g. a temporary one).
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recently analyzed expression, if any —
// presumably written by visit methods in subclasses and cleared by resetResult();
// confirm against the subclass implementations.
protected DataFlowInfo resultDataFlowInfo;
/** Returns the data-flow info left behind by the last analyzed expression, or null. */
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Types 'expression' after re-contextualizing onto the given scope and preferBlock
 * flag, keeping the trace, data-flow info and expected types from 'context'.
 */
@Nullable
public JetType getType(@NotNull JetScope scope, @NotNull JetExpression expression, boolean preferBlock, TypeInferenceContext context) {
    TypeInferenceContext rescoped = new TypeInferenceContext(
            context.trace, scope, preferBlock, context.dataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rescoped);
}
/**
 * Types 'expression' with the supplied data-flow info substituted into the context,
 * keeping the trace and expected types from 'context'.
 */
private JetType getTypeWithNewDataFlowInfo(JetScope scope, JetExpression expression, boolean preferBlock, @NotNull DataFlowInfo newDataFlowInfo, TypeInferenceContext context) {
    TypeInferenceContext updated = new TypeInferenceContext(
            context.trace, scope, preferBlock, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, updated);
}
/**
 * Main typing entry point with caching: an expression already marked PROCESSED returns
 * its cached EXPRESSION_TYPE; otherwise it is visited, its type recorded, and the
 * expression marked PROCESSED. Non-nullable Nothing-typed expressions additionally
 * mark everything they dominate as unreachable. Recursive lazy-value computation is
 * caught and reported, yielding null.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
// NOTE(review): the unboxing here presumes the PROCESSED slice yields false (not null)
// for unseen expressions — confirm the slice's default.
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if ((boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily computed types before caching.
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-nullable Nothing never completes normally, so code after it is dead.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type inference has run into a recursive problem"); // TODO : message
result = null;
}
// Record the resolution scope only if visiting didn't already mark the expression.
if (!(boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
/** Clears per-visit by-product state before inferring the next expression. */
public void resetResult() {
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports an "unreachable code" error on every root expression dominated by
 * {@code expression}, which has been determined never to terminate normally.
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
/**
 * Computes the returned type of a block expression. An empty block yields Unit;
 * otherwise the statements are checked in a fresh writable scope nested in
 * {@code outerScope}.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    if (statements.isEmpty()) {
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor owner = outerScope.getContainingDeclaration();
    WritableScope blockScope =
            new WritableScopeImpl(outerScope, owner, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return context.services.getBlockReturnedTypeWithWritableScope(blockScope, statements, context);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resolves a simple name reference: '$field' references resolve against backing
// fields; plain identifiers try, in order, local/member variables, classifiers
// (class objects), and finally the subclass hook furtherNameLookup.
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
String referencedName = expression.getReferencedName();
// Backing-field reference ($name): resolved via the scope's field table.
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
if (referencedName != null) {
// 1) Variable lookup.
VariableDescriptor variable = context.scope.getVariable(referencedName);
if (variable != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, variable);
JetType result = variable.getOutType();
if (result == null) {
// Variable exists but has no out-type (e.g. write-only property).
context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
// 2) Classifier lookup: a bare class name is only a value if it has a
// class object (or we are in namespace position).
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
// The reference target is recorded even when the use is an error.
context.trace.record(BindingContext.REFERENCE_TARGET, expression, classifier);
return context.services.checkEnrichedType(result, expression, context);
}
else {
// 3) Subclass hook (namespaces etc.); result is passed out via the array.
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
}
// Nothing matched: unresolved.
context.trace.getErrorHandler().unresolvedReference(expression);
}
}
return null;
}
/** Overridden by subclasses that resolve names in namespace position; plain expressions are not. */
public boolean isNamespacePosition() {
    return false;
}
// Hook for resolving names that are neither variables nor classifiers.
// Returns true when the name was handled; the resolved type, if any, is passed
// back through result[0].
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
if (namespaceType != null) {
// A bare namespace name is not a value in expression position.
// NOTE(review): result[0] is deliberately left null here, so the caller
// records no expression type — confirm that this is the intended contract.
context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
return true;
}
return false;
}
/**
 * Resolves {@code referencedName} as a namespace in the current scope.
 * On success the reference target is recorded in the trace; returns null when
 * no such namespace exists.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespaceDescriptor = context.scope.getNamespace(referencedName);
    if (namespaceDescriptor == null) {
        return null;
    }
    context.trace.record(BindingContext.REFERENCE_TARGET, expression, namespaceDescriptor);
    return namespaceDescriptor.getNamespaceType();
}
// Infers the type of an object literal by running the top-down analyzer over
// the object declaration. The declaration's descriptor only becomes available
// via a trace record, so a record handler intercepts it and captures the
// (lazily computed) default type; it also pre-caches the expression type so
// recursive references inside the object body see it.
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the record for this literal's own object declaration.
if (declaration == expression.getObjectDeclaration()) {
// Deferred: the descriptor's default type may not be computable yet.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Manually cache the type (see the note in getType about recursive definitions).
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Listen on every declaration-to-descriptor slice, as the object may be
// recorded under any of them.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
// Infers the type of a function literal. A brace-block without a parameter
// specification in "prefer block" position is treated as a plain block, not a
// closure. Otherwise a fresh anonymous FunctionDescriptor is built from the
// declared receiver, parameters and return type (or the expected function
// type / the body's inferred type when no return type is declared).
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
// Block interpretation: { ... } used as a control-structure body.
if (context.preferBlock && !functionLiteral.hasParameterSpecification()) {
context.trace.record(BindingContext.BLOCK, expression);
return context.services.checkType(getBlockReturnedType(context.scope, functionLiteral.getBodyExpression(), context), expression, context);
}
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No declared receiver: fall back to the enclosing 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// Parameter type inference is not supported yet; substitute an error type.
context.trace.getErrorHandler().genericError(parameter.getNode(), "Type inference for parameters is not implemented yet");
type = ErrorUtils.createErrorType("Not inferred");
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// Only a literal with an explicitly declared receiver is an extension function type.
JetType effectiveReceiverType = receiverTypeRef == null ? null : receiverType;
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo);
}
else {
// No declared return type: take the expectation from an expected function
// type if present, then infer from the body.
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(context.expectedType)) {
returnType = JetStandardClasses.getReturnType(context.expectedType);
}
returnType = getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/** A parenthesized expression has exactly the type of its inner expression. */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(context.scope, expression.getExpression(), false, context);
    return context.services.checkType(innerType, expression, context);
}
/**
 * Infers the type of a literal constant (integer, float, boolean, character,
 * raw string, or null). The compile-time value is resolved against the
 * expected type; on success it is recorded in the trace, on failure the
 * resolver's error message is reported and null is returned.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType nodeType = node.getElementType();
    String nodeText = node.getText();
    JetStandardLibrary stdlib = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> constant;
    if (nodeType == JetNodeTypes.INTEGER_CONSTANT) {
        constant = resolver.getIntegerValue(nodeText, context.expectedType);
    }
    else if (nodeType == JetNodeTypes.FLOAT_CONSTANT) {
        constant = resolver.getFloatValue(nodeText, context.expectedType);
    }
    else if (nodeType == JetNodeTypes.BOOLEAN_CONSTANT) {
        constant = resolver.getBooleanValue(nodeText, context.expectedType);
    }
    else if (nodeType == JetNodeTypes.CHARACTER_CONSTANT) {
        constant = resolver.getCharValue(nodeText, context.expectedType);
    }
    else if (nodeType == JetNodeTypes.RAW_STRING_CONSTANT) {
        constant = resolver.getRawStringValue(nodeText, context.expectedType);
    }
    else if (nodeType == JetNodeTypes.NULL) {
        constant = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (constant instanceof ErrorValue) {
        context.trace.getErrorHandler().genericError(node, ((ErrorValue) constant).getMessage());
        return null;
    }
    context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
    return context.services.checkType(constant.getType(stdlib), expression, context);
}
/**
 * Infers the type of a 'throw' expression. The thrown expression, if present,
 * is type-checked for its side effects on the trace; the 'throw' expression
 * itself always has type Nothing.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // Result intentionally unused: the call records the thrown expression's
        // type in the trace (the previous local binding was a dead store).
        // TODO : check that it inherits Throwable
        getType(context.scope, thrownExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Infers the type of a 'return' expression. Reports an error where 'return' is
 * forbidden, checks a returned value against the expected return type, requires
 * a value when the function's return type is neither Unit nor unknown, and
 * yields Nothing as the expression's own type.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Type-check the returned value against the expected return type; any
        // mismatch is reported inside getType. (A previously declared local
        // 'returnedType' was a dead store and has been removed.)
        getType(context.scope, returnedExpression, false, context.replaceExpectedType(context.expectedReturnType));
    }
    else {
        // A bare 'return' is only legal when the function returns Unit or no
        // expectation exists.
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/** 'break' never produces a value; its type is Nothing. */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/** 'continue' never produces a value; its type is Nothing. */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/**
 * Infers the type of 'e : T', 'e as T', and 'e as? T'. The ':' form requires
 * the operand's type to be a subtype of T; the cast forms are checked for
 * useless or impossible casts. 'as?' yields the nullable version of T.
 */
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
    IElementType operation = expression.getOperationSign().getReferencedNameElementType();
    // The operand is typed without an expectation; the RHS type governs the result.
    JetType leftType = getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    JetTypeReference rightTypeRef = expression.getRight();
    JetType resultType = null;
    if (rightTypeRef != null) {
        JetType targetType = context.typeResolver.resolveType(context.scope, rightTypeRef);
        if (operation == JetTokens.COLON) {
            if (leftType != null && !semanticServices.getTypeChecker().isSubtypeOf(leftType, targetType)) {
                context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, leftType);
            }
            resultType = targetType;
        }
        else if (operation == JetTokens.AS_KEYWORD || operation == JetTokens.AS_SAFE) {
            checkForCastImpossibility(expression, leftType, targetType, context);
            resultType = operation == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
        }
        else {
            context.trace.getErrorHandler().genericError(expression.getOperationSign().getNode(), "Unsupported binary operation");
        }
    }
    return context.services.checkType(resultType, expression, context);
}
/**
 * Warns about pointless or hopeless casts: an upcast needs no 'as' at all,
 * and a cast between unrelated types can never succeed. No diagnostics are
 * produced for a legitimate downcast or when the operand type is unknown.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null) {
        return;
    }
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    if (typeChecker.isSubtypeOf(targetType, actualType)) {
        // Legitimate downcast direction — but flag the degenerate upcast case too.
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (typeChecker.isSubtypeOf(actualType, targetType)) {
        // Pure upcast: always succeeds, so 'as' is redundant.
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
/**
 * Infers the type of a tuple literal from its entry types. When a tuple type is
 * expected, the entry types may be enriched against the expectation's arguments.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, false, NO_EXPECTED_TYPE)); // TODO
    }
    if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
        List<JetType> enrichedTypes = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        // Identity comparison: the service returns the same list when nothing changed.
        if (enrichedTypes != entryTypes) {
            return JetStandardClasses.getTupleType(enrichedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
// Infers the type of 'this' / 'this@label' / 'this<SuperType>'. A label is
// resolved to the class or function it names (via declared labels, or via the
// control-flow processor's info for function literals); an unlabeled 'this'
// takes the scope's this-type. A super-type qualifier restricts the result to
// the named supertype, substituted for this class's type arguments.
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Exactly one declaration carries this label.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
// 'this' inside a labeled function refers to its receiver.
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver: 'this' is undefined here (reported below via the
// Nothing check).
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
// More than one declaration matches the label.
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': the enclosing receiver from the scope.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
// Nothing is used as the sentinel for "no 'this' in this context".
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
// Find the declared supertype matching the qualifier and substitute
// this class's type arguments into it.
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
/** A block expression's type is the returned type of its statement list. */
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
    JetType blockType = getBlockReturnedType(context.scope, expression, context);
    return context.services.checkType(blockType, expression, context);
}
// Infers the type of a 'when' expression. Each entry's conditions are checked
// against the subject; the data-flow info they establish is combined (OR across
// a multi-condition entry, AND with the incoming info) and used to type the
// entry body. The result is the common supertype of all body types.
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
// Conditions and the subject are typed without an expectation; only entry
// bodies see the original expected type (via contextWithExpectedType below).
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, false, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: its binding pattern may introduce variables visible
// in the entry body, so the body scope is extended in place.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple conditions: no variables may leak into the body, and the
// entry's flow info is the OR over the conditions, ANDed with the input.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// The body is typed with the entry's flow info and the caller's expectation.
JetType type = getTypeWithNewDataFlowInfo(scopeToExtend, bodyExpression, true, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
// Type-checks one 'when' condition against the subject and returns the
// data-flow info it establishes (unchanged context info unless an 'is' pattern
// refines it). Dispatch over the condition forms is done with a throwaway
// visitor; the result is passed out via a one-element array.
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
// Call-suffix condition ('.foo()' / '?.foo()'): resolved against a scope
// whose receiver is the subject; the result must be Boolean.
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
// 'in range' condition: checked like the binary 'in' operator.
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
// 'is Pattern' condition: the only form that refines data-flow info.
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
// Type-checks a pattern against the subject type, extending scopeToExtend with
// variables bound by binding patterns, and returns the data-flow info the match
// establishes (e.g. an is-instance fact for a type pattern). Recurses for
// tuple and decomposer sub-patterns; results flow out via a one-element array.
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// 'is T': verify compatibility and record the instanceof fact.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// The subject must be a tuple of matching arity; entries are matched
// positionally and their flow facts are ANDed together.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Match the argument list against the decomposer's return type.
JetType selectorReturnType = getSelectorReturnType(subjectType, pattern.getDecomposerExpression(), context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Constant/expression pattern: just verify the types can intersect.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(scopeToExtend, expression, false, context);
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// 'val x [: T]': binds a new variable (visible via scopeToExtend); an
// explicit type must be a supertype of the subject, and a guard condition
// is checked with the bound variable added to the subject set.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the pattern's type and the subject type have no
// common values (their intersection is empty).
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType); // TODO : message
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
// Infers the type of a try/catch/finally expression as the common supertype of
// the contributing block types. Catch bodies are typed in a scope containing
// the catch parameter.
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchScope, catchBody, true, context);
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
// NOTE(review): when a finally block exists, catch-body types are discarded
// and only finally + try types feed the supertype — catch bodies above are
// still typed for their trace side effects. Confirm this is intended.
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(context.scope, finallyBlock.getFinalExpression(), true, context);
if (type != null) {
types.add(type);
}
}
JetType type = getType(context.scope, tryBlock, true, context);
if (type != null) {
types.add(type);
}
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
/**
 * Types an if-expression. Branches are checked against the caller's expected type;
 * the condition is not. When exactly one branch always jumps (has type Nothing),
 * the data-flow info of the other branch survives the whole expression.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression elseBranch = expression.getElse();
    JetExpression thenBranch = expression.getThen();
    // The then-branch sees variables bound by 'is'-patterns in the condition.
    WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
    DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
    DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    if (elseBranch == null) {
        // One-armed if: the expression has type Unit regardless of the branch's type.
        if (thenBranch != null) {
            JetType type = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, context);
            if (type != null && JetStandardClasses.isNothing(type)) {
                // The then-branch always jumps, so only the negative condition info flows on.
                resultDataFlowInfo = elseInfo;
                // resultScope = elseScope;
            }
            return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
        }
        return null;
    }
    if (thenBranch == null) {
        // 'if (c) else e' — mirror of the case above.
        JetType type = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, context);
        if (type != null && JetStandardClasses.isNothing(type)) {
            resultDataFlowInfo = thenInfo;
            // resultScope = thenScope;
        }
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
    }
    // Both branches present: each is checked against the caller's expected type.
    JetType thenType = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, contextWithExpectedType);
    JetType elseType = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, contextWithExpectedType);
    JetType result;
    if (thenType == null) {
        result = elseType;
    }
    else if (elseType == null) {
        result = thenType;
    }
    else {
        result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
    }
    // If exactly one branch always jumps, the other branch's condition info survives.
    boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
    boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
    if (jumpInThen && !jumpInElse) {
        resultDataFlowInfo = elseInfo;
        // resultScope = elseScope;
    }
    else if (jumpInElse && !jumpInThen) {
        resultDataFlowInfo = thenInfo;
        // resultScope = thenScope;
    }
    return result;
}
/**
 * Derives data-flow information from a boolean condition, assuming it evaluates to
 * {@code conditionValue}. Handles 'is'-patterns, &&/||, (in)equality null-comparisons,
 * negation, and parentheses; any other expression leaves the incoming info unchanged.
 *
 * @param scopeToExtend when non-null, receives variables bound by 'is'-patterns that
 *        are known to hold under the assumed condition value
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
    if (condition == null) return context.dataFlowInfo;
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitIsExpression(JetIsExpression expression) {
            // The pattern's info applies when 'x is T' is assumed true, or '!(x !is T)'.
            if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
                JetPattern pattern = expression.getPattern();
                // Info was recorded by visitIsExpression when the 'is' itself was typed.
                // NOTE(review): may be null if the pattern was never typed — the null is
                // papered over by the fallback at the bottom of this method.
                result[0] = patternsToDataFlowInfo.get(pattern);
                if (scopeToExtend != null) {
                    List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
                    if (descriptors != null) {
                        for (VariableDescriptor variableDescriptor : descriptors) {
                            scopeToExtend.addVariableDescriptor(variableDescriptor);
                        }
                    }
                }
            }
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            IElementType operationToken = expression.getOperationToken();
            if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
                // 'a && b' assumed true binds both sides; 'a || b' assumed false does likewise.
                WritableScope actualScopeToExtend;
                if (operationToken == JetTokens.ANDAND) {
                    actualScopeToExtend = conditionValue ? scopeToExtend : null;
                }
                else {
                    actualScopeToExtend = conditionValue ? null : scopeToExtend;
                }
                DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
                JetExpression expressionRight = expression.getRight();
                if (expressionRight != null) {
                    DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
                    DataFlowInfo.CompositionOperator operator;
                    if (operationToken == JetTokens.ANDAND) {
                        operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
                    }
                    else {
                        operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
                    }
                    dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
                }
                result[0] = dataFlowInfo;
            }
            else if (operationToken == JetTokens.EQEQ
                     || operationToken == JetTokens.EXCLEQ
                     || operationToken == JetTokens.EQEQEQ
                     || operationToken == JetTokens.EXCLEQEQEQ) {
                JetExpression left = expression.getLeft();
                JetExpression right = expression.getRight();
                if (right == null) return;
                // Normalize so that the simple name (the tracked variable) is on the left.
                if (!(left instanceof JetSimpleNameExpression)) {
                    JetExpression tmp = left;
                    left = right;
                    right = tmp;
                    if (!(left instanceof JetSimpleNameExpression)) {
                        return;
                    }
                }
                VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
                if (variableDescriptor == null) return;
                // TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
                // Comparison to a non-null expression
                JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
                if (rhsType != null && !rhsType.isNullable()) {
                    extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
                    return;
                }
                VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
                if (rightVariable != null) {
                    JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
                    if (lhsType != null && !lhsType.isNullable()) {
                        extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
                        return;
                    }
                }
                // Comparison to 'null'
                if (!(right instanceof JetConstantExpression)) {
                    return;
                }
                JetConstantExpression constantExpression = (JetConstantExpression) right;
                if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
                    return;
                }
                extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
            }
        }
        // Records 'variable is (not) null' in the data-flow info, honoring the
        // comparison operator's polarity.
        private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
            if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
            }
            else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
            }
        }
        @Override
        public void visitUnaryExpression(JetUnaryExpression expression) {
            // '!cond' flips the assumed condition value.
            IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
            if (operationTokenType == JetTokens.EXCL) {
                JetExpression baseExpression = expression.getBaseExpression();
                if (baseExpression != null) {
                    result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
                }
            }
        }
        @Override
        public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
            // Parentheses are transparent for data-flow purposes.
            JetExpression body = expression.getExpression();
            if (body != null) {
                body.accept(this);
            }
        }
    });
    if (result[0] == null) {
        return context.dataFlowInfo;
    }
    return result[0];
}
/**
 * Type-checks a loop/if condition and reports an error unless it is Boolean.
 * A null condition (broken syntax) and an untypable condition are both ignored.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType conditionType = getType(scope, condition, false, context);
    if (conditionType == null || isBoolean(conditionType)) {
        return;
    }
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Types a while-loop: always Unit. The body is checked under the data-flow info
 * implied by the condition being true; after a break-free loop, the condition is
 * known to be false.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        // The body sees variables bound by 'is'-patterns in the condition.
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyFlowInfo = (condition == null)
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, true, bodyScope, context);
        getTypeWithNewDataFlowInfo(bodyScope, body, true, bodyFlowInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Types a do-while loop: always Unit. Variables declared in the body are visible
 * to the condition, so the body is checked in a writable scope that the condition
 * is then resolved in.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (function.getFunctionLiteral().hasParameterSpecification()) {
            // A literal with declared parameters is an ordinary expression body.
            getType(context.scope, body, true, context);
        }
        else {
            // Parameterless literal: its statements are treated as the loop body itself.
            WritableScope bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = bodyScope;
            // NOTE(review): getBodyExpression() is dereferenced without a null check — confirm it cannot be null here.
            context.services.getBlockReturnedTypeWithWritableScope(bodyScope, function.getFunctionLiteral().getBodyExpression().getStatements(), context);
            context.trace.record(BindingContext.BLOCK, function);
        }
    }
    else if (body != null) {
        WritableScope bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = bodyScope;
        context.services.getBlockReturnedTypeWithWritableScope(bodyScope, Collections.singletonList(body), context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/** Creates a fresh writable child scope owned by the given scope's containing declaration. */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Types a for-loop: always Unit. The loop range must satisfy the iterator convention
 * (iterator()/hasNext/next); the loop parameter's type is either checked against or
 * inferred from the element type produced by next().
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetParameter loopParameter = expression.getLoopParameter();
    JetExpression loopRange = expression.getLoopRange();
    // Element type produced by the range's iterator; null when the range is missing or broken.
    JetType elementType = null;
    if (loopRange != null) {
        JetType rangeType = getType(context.scope, loopRange, false, context);
        if (rangeType != null) {
            elementType = checkIterableConvention(rangeType, loopRange.getNode(), context);
        }
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    if (loopParameter != null) {
        VariableDescriptor parameterDescriptor;
        JetTypeReference typeReference = loopParameter.getTypeReference();
        if (typeReference == null) {
            // No declared type: infer from the range, falling back to an error type.
            if (elementType == null) {
                elementType = ErrorUtils.createErrorType("Error");
            }
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, elementType);
        }
        else {
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType declaredType = parameterDescriptor.getOutType();
            if (elementType != null
                    && declaredType != null
                    && !semanticServices.getTypeChecker().isSubtypeOf(elementType, declaredType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + elementType + " but the parameter is declared to be " + declaredType);
            }
        }
        loopScope.addVariableDescriptor(parameterDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(loopScope, body, true, context); // TODO
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Verifies that {@code type} satisfies the for-loop iterator convention:
 * an iterator() method whose result has hasNext (function or property) and next().
 *
 * @return the element type (iterator().next()'s return type), or null when the
 *         convention is not satisfied; errors are reported on {@code reportErrorsOn}
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
    OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getFunctionDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            // Bug fix: the second candidate is the *property* 'hasNext' — spell it without
            // parentheses, consistently with the messages below, so the two are distinguishable.
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getFunctionDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
/**
 * Returns false only when no 'hasNext()' function exists on the iterator type at all.
 * Ambiguity and a non-Boolean return type are reported as errors but still count
 * as "supported" (the function is present).
 */
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult resolution = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
    if (resolution.isNothing()) {
        return false;
    }
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
    }
    else {
        JetType returnType = resolution.getFunctionDescriptor().getReturnType();
        if (!isBoolean(returnType)) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
        }
    }
    return true;
}
/**
 * Returns false only when the iterator type has no 'hasNext' property at all.
 * An unreadable or non-Boolean property is reported but still counts as "supported".
 */
private boolean checkHasNextPropertySupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) {
        return false;
    }
    JetType propertyType = property.getOutType();
    if (propertyType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(propertyType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
    }
    return true;
}
// @Override
// public void visitNewExpression(JetNewExpression expression) {
// // TODO : type argument inference
// JetTypeReference typeReference = expression.getTypeReference();
// if (typeReference != null) {
// result = checkTypeInitializerCall(scope, typeReference, expression);
// }
// }
// private void resolveCallWithExplicitName(@NotNull JetScope scope, JetExpression receiver, String functionName, List<JetType> resolvedTypeArguments, List<ValueArgumentPsi> valueArguments) {
//
// boolean someNamed = false;
// boolean allNamed = true;
// for (ValueArgumentPsi valueArgument : valueArguments) {
// if (valueArgument.isNamed()) {
// context.trace.getErrorHandler().genericError(valueArgument.asElement().getNode(), "Named arguments are not supported");
// someNamed = true;
// }
// else {
// allNamed = false;
// }
// }
// if (someNamed) {
// return; // TODO
// }
// if (someNamed && !allNamed) {
// // TODO function literals outside parentheses
// }
//
// ErrorHandlerWithRegions errorHandler = context.trace.getErrorHandler();
//
// // 1. resolve 'receiver' in 'scope' with expected type 'NO_EXPECTED_TYPE'
// errorHandler.openRegion();
// JetType receiverType = JetTypeInferrer.this.getType(scope, receiver, false, NO_EXPECTED_TYPE);
// // for each applicable function in 'receiverType'
// Set<FunctionDescriptor> allFunctions = receiverType.getMemberScope().getFunctionGroup(functionName).getFunctionDescriptors();
// Map<FunctionDescriptor, List<JetType>> applicableFunctions = Maps.newHashMap();
// int typeArgCount = resolvedTypeArguments.size();
// int valueArgCount = valueArguments.size();
// for (FunctionDescriptor functionDescriptor : allFunctions) {
// if (typeArgCount == 0 || functionDescriptor.getTypeParameters().size() == typeArgCount) {
// if (FunctionDescriptorUtil.getMinimumArity(functionDescriptor) <= valueArgCount &&
// valueArgCount <= FunctionDescriptorUtil.getMaximumArity(functionDescriptor)) {
// // get expected types for value parameters
// if (typeArgCount > 0) {
// FunctionDescriptor substitutedFunctionDescriptor = FunctionDescriptorUtil.substituteFunctionDescriptor(resolvedTypeArguments, functionDescriptor);
// }
// else {
// FunctionDescriptor substitutedFunctionDescriptor = FunctionDescriptorUtil.substituteFunctionDescriptor(TypeUtils.getDefaultTypes(functionDescriptor.getTypeParameters()), functionDescriptor);
// List<JetType> valueArgumentTypes = getArgumentTypes(substitutedFunctionDescriptor, valueArguments);
// if (valueArgumentTypes == null) {
// Map<TypeConstructor, TypeProjection> noExpectedTypes = Maps.newHashMap();
// for (TypeParameterDescriptor typeParameterDescriptor : functionDescriptor.getTypeParameters()) {
// noExpectedTypes.put(typeParameterDescriptor.getTypeConstructor(), new TypeProjection(NO_EXPECTED_TYPE));
// }
// substitutedFunctionDescriptor = functionDescriptor.substitute(TypeSubstitutor.create(noExpectedTypes));
// valueArgumentTypes = getArgumentTypes(substitutedFunctionDescriptor, valueArguments);
// }
// if (valueArgumentTypes != null) {
// List<JetType> typeArguments = solveConstraintSystem(functionDescriptor, valueArgumentTypes, expectedReturnType);
// if (typeArguments != null) {
// applicableFunctions.put(functionDescriptor, typeArguments);
// }
// }
// }
// // type-check the parameters
// // if something was found (one or many options
// errorHandler.closeAndCommitCurrentRegion();
// // otherwise
// errorHandler.closeAndReturnCurrentRegion();
//
// }
// }
// }
// // get expected types for value parameters
// // type-check the parameters
// // if something was found (one or many options
// errorHandler.closeAndCommitCurrentRegion();
// // otherwise
// errorHandler.closeAndReturnCurrentRegion();
//
// }
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    // '#'-qualified access is not implemented in this inferrer.
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Types a qualified expression (a.b, a?.b, a?b). The selector is first resolved
 * against the receiver's declared type on a throwaway trace; if that fails, the
 * receiver's auto-cast candidate types (from data-flow info) are tried in turn,
 * and only the successful attempt's bindings are committed to the real trace.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, new TypeInferenceContext(context.trace, context.scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    // Clean resolution: no autocasts
    TemporaryBindingTrace cleanResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    // ErrorHandler errorHandler = context.trace.getErrorHandler();
    // errorHandler.openRegion();
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' yields a nullable result (except Unit, which stays as-is).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        // Clean resolution succeeded: commit its bindings.
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Retry against each auto-cast candidate type of the receiver variable.
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            // Most recently learned types are tried first.
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.addAllMyDataTo(context.trace);
                    somethingFound = true;
                    break;
                }
                else {
                    // Discard this failed attempt and start a fresh throwaway trace.
                    autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // No auto-cast helped: commit the clean attempt so its errors are reported.
            cleanResolutionTrace.addAllMyDataTo(context.trace);
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        // 'a?b' is a predicate access: the selector must be Boolean, result is 'a' made nullable.
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            // Warn on '.'-access to a nullable receiver (and related null-safety issues).
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Walks a selector expression down to the reference actually being invoked and
 * returns its resolved FunctionDescriptor. Falls back to an error descriptor when
 * no function target can be found, so the result is never null.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    final FunctionDescriptor[] callee = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // Terminal case: pick up the resolved target if it is a function.
            DeclarationDescriptor target = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, referenceExpression);
            if (target instanceof FunctionDescriptor) {
                callee[0] = (FunctionDescriptor) target;
            }
        }
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            expression.getLeft().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (callee[0] == null) {
        callee[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return callee[0];
}
/**
 * Resolves a call expression (with an optional explicit receiver type) and returns
 * its result type, or null when the callee is syntactically missing.
 */
private JetType getCallExpressionType(@Nullable JetType receiverType, @NotNull JetCallExpression callExpression, TypeInferenceContext context) {
    JetExpression callee = callExpression.getCalleeExpression();
    if (callee == null) {
        return null;
    }
    OverloadDomain domain = context.services.getOverloadDomain(receiverType, context.scope, callee, callExpression.getValueArgumentList());
    return context.services.resolveCall(context.scope, domain, callExpression);
}
/**
 * Types the selector of a qualified expression against the given receiver type.
 * Supports calls and simple names; anything else is reported as unsupported.
 */
private JetType getSelectorReturnType(JetType receiverType, JetExpression selectorExpression, TypeInferenceContext context) {
    if (selectorExpression == null) {
        return null;
    }
    if (selectorExpression instanceof JetCallExpression) {
        return getCallExpressionType(receiverType, (JetCallExpression) selectorExpression, context);
    }
    if (selectorExpression instanceof JetSimpleNameExpression) {
        // Resolve the name as a member by layering the receiver's members over the outer scope.
        JetScope scopeWithReceiver = new ScopeWithReceiver(context.scope, receiverType, semanticServices.getTypeChecker());
        return getType(scopeWithReceiver, selectorExpression, false, context);
    }
    // TODO : not a simple name -> resolve in scope, expect property type or a function type
    context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    // A call with no explicit receiver: resolve against the enclosing scope.
    JetType callType = getCallExpressionType(null, expression, context);
    return context.services.checkType(callType, expression, context);
}
/**
 * Types an 'is' expression: always Boolean. The real work is checking the pattern
 * against the subject's type and recording the resulting data-flow info and bound
 * variables for surrounding conditions to pick up.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType subjectType = getType(context.scope, expression.getLeftHandSide(), false, context);
    JetPattern pattern = expression.getPattern();
    if (subjectType != null && pattern != null) {
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        VariableDescriptor subjectVariable = context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context);
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariable);
        // Stash the results; extractDataFlowInfoFromCondition consumes these maps.
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types a unary expression by resolving the corresponding convention function on
 * the operand. Labels pass the expected return type straight through; ++/-- are
 * additionally required to produce a value assignable back into the operand.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (JetTokens.LABELS.contains(operationType)) {
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(context.scope, baseExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(context.scope, expression.getOperationSign(), name, receiverType, Collections.<JetType>emptyList(), true);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    if (operationType != JetTokens.PLUSPLUS && operationType != JetTokens.MINUSMINUS) {
        return context.services.checkType(returnType, expression, context);
    }
    // ++/--: the convention function's result is written back into the operand.
    JetType result;
    if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
        result = JetStandardClasses.getUnitType();
    }
    else if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
        result = receiverType; // TODO : Maybe returnType?
    }
    else {
        context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
        result = receiverType; // TODO : Maybe returnType?
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetSimpleNameExpression operationSign = expression.getOperationReference();
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
JetType result = null;
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.IDENTIFIER) {
String referencedName = operationSign.getReferencedName();
if (referencedName != null) {
result = getTypeForBinaryCall(expression, referencedName, context.scope, true, context);
}
}
else if (binaryOperationNames.containsKey(operationType)) {
result = getTypeForBinaryCall(expression, binaryOperationNames.get(operationType), context.scope, true, context);
}
else if (operationType == JetTokens.EQ) {
result = visitAssignment(expression, context);
}
else if (assignmentOperationNames.containsKey(operationType)) {
result = visitAssignmentOperation(expression, context);
}
else if (comparisonOperations.contains(operationType)) {
JetType compareToReturnType = getTypeForBinaryCall(expression, "compareTo", context.scope, true, context);
if (compareToReturnType != null) {
TypeConstructor constructor = compareToReturnType.getConstructor();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
if (constructor.equals(intTypeConstructor)) {
result = standardLibrary.getBooleanType();
} else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
}
}
}
else if (equalsOperations.contains(operationType)) {
String name = "equals";
if (right != null) {
JetType leftType = getType(context.scope, left, false, context);
if (leftType != null) {
JetType rightType = getType(context.scope, right, false, context);
if (rightType != null) {
FunctionDescriptor equals = context.services.lookupFunction(
context.scope, operationSign, "equals",
leftType, Collections.singletonList(JetStandardClasses.getNullableAnyType()), false);
if (equals != null) {
if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
}
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
}
}
}
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
// TODO : Check comparison pointlessness
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (inOperations.contains(operationType)) {
if (right == null) {
result = ErrorUtils.createErrorType("No right argument"); // TODO
return null;
}
checkInExpression(operationSign, left, right, context);
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
JetType leftType = getType(context.scope, left, false, context);
WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
JetType rightType = right == null ? null : getTypeWithNewDataFlowInfo(rightScope, right, false, flowInfoLeft, context);
if (leftType != null && !isBoolean(leftType)) {
context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
}
if (rightType != null && !isBoolean(rightType)) {
context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ELVIS) {
JetType leftType = getType(context.scope, left, false, context);
JetType rightType = right == null ? null : getType(context.scope, right, false, contextWithExpectedType);
if (leftType != null) {
if (!leftType.isNullable()) {
context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) is always returns the left operand of non-nullable type " + leftType);
}
if (rightType != null) {
context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
}
}
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Type-checks an 'in'/'!in' expression. 'a in b' is sugar for 'b.contains(a)',
 * so the right-hand operand acts as the receiver of the call, and the result
 * must be Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    final String operationName = "contains";
    JetType containsReturnType = getTypeForBinaryCall(context.scope, right, operationSign, left, operationName, true, context);
    ensureBooleanResult(operationSign, operationName, containsReturnType, context);
}
/**
 * For an equality-style comparison, reports an error when the two operand
 * types have an empty intersection, i.e. no value could inhabit both sides,
 * which makes the comparison pointless.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression leftOperand = expression.getLeft();
    JetExpression rightOperand = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftOperandType = getType(context.scope, leftOperand, false, context);
    if (leftOperandType == null || rightOperand == null) {
        return;
    }
    JetType rightOperandType = getType(context.scope, rightOperand, false, context);
    if (rightOperandType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftOperandType, rightOperandType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftOperandType + " and " + rightOperandType);
    }
}
// Base handling for compound assignments ('+=' etc.): in expression position
// assignments are illegal, so report. Overridden in
// TypeInferrerVisitorWithWritableScope, where assignments are statements.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Base handling for plain '=' in expression position: always an error here.
// Overridden in TypeInferrerVisitorWithWritableScope for statement position.
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Reports the standard "assignment used as an expression" error and yields no
// type (null).
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
// Convenience overload: quotes the operation name for the diagnostic subject.
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Verifies that {@code resultType}, when known, is convertible to Boolean;
 * reports an error naming {@code subjectName} otherwise.
 *
 * @return false only when the type is known and is not Boolean; an unknown
 *         (null) type is treated as OK, since its error was reported earlier
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True when {@code type} is convertible to the standard library's Boolean. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Infers the type of each index expression with no expected type imposed.
 *
 * @return the inferred types in order, or null as soon as any expression
 *         fails to type-check (its error is already reported)
 */
@Nullable
protected List<JetType> getTypes(JetScope scope, List<JetExpression> indexExpressions, TypeInferenceContext context) {
    TypeInferenceContext indexContext = new TypeInferenceContext(context.trace, scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    List<JetType> inferredTypes = new ArrayList<JetType>(indexExpressions.size());
    for (JetExpression indexExpression : indexExpressions) {
        JetType indexType = context.services.typeInferrerVisitor.getType(indexExpression, indexContext);
        if (indexType == null) {
            return null;
        }
        inferredTypes.add(indexType);
        context.services.typeInferrerVisitor.resetResult(); // TODO : recreate?
    }
    return inferredTypes;
}
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    // 'a[i, ...]' is resolved as a call to 'get' on the array expression's type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType receiverType = getType(context.scope, expression.getArrayExpression(), false, context);
    List<JetType> indexTypes = getTypes(context.scope, expression.getIndexExpressions(), context);
    if (indexTypes == null || receiverType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.lookupFunction(context.scope, expression, "get", receiverType, indexTypes, true);
    if (getFunction == null) {
        return null;
    }
    // checkNullSafety(receiverType, expression.getIndexExpressions().get(0).getNode(), getFunction);
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves the binary expression 'l op r' as the call 'l.name(r)'.
 *
 * @return the call's return type, or null when the right operand is missing
 *         or resolution fails
 */
@Nullable
protected JetType getTypeForBinaryCall(
        @NotNull JetBinaryExpression expression,
        @NotNull String name,
        @NotNull JetScope scope,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetExpression rightOperand = expression.getRight();
    if (rightOperand == null) {
        return null;
    }
    return getTypeForBinaryCall(scope, expression.getLeft(), expression.getOperationReference(), rightOperand, name, reportUnresolved, context);
}
/**
 * Resolves the dot-qualified counterpart 'left.name(right)' of a binary
 * operation and returns its type. A plain infix call is not allowed on a
 * nullable receiver, so when the left operand's type is nullable an error is
 * reported (the function's return type is still returned so inference can
 * proceed).
 *
 * @return the resolved function's return type, or null when either operand
 *         fails to type-check or resolution fails
 */
@Nullable
private JetType getTypeForBinaryCall(
        @NotNull JetScope scope,
        @NotNull JetExpression left,
        @NotNull JetSimpleNameExpression operationSign,
        @NotNull JetExpression right,
        @NotNull String name,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetType leftType = getType(scope, left, false, context);
    JetType rightType = getType(scope, right, false, context);
    if (leftType == null || rightType == null) {
        return null;
    }
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, name, leftType, Collections.singletonList(rightType), reportUnresolved);
    if (functionDescriptor != null) {
        if (leftType.isNullable()) {
            // TODO : better error message for '1 + nullableVar' case
            // BUG FIX: the nullable receiver is the LEFT operand; the message
            // previously quoted the right operand's text in the receiver slot.
            context.trace.getErrorHandler().genericError(operationSign.getNode(),
                    "Infix call corresponds to a dot-qualified call '" +
                    left.getText() + "." + name + "(" + right.getText() + ")'" +
                    " which is not allowed on a nullable receiver '" + left.getText() + "'." +
                    " Use '?.'-qualified call instead");
        }
        return functionDescriptor.getReturnType();
    }
    return null;
}
// Declarations (classes, functions, properties, ...) carry no value and are
// rejected in expression position by this base visitor.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
// In plain expression position a namespace reference is an error; overridden
// in TypeInferrerVisitorWithNamespaces, where it resolves to a namespace type.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
// Types a string template. Each entry is checked via the anonymous visitor;
// 'builder' accumulates the literal text so a compile-time String constant
// can be recorded, and value[0] is set to the OUT_OF_RANGE sentinel as soon
// as any entry (an embedded expression, or an illegal escape) makes the value
// non-constant. The result type is always the standard library's String.
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
// Still type-check the embedded expression for diagnostics.
getType(context.scope, entryExpression, true, context);
}
// An embedded expression makes the whole template non-constant.
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
// Identity comparison with the sentinel is intentional here.
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
// Fallback for element kinds this visitor has no dedicated rule for.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
/**
 * A TypeInferrerVisitor that additionally accepts namespace references in
 * expression position: name lookup falls back to namespaces, and the
 * root-namespace expression resolves to the root namespace type instead of
 * being an error.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
@Override
public boolean isNamespacePosition() {
return true;
}
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
}
// Secondary lookup: try resolving the name as a namespace; success is
// signaled by writing a non-null type into result[0].
@Override
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
result[0] = lookupNamespaceType(expression, referencedName, context);
return result[0] != null;
}
}
/**
 * A TypeInferrerVisitor for statement (block) position: declarations and
 * assignments are legal here. Local declarations are resolved and their
 * descriptors added to the writable scope as a side effect; most visit
 * methods return null because statements have no value.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// The block's scope; local declarations encountered below are added to it.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
// Analyzes a local object declaration and, when its class descriptor was
// recorded, exposes the object as a variable in the enclosing scope.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// Resolves a local property: rejects receiver/getter/setter forms (illegal
// for locals), checks the initializer against the declared type when both
// are present, then registers the variable in the scope.
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
JetType outType = propertyDescriptor.getOutType();
JetType initializerType = getType(scope, initializer, false, context.replaceExpectedType(outType));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
// Resolves a local function, registers it in the scope, and checks its body
// against its declared return type.
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
// Compound assignment ('+='): first try the dedicated 'plusAssign'-style
// function; when absent, fall back to the plain binary counterpart ('plus')
// and record that the variable is reassigned.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
JetType assignmentOperationType = getTypeForBinaryCall(expression, name, scope, false, context);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(expression, counterpartName, scope, true, context);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
return null;
}
// Plain '=': 'a[i] = x' is rerouted to a 'set' call; otherwise the left side
// is typed with no expectation and the right side is checked against it.
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(scope, left, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (right != null) {
JetType rightType = getType(scope, right, false, context.replaceExpectedType(leftType));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
// Resolves 'a[i, ...] = rhs' as 'a.set(i, ..., rhs)'. The index types plus
// the right-hand side's type form the argument list for the 'set' lookup.
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
List<JetType> argumentTypes = getTypes(scope, arrayAccessExpression.getIndexExpressions(), context);
if (argumentTypes == null) return null;
JetType rhsType = getType(scope, rightHandSide, false, context);
if (rhsType == null) return null;
argumentTypes.add(rhsType);
JetType receiverType = getType(scope, arrayAccessExpression.getArrayExpression(), false, context);
if (receiverType == null) return null;
// TODO : nasty hack: effort is duplicated
context.services.lookupFunction(scope, arrayAccessExpression, "set", receiverType, argumentTypes, true);
FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, "set", receiverType, argumentTypes, true);
if (functionDescriptor == null) return null;
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, contextWithExpectedType);
}
// Fallback for element kinds without a dedicated rule in block position.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElement;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lang.types.inference.ConstraintSystem;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.STATEMENT;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel JetType marking contexts where an expected type must never be
// consulted: it is compared by identity, and every method throws so any
// accidental inspection fails fast.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
// Sentinel JetType meaning "no expected type constraint". Like FORBIDDEN it
// is an identity-compared marker whose methods all throw; it must never be
// used as a real type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
// Unary operator tokens to the convention method names they desugar to
// (e.g. '!a' => 'a.not()').
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Binary operator tokens to their convention method names
// (e.g. 'a * b' => 'a.times(b)').
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Operators resolved through 'compareTo' (<, >, <=, >=).
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// Operators resolved through 'equals' (==, !=).
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// Operators resolved through 'contains' ('in', '!in').
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Compound assignments to their dedicated convention names
// (e.g. 'a += b' => 'a.plusAssign(b)').
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Fallback mapping: each compound assignment's plain binary counterpart
// ('+=' -> '+'), used when no 'xAssign' function is found.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern caches. NOTE(review): not visibly read in this chunk —
// presumably filled/consumed by pattern-matching code elsewhere; confirm.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * @param flowInformationProvider source of control/data-flow facts
 * @param semanticServices access to resolvers, the standard library and type checker
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
// Entry point: binds this inferrer to a trace, producing a per-trace facade.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
// The trace this Services instance records bindings and diagnostics into.
private final BindingTrace trace;
private final ClassDescriptorResolver classDescriptorResolver;
private final TypeResolver typeResolver;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final AnnotationResolver annotationResolver;
// Shared inference visitors; typeInferrerVisitor is reset between uses via
// resetResult() (see getTypes).
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
/** Wires up the per-trace resolvers and the shared inference visitors. */
private Services(BindingTrace trace) {
    this.trace = trace;
    this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
    this.typeResolver = new TypeResolver(semanticServices, trace, true);
    this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
    this.annotationResolver = new AnnotationResolver(semanticServices, trace);
    this.typeInferrerVisitor = new TypeInferrerVisitor();
    this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
}
// Creates a block-position visitor whose local declarations go into 'scope'.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Like {@link #getType} but never returns null: inference failures yield an
 * error type instead (the failure's diagnostics were already reported).
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
    JetType inferredType = getType(scope, expression, preferBlock, expectedType);
    return inferredType != null
           ? inferredType
           : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the expression's type against 'expectedType', starting from empty
// data-flow info; FORBIDDEN marks the slot that must not be consulted.
// Returns null when inference fails (errors already reported on the trace).
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Variant of getType that also accepts namespace references in expression
// position; no expected type is imposed.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock) {
return typeInferrerVisitorWithNamespaces.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
/**
 * Looks up function 'name' on 'receiverType' with positional arguments,
 * recording the reference target and (optionally) unresolved/mismatch errors
 * through the tracing wrapper.
 *
 * @return the resolved descriptor, or null when resolution did not succeed
 */
@Nullable
private FunctionDescriptor lookupFunction(
        @NotNull JetScope scope,
        @NotNull JetReferenceExpression reference,
        @NotNull String name,
        @NotNull JetType receiverType,
        @NotNull List<JetType> argumentTypes,
        boolean reportUnresolved) {
    OverloadDomain tracedDomain = wrapForTracing(
            semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name),
            reference, null, reportUnresolved);
    // No generics. Guaranteed
    OverloadResolutionResult resolution = tracedDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), argumentTypes);
    if (!resolution.isSuccess()) {
        return null;
    }
    return resolution.getFunctionDescriptor();
}
// Resolves a zero-argument function 'name' on 'receiverType', without any
// tracing wrapper; the raw resolution result is returned to the caller.
@NotNull
private OverloadResolutionResult resolveNoParametersFunction(@NotNull JetType receiverType, @NotNull JetScope scope, @NotNull String name) {
OverloadDomain overloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
// No generics. Guaranteed
return overloadDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), Collections.<JetType>emptyList());
}
// Builds an overload-resolution domain for a call's callee expression.
// Currently only simple-name callees actually produce a domain (result[0])
// and a reference (reference[0]); other callee shapes either throw (TODO) or
// report an error. NOTE(review): when the visitor leaves result[0] null,
// wrapForTracing is still called with it despite its @NotNull parameter —
// confirm callers cannot reach that path.
private OverloadDomain getOverloadDomain(
@Nullable final JetType receiverType,
@NotNull final JetScope scope,
@NotNull JetExpression calleeExpression,
@Nullable PsiElement argumentList
) {
final OverloadDomain[] result = new OverloadDomain[1];
final JetSimpleNameExpression[] reference = new JetSimpleNameExpression[1];
calleeExpression.accept(new JetVisitorVoid() {
@Override
public void visitHashQualifiedExpression(JetHashQualifiedExpression expression) {
// a#b -- create a domain for all overloads of b in a
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitPredicateExpression(JetPredicateExpression expression) {
// overload lookup for checking, but the type is receiver's type + nullable
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
trace.getErrorHandler().genericError(expression.getNode(), "Unsupported [JetTypeInferrer]");
// . or ?.
// JetType receiverType = getType(scope, expression.getReceiverExpression(), false);
// checkNullSafety(receiverType, expression.getOperationTokenNode());
//
// JetExpression selectorExpression = expression.getSelectorExpression();
// if (selectorExpression instanceof JetSimpleNameExpression) {
// JetSimpleNameExpression referenceExpression = (JetSimpleNameExpression) selectorExpression;
// String referencedName = referenceExpression.getReferencedName();
//
// if (receiverType != null && referencedName != null) {
// // No generics. Guaranteed
// result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
// reference[0] = referenceExpression;
// }
// } else {
// throw new UnsupportedOperationException(); // TODO
// }
}
@Override
public void visitSimpleNameExpression(JetSimpleNameExpression expression) {
// a -- create a hierarchical lookup domain for this.a
String referencedName = expression.getReferencedName();
if (referencedName != null) {
// No generics. Guaranteed
result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
reference[0] = expression;
}
}
@Override
public void visitExpression(JetExpression expression) {
// <e> create a dummy domain for the type of e
throw new UnsupportedOperationException(expression.getText()); // TODO
}
@Override
public void visitJetElement(JetElement element) {
trace.getErrorHandler().genericError(element.getNode(), "Unsupported in call element"); // TODO : Message
}
});
return wrapForTracing(result[0], reference[0], argumentList, true);
}
/**
 * Validates '.' versus '?.' against the receiver's nullability:
 * a plain '.' on a nullable receiver the callee does not accept is an error,
 * while '?.' on a namespace is an error and on a non-null receiver a warning.
 * Does nothing when the receiver type or callee is unknown.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) {
        return;
    }
    boolean receiverIsNamespace = receiverType instanceof NamespaceType;
    JetType declaredReceiverType = callee.getReceiverType();
    // Namespaces are never null, so they are exempt from null-safety checks.
    boolean receiverMayBeNull = !receiverIsNamespace && receiverType.isNullable();
    boolean calleeRejectsNull = declaredReceiverType == null || !declaredReceiverType.isNullable();
    IElementType callOperation = operationTokenNode.getElementType();
    if (receiverMayBeNull && calleeRejectsNull && callOperation == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if ((!receiverMayBeNull || !calleeRejectsNull) && callOperation == JetTokens.SAFE_ACCESS) {
        if (receiverIsNamespace) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
// Decorates an OverloadDomain so every resolution records the reference
// target on the trace and, when requested, reports unresolved-name,
// argument-mismatch and ambiguity diagnostics at the call site.
private OverloadDomain wrapForTracing(
@NotNull final OverloadDomain overloadDomain,
@NotNull final JetReferenceExpression referenceExpression,
@Nullable final PsiElement argumentList,
final boolean reportErrors) {
return new OverloadDomain() {
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForNamedArguments(@NotNull List<JetType> typeArguments, @NotNull Map<String, JetType> valueArgumentTypes, @Nullable JetType functionLiteralArgumentType) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForNamedArguments(typeArguments, valueArgumentTypes, functionLiteralArgumentType);
report(resolutionResult);
return resolutionResult;
}
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForPositionedArguments(@NotNull List<JetType> typeArguments, @NotNull List<JetType> positionedValueArgumentTypes) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, positionedValueArgumentTypes);
report(resolutionResult);
return resolutionResult;
}
// Records the reference target (even for a single-candidate mismatch, so
// navigation works) and emits diagnostics per result code.
private void report(OverloadResolutionResult resolutionResult) {
if (resolutionResult.isSuccess() || resolutionResult.singleFunction()) {
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, resolutionResult.getFunctionDescriptor());
}
if (reportErrors) {
switch (resolutionResult.getResultCode()) {
case NAME_NOT_FOUND:
trace.getErrorHandler().unresolvedReference(referenceExpression);
break;
case SINGLE_FUNCTION_ARGUMENT_MISMATCH:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Arguments do not match " + DescriptorRenderer.TEXT.render(resolutionResult.getFunctionDescriptor()));
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
case AMBIGUITY:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Overload ambiguity [TODO : more helpful message]");
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
default:
// Not a success
}
}
}
@Override
public boolean isEmpty() {
return overloadDomain.isEmpty();
}
};
}
/**
 * Checks the body of {@code function} against the return type declared in
 * {@code functionDescriptor}, starting from an empty data-flow state.
 */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
    DataFlowInfo emptyFlowInfo = DataFlowInfo.getEmpty();
    checkFunctionReturnType(outerScope, function, functionDescriptor, emptyFlowInfo);
}
/**
 * Checks the body of {@code function} against its expected return type.
 *
 * When the function has an expression body and no declared return type, the return
 * type is being inferred, so no expectation is imposed ({@code NO_EXPECTED_TYPE}).
 *
 * (A long block of commented-out, dead checking code was removed here; the live
 * logic lives in the four-argument {@code checkFunctionReturnType} overload below.)
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body with no declared type: nothing to check the body against.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo);
}
/**
 * Checks the body of {@code function} (resolved in {@code functionInnerScope})
 * against an explicitly supplied expected return type, with empty data-flow facts.
 */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
    DataFlowInfo emptyFlowInfo = DataFlowInfo.getEmpty();
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, emptyFlowInfo);
}
/**
 * Type-checks a function body and reports unreachable code, a missing return value,
 * and misplaced 'return' expressions.
 *
 * For a block body the body itself carries no expected type (each returned expression
 * is checked against {@code expectedReturnType} instead); for an expression body the
 * body is checked directly against {@code expectedReturnType} and 'return' is
 * forbidden (FORBIDDEN).
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    final boolean blockBody = function.hasBlockBody();
    final TypeInferenceContext context =
            blockBody
            ? new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
            : new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, expectedReturnType, FORBIDDEN);
    typeInferrerVisitor.getType(bodyExpression, context);
    List<JetElement> unreachableElements = Lists.newArrayList();
    flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
    // This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
    final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
    // TODO : (return 1) || (return 2) -- only || and right of it is unreachable
    // TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
    // though it'd better be reported more specifically
    for (JetElement element : rootUnreachableElements) {
        trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
    }
    List<JetExpression> returnedExpressions = Lists.newArrayList();
    flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
    // Computed BEFORE removing the function element below: the distinction is between
    // "nothing was ever returned" and "only the empty-body placeholder was returned".
    boolean nothingReturned = returnedExpressions.isEmpty();
    returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
    if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
        trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
    }
    for (JetExpression returnedExpression : returnedExpressions) {
        returnedExpression.accept(new JetVisitorVoid() {
            @Override
            public void visitReturnExpression(JetReturnExpression expression) {
                // 'return' is only legal inside a block body.
                if (!blockBody) {
                    trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
                }
            }
            @Override
            public void visitExpression(JetExpression expression) {
                // In a non-Unit block body, a reachable returned expression must either be
                // an explicit 'return' or have type Nothing (never completes normally).
                if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
                    //TODO move to pseudocode
                    JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
                    if (type == null || !JetStandardClasses.isNothing(type)) {
                        trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
                    }
                }
            }
        });
    }
}
/**
 * Infers a function's return type from the types of all returned expressions:
 * their common supertype, or Nothing when the function never returns a value.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Runs type inference over the function body, then maps every returned expression to
 * its inferred type; elements that implicitly return Unit are mapped to the Unit type.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
        @NotNull BindingTrace trace,
        JetScope outerScope,
        JetDeclarationWithBody function,
        FunctionDescriptor functionDescriptor) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    // Populate EXPRESSION_TYPE for the whole body first.
    typeInferrerVisitor.getType(bodyExpression, new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
    Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
    Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
    flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
    Map<JetElement, JetType> resultTypes = new HashMap<JetElement, JetType>();
    for (JetExpression returned : returnedExpressions) {
        JetType inferredType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returned);
        // A returned expression is used as a value, not as a statement.
        trace.record(STATEMENT, returned, false);
        if (inferredType != null) {
            resultTypes.put(returned, inferredType);
        }
    }
    for (JetElement unitElement : elementsReturningUnit) {
        resultTypes.put(unitElement, JetStandardClasses.getUnitType());
    }
    return resultTypes;
}
/**
 * Computes the type of a statement block: the type of its last statement, or Unit for
 * an empty block. Statements are checked inside a writable scope so declarations made
 * by earlier statements are visible to later ones, and data-flow facts produced by a
 * statement are threaded into the context used for the following statements.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, TypeInferenceContext context) {
    if (block.isEmpty()) {
        return JetStandardClasses.getUnitType();
    }
    TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
    // Intermediate statements carry no expected type; only the last one may (see below).
    TypeInferenceContext newContext = new TypeInferenceContext(trace, scope, true, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
    JetType result = null;
    for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
        JetElement statement = iterator.next();
        trace.record(STATEMENT, statement);
        JetExpression statementExpression = (JetExpression) statement;
        //TODO constructor assert context.expectedType != FORBIDDEN : ""
        // The last statement determines the block's value, so it alone is checked
        // against the caller's expected type.
        if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
            newContext = new TypeInferenceContext(trace, scope, true, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
        }
        result = blockLevelVisitor.getType(statementExpression, newContext);
        DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
        if (newDataFlowInfo == null) {
            newDataFlowInfo = context.dataFlowInfo;
        }
        // Only rebuild the context when this statement actually changed the facts.
        if (newDataFlowInfo != context.dataFlowInfo) {
            newContext = new TypeInferenceContext(trace, scope, true, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
        }
        blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
    }
    return result;
}
/**
 * Resolves a call written WITHOUT explicit type arguments by inferring them from the
 * value-argument types (and the expected type, when one is imposed) via a constraint
 * system, then substituting the solution into the candidate's return type.
 *
 * Prototype limitations visible in the code: the callee must be a simple name with
 * exactly one candidate, named arguments are not supported, and explicit type
 * arguments are not handled by this overload.
 *
 * Fixed: removed leftover System.out.println debug statements that polluted stdout,
 * and deleted dead commented-out code.
 *
 * @return the substituted return type, or null when constraint solving fails
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull JetCall call,
        @NotNull JetType expectedType
) {
    if (call.getTypeArguments().isEmpty()) {
        JetExpression calleeExpression = call.getCalleeExpression();
        Collection<FunctionDescriptor> candidates;
        if (calleeExpression instanceof JetSimpleNameExpression) {
            JetSimpleNameExpression expression = (JetSimpleNameExpression) calleeExpression;
            candidates = scope.getFunctionGroup(expression.getReferencedName()).getFunctionDescriptors();
        }
        else {
            throw new UnsupportedOperationException("Type argument inference not implemented");
        }
        assert candidates.size() == 1;
        FunctionDescriptor candidate = candidates.iterator().next();
        // NOTE(review): the call has no explicit type arguments in this branch, so this
        // assert only holds for non-generic candidates — verify the intended invariant.
        assert candidate.getTypeParameters().size() == call.getTypeArguments().size();
        ConstraintSystem constraintSystem = new ConstraintSystem();
        for (TypeParameterDescriptor typeParameterDescriptor : candidate.getTypeParameters()) {
            constraintSystem.registerTypeVariable(typeParameterDescriptor, Variance.INVARIANT); // TODO
        }
        // Each argument's type must be a subtype of its parameter's type.
        Iterator<ValueParameterDescriptor> parameters = candidate.getValueParameters().iterator();
        for (JetValueArgument valueArgument : call.getValueArguments()) {
            assert !valueArgument.isNamed();
            ValueParameterDescriptor valueParameterDescriptor = parameters.next();
            JetExpression expression = valueArgument.getArgumentExpression();
            JetType type = getType(scope, expression, false, NO_EXPECTED_TYPE);
            constraintSystem.addSubtypingConstraint(type, valueParameterDescriptor.getOutType());
        }
        // The result must also conform to the caller's expected type, when present.
        if (expectedType != NO_EXPECTED_TYPE) {
            constraintSystem.addSubtypingConstraint(candidate.getReturnType(), expectedType);
        }
        ConstraintSystem.Solution solution = constraintSystem.solve();
        if (!solution.isSuccessful()) {
            trace.getErrorHandler().genericError(calleeExpression.getNode(), "Type inference failed");
            return null;
        }
        return solution.getSubstitutor().substitute(candidate.getReturnType(), Variance.INVARIANT); // TODO
    }
    else {
        throw new UnsupportedOperationException("Explicit type arguments not implemented");
    }
}
/**
 * Resolves a call whose type arguments (if any) are written explicitly: rejects
 * projections on them, resolves each projection to a type, and delegates to
 * resolveCallWithTypeArguments.
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull OverloadDomain overloadDomain,
        @NotNull JetCall call) {
    // 1) ends with a name -> (scope, name) to look up
    // 2) ends with something else -> just check types
    final List<JetTypeProjection> projections = call.getTypeArguments();
    // First pass: diagnostics for illegal projections.
    for (JetTypeProjection projection : projections) {
        if (projection.getProjectionKind() != JetProjectionKind.NONE) {
            trace.getErrorHandler().genericError(projection.getNode(), "Projections are not allowed on type parameters for methods"); // TODO : better positioning
        }
    }
    // Second pass: resolve the written types.
    List<JetType> resolvedTypeArguments = new ArrayList<JetType>();
    for (JetTypeProjection projection : projections) {
        // TODO : check that there's no projection
        JetTypeReference typeReference = projection.getTypeReference();
        if (typeReference != null) {
            resolvedTypeArguments.add(new TypeResolver(semanticServices, trace, true).resolveType(scope, typeReference));
        }
    }
    return resolveCallWithTypeArguments(scope, overloadDomain, call, resolvedTypeArguments);
}
/**
 * Resolves a call against an overload domain using already-resolved type arguments.
 * Named arguments are rejected; positional value arguments (plus a trailing function
 * literal, if any) are typed and handed to the overload domain.
 *
 * @return the resolved function's return type, or null on failure
 */
private JetType resolveCallWithTypeArguments(JetScope scope, OverloadDomain overloadDomain, JetCall call, List<JetType> typeArguments) {
    List<JetValueArgument> valueArguments = call.getValueArguments();
    boolean hasNamedArgument = false;
    for (JetValueArgument argument : valueArguments) {
        if (argument.isNamed()) {
            hasNamedArgument = true;
            break;
        }
    }
    List<JetExpression> functionLiteralArguments = call.getFunctionLiteralArguments();
    // TODO : must be a check
    assert functionLiteralArguments.size() <= 1;
    if (hasNamedArgument) {
        // TODO : check that all are named
        trace.getErrorHandler().genericError(call.asElement().getNode(), "Named arguments are not supported"); // TODO
        return null;
    }
    // Collect all arguments in positional order, trailing function literal last.
    List<JetExpression> positionedArguments = new ArrayList<JetExpression>();
    for (JetValueArgument argument : valueArguments) {
        JetExpression argumentExpression = argument.getArgumentExpression();
        if (argumentExpression != null) {
            positionedArguments.add(argumentExpression);
        }
    }
    positionedArguments.addAll(functionLiteralArguments);
    List<JetType> argumentTypes = new ArrayList<JetType>();
    for (JetExpression argument : positionedArguments) {
        argumentTypes.add(safeGetType(scope, argument, false, NO_EXPECTED_TYPE)); // TODO
    }
    OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, argumentTypes);
    if (!resolutionResult.isSuccess()) {
        return null;
    }
    FunctionDescriptor functionDescriptor = resolutionResult.getFunctionDescriptor();
    checkGenericBoundsInAFunctionCall(call.getTypeArguments(), typeArguments, functionDescriptor);
    return functionDescriptor.getReturnType();
}
/**
 * Verifies that each explicit type argument of a call satisfies the (substituted)
 * bounds of the corresponding type parameter of the called function.
 */
private void checkGenericBoundsInAFunctionCall(List<JetTypeProjection> jetTypeArguments, List<JetType> typeArguments, FunctionDescriptor functionDescriptor) {
    List<TypeParameterDescriptor> typeParameters = functionDescriptor.getOriginal().getTypeParameters();
    // Build the substitution: type parameter -> supplied type argument.
    Map<TypeConstructor, TypeProjection> substitutionContext = Maps.newHashMap();
    for (int i = 0; i < typeParameters.size(); i++) {
        substitutionContext.put(typeParameters.get(i).getTypeConstructor(), new TypeProjection(typeArguments.get(i)));
    }
    TypeSubstitutor substitutor = TypeSubstitutor.create(substitutionContext);
    // Check every written argument against its parameter's substituted bounds.
    for (int i = 0; i < typeParameters.size(); i++) {
        JetTypeReference typeReference = jetTypeArguments.get(i).getTypeReference();
        assert typeReference != null;
        classDescriptorResolver.checkBounds(typeReference, typeArguments.get(i), typeParameters.get(i), substitutor);
    }
}
/**
 * Checks a constructor invocation written through a type reference (e.g. a supertype
 * initializer). Only user-written types (JetUserType) resolving to ordinary classes
 * are supported; projections in constructor type arguments are rejected.
 *
 * Fixed: replaced the unchecked ((JetElement) call) cast with call.asElement(),
 * consistent with every other call site in this file.
 *
 * @return the constructed type, or null when the call cannot be checked
 */
@Nullable
public JetType checkTypeInitializerCall(JetScope scope, @NotNull JetTypeReference typeReference, @NotNull JetCall call) {
    JetTypeElement typeElement = typeReference.getTypeElement();
    if (typeElement instanceof JetUserType) {
        JetUserType userType = (JetUserType) typeElement;
        // TODO : to infer constructor parameters, one will need to
        // 1) resolve a _class_ from the typeReference
        // 2) rely on the overload domain of constructors of this class to infer type arguments
        // For now we assume that the type arguments are provided, and thus the typeReference can be
        // resolved into a valid type
        JetType receiverType = typeResolver.resolveType(scope, typeReference);
        DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
        if (declarationDescriptor instanceof ClassDescriptor) {
            ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
            for (JetTypeProjection typeProjection : userType.getTypeArguments()) {
                switch (typeProjection.getProjectionKind()) {
                    case IN:
                    case OUT:
                    case STAR:
                        // TODO : Bug in the editor
                        trace.getErrorHandler().genericError(typeProjection.getProjectionNode(), "Projections are not allowed in constructor type arguments");
                        break;
                    case NONE:
                        break;
                }
            }
            JetSimpleNameExpression referenceExpression = userType.getReferenceExpression();
            if (referenceExpression != null) {
                return checkClassConstructorCall(scope, referenceExpression, classDescriptor, receiverType, call);
            }
        }
        else {
            trace.getErrorHandler().genericError(call.asElement().getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : review the message
        }
        return null;
    }
    else {
        if (typeElement != null) {
            trace.getErrorHandler().genericError(typeElement.getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : Better message
        }
    }
    return null;
}
/**
 * Resolves a constructor call for {@code classDescriptor} and returns the constructed
 * type. For this(...) delegator calls the type arguments are taken from the receiver
 * type (projections stripped); otherwise they are taken from the call itself. When no
 * overload matches, an error is reported and {@code receiverType} is returned so that
 * downstream checking can proceed.
 */
@Nullable
public JetType checkClassConstructorCall(
        @NotNull JetScope scope,
        @NotNull JetReferenceExpression referenceExpression,
        @NotNull ClassDescriptor classDescriptor,
        @NotNull JetType receiverType,
        @NotNull JetCall call) {
    // When one writes 'new Array<in T>(...)' this does not make much sense, and an instance
    // of 'Array<T>' must be created anyway.
    // Thus, we should either prohibit projections in type arguments in such contexts,
    // or treat them as an automatic upcast to the desired type, i.e. for the user not
    // to be forced to write
    // val a : Array<in T> = new Array<T>(...)
    // NOTE: Array may be a bad example here, some classes may have substantial functionality
    // not involving their type parameters
    //
    // The code below upcasts the type automatically
    FunctionGroup constructors = classDescriptor.getConstructors();
    OverloadDomain constructorsOverloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(null, constructors);
    JetType constructorReturnedType;
    if (call instanceof JetDelegatorToThisCall) {
        // this(...) delegation: reuse the receiver's type arguments, dropping projections.
        List<TypeProjection> typeArguments = receiverType.getArguments();
        List<JetType> projectionsStripped = Lists.newArrayList();
        for (TypeProjection typeArgument : typeArguments) {
            projectionsStripped.add(typeArgument.getType());
        }
        constructorReturnedType = resolveCallWithTypeArguments(
                scope,
                wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
                call, projectionsStripped);
    }
    else {
        constructorReturnedType = resolveCall(
                scope,
                wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
                call);
    }
    if (constructorReturnedType == null && !ErrorUtils.isErrorType(receiverType)) {
        // Resolution failed on a well-formed receiver type: still bind the reference to
        // the class so navigation works, then report on the argument list when present.
        DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
        assert declarationDescriptor != null;
        trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, declarationDescriptor);
        // TODO : more helpful message
        JetValueArgumentList argumentList = call.getValueArgumentList();
        final String errorMessage = "Cannot find a constructor overload for class " + classDescriptor.getName() + " with these arguments";
        if (argumentList != null) {
            trace.getErrorHandler().genericError(argumentList.getNode(), errorMessage);
        }
        else {
            trace.getErrorHandler().genericError(call.asElement().getNode(), errorMessage);
        }
        // Recover with the receiver type so callers still get a usable type.
        constructorReturnedType = receiverType;
    }
    // If no upcast needed:
    return constructorReturnedType;
    // Automatic upcast:
    // result = receiverType;
}
//TODO
// If the expression is a reference to a known variable, returns the (possibly more
// precise) out-type recorded for it in the data-flow info; otherwise returns
// 'initialType' unchanged.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when the expression's type is not a subtype of the
 * context's expected type (when one is imposed); the expression type itself is
 * returned unchanged either way.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    JetType expected = context.expectedType;
    boolean checkable = expressionType != null && expected != null && expected != NO_EXPECTED_TYPE;
    if (checkable && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, expected)) {
        context.trace.getErrorHandler().typeMismatch(expression, expected, expressionType);
    }
    return expressionType;
}
/**
 * Like {@link #checkType}, but before reporting a mismatch tries to "enrich" the
 * expression's type using data-flow facts about the underlying variable: if any
 * data-flow-known type satisfies the expected type, the check passes and an
 * auto-cast is recorded (for immutable variables).
 *
 * @return the enriched type when enrichment applied, otherwise the original type
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    // Fast path: nothing to check, or the plain type already conforms.
    if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
        semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        return expressionType;
    }
    // Enrichment only works for expressions that denote a variable.
    VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
    if (variableDescriptor == null) return expressionType;
    JetType enrichedType = null;
    // Reversed so the most recently established data-flow types are tried first.
    List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
    Collections.reverse(possibleTypes);
    for (JetType possibleType: possibleTypes) {
        if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
            enrichedType = possibleType;
            break;
        }
    }
    if (enrichedType == null) {
        enrichedType = context.dataFlowInfo.getOutType(variableDescriptor);
    }
    if (enrichedType == null) {
        enrichedType = expressionType;
    }
    if (!semanticServices.getTypeChecker().isSubtypeOf(enrichedType, context.expectedType)) {
        // NOTE(review): the diagnostic reports the original expressionType, not the
        // enriched one — presumably so the message shows the declared type; confirm.
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    } else {
        checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
    }
    return enrichedType;
}
// Records an AUTOCAST for an immutable variable; mutable ('var') variables cannot be
// safely auto-cast, so an error is reported instead.
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
        return;
    }
    trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
}
/**
 * Runs checkEnrichedType over each argument against its expected parameter type.
 * When the three lists do not line up one-to-one (or there are no arguments), the
 * original argument types are returned untouched.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checkedTypes = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checkedTypes.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checkedTypes;
}
/**
 * Resolves the variable referenced by an expression, looking through an 'expr : Type'
 * annotation to its left-hand side. Returns null when the expression is not (bound to)
 * a variable reference.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typedExpression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typedExpression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            // 'x : T' — the variable, if any, is on the left of the colon.
            return getVariableDescriptorFromSimpleName(typedExpression.getLeft(), context);
        }
    }
    if (receiverExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
        DeclarationDescriptor target = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, nameExpression);
        if (target instanceof VariableDescriptor) {
            return (VariableDescriptor) target;
        }
    }
    return null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of the state one type-inference step needs: the binding trace,
 * resolution scope, data-flow facts, and the expected (return) types. The 'replace*'
 * methods derive a new context sharing everything else.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    // Presumably controls whether a '{...}' in this position is treated as a block
    // rather than a function literal — TODO confirm against callers.
    public final boolean preferBlock;
    public final DataFlowInfo dataFlowInfo;
    // Expected type of the expression being checked; NO_EXPECTED_TYPE when unconstrained.
    public final JetType expectedType;
    // Expected type for 'return' expressions; FORBIDDEN when 'return' is not allowed.
    public final JetType expectedReturnType;
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            boolean preferBlock,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.preferBlock = preferBlock;
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }
    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return new TypeInferenceContext(trace, scope, preferBlock, newDataFlowInfo, expectedType, expectedReturnType);
    }
    // A null argument means "no expectation" and is normalized to NO_EXPECTED_TYPE.
    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
        if (expectedType == newExpectedType) return this;
        return new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        if (expectedReturnType == newExpectedReturnType) return this;
        return new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, expectedType, newExpectedReturnType);
    }
    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        if (newTrace == trace) return this;
        return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow facts produced by the most recently checked expression; consumed (and
// then reset via resetResult) by block-level checking to thread facts between
// consecutive statements.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
    return resultDataFlowInfo;
}
// Convenience overload: types 'expression' in a different scope, keeping the rest of
// the context (trace, data-flow info, expected types) unchanged.
@Nullable
public JetType getType(@NotNull JetScope scope, @NotNull JetExpression expression, boolean preferBlock, TypeInferenceContext context) {
    return getType(expression, new TypeInferenceContext(context.trace, scope, preferBlock, context.dataFlowInfo, context.expectedType, context.expectedReturnType));
}
// Convenience overload: types 'expression' under replacement data-flow info, keeping
// the rest of the context unchanged.
private JetType getTypeWithNewDataFlowInfo(JetScope scope, JetExpression expression, boolean preferBlock, @NotNull DataFlowInfo newDataFlowInfo, TypeInferenceContext context) {
    return getType(expression, new TypeInferenceContext(context.trace, scope, preferBlock, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
/**
 * Main entry point: computes the type of an expression and caches it in the trace.
 * Expressions already marked PROCESSED return their cached EXPRESSION_TYPE directly.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
    if (context.trace.get(BindingContext.PROCESSED, expression)) {
        return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
    }
    JetType result;
    try {
        result = expression.visit(this, context);
        // Some recursive definitions (object expressions) must put their types in the cache manually:
        if ((boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
            return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
        }
        if (result instanceof DeferredType) {
            // Force lazily computed types so callers always see a concrete type.
            result = ((DeferredType) result).getActualType();
        }
        if (result != null) {
            context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
            // A non-nullable Nothing-typed expression never completes normally, so
            // everything it dominates is unreachable.
            if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
                markDominatedExpressionsAsUnreachable(expression, context);
            }
        }
    }
    catch (ReenteringLazyValueComputationException e) {
        // Recursive dependency among lazily computed types: report and give up on this one.
        context.trace.getErrorHandler().genericError(expression.getNode(), "Type inference has run into a recursive problem"); // TODO : message
        result = null;
    }
    // Record the scope only for expressions not already handled (e.g. by the
    // object-literal handler above), then mark the expression as processed.
    if (!(boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
        context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
    }
    context.trace.record(BindingContext.PROCESSED, expression);
    return result;
}
// Clears per-expression state between consecutive statements of a block
// (see getBlockReturnedTypeWithWritableScope).
public void resetResult() {
    // result = null;
    resultDataFlowInfo = null;
    // resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reports everything dominated by a non-terminating expression as unreachable,
// one diagnostic per root element so nested pieces are not reported separately.
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
/**
 * Types a block expression: Unit for an empty block, otherwise delegates to
 * getBlockReturnedTypeWithWritableScope with a fresh writable scope so statements
 * can introduce declarations visible to later statements.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    if (statements.isEmpty()) {
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
    WritableScope blockScope =
            new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return context.services.getBlockReturnedTypeWithWritableScope(blockScope, statements, context);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple name: a backing-field reference ($field), a variable, a classifier
 * used as a value (via its class object), or — failing those — whatever
 * furtherNameLookup (e.g. namespace resolution in subclasses) can make of it.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        // Backing-field reference: resolve the property by its field name.
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(BindingContext.REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
        if (referencedName != null) {
            VariableDescriptor variable = context.scope.getVariable(referencedName);
            if (variable != null) {
                context.trace.record(BindingContext.REFERENCE_TARGET, expression, variable);
                JetType result = variable.getOutType();
                if (result == null) {
                    // The variable has no out-type, i.e. it cannot be read here.
                    context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
                }
                return context.services.checkEnrichedType(result, expression, context);
            }
            else {
                ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
                if (classifier != null) {
                    // A classifier used as a value stands for its class object, when it has one.
                    JetType classObjectType = classifier.getClassObjectType();
                    JetType result = null;
                    if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
                        result = classObjectType;
                    }
                    else {
                        context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
                    }
                    context.trace.record(BindingContext.REFERENCE_TARGET, expression, classifier);
                    return context.services.checkEnrichedType(result, expression, context);
                }
                else {
                    // Last resort: subclass-specific lookup writes its answer into result[0].
                    JetType[] result = new JetType[1];
                    if (furtherNameLookup(expression, referencedName, result, context)) {
                        return context.services.checkEnrichedType(result[0], expression, context);
                    }
                }
            }
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
    }
    return null;
}
// Overridden by subclasses that type names in namespace position (where a classifier's
// class object is acceptable even if it is not a value); false for plain expressions.
public boolean isNamespacePosition() {
    return false;
}
/**
 * Fallback lookup for a simple name that resolved to neither a variable nor a
 * classifier. The base implementation only recognizes namespace names, which it
 * reports as an error because a bare namespace is not a value; result[0] is
 * left untouched here. Returns true when this lookup handled the name.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
if (namespaceType == null) {
return false;
}
context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
return true;
}
/**
 * Resolves the given name as a namespace in the current scope. On success the
 * reference target is recorded in the trace and the namespace's type returned;
 * returns null when no namespace by that name is visible.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
if (namespace != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, namespace);
return namespace.getNamespaceType();
}
return null;
}
/**
 * Infers the type of an object literal expression. The object declaration is
 * analyzed by a fresh TopDownAnalyzer over an adapted trace; a record handler
 * intercepts the declaration-to-descriptor record for this very declaration to
 * capture its class descriptor, from which the type is derived lazily.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
// One-element array acts as a mutable capture slot for the anonymous handler.
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the record for this expression's own object declaration.
if (declaration == expression.getObjectDeclaration()) {
// Defer computing the default type until it is actually queried.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Record the expression type at most once, guarded by the PROCESSED slice.
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
// Install the handler on every declaration->descriptor slice of the trace.
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Infers the type of a function literal. When the context prefers a block and
 * the literal declares no parameters, it is treated as a plain block instead.
 * Otherwise a FunctionDescriptorImpl is built from the declared receiver,
 * parameters, and return type (the latter inferred from the body or from the
 * expected function type when not declared), and the resulting function type
 * is returned.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
// "{ ... }" with no parameter list in a block-preferring position is just a block.
if (context.preferBlock && !functionLiteral.hasParameterSpecification()) {
context.trace.record(BindingContext.BLOCK, expression);
return context.services.checkType(getBlockReturnedType(context.scope, functionLiteral.getBodyExpression(), context), expression, context);
}
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No declared receiver: fall back to the enclosing scope's 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// Parameter type inference is unimplemented; substitute an error type.
context.trace.getErrorHandler().genericError(parameter.getNode(), "Type inference for parameters is not implemented yet");
type = ErrorUtils.createErrorType("Not inferred");
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// Only an explicitly declared receiver becomes part of the function type.
JetType effectiveReceiverType = receiverTypeRef == null ? null : receiverType;
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: resolve it and check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo);
}
else {
// No declared return type: seed the expected type from an expected function type, if any.
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(context.expectedType)) {
returnType = JetStandardClasses.getReturnType(context.expectedType);
}
returnType = getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/**
 * A parenthesized expression has the type of its inner expression; no new
 * expected-type context is introduced.
 */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
JetType innerType = getType(context.scope, expression.getExpression(), false, context);
return context.services.checkType(innerType, expression, context);
}
/**
 * Infers the type of a literal constant (integer, float, boolean, character,
 * raw string, or null). The literal's text is interpreted by the
 * CompileTimeConstantResolver against the expected type; failures come back as
 * ErrorValue and are reported. On success the compile-time value is recorded
 * in the trace and its type returned.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
ASTNode literalNode = expression.getNode();
IElementType literalKind = literalNode.getElementType();
String literalText = literalNode.getText();
CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
JetType expected = context.expectedType;
CompileTimeConstant<?> constant;
if (literalKind == JetNodeTypes.INTEGER_CONSTANT) {
constant = resolver.getIntegerValue(literalText, expected);
}
else if (literalKind == JetNodeTypes.FLOAT_CONSTANT) {
constant = resolver.getFloatValue(literalText, expected);
}
else if (literalKind == JetNodeTypes.BOOLEAN_CONSTANT) {
constant = resolver.getBooleanValue(literalText, expected);
}
else if (literalKind == JetNodeTypes.CHARACTER_CONSTANT) {
constant = resolver.getCharValue(literalText, expected);
}
else if (literalKind == JetNodeTypes.RAW_STRING_CONSTANT) {
constant = resolver.getRawStringValue(literalText, expected);
}
else if (literalKind == JetNodeTypes.NULL) {
constant = resolver.getNullValue(expected);
}
else {
throw new IllegalArgumentException("Unsupported constant: " + expression);
}
if (constant instanceof ErrorValue) {
// The literal could not be interpreted against the expected type.
context.trace.getErrorHandler().genericError(literalNode, ((ErrorValue) constant).getMessage());
return null;
}
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
return context.services.checkType(constant.getType(standardLibrary), expression, context);
}
/**
 * Infers the type of a 'throw' expression. The thrown operand is typechecked
 * for its trace side effects; the 'throw' itself has the Nothing type because
 * it never completes normally.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
JetExpression thrownExpression = expression.getThrownExpression();
if (thrownExpression != null) {
// Result intentionally discarded: the call is only needed so the operand
// gets typechecked and recorded. (Original stored it in an unused local.)
// TODO : check that it inherits Throwable
getType(context.scope, thrownExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Infers the type of a 'return' expression. Reports an error where 'return'
 * is forbidden; otherwise typechecks the returned value against the expected
 * return type, or — for a bare 'return' — complains when the function is
 * declared to return a non-Unit value. The 'return' expression itself has the
 * Nothing type.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
if (context.expectedReturnType == FORBIDDEN) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
return null;
}
JetExpression returnedExpression = expression.getReturnedExpression();
if (returnedExpression != null) {
// Typecheck against the function's expected return type; the resulting
// type is not needed here. (Original also kept an unused Unit-typed local.)
getType(context.scope, returnedExpression, false, context.replaceExpectedType(context.expectedReturnType));
}
else if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
// Bare 'return' in a function that must produce a value.
context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
}
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * 'break' never completes normally, so its type is Nothing.
 */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
JetType nothingType = JetStandardClasses.getNothingType();
return context.services.checkType(nothingType, expression, context);
}
/**
 * 'continue' never completes normally, so its type is Nothing.
 */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
JetType nothingType = JetStandardClasses.getNothingType();
return context.services.checkType(nothingType, expression, context);
}
/**
 * Infers the type of "e : T", "e as T", and "e as? T". For ':' the static
 * type of the left operand must already conform to the target type; for casts
 * the plausibility of the cast is checked, and 'as?' additionally makes the
 * resulting type nullable.
 */
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
IElementType operation = expression.getOperationSign().getReferencedNameElementType();
JetType leftType = getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
JetTypeReference rightTypeRef = expression.getRight();
JetType resultType = null;
if (rightTypeRef != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, rightTypeRef);
if (operation == JetTokens.COLON) {
// Upcast annotation: left operand must already be a subtype of the target.
if (leftType != null && !semanticServices.getTypeChecker().isSubtypeOf(leftType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, leftType);
}
resultType = targetType;
}
else if (operation == JetTokens.AS_KEYWORD) {
checkForCastImpossibility(expression, leftType, targetType, context);
resultType = targetType;
}
else if (operation == JetTokens.AS_SAFE) {
// Safe cast yields null on failure, hence a nullable result type.
checkForCastImpossibility(expression, leftType, targetType, context);
resultType = TypeUtils.makeNullable(targetType);
}
else {
context.trace.getErrorHandler().genericError(expression.getOperationSign().getNode(), "Unsupported binary operation");
}
}
return context.services.checkType(resultType, expression, context);
}
/**
 * Warns about casts that are redundant (source already conforms to target)
 * or that can never succeed (the two types are unrelated). Nothing is
 * reported when the source type is unknown.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
if (actualType == null) return;
JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// Same check order as before: target-vs-actual first, then actual-vs-target.
boolean targetConformsToActual = typeChecker.isSubtypeOf(targetType, actualType);
boolean actualConformsToTarget = typeChecker.isSubtypeOf(actualType, targetType);
if (!targetConformsToActual) {
if (actualConformsToTarget) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
}
else {
// See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
}
}
else if (actualConformsToTarget) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
}
}
/**
 * Infers the type of a tuple literal. Each entry is typed independently; when
 * a tuple type of matching arity is expected, the entry types may be enriched
 * against the expected component types.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
List<JetExpression> entries = expression.getEntries();
List<JetType> entryTypes = new ArrayList<JetType>();
for (JetExpression entry : entries) {
entryTypes.add(context.services.safeGetType(context.scope, entry, false, NO_EXPECTED_TYPE)); // TODO
}
boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
if (tupleExpected) {
List<JetType> enrichedTypes = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
// Identity comparison: checkArgumentTypes returns the same list when nothing changed.
if (enrichedTypes != entryTypes) {
return JetStandardClasses.getTupleType(enrichedTypes);
}
}
// TODO : labels
return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of 'this', possibly qualified by a label ("this@label") or
 * by a supertype ("this<Supertype>"). Labeled 'this' is resolved through the
 * scope's label declarations (falling back to control-flow information for
 * function literals); unlabeled 'this' uses the scope's this-type. A supertype
 * qualifier narrows the result to the matching declared supertype.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Unique label target: a class gives its default type, a function its receiver type.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver on the labeled literal: Nothing triggers the
// "'this' is not defined" error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': taken directly from the enclosing scope.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
// "this<Supertype>": the result is the substituted declared supertype.
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
// Match the qualifier against the directly declared supertypes.
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
/**
 * A block expression has the type returned by its body (the last-statement
 * semantics are handled by getBlockReturnedType).
 */
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
JetType blockType = getBlockReturnedType(context.scope, expression, context);
return context.services.checkType(blockType, expression, context);
}
/**
 * Infers the type of a 'when' expression. Each entry's conditions are checked
 * against the subject; data-flow information from multi-condition entries is
 * OR-ed across conditions and AND-ed with the incoming info. The result is the
 * common supertype of all entry-body types; null when no body produced a type.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
// Conditions and the subject are typed without the caller's expected type;
// only entry bodies see contextWithExpectedType.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, false, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition may bind pattern variables into the entry's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
// Any of the conditions may match, so their data-flow facts are OR-ed.
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// Entry bodies are typed with the caller's expected type and the merged data-flow info.
JetType type = getTypeWithNewDataFlowInfo(scopeToExtend, bodyExpression, true, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Checks a single 'when' condition (call suffix, 'in' range, or 'is' pattern)
 * against the subject, extending scopeToExtend with any pattern-bound
 * variables. Returns the data-flow info resulting from the condition; only
 * 'is'-pattern conditions refine it, the others leave it unchanged.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
// Mutable capture slot for the anonymous visitor below.
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// The call suffix is resolved as if invoked on the subject (receiver scope).
JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
JetPattern pattern = condition.getPattern();
if (pattern != null) {
// Pattern matching is the only condition kind that refines data-flow info.
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Checks a pattern (type, tuple, decomposer, wildcard, expression, or binding
 * pattern) against the subject type, extending scopeToExtend with variables
 * bound by binding patterns. Returns data-flow info refined by any
 * instance-of knowledge the pattern provides.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// Mutable capture slot for the anonymous visitor below.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
// A successful 'is T' narrows the subject variables to T.
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
// The subject must be exactly TupleN with N matching the pattern's arity.
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
// Component type comes from the subject's i-th type argument.
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
// All components must match, so nested infos are AND-ed.
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// The decomposer's return type becomes the subject of the nested pattern list.
JetType selectorReturnType = getSelectorReturnType(subjectType, pattern.getDecomposerExpression(), context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(scopeToExtend, expression, false, context);
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
// No declared type: the bound variable takes the subject's type.
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The guard condition also sees the freshly bound variable as a subject.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the pattern's type cannot intersect with the subject's type.
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType); // TODO : message
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Infers the type of a try/catch/finally expression. Catch bodies are typed
 * in a scope containing the catch parameter. When a finally block exists, the
 * expression's type is the common supertype of the finally block and the try
 * block only (catch types are typechecked but then discarded); otherwise it is
 * the common supertype of the try block and all catch bodies.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
// The catch parameter is visible only inside the catch body.
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchScope, catchBody, true, context);
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(context.scope, finallyBlock.getFinalExpression(), true, context);
if (type != null) {
types.add(type);
}
}
JetType type = getType(context.scope, tryBlock, true, context);
if (type != null) {
types.add(type);
}
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
/**
 * Infers the type of an if-expression. Data-flow facts are extracted from the
 * condition separately for the then- and else-branches; when one branch has
 * the Nothing type (a jump), the other branch's facts are propagated onward
 * via the resultDataFlowInfo field. A one-armed 'if' has Unit type; a
 * two-armed one has the common supertype of its branch types.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
// Branches are checked against the expected type; the condition is not.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// The then-scope may gain variables bound by 'is' patterns in the condition.
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, context);
// If the then-branch always jumps, the else-facts hold after the 'if'.
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, context);
// If the else-branch always jumps, the then-facts hold after the 'if'.
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
JetType thenType = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
// A branch that jumps out propagates the opposite branch's data-flow facts.
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
    // Extracts data-flow facts from a boolean binary expression under the
    // assumption that the whole condition evaluates to 'conditionValue'
    // (captured from the enclosing extractDataFlowInfoFromCondition call).
    IElementType operationToken = expression.getOperationToken();
    if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
        // For '&&' the operand facts are usable only when the condition is true;
        // for '||' only when it is false. Otherwise no scope is extended (null).
        WritableScope actualScopeToExtend;
        if (operationToken == JetTokens.ANDAND) {
            actualScopeToExtend = conditionValue ? scopeToExtend : null;
        }
        else {
            actualScopeToExtend = conditionValue ? null : scopeToExtend;
        }
        DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
        JetExpression expressionRight = expression.getRight();
        if (expressionRight != null) {
            DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
            DataFlowInfo.CompositionOperator operator;
            // 'a && b' being true means both operand facts hold (AND); being
            // false means at least one operand was false (OR). Dually for '||'.
            if (operationToken == JetTokens.ANDAND) {
                operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
            }
            else {
                operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
            }
            dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
        }
        result[0] = dataFlowInfo;
    }
    else if (operationToken == JetTokens.EQEQ
             || operationToken == JetTokens.EXCLEQ
             || operationToken == JetTokens.EQEQEQ
             || operationToken == JetTokens.EXCLEQEQEQ) {
        JetExpression left = expression.getLeft();
        JetExpression right = expression.getRight();
        if (right == null) return;
        // Normalize so that 'left' is the simple name being compared; give up
        // when neither side is a simple name.
        if (!(left instanceof JetSimpleNameExpression)) {
            JetExpression tmp = left;
            left = right;
            right = tmp;
            if (!(left instanceof JetSimpleNameExpression)) {
                return;
            }
        }
        VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
        if (variableDescriptor == null) return;
        // TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
        // Comparison to a non-null expression: equality with a value of a
        // non-nullable type implies the variable itself is not null.
        JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
        if (rhsType != null && !rhsType.isNullable()) {
            extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
            return;
        }
        // Symmetric case: the right side is a variable compared to a
        // non-nullable left side.
        VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
        if (rightVariable != null) {
            JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
            if (lhsType != null && !lhsType.isNullable()) {
                extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
                return;
            }
        }
        // Comparison to the 'null' literal itself.
        if (!(right instanceof JetConstantExpression)) {
            return;
        }
        JetConstantExpression constantExpression = (JetConstantExpression) right;
        if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
            return;
        }
        extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
    }
}
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
    // Records a nullness fact for 'variableDescriptor' in the shared result slot.
    // '==' / '===' compare for equality with null; '!=' / '!==' for inequality,
    // which simply flips the sense of the recorded fact.
    boolean isEqualityOp = operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ;
    boolean isInequalityOp = operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ;
    if (isEqualityOp) {
        result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
    }
    else if (isInequalityOp) {
        result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
    }
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
    // Only negation carries data-flow information: '!c' under assumption
    // 'conditionValue' is the same as 'c' under the opposite assumption.
    IElementType token = expression.getOperationSign().getReferencedNameElementType();
    if (token != JetTokens.EXCL) return;
    JetExpression negated = expression.getBaseExpression();
    if (negated == null) return;
    result[0] = extractDataFlowInfoFromCondition(negated, !conditionValue, scopeToExtend, context);
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
    // Parentheses are transparent for data-flow extraction: delegate to the
    // inner expression when present.
    JetExpression inner = expression.getExpression();
    if (inner == null) return;
    inner.accept(this);
}
});
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    // Verifies that a condition, when present and typeable, has type Boolean.
    if (condition == null) return;
    JetType conditionType = getType(scope, condition, false, context);
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A while loop is a statement: its type is Unit regardless of the expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        // The body is checked in a scope extended with variables bound by the
        // condition, plus the data-flow facts implied by the condition being true.
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
        getTypeWithNewDataFlowInfo(scopeToExtend, body, true, conditionInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // No 'break' can exit the loop, so after it the condition is known to
        // be false; publish those facts via the resultDataFlowInfo field.
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A do..while loop is a statement: its type is Unit regardless of the expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    // Variables declared in the body stay visible in the condition, so the
    // condition is checked in the body's writable scope when one is created.
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (!function.getFunctionLiteral().hasParameterSpecification()) {
            // A parameterless function literal body is treated as a plain block.
            WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = writableScope;
            // NOTE(review): getBodyExpression() is dereferenced without a null
            // check — presumably non-null for a parsed literal; TODO confirm.
            context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), context);
            context.trace.record(BindingContext.BLOCK, function);
        } else {
            getType(context.scope, body, true, context);
        }
    }
    else if (body != null) {
        // Any other body is wrapped as a single-statement block in its own scope.
        WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = writableScope;
        context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // No 'break': after the loop the condition is known to be false.
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    // Creates a fresh writable scope nested in 'scope', owned by the same
    // containing declaration and reporting errors through 'trace'.
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A for loop is a statement: its type is Unit regardless of the expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetParameter loopParameter = expression.getLoopParameter();
    JetExpression loopRange = expression.getLoopRange();
    JetType loopRangeType = null;
    if (loopRange != null) {
        loopRangeType = getType(context.scope, loopRange, false, context);
    }
    // The element type comes from the iterator convention on the range's type.
    JetType expectedParameterType = null;
    if (loopRangeType != null) {
        expectedParameterType = checkIterableConvention(loopRangeType, loopRange.getNode(), context);
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    if (loopParameter != null) {
        JetTypeReference typeReference = loopParameter.getTypeReference();
        VariableDescriptor variableDescriptor;
        if (typeReference != null) {
            // Explicitly typed parameter: check the declared type against the
            // element type produced by the iterator convention.
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType actualParameterType = variableDescriptor.getOutType();
            if (expectedParameterType != null &&
                actualParameterType != null &&
                !semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
            }
        }
        else {
            // No declared type: infer it from the range, falling back to an
            // error type when the convention failed to produce one.
            if (expectedParameterType == null) {
                expectedParameterType = ErrorUtils.createErrorType("Error");
            }
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
        }
        loopScope.addVariableDescriptor(variableDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        // The body sees the loop parameter through loopScope.
        getType(loopScope, body, true, context); // TODO
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Checks that {@code type} satisfies the for-loop convention:
 * {@code iterator()} returning a type that supports {@code hasNext()} (function)
 * or {@code hasNext} (property), and {@code next()}.
 *
 * @param type           the static type of the loop range expression
 * @param reportErrorsOn AST node to attach diagnostics to
 * @return the element type (return type of {@code next()}), or {@code null}
 *         when the convention is not satisfied
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
    OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getFunctionDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            // Fixed message: the property alternative is spelled without
            // parentheses (consistent with the other hasNext diagnostics), so
            // the two alternatives are actually distinguishable to the user.
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getFunctionDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // Returns false only when no 'hasNext()' function exists on the iterator
    // type. Ambiguity and a non-Boolean return type are reported as errors but
    // still count as "supported" (return true).
    OverloadResolutionResult resolution = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
        return true;
    }
    if (resolution.isNothing()) {
        return false;
    }
    JetType returnType = resolution.getFunctionDescriptor().getReturnType();
    if (!isBoolean(returnType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
    }
    return true;
}
private boolean checkHasNextPropertySupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // Returns false when the iterator type declares no 'hasNext' property;
    // otherwise checks that it is readable and Boolean, reporting errors but
    // still returning true.
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) return false;
    JetType outType = property.getOutType();
    if (outType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(outType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + outType);
    }
    return true;
}
// @Override
// public void visitNewExpression(JetNewExpression expression) {
// // TODO : type argument inference
// JetTypeReference typeReference = expression.getTypeReference();
// if (typeReference != null) {
// result = checkTypeInitializerCall(scope, typeReference, expression);
// }
// }
// private void resolveCallWithExplicitName(@NotNull JetScope scope, JetExpression receiver, String functionName, List<JetType> resolvedTypeArguments, List<ValueArgumentPsi> valueArguments) {
//
// boolean someNamed = false;
// boolean allNamed = true;
// for (ValueArgumentPsi valueArgument : valueArguments) {
// if (valueArgument.isNamed()) {
// context.trace.getErrorHandler().genericError(valueArgument.asElement().getNode(), "Named arguments are not supported");
// someNamed = true;
// }
// else {
// allNamed = false;
// }
// }
// if (someNamed) {
// return; // TODO
// }
// if (someNamed && !allNamed) {
// // TODO function literals outside parentheses
// }
//
// ErrorHandlerWithRegions errorHandler = context.trace.getErrorHandler();
//
// // 1. resolve 'receiver' in 'scope' with expected type 'NO_EXPECTED_TYPE'
// errorHandler.openRegion();
// JetType receiverType = JetTypeInferrer.this.getType(scope, receiver, false, NO_EXPECTED_TYPE);
// // for each applicable function in 'receiverType'
// Set<FunctionDescriptor> allFunctions = receiverType.getMemberScope().getFunctionGroup(functionName).getFunctionDescriptors();
// Map<FunctionDescriptor, List<JetType>> applicableFunctions = Maps.newHashMap();
// int typeArgCount = resolvedTypeArguments.size();
// int valueArgCount = valueArguments.size();
// for (FunctionDescriptor functionDescriptor : allFunctions) {
// if (typeArgCount == 0 || functionDescriptor.getTypeParameters().size() == typeArgCount) {
// if (FunctionDescriptorUtil.getMinimumArity(functionDescriptor) <= valueArgCount &&
// valueArgCount <= FunctionDescriptorUtil.getMaximumArity(functionDescriptor)) {
// // get expected types for value parameters
// if (typeArgCount > 0) {
// FunctionDescriptor substitutedFunctionDescriptor = FunctionDescriptorUtil.substituteFunctionDescriptor(resolvedTypeArguments, functionDescriptor);
// }
// else {
// FunctionDescriptor substitutedFunctionDescriptor = FunctionDescriptorUtil.substituteFunctionDescriptor(TypeUtils.getDefaultTypes(functionDescriptor.getTypeParameters()), functionDescriptor);
// List<JetType> valueArgumentTypes = getArgumentTypes(substitutedFunctionDescriptor, valueArguments);
// if (valueArgumentTypes == null) {
// Map<TypeConstructor, TypeProjection> noExpectedTypes = Maps.newHashMap();
// for (TypeParameterDescriptor typeParameterDescriptor : functionDescriptor.getTypeParameters()) {
// noExpectedTypes.put(typeParameterDescriptor.getTypeConstructor(), new TypeProjection(NO_EXPECTED_TYPE));
// }
// substitutedFunctionDescriptor = functionDescriptor.substitute(TypeSubstitutor.create(noExpectedTypes));
// valueArgumentTypes = getArgumentTypes(substitutedFunctionDescriptor, valueArguments);
// }
// if (valueArgumentTypes != null) {
// List<JetType> typeArguments = solveConstraintSystem(functionDescriptor, valueArgumentTypes, expectedReturnType);
// if (typeArguments != null) {
// applicableFunctions.put(functionDescriptor, typeArguments);
// }
// }
// }
// // type-check the parameters
// // if something was found (one or many options
// errorHandler.closeAndCommitCurrentRegion();
// // otherwise
// errorHandler.closeAndReturnCurrentRegion();
//
// }
// }
// }
// // get expected types for value parameters
// // type-check the parameters
// // if something was found (one or many options
// errorHandler.closeAndCommitCurrentRegion();
// // otherwise
// errorHandler.closeAndReturnCurrentRegion();
//
// }
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    // The '#' qualifier is not supported by this inferrer; report and give up.
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Types 'receiver.selector' / 'receiver?.selector' / 'receiver?selector'.
    // Resolution is first attempted against the receiver's declared type; when
    // that fails, data-flow-implied (autocast) types of the receiver are tried.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, new TypeInferenceContext(context.trace, context.scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    // Clean resolution: no autocasts. Work on a temporary trace so a failed
    // attempt does not pollute the real binding context.
    TemporaryBindingTrace cleanResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    // ErrorHandler errorHandler = context.trace.getErrorHandler();
    // errorHandler.openRegion();
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' yields null when 'a' is null, so the result becomes nullable
        // (Unit is left as-is).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        // Clean resolution succeeded: commit its bindings.
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Clean resolution failed: retry with each autocast candidate type of
        // the receiver variable (candidates are iterated in reverse of their
        // recorded order).
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    // Candidate worked: validate the autocast and commit only
                    // this attempt's bindings.
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.addAllMyDataTo(context.trace);
                    somethingFound = true;
                    break;
                }
                else {
                    // Start over with a fresh temporary trace for the next candidate.
                    autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // Nothing worked: commit the diagnostics from the clean attempt.
            cleanResolutionTrace.addAllMyDataTo(context.trace);
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        // 'a?b': the selector must be Boolean; the whole expression has the
        // receiver's type made nullable.
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            // Null-safety check: dereferencing a nullable receiver with '.'
            // is diagnosed against the resolved callee.
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Digs through a selector expression to the reference that names the function
 * being called and returns its resolved descriptor. Never returns null: when
 * nothing suitable is found, an error function descriptor is substituted.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    final FunctionDescriptor[] result = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // 'f(...)' — descend into the callee expression.
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // A plain reference: take its resolution target if it is a function.
            DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, referenceExpression);
            if (declarationDescriptor instanceof FunctionDescriptor) {
                result[0] = (FunctionDescriptor) declarationDescriptor;
            }
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            // 'a[...]' — look inside the array expression.
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            // Binary operation — look at the left operand.
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            // 'a.b' — look at the receiver.
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            // Anything else is unexpected here.
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (result[0] == null) {
        // Fallback so the caller always gets a non-null descriptor.
        result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return result[0];
}
private JetType getCallExpressionType(@Nullable JetType receiverType, @NotNull JetCallExpression callExpression, TypeInferenceContext context) {
    // Resolves a call (optionally on a receiver) and returns its result type,
    // or null when there is no callee expression to resolve.
    JetExpression callee = callExpression.getCalleeExpression();
    if (callee == null) return null;
    OverloadDomain domain = context.services.getOverloadDomain(receiverType, context.scope, callee, callExpression.getValueArgumentList());
    return context.services.resolveCall(context.scope, domain, callExpression);
}
private JetType getSelectorReturnType(JetType receiverType, JetExpression selectorExpression, TypeInferenceContext context) {
    // Computes the type of 'receiver.selector' given the receiver's type.
    if (selectorExpression == null) {
        return null;
    }
    if (selectorExpression instanceof JetCallExpression) {
        return getCallExpressionType(receiverType, (JetCallExpression) selectorExpression, context);
    }
    if (selectorExpression instanceof JetSimpleNameExpression) {
        // Resolve the simple name in a scope that also sees the receiver's members.
        JetScope compositeScope = new ScopeWithReceiver(context.scope, receiverType, semanticServices.getTypeChecker());
        return getType(compositeScope, selectorExpression, false, context);
    }
    // TODO : not a simple name -> resolve in scope, expect property type or a function type
    context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    // A plain call has no receiver; delegate and check against the expected type.
    JetType callType = getCallExpressionType(null, expression, context);
    return context.services.checkType(callType, expression, context);
}
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    // 'a is Pattern' always has type Boolean; the interesting work is
    // recording the data-flow info and variables the pattern binds.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType knownType = getType(context.scope, expression.getLeftHandSide(), false, context);
    JetPattern pattern = expression.getPattern();
    if (pattern != null && knownType != null) {
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
        // Stash the results keyed by pattern so a surrounding condition
        // (if/while) can apply them to its branches.
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    // Types a unary expression by resolving the operator to its conventional
    // method (via unaryOperationNames) on the operand's type.
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // A label prefix: type the labeled expression itself.
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    IElementType operationType = operationSign.getReferencedNameElementType();
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(context.scope, baseExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(context.scope, expression.getOperationSign(), name, receiverType, Collections.<JetType>emptyList(), true);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        // '++'/'--': the callee may return Unit (in-place mutation) or a value
        // that must be assignable back to the operand's type.
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                // The operand is written back, so record the reassignment.
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Types a binary expression by dispatching on its operation token: infix calls,
 * conventional operators, (compound) assignments, comparisons, (referential)
 * equality, 'in', '&&'/'||', and the elvis operator.
 *
 * Fix: the elvis warning message previously read "is always returns the left
 * operand ..." — corrected to "always returns the left operand ...".
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Infix call: 'a foo b' resolves as a call to 'foo'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(expression, referencedName, context.scope, true, context);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Conventional operator: resolve to its named method.
        result = getTypeForBinaryCall(expression, binaryOperationNames.get(operationType), context.scope, true, context);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' go through compareTo, which must return Int;
        // the comparison itself is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(expression, "compareTo", context.scope, true, context);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // '==' / '!=' require an equals(Any?) : Boolean on the left operand's type.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(context.scope, left, false, context);
            if (leftType != null) {
                JetType rightType = getType(context.scope, right, false, context);
                if (rightType != null) {
                    FunctionDescriptor equals = context.services.lookupFunction(
                            context.scope, operationSign, "equals",
                            leftType, Collections.singletonList(JetStandardClasses.getNullableAnyType()), false);
                    if (equals != null) {
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                    }
                }
            }
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        // Referential equality: only check that the operand types can overlap.
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // NOTE(review): this assignment is dead — the method returns null
            // immediately, discarding the error type. Preserved as-is to keep
            // behavior; see TODO.
            result = ErrorUtils.createErrorType("No right argument"); // TODO
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        // Both operands must be Boolean; the right operand is checked under
        // the data-flow facts implied by the left one.
        JetType leftType = getType(context.scope, left, false, context);
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getTypeWithNewDataFlowInfo(rightScope, right, false, flowInfoLeft, context);
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        // 'a ?: b' — the result is nullable only if the right operand is.
        JetType leftType = getType(context.scope, left, false, context);
        JetType rightType = right == null ? null : getType(context.scope, right, false, contextWithExpectedType);
        if (leftType != null) {
            if (!leftType.isNullable()) {
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    // 'a in b' resolves as b.contains(a); the result must be Boolean.
    final String operationName = "contains";
    JetType resultType = getTypeForBinaryCall(context.scope, right, operationSign, left, operationName, true, context);
    ensureBooleanResult(operationSign, operationName, resultType, context);
}
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // Reports an error when the static types of the two operands have no
    // common subtype, i.e. the comparison can never relate two values.
    // TODO : duplicated effort for == and !=
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType leftType = getType(context.scope, left, false, context);
    if (leftType == null || right == null) return;
    JetType rightType = getType(context.scope, right, false, context);
    if (rightType == null) return;
    HashSet<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    // Compound assignments ('+=' etc.) are rejected in expression position
    // here; presumably overridden where assignments are legal — TODO confirm.
    return assignmentIsNotAnExpressionError(expression, context);
}
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    // Plain '=' is rejected in expression position here; presumably
    // overridden where assignments are legal — TODO confirm.
    return assignmentIsNotAnExpressionError(expression, context);
}
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    // Shared diagnostic for '=' and compound assignments used as expressions.
    String message = "Assignments are not expressions, and only expressions are allowed in this context";
    context.trace.getErrorHandler().genericError(expression.getNode(), message);
    return null;
}
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    // Convenience wrapper: quotes 'name' as the subject of the
    // "... must return Boolean ..." diagnostic; true when the check passes.
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Verifies that the given result type is (convertible to) Boolean, reporting an
 * error against the operation sign otherwise. A null result type is tolerated
 * (the failure has been reported elsewhere) and counts as success.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
// True when the type is convertible to the standard-library Boolean type.
private boolean isBoolean(@NotNull JetType type) {
return semanticServices.getTypeChecker().isConvertibleTo(type, semanticServices.getStandardLibrary().getBooleanType());
}
/**
 * Infers the type of each index expression in order, with no expected type.
 * Returns null as soon as any expression fails to produce a type; otherwise
 * returns the list of inferred types.
 */
@Nullable
protected List<JetType> getTypes(JetScope scope, List<JetExpression> indexExpressions, TypeInferenceContext context) {
    List<JetType> result = new ArrayList<JetType>();
    TypeInferenceContext indexContext = new TypeInferenceContext(context.trace, scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    for (JetExpression indexExpression : indexExpressions) {
        JetType indexType = context.services.typeInferrerVisitor.getType(indexExpression, indexContext);
        if (indexType == null) {
            return null;
        }
        result.add(indexType);
        context.services.typeInferrerVisitor.resetResult(); // TODO : recreate?
    }
    return result;
}
// Types an array-access read 'a[i, ...]' by resolving it as a call to
// 'a.get(i, ...)'. The expected type is only applied to the final result;
// the receiver and index expressions are typed without one.
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression arrayExpression = expression.getArrayExpression();
// Receiver and indices are both typed even if one fails, so that diagnostics
// are produced for all sub-expressions.
JetType receiverType = getType(context.scope, arrayExpression, false, context);
List<JetExpression> indexExpressions = expression.getIndexExpressions();
List<JetType> argumentTypes = getTypes(context.scope, indexExpressions, context);
if (argumentTypes == null) return null;
if (receiverType != null) {
FunctionDescriptor functionDescriptor = context.services.lookupFunction(context.scope, expression, "get", receiverType, argumentTypes, true);
if (functionDescriptor != null) {
// checkNullSafety(receiverType, expression.getIndexExpressions().get(0).getNode(), functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), expression, contextWithExpectedType);
}
}
return null;
}
/**
 * Convenience overload: extracts the operands and the operation reference from
 * the binary expression and delegates to the operand-based overload. Returns
 * null when the right operand is missing (incomplete code).
 */
@Nullable
protected JetType getTypeForBinaryCall(
        @NotNull JetBinaryExpression expression,
        @NotNull String name,
        @NotNull JetScope scope,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetExpression leftOperand = expression.getLeft();
    JetExpression rightOperand = expression.getRight();
    if (rightOperand == null) {
        return null;
    }
    return getTypeForBinaryCall(scope, leftOperand, expression.getOperationReference(), rightOperand, name, reportUnresolved, context);
}
/**
 * Resolves a binary operation as the call 'left.name(right)' and returns the
 * resolved function's return type, or null when either operand type or the
 * function itself cannot be resolved.
 *
 * Also reports an error when the receiver (the LEFT operand) is nullable,
 * since an infix call corresponds to a dot-qualified call, which is not
 * allowed on a nullable receiver.
 */
@Nullable
private JetType getTypeForBinaryCall(
        @NotNull JetScope scope,
        @NotNull JetExpression left,
        @NotNull JetSimpleNameExpression operationSign,
        @NotNull JetExpression right,
        @NotNull String name,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetType leftType = getType(scope, left, false, context);
    JetType rightType = getType(scope, right, false, context);
    if (leftType == null || rightType == null) {
        return null;
    }
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, name, leftType, Collections.singletonList(rightType), reportUnresolved);
    if (functionDescriptor == null) {
        return null;
    }
    if (leftType.isNullable()) {
        // TODO : better error message for '1 + nullableVar' case
        // BUG FIX: the nullable receiver is the left operand, so the message
        // must quote left.getText(); it previously quoted the right operand.
        context.trace.getErrorHandler().genericError(operationSign.getNode(),
                "Infix call corresponds to a dot-qualified call '" +
                left.getText() + "." + name + "(" + right.getText() + ")'" +
                " which is not allowed on a nullable receiver '" + left.getText() + "'." +
                " Use '?.'-qualified call instead");
    }
    return functionDescriptor.getReturnType();
}
// Declarations cannot appear in an expression position for this visitor;
// TypeInferrerVisitorWithWritableScope handles declarations inside blocks.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
// 'namespace' is rejected here; TypeInferrerVisitorWithNamespaces overrides
// this to give the root namespace a type.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
// Types a string template. Always yields the String type; additionally, when
// every entry is a literal or a valid escape, records the concatenated text as
// a compile-time constant. value[0] acts as a sentinel: it is set to
// OUT_OF_RANGE as soon as an entry makes the template non-constant (an
// embedded expression or an illegal escape).
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
// One-element array so the anonymous visitor below can write to it.
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
// Type-check the embedded expression for its side-effect diagnostics.
getType(context.scope, entryExpression, true, context);
}
// An embedded expression makes the template non-constant.
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
// NOTE(review): asserts a two-character escape ('\' + one char); longer
// escapes (e.g. unicode) would trip this assert — confirm lexer guarantees.
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
// Only record a compile-time value if no entry invalidated constantness.
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
// Fallback for any PSI element this visitor has no dedicated handler for.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
/**
 * A type-inferrer visitor that additionally allows namespace expressions:
 * simple-name lookup falls back to namespace types, and the root 'namespace'
 * expression is given a type instead of being rejected.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
// A type-inferrer visitor for block bodies: declarations encountered while
// typing are added to the given writable scope, and assignments (plain and
// compound) are resolved rather than rejected.
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// The scope that local declarations are accumulated into as the block is typed.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
// Processes a local object declaration via the top-down analyzer and, if a
// class descriptor was produced, registers the object as a property in scope.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// Declares a local variable: rejects receiver types and accessors (not
// allowed on locals), types the initializer against the declared type if
// both are present, then adds the variable to the scope. Note the variable
// only becomes visible AFTER its initializer is typed.
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
JetType outType = propertyDescriptor.getOutType();
// Type mismatches are reported via the expected type in the context.
JetType initializerType = getType(scope, initializer, false, context.replaceExpectedType(outType));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
// Declares a local function: resolves its descriptor, adds it to the scope,
// then checks its return type/body.
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
// Any other declaration kind is reported as unsupported.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
// Compound assignment ('a += b'): first try resolving 'plusAssign'-style
// operation; if that fails, fall back to the plain binary counterpart
// ('plus') and record that the variable is reassigned.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
JetType assignmentOperationType = getTypeForBinaryCall(expression, name, scope, false, context);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(expression, counterpartName, scope, true, context);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
return null;
}
// Plain assignment: 'a[i] = x' is resolved as a 'set' call; otherwise the
// right-hand side is typed against the left-hand side's type.
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(scope, left, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (right != null) {
JetType rightType = getType(scope, right, false, context.replaceExpectedType(leftType));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
// Resolves 'a[i, ...] = rhs' as the call 'a.set(i, ..., rhs)'. The index
// types, rhs type and receiver type are all inferred first; any failure
// aborts with null.
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
List<JetType> argumentTypes = getTypes(scope, arrayAccessExpression.getIndexExpressions(), context);
if (argumentTypes == null) return null;
JetType rhsType = getType(scope, rightHandSide, false, context);
if (rhsType == null) return null;
argumentTypes.add(rhsType);
JetType receiverType = getType(scope, arrayAccessExpression.getArrayExpression(), false, context);
if (receiverType == null) return null;
// TODO : nasty hack: effort is duplicated
// Looked up twice on purpose so that both the whole access expression and
// the operation sign get a recorded reference target.
context.services.lookupFunction(scope, arrayAccessExpression, "set", receiverType, argumentTypes, true);
FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, "set", receiverType, argumentTypes, true);
if (functionDescriptor == null) return null;
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, contextWithExpectedType);
}
// Fallback for unsupported elements inside a block.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
Left
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElement;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lang.types.inference.ConstraintSystem;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel "type" used where an expected type must never be consulted (e.g.
// as the forbidden return-type slot). Every method throws, so any accidental
// use fails fast. Compared by identity only.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
// Sentinel "type" meaning "no expected type is imposed on this expression".
// Like FORBIDDEN, every method throws and comparisons are by identity
// (callers test 'expectedType != NO_EXPECTED_TYPE').
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
};
// Maps unary operator tokens to the convention method names they desugar to.
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Maps binary operator tokens to the convention method names they desugar to.
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Token sets used to classify binary operations during type checking.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Maps compound-assignment tokens ('+=' etc.) to their 'opAssign' method names.
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Maps each compound-assignment token to its plain binary counterpart, used
// as a fallback when no 'opAssign' function resolves (e.g. '+=' falls back to '+').
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
// Core collaborators shared by all Services instances created from this inferrer.
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern analysis results accumulated during inference.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
// Creates a Services facade bound to the given trace; all diagnostics and
// binding records produced through it go to that trace.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
// The trace all results and diagnostics of this Services instance are written to.
private final BindingTrace trace;
// Resolvers bound to the trace above.
private final ClassDescriptorResolver classDescriptorResolver;
private final TypeResolver typeResolver;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final AnnotationResolver annotationResolver;
// Shared visitor instances; see getTypes()'s resetResult() call for the
// implications of sharing typeInferrerVisitor.
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Wires up all trace-bound resolvers and the shared visitor instances.
private Services(BindingTrace trace) {
this.trace = trace;
this.annotationResolver = new AnnotationResolver(semanticServices, trace);
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
}
// Creates a fresh block-body visitor over the given writable scope (not cached,
// unlike the two shared visitors above).
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Same as getType, but never returns null: when no type can be inferred, an
 * error type describing the expression is returned instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, preferBlock, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the expression's type in a fresh context with empty data-flow info.
// Returns null when the type cannot be determined (errors go to the trace).
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Like getType, but uses the namespace-aware visitor so that namespace
// expressions are typed instead of rejected. No expected type is imposed.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock) {
return typeInferrerVisitorWithNamespaces.getType(expression, new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
/**
 * Resolves a function 'name' on the given receiver type with the given
 * positional argument types. Resolution goes through the tracing wrapper, so
 * reference targets are recorded and (when reportUnresolved is true)
 * unresolved-reference errors are emitted. Returns the descriptor only on a
 * successful resolution, otherwise null.
 */
@Nullable
private FunctionDescriptor lookupFunction(
        @NotNull JetScope scope,
        @NotNull JetReferenceExpression reference,
        @NotNull String name,
        @NotNull JetType receiverType,
        @NotNull List<JetType> argumentTypes,
        boolean reportUnresolved) {
    OverloadDomain domain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
    // No generics. Guaranteed
    OverloadDomain tracingDomain = wrapForTracing(domain, reference, null, reportUnresolved);
    OverloadResolutionResult result = tracingDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), argumentTypes);
    if (!result.isSuccess()) {
        return null;
    }
    return result.getFunctionDescriptor();
}
// Resolves a zero-argument function 'name' on the receiver type, without the
// tracing wrapper (no reference target is recorded, no errors reported).
@NotNull
private OverloadResolutionResult resolveNoParametersFunction(@NotNull JetType receiverType, @NotNull JetScope scope, @NotNull String name) {
OverloadDomain overloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
// No generics. Guaranteed
return overloadDomain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), Collections.<JetType>emptyList());
}
// Builds the overload-resolution domain for a call's callee expression.
// Currently only simple-name callees are supported; other callee shapes either
// report an error or throw. The one-element arrays collect results out of the
// anonymous visitor.
private OverloadDomain getOverloadDomain(
@Nullable final JetType receiverType,
@NotNull final JetScope scope,
@NotNull JetExpression calleeExpression,
@Nullable PsiElement argumentList
) {
final OverloadDomain[] result = new OverloadDomain[1];
final JetSimpleNameExpression[] reference = new JetSimpleNameExpression[1];
calleeExpression.accept(new JetVisitorVoid() {
@Override
public void visitHashQualifiedExpression(JetHashQualifiedExpression expression) {
// a#b -- create a domain for all overloads of b in a
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitPredicateExpression(JetPredicateExpression expression) {
// overload lookup for checking, but the type is receiver's type + nullable
throw new UnsupportedOperationException(); // TODO
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
trace.getErrorHandler().genericError(expression.getNode(), "Unsupported [JetTypeInferrer]");
// . or ?.
// JetType receiverType = getType(scope, expression.getReceiverExpression(), false);
// checkNullSafety(receiverType, expression.getOperationTokenNode());
//
// JetExpression selectorExpression = expression.getSelectorExpression();
// if (selectorExpression instanceof JetSimpleNameExpression) {
// JetSimpleNameExpression referenceExpression = (JetSimpleNameExpression) selectorExpression;
// String referencedName = referenceExpression.getReferencedName();
//
// if (receiverType != null && referencedName != null) {
// // No generics. Guaranteed
// result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
// reference[0] = referenceExpression;
// }
// } else {
// throw new UnsupportedOperationException(); // TODO
// }
}
@Override
public void visitSimpleNameExpression(JetSimpleNameExpression expression) {
// a -- create a hierarchical lookup domain for this.a
String referencedName = expression.getReferencedName();
if (referencedName != null) {
// No generics. Guaranteed
result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
reference[0] = expression;
}
}
@Override
public void visitExpression(JetExpression expression) {
// <e> create a dummy domain for the type of e
throw new UnsupportedOperationException(expression.getText()); // TODO
}
@Override
public void visitJetElement(JetElement element) {
trace.getErrorHandler().genericError(element.getNode(), "Unsupported in call element"); // TODO : Message
}
});
// NOTE(review): result[0]/reference[0] can still be null here (e.g. a simple
// name with no referenced name, or a qualified callee) although wrapForTracing
// declares its parameters @NotNull — confirm callers never hit that path.
return wrapForTracing(result[0], reference[0], argumentList, true);
}
// Validates '.' vs '?.' against the receiver's nullability:
//  - '.' on a nullable receiver (unless the callee itself accepts a nullable
//    receiver) is an error;
//  - '?.' on a non-nullable receiver is a warning, and on a namespace an error.
// Namespace types are never treated as nullable receivers.
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
// A callee with a nullable declared receiver tolerates '.' on nullable values.
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
// Decorates an overload domain so that every resolution records the resolved
// function as the reference target (on success, or when exactly one candidate
// exists) and, if reportErrors is set, converts failures into diagnostics.
private OverloadDomain wrapForTracing(
@NotNull final OverloadDomain overloadDomain,
@NotNull final JetReferenceExpression referenceExpression,
@Nullable final PsiElement argumentList,
final boolean reportErrors) {
return new OverloadDomain() {
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForNamedArguments(@NotNull List<JetType> typeArguments, @NotNull Map<String, JetType> valueArgumentTypes, @Nullable JetType functionLiteralArgumentType) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForNamedArguments(typeArguments, valueArgumentTypes, functionLiteralArgumentType);
report(resolutionResult);
return resolutionResult;
}
@NotNull
@Override
public OverloadResolutionResult getFunctionDescriptorForPositionedArguments(@NotNull List<JetType> typeArguments, @NotNull List<JetType> positionedValueArgumentTypes) {
OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, positionedValueArgumentTypes);
report(resolutionResult);
return resolutionResult;
}
// Records the reference target and, when enabled, emits a diagnostic that
// matches the failure kind. With no argument list available, argument
// mismatch and ambiguity degrade to a plain unresolved-reference report.
private void report(OverloadResolutionResult resolutionResult) {
if (resolutionResult.isSuccess() || resolutionResult.singleFunction()) {
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, resolutionResult.getFunctionDescriptor());
}
if (reportErrors) {
switch (resolutionResult.getResultCode()) {
case NAME_NOT_FOUND:
trace.getErrorHandler().unresolvedReference(referenceExpression);
break;
case SINGLE_FUNCTION_ARGUMENT_MISMATCH:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Arguments do not match " + DescriptorRenderer.TEXT.render(resolutionResult.getFunctionDescriptor()));
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
case AMBIGUITY:
if (argumentList != null) {
// TODO : More helpful message. NOTE: there's a separate handling for this for constructors
trace.getErrorHandler().genericError(argumentList.getNode(), "Overload ambiguity [TODO : more helpful message]");
}
else {
trace.getErrorHandler().unresolvedReference(referenceExpression);
}
break;
default:
// Not a success
}
}
}
@Override
public boolean isEmpty() {
return overloadDomain.isEmpty();
}
};
}
// Public entry point: checks the function's body against its declared return
// type, starting with empty data-flow info.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
// Resolves the expected return type (expression-bodied functions without a
// declared type get NO_EXPECTED_TYPE) and delegates the actual checking to the
// inner-scope overload.
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
JetType expectedReturnType = functionDescriptor.getReturnType();
if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
expectedReturnType = NO_EXPECTED_TYPE;
}
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo);
// Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
// if (typeMap.isEmpty()) {
// return; // The function returns Nothing
// }
// for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
// JetType actualType = entry.castValue();
// JetElement element = entry.getKey();
// JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
// if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
// if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
// && element.getParent() instanceof JetReturnExpression) {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
// }
// }
// else {
// if (element == function) {
// JetExpression bodyExpression = function.getBodyExpression();
// assert bodyExpression != null;
// context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// else if (element instanceof JetExpression) {
// JetExpression expression = (JetExpression) element;
// context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
// }
// else {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// }
// }
// }
}
// Overload for callers that already built the function's inner scope; starts
// with empty data-flow info.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty());
}
// Types the function body and checks return-related rules:
//  - reports unreachable code (only the root of each unreachable subtree);
//  - for a non-Unit expected type, requires a returned value when the body is
//    empty;
//  - forbids 'return' in expression-bodied functions;
//  - in block bodies, requires trailing expressions to be 'return's (or Nothing).
// For block bodies the expected type is carried as the return-slot type; for
// expression bodies it is the expression's expected type.
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
final TypeInferenceContext context =
blockBody
? new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, expectedReturnType, FORBIDDEN);
typeInferrerVisitor.getType(bodyExpression, context);
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
// nothingReturned is captured BEFORE removing the function marker below, so
// the "must return a value" error fires exactly when the only collected
// "expression" was the empty-body marker.
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Infers the return type of {@code function} as the common supertype of the types of all
 * expressions it returns. A function from which nothing is returned gets the Nothing type.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Infers types for the function body, then collects every returned expression together with
 * its recorded type. Elements reported by the flow provider as returning Unit are mapped to
 * the Unit type. Returned expressions are also marked as non-statements in the trace.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Type the whole body first so EXPRESSION_TYPE is populated for every returned expression
typeInferrerVisitor.getType(bodyExpression, new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// A returned expression is used as a value, not as a statement
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
/**
 * Types each statement of a block in order, threading data-flow info from one statement to
 * the next; the block's type is the type of its last statement. The caller's expected type,
 * if any, is applied only to that last statement. An empty block has type Unit.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = new TypeInferenceContext(trace, scope, true, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
// Only the last statement is checked against the caller's expected type
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
newContext = new TypeInferenceContext(trace, scope, true, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
}
result = blockLevelVisitor.getType(statementExpression, newContext);
// Propagate data-flow facts established by this statement to the following ones
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = new TypeInferenceContext(trace, scope, true, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Resolves a call whose type arguments must be inferred: builds a constraint system from the
 * candidate's value-parameter types and the actual argument types (plus the expected type,
 * when one is known), and on success returns the candidate's return type with the solved
 * substitution applied.
 *
 * <p>Limitations (enforced by asserts / exceptions below): the callee must be a simple name
 * resolving to exactly one candidate, arguments must be positional, and explicit type
 * arguments are not supported.
 *
 * <p>Fixes over the previous revision: removed leftover {@code System.out.println} debug
 * output and dead commented-out code.
 *
 * @return the substituted return type, or null if constraint solving fails
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull JetCall call,
        @NotNull JetType expectedType
) {
    if (!call.getTypeArguments().isEmpty()) {
        throw new UnsupportedOperationException("Explicit type arguments not implemented");
    }
    JetExpression calleeExpression = call.getCalleeExpression();
    Collection<FunctionDescriptor> candidates;
    if (calleeExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression expression = (JetSimpleNameExpression) calleeExpression;
        candidates = scope.getFunctionGroup(expression.getReferencedName()).getFunctionDescriptors();
    }
    else {
        throw new UnsupportedOperationException("Type argument inference not implemented");
    }
    // TODO : real overload resolution — for now a unique candidate is required
    assert candidates.size() == 1;
    FunctionDescriptor candidate = candidates.iterator().next();
    // NOTE(review): the previous revision asserted candidate.getTypeParameters().size() ==
    // call.getTypeArguments().size(), but type arguments are known to be empty on this branch,
    // which would forbid any generic candidate under -ea; dropped as contradictory.
    ConstraintSystem constraintSystem = new ConstraintSystem();
    for (TypeParameterDescriptor typeParameterDescriptor : candidate.getTypeParameters()) {
        constraintSystem.registerTypeVariable(typeParameterDescriptor, Variance.INVARIANT); // TODO
    }
    // Each argument's type must be a subtype of the corresponding parameter type
    Iterator<ValueParameterDescriptor> parameters = candidate.getValueParameters().iterator();
    for (JetValueArgument valueArgument : call.getValueArguments()) {
        assert !valueArgument.isNamed();
        ValueParameterDescriptor valueParameterDescriptor = parameters.next();
        JetExpression expression = valueArgument.getArgumentExpression();
        JetType type = getType(scope, expression, false, NO_EXPECTED_TYPE);
        constraintSystem.addSubtypingConstraint(type, valueParameterDescriptor.getOutType());
    }
    // The call's result must fit the expected type, when the caller supplied one
    if (expectedType != NO_EXPECTED_TYPE) {
        constraintSystem.addSubtypingConstraint(candidate.getReturnType(), expectedType);
    }
    ConstraintSystem.Solution solution = constraintSystem.solve();
    if (!solution.isSuccessful()) {
        trace.getErrorHandler().genericError(calleeExpression.getNode(), "Type inference failed");
        return null;
    }
    return solution.getSubstitutor().substitute(candidate.getReturnType(), Variance.INVARIANT); // TODO
}
/**
 * Resolves a call through the given overload domain. Explicit type arguments, if present,
 * are resolved here (projections on them are reported as errors) before delegating.
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull OverloadDomain overloadDomain,
        @NotNull JetCall call) {
    // 1) ends with a name -> (scope, name) to look up
    // 2) ends with something else -> just check types
    List<JetTypeProjection> projections = call.getTypeArguments();
    // in/out/* variance markers make no sense on a function's type arguments
    for (JetTypeProjection projection : projections) {
        if (projection.getProjectionKind() != JetProjectionKind.NONE) {
            trace.getErrorHandler().genericError(projection.getNode(), "Projections are not allowed on type parameters for methods"); // TODO : better positioning
        }
    }
    List<JetType> resolvedTypeArguments = new ArrayList<JetType>();
    for (JetTypeProjection projection : projections) {
        // TODO : check that there's no projection
        JetTypeReference reference = projection.getTypeReference();
        if (reference == null) {
            continue;
        }
        resolvedTypeArguments.add(new TypeResolver(semanticServices, trace, true).resolveType(scope, reference));
    }
    return resolveCallWithTypeArguments(scope, overloadDomain, call, resolvedTypeArguments);
}
/**
 * Resolves a call with already-resolved type arguments against an overload domain.
 * Named arguments are not supported (an error is reported); otherwise all value and
 * function-literal arguments are typed positionally and matched against the domain.
 *
 * @return the resolved function's return type, or null when resolution fails
 */
private JetType resolveCallWithTypeArguments(JetScope scope, OverloadDomain overloadDomain, JetCall call, List<JetType> typeArguments) {
    List<JetValueArgument> valueArguments = call.getValueArguments();
    boolean hasNamedArgument = false;
    for (JetValueArgument argument : valueArguments) {
        if (argument.isNamed()) {
            hasNamedArgument = true;
            break;
        }
    }
    List<JetExpression> functionLiteralArguments = call.getFunctionLiteralArguments();
    // TODO : must be a check
    assert functionLiteralArguments.size() <= 1;
    if (hasNamedArgument) {
        // TODO : check that all are named
        trace.getErrorHandler().genericError(call.asElement().getNode(), "Named arguments are not supported"); // TODO
        return null;
    }
    // Flatten value arguments and any trailing function literal into one positional list
    List<JetExpression> positionedArguments = new ArrayList<JetExpression>();
    for (JetValueArgument argument : valueArguments) {
        JetExpression argumentExpression = argument.getArgumentExpression();
        if (argumentExpression != null) {
            positionedArguments.add(argumentExpression);
        }
    }
    positionedArguments.addAll(functionLiteralArguments);
    List<JetType> argumentTypes = new ArrayList<JetType>();
    for (JetExpression argument : positionedArguments) {
        argumentTypes.add(safeGetType(scope, argument, false, NO_EXPECTED_TYPE)); // TODO
    }
    OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, argumentTypes);
    if (!resolutionResult.isSuccess()) {
        return null;
    }
    FunctionDescriptor functionDescriptor = resolutionResult.getFunctionDescriptor();
    checkGenericBoundsInAFunctionCall(call.getTypeArguments(), typeArguments, functionDescriptor);
    return functionDescriptor.getReturnType();
}
/**
 * Checks each explicit type argument of a call against the bounds of the corresponding
 * type parameter, with all parameters substituted by their supplied arguments.
 * Assumes the argument and parameter lists have equal length.
 */
private void checkGenericBoundsInAFunctionCall(List<JetTypeProjection> jetTypeArguments, List<JetType> typeArguments, FunctionDescriptor functionDescriptor) {
    List<TypeParameterDescriptor> typeParameters = functionDescriptor.getOriginal().getTypeParameters();
    int parameterCount = typeParameters.size();
    // First build the full parameter -> argument substitution …
    Map<TypeConstructor, TypeProjection> substitutionContext = Maps.newHashMap();
    for (int i = 0; i < parameterCount; i++) {
        substitutionContext.put(typeParameters.get(i).getTypeConstructor(), new TypeProjection(typeArguments.get(i)));
    }
    TypeSubstitutor substitutor = TypeSubstitutor.create(substitutionContext);
    // … then check every argument against its parameter's (substituted) bounds
    for (int i = 0; i < parameterCount; i++) {
        JetTypeReference typeReference = jetTypeArguments.get(i).getTypeReference();
        assert typeReference != null;
        classDescriptorResolver.checkBounds(typeReference, typeArguments.get(i), typeParameters.get(i), substitutor);
    }
}
/**
 * Checks a constructor call written against a type reference (e.g. a delegation specifier
 * or initializer). Only ordinary classes are supported; projections in the constructor's
 * type arguments are reported as errors.
 *
 * @return the constructed type, or null on error
 */
@Nullable
public JetType checkTypeInitializerCall(JetScope scope, @NotNull JetTypeReference typeReference, @NotNull JetCall call) {
JetTypeElement typeElement = typeReference.getTypeElement();
if (typeElement instanceof JetUserType) {
JetUserType userType = (JetUserType) typeElement;
// TODO : to infer constructor parameters, one will need to
// 1) resolve a _class_ from the typeReference
// 2) rely on the overload domain of constructors of this class to infer type arguments
// For now we assume that the type arguments are provided, and thus the typeReference can be
// resolved into a valid type
JetType receiverType = typeResolver.resolveType(scope, typeReference);
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
// Variance projections make no sense when instantiating a class
for (JetTypeProjection typeProjection : userType.getTypeArguments()) {
switch (typeProjection.getProjectionKind()) {
case IN:
case OUT:
case STAR:
// TODO : Bug in the editor
trace.getErrorHandler().genericError(typeProjection.getProjectionNode(), "Projections are not allowed in constructor type arguments");
break;
case NONE:
break;
}
}
JetSimpleNameExpression referenceExpression = userType.getReferenceExpression();
if (referenceExpression != null) {
return checkClassConstructorCall(scope, referenceExpression, classDescriptor, receiverType, call);
}
}
else {
// The referenced type is not a class (e.g. a type parameter) — cannot construct it
trace.getErrorHandler().genericError(((JetElement) call).getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : review the message
}
return null;
}
else {
if (typeElement != null) {
trace.getErrorHandler().genericError(typeElement.getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : Better message
}
}
return null;
}
/**
 * Resolves a call to a constructor of {@code classDescriptor}. For 'this(...)' delegator
 * calls the receiver type's arguments (projections stripped) serve as explicit type
 * arguments; otherwise they are taken from the call itself. On resolution failure an error
 * is reported and {@code receiverType} is returned as a fallback (unless it is already an
 * error type).
 */
@Nullable
public JetType checkClassConstructorCall(
@NotNull JetScope scope,
@NotNull JetReferenceExpression referenceExpression,
@NotNull ClassDescriptor classDescriptor,
@NotNull JetType receiverType,
@NotNull JetCall call) {
// When one writes 'new Array<in T>(...)' this does not make much sense, and an instance
// of 'Array<T>' must be created anyway.
// Thus, we should either prohibit projections in type arguments in such contexts,
// or treat them as an automatic upcast to the desired type, i.e. for the user not
// to be forced to write
// val a : Array<in T> = new Array<T>(...)
// NOTE: Array may be a bad example here, some classes may have substantial functionality
// not involving their type parameters
//
// The code below upcasts the type automatically
FunctionGroup constructors = classDescriptor.getConstructors();
OverloadDomain constructorsOverloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(null, constructors);
JetType constructorReturnedType;
if (call instanceof JetDelegatorToThisCall) {
// 'this(...)': reuse the receiver type's arguments, dropping variance projections
List<TypeProjection> typeArguments = receiverType.getArguments();
List<JetType> projectionsStripped = Lists.newArrayList();
for (TypeProjection typeArgument : typeArguments) {
projectionsStripped.add(typeArgument.getType());
}
constructorReturnedType = resolveCallWithTypeArguments(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call, projectionsStripped);
}
else {
constructorReturnedType = resolveCall(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call);
}
if (constructorReturnedType == null && !ErrorUtils.isErrorType(receiverType)) {
// Resolution failed, but the class itself is fine — still record the reference target
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
assert declarationDescriptor != null;
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, declarationDescriptor);
// TODO : more helpful message
JetValueArgumentList argumentList = call.getValueArgumentList();
final String errorMessage = "Cannot find a constructor overload for class " + classDescriptor.getName() + " with these arguments";
if (argumentList != null) {
trace.getErrorHandler().genericError(argumentList.getNode(), errorMessage);
}
else {
trace.getErrorHandler().genericError(call.asElement().getNode(), errorMessage);
}
constructorReturnedType = receiverType;
}
// If no upcast needed:
return constructorReturnedType;
// Automatic upcast:
// result = receiverType;
}
/**
 * If {@code expression} is a simple name bound to a known variable, returns the data-flow
 * "out" type recorded for that variable; otherwise {@code initialType} is returned unchanged.
 */
//TODO
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when a non-null {@code expressionType} fails to be a subtype of
 * the context's expected type (if one is set); always passes {@code expressionType} through.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    boolean hasExpectation = context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE;
    if (hasExpectation && expressionType != null
            && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but before reporting a mismatch consults data-flow info: if the expression
 * is a known variable whose possible (auto-cast) types, or its not-null "out" type, include a
 * subtype of the expected type, the mismatch is suppressed, an auto-cast is recorded (or an
 * error, if the variable is mutable), and the expected type is returned instead.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
// Fast path: nothing to check, or the type already fits
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Reversed so the most recently established data-flow types are tried first —
// presumably; confirm against DataFlowInfo's ordering.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType : possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// appropriateTypeFound implies variableDescriptor != null here
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast of {@code expression} to {@code type}, or reports an error when
 * the variable is mutable (a 'var' may change between the check and the use).
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
/**
 * Runs checkEnrichedType on each argument against its expected parameter type. When the
 * three lists disagree in size (or there are no arguments), the input types are returned
 * unchanged.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    boolean sizesAgree = count != 0 && argumentTypes.size() == count && expectedArgumentTypes.size() == count;
    if (!sizesAgree) {
        return argumentTypes;
    }
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        TypeInferenceContext argumentContext = context.replaceExpectedType(expectedArgumentTypes.get(i).getType());
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), argumentContext));
    }
    return checked;
}
/**
 * Extracts the variable a (possibly type-annotated) simple-name expression refers to,
 * according to REFERENCE_TARGET records in the trace; null when the expression is not a
 * simple name or does not resolve to a variable.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    // Look through 'e : Type' — the variable, if any, is the left-hand side
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typedExpression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typedExpression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typedExpression.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable per-node inference state: the trace to report into, the resolution scope, flow
 * facts, and the expected types for the expression itself and for 'return' expressions.
 * The replace* methods derive a copy with one component swapped, returning {@code this}
 * when nothing would change — except replaceDataFlowInfo, which always builds a copy.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    public final boolean preferBlock;
    public final DataFlowInfo dataFlowInfo;
    public final JetType expectedType;
    public final JetType expectedReturnType;
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            boolean preferBlock,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.scope = scope;
        this.preferBlock = preferBlock;
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
        // Derived helpers, all bound to the same trace
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.services = getServices(trace);
    }
    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return new TypeInferenceContext(trace, scope, preferBlock, newDataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        JetType effectiveType = (newExpectedType == null) ? NO_EXPECTED_TYPE : newExpectedType;
        return (effectiveType == expectedType)
               ? this
               : new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, effectiveType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        JetType effectiveType = (newExpectedReturnType == null) ? NO_EXPECTED_TYPE : newExpectedReturnType;
        return (effectiveType == expectedReturnType)
               ? this
               : new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, expectedType, effectiveType);
    }
    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        return (newTrace == trace)
               ? this
               : new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        return (newExpectedType == expectedType && newTrace == trace)
               ? this
               : new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recently visited statement, consumed by the block
// typer and cleared via resetResult(). Presumably written by subclass visitors — the
// writers are not visible in this chunk; confirm.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/** Types {@code expression} after re-anchoring the context to the given scope and block preference. */
@Nullable
public JetType getType(@NotNull JetScope scope, @NotNull JetExpression expression, boolean preferBlock, TypeInferenceContext context) {
    TypeInferenceContext rescoped = new TypeInferenceContext(
            context.trace, scope, preferBlock, context.dataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rescoped);
}
/** Types {@code expression} with the context's data-flow info replaced by {@code newDataFlowInfo}. */
private JetType getTypeWithNewDataFlowInfo(JetScope scope, JetExpression expression, boolean preferBlock, @NotNull DataFlowInfo newDataFlowInfo, TypeInferenceContext context) {
    TypeInferenceContext updated = new TypeInferenceContext(
            context.trace, scope, preferBlock, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, updated);
}
/**
 * Central entry point of the visitor: computes (or returns the cached) type of an
 * expression, records it in the trace, and marks code dominated by a Nothing-typed
 * expression as unreachable. Also records the resolution scope and the PROCESSED flag.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
// Already processed: reuse the cached type
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if ((boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily-computed types before caching
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-nullable expression of type Nothing never completes normally
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type inference has run into a recursive problem"); // TODO : message
result = null;
}
if (!(boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
/** Clears the per-statement result state before the next statement is typed. */
public void resetResult() {
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports "unreachable" errors on the root expressions dominated by {@code expression}
 * (which never terminates normally, e.g. it has type Nothing).
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    // Only root expressions are highlighted, to avoid flagging every sub-expression
    for (JetElement rootExpression : JetPsiUtil.findRootExpressions(dominated)) {
        String message = "This code is unreachable, because '" + expression.getText() + "' never terminates normally";
        context.trace.getErrorHandler().genericError(rootExpression.getNode(), message);
    }
}
/**
 * Types a block expression: an empty block is Unit; otherwise a fresh writable scope is
 * created for the block's declarations and the statements are typed in order.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    if (statements.isEmpty()) {
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
    WritableScope blockScope =
            new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return context.services.getBlockReturnedTypeWithWritableScope(blockScope, statements, context);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple name: a backing-field reference ($field) is looked up among properties;
 * an ordinary identifier is tried, in order, as a variable, a classifier (yielding its
 * class-object type, if legal here), and finally via furtherNameLookup (namespaces).
 * Unresolved names are reported.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
String referencedName = expression.getReferencedName();
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
// Backing-field reference: resolve against properties with backing fields
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
if (referencedName != null) {
VariableDescriptor variable = context.scope.getVariable(referencedName);
if (variable != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, variable);
JetType result = variable.getOutType();
if (result == null) {
// e.g. a write-only property: it has no "out" type to read
context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
// A bare class name denotes its class object, when one exists and is usable here
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
context.trace.record(BindingContext.REFERENCE_TARGET, expression, classifier);
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
}
context.trace.getErrorHandler().unresolvedReference(expression);
}
}
return null;
}
/**
 * Whether names are being resolved in a namespace position (where a bare namespace name is
 * a legal result). Always false in this base visitor; presumably overridden by a
 * namespace-aware subclass — not visible in this chunk, confirm.
 */
public boolean isNamespacePosition() {
return false;
}
/**
 * Fallback lookup after variables and classifiers failed: tries the name as a namespace.
 * In this base visitor a namespace is not a value, so finding one is itself an error.
 *
 * @return true when the name was consumed (even erroneously), false to report "unresolved"
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
/**
 * Looks the name up as a namespace in the current scope; on success records the reference
 * target and returns the namespace's type, otherwise returns null.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        context.trace.record(BindingContext.REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
/**
 * Types an object literal by running a fresh top-down analysis of the object declaration.
 * The resulting class descriptor is delivered via a trace record handler, hence the
 * one-element result array; the DeferredType wrapper defers computing the descriptor's
 * default type until it is actually needed.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the descriptor recorded for this very object declaration
if (declaration == expression.getObjectDeclaration()) {
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Cache eagerly so recursive references during analysis see the type
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Listen on every declaration-to-descriptor slice
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal. When a block is preferred and the literal has no parameter
 * specification, it is treated as a plain block. Otherwise a FunctionDescriptor is built
 * from the declared (or error) parameter types, the return type is taken from the
 * annotation, inferred from the body, or borrowed from a function-typed expected type,
 * and the resulting function type is checked against the context.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
if (context.preferBlock && !functionLiteral.hasParameterSpecification()) {
// '{ ... }' in a block position with no parameters: it is just a block
context.trace.record(BindingContext.BLOCK, expression);
return context.services.checkType(getBlockReturnedType(context.scope, functionLiteral.getBodyExpression(), context), expression, context);
}
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// Unannotated literal parameters cannot be inferred yet — substitute an error type
context.trace.getErrorHandler().genericError(parameter.getNode(), "Type inference for parameters is not implemented yet");
type = ErrorUtils.createErrorType("Not inferred");
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// Receiver participates only when explicitly declared on the literal
JetType effectiveReceiverType = receiverTypeRef == null ? null : receiverType;
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo);
}
else {
// No declared return type: borrow from a function-typed expected type, then infer
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(context.expectedType)) {
returnType = JetStandardClasses.getReturnType(context.expectedType);
}
returnType = getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    // Parentheses are transparent: the expression's type is the inner expression's type.
    JetType innerType = getType(context.scope, expression.getExpression(), false, context);
    return context.services.checkType(innerType, expression, context);
}
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    // Resolve the literal into a compile-time constant, then either report the
    // resolution error or record the value and check its type against expectations.
    ASTNode node = expression.getNode();
    CompileTimeConstant<?> value = resolveCompileTimeValue(expression, node, context);
    if (value instanceof ErrorValue) {
        ErrorValue errorValue = (ErrorValue) value;
        context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
        return null;
    }
    context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    return context.services.checkType(value.getType(standardLibrary), expression, context);
}

// Dispatches on the literal's element type to the matching compile-time resolver.
private CompileTimeConstant<?> resolveCompileTimeValue(JetConstantExpression expression, ASTNode node, TypeInferenceContext context) {
    IElementType elementType = node.getElementType();
    String text = node.getText();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        return resolver.getIntegerValue(text, context.expectedType);
    }
    if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        return resolver.getFloatValue(text, context.expectedType);
    }
    if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return resolver.getBooleanValue(text, context.expectedType);
    }
    if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        return resolver.getCharValue(text, context.expectedType);
    }
    if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return resolver.getRawStringValue(text, context.expectedType);
    }
    if (elementType == JetNodeTypes.NULL) {
        return resolver.getNullValue(context.expectedType);
    }
    throw new IllegalArgumentException("Unsupported constant: " + expression);
}
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
JetExpression thrownExpression = expression.getThrownExpression();
if (thrownExpression != null) {
// Type-check the thrown value (no expectation imposed); the resulting type is
// currently unused pending the Throwable-inheritance check below.
JetType type = getType(context.scope, thrownExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
// TODO : check that it inherits Throwable
}
// 'throw' never completes normally, so its own type is Nothing.
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    // FORBIDDEN marks contexts where 'return' must be rejected outright.
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Type the returned value against the function's expected return type; any
        // mismatch is reported inside getType, so the result is not needed here.
        // (Removed a dead local that was assigned the Unit type and never read.)
        getType(context.scope, returnedExpression, false, context.replaceExpectedType(context.expectedReturnType));
    }
    else {
        // A bare 'return' is only legal when the function is expected to return Unit
        // (or when no expectation is in force).
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    // The 'return' expression itself never yields a value: its type is Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    // 'break' never yields a value; its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    // 'continue' never yields a value; its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
// For "flexible" literals (integer/float constants) first try checking the LHS
// against the target type itself; only that attempt's trace is committed on success.
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
// Fall back to typing the LHS with no expectation, and commit that trace instead.
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
// Non-flexible LHS: type it once with no expectation.
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
// 'as?' produces a nullable target type (it yields null on failure); 'as'/':' do not.
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
// No RHS type (parser recovery): still type the LHS so errors inside it are reported.
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * Returns true when the expression is a numeric literal whose type may be adapted
 * to the expected type (integer or float constants).
 *
 * Compares element types directly instead of building a {@code TokenSet} on every
 * call: IElementType instances are singletons, so identity comparison is equivalent
 * to set membership and avoids the per-call allocation.
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) return false;
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
           || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Types the LHS of an "expr : T" / "expr as T" / "expr as? T" expression against
 * {@code expectedType} (recording into {@code temporaryTrace}) and validates the
 * operation. Returns false when the LHS fails to type or the operation is invalid.
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
    TypeInferenceContext lhsContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
    JetType lhsType = getType(context.scope, expression.getLeft(), false, lhsContext);
    if (lhsType == null) return false;

    JetSimpleNameExpression sign = expression.getOperationSign();
    IElementType operation = sign.getReferencedNameElementType();
    if (operation == JetTokens.COLON) {
        // ':' is a static type ascription: the LHS must already conform to the target.
        if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(lhsType, targetType)) {
            context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, lhsType);
            return false;
        }
        return true;
    }
    if (operation == JetTokens.AS_KEYWORD || operation == JetTokens.AS_SAFE) {
        // Casts are always accepted here; impossible or redundant ones only warn.
        checkForCastImpossibility(expression, lhsType, targetType, context);
        return true;
    }
    context.trace.getErrorHandler().genericError(sign.getNode(), "Unsupported binary operation");
    return false;
}
/**
 * Warns about casts that are redundant (upcasts) or can never succeed
 * (types unrelated in either direction). Downcasts pass silently.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    if (typeChecker.isSubtypeOf(targetType, actualType)) {
        // Potential downcast; warn only if it is also an upcast (i.e. trivially redundant).
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (typeChecker.isSubtypeOf(actualType, targetType)) {
        // Pure upcast: a ':' ascription would do.
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    // Type each tuple component with no expectation, then try to enrich the component
    // types against an expected tuple type, if one is in force.
    List<JetExpression> entries = expression.getEntries();
    List<JetType> componentTypes = new ArrayList<JetType>();
    for (int i = 0; i < entries.size(); i++) {
        componentTypes.add(context.services.safeGetType(context.scope, entries.get(i), false, NO_EXPECTED_TYPE)); // TODO
    }
    boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
    if (tupleExpected) {
        List<JetType> enrichedTypes = context.services.checkArgumentTypes(componentTypes, entries, context.expectedType.getArguments(), context);
        // Identity comparison on purpose: the checker returns the same list when nothing changed.
        if (enrichedTypes != componentTypes) {
            return JetStandardClasses.getTupleType(enrichedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(componentTypes), expression, context);
}
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
// A label ("this@label") selects which enclosing declaration 'this' refers to.
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Exactly one labeled declaration in scope: class -> its default type,
// function -> its receiver type.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver on the literal: Nothing triggers the "'this' is not defined" error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
// More than one declaration carries this label.
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': use the scope's implicit receiver.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
// "this<SuperType>" form: project 'this' onto a declared supertype.
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
// Find the declared supertype matching the qualifier and substitute
// this type's arguments into it.
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
    // Delegate: a block's type is whatever getBlockReturnedType computes for its statements.
    JetScope enclosingScope = context.scope;
    return getBlockReturnedType(enclosingScope, expression, context);
}
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
// The expectation applies to entry bodies, not to the subject/conditions.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, false, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: it may bind pattern variables into the entry's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple comma-separated conditions: OR their data-flow facts; pattern
// bindings are discarded (each condition checks into a throwaway scope).
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
// AND the combined condition facts back into the incoming data flow.
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// Entry bodies see the original expected type and the refined data-flow info.
JetType type = getTypeWithNewDataFlowInfo(scopeToExtend, bodyExpression, true, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
// The when-expression's type is the common supertype of all entry body types.
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Type-checks a single when-entry condition against the subject and returns the
 * data-flow info refined by the condition (e.g. 'is' patterns). Pattern bindings are
 * added to {@code scopeToExtend}; {@code subjectVariables} are the variables the
 * condition's facts apply to. The one-element array is used to let the anonymous
 * visitor write the result back.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
// Condition of the form ".foo()" / "?.foo()": call on the subject, must be Boolean.
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
// "in range" condition: reuses the general 'in' expression check.
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
// "is Pattern": delegates to pattern checking, which refines the data flow.
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Type-checks a pattern against the subject type, adds any bound variables to
 * {@code scopeToExtend}, and returns data-flow info refined by the match (e.g.
 * instance-of facts for {@code subjectVariables}). The one-element array lets the
 * anonymous visitor write the result back.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// "is T": check compatibility and record the instance-of fact.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: arity must match the subject's tuple type; entries are
// matched component-wise, AND-ing the resulting data-flow facts.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: match the argument list against the decomposer's return type.
JetType selectorReturnType = getSelectorReturnType(subjectType, pattern.getDecomposerExpression(), context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Expression pattern: only check type compatibility with the subject.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(scopeToExtend, expression, false, context);
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// Binding pattern: declares a variable (typed by the subject unless annotated)
// and optionally checks a nested condition with that variable added to the subjects.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
// An explicit type on a binding must widen (not narrow) the subject.
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the pattern type and the subject type cannot intersect.
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
// Type-check every catch body in a scope extended with the catch parameter.
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchScope, catchBody, true, context);
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
// With a finally block, only the finally section's type (plus the try block's)
// determines the result; catch body types are discarded after being checked.
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(context.scope, finallyBlock.getFinalExpression(), true, context);
if (type != null) {
types.add(type);
}
}
JetType type = getType(context.scope, tryBlock, true, context);
if (type != null) {
types.add(type);
}
// Result is the common supertype of the collected branch types, or null if none typed.
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
// The expectation applies to the branches, not to the condition.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// Facts learned from the condition: 'then' may also gain scope entries (e.g. 'is' bindings).
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
// One-armed if: statement-like, result type is Unit.
if (thenBranch != null) {
JetType type = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, context);
// If 'then' always jumps (Nothing), code after the if sees the else-facts.
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Two-armed if: each branch is typed with its own data-flow info and the caller's
// original expectation; result is the common supertype of the two branch types.
JetType thenType = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
// When exactly one branch always jumps, propagate the other branch's facts onward.
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Derives data-flow facts from a boolean condition, assuming it evaluates to
 * {@code conditionValue}. Handles 'is' patterns, &&/|| composition, null
 * comparisons (==/!=/===/!==), negation, and parentheses. Pattern-bound variables
 * are added to {@code scopeToExtend} when it is non-null. Returns the incoming
 * info unchanged for conditions it does not understand.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// 'x is T' (or '!is' under a false assumption): reuse the facts and bindings
// recorded when the is-expression itself was typed.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// For '&&' facts hold when the whole is true; for '||' when it is false —
// only then may pattern bindings be propagated to the scope.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that 'left' is the simple-name side (swap operands if needed).
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
// Equality with a non-null value implies the variable is non-null.
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
// Symmetric case: non-null LHS variable implies facts about the RHS variable.
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records nullness facts for a variable given the comparison operator; '==' and '!='
// flip the meaning of equalsToNull relative to each other.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// '!x': recurse with the assumed truth value inverted.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent to data-flow extraction.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
// 'is' over an unknown pattern can leave null in the map; fall back to the incoming info.
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Type-checks a loop/branch condition and reports an error unless its type
 * is (convertible to) Boolean. A null condition (incomplete code) is ignored.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType conditionType = getType(scope, condition, false, context);
    if (conditionType == null) {
        return; // type could not be inferred; an error was already reported
    }
    if (!isBoolean(conditionType)) {
        context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
    }
}
/**
 * Types a 'while' loop: checks the condition is Boolean, types the body in a
 * scope extended with facts from a true condition, and — when no 'break' can
 * exit the loop — propagates the facts from a false condition to the code
 * after the loop. The loop expression itself has type Unit.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A loop is a statement: drop the caller's expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
        getTypeWithNewDataFlowInfo(scopeToExtend, body, true, conditionInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // No 'break' exits this loop, so code after it only runs once the condition
        // is false. Guard the null condition (incomplete code) exactly as the
        // body branch above does — the previous unguarded call risked an NPE.
        resultDataFlowInfo = condition == null
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Types a 'do..while' loop. Declarations made in the body must be visible to
 * the condition, so a plain body (or a parameterless function-literal body)
 * is typed in a writable scope that then serves as the condition's scope.
 * The loop expression itself has type Unit.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A loop is a statement: drop the caller's expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (!function.getFunctionLiteral().hasParameterSpecification()) {
            // A parameterless literal is treated as a block so that its
            // declarations leak into the condition's scope.
            WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = writableScope;
            // NOTE(review): getBodyExpression() could be null on broken code — TODO confirm upstream guarantees.
            context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), context);
            context.trace.record(BindingContext.BLOCK, function);
        } else {
            // A literal with parameters is a real function value; type it normally.
            getType(context.scope, body, true, context);
        }
    }
    else if (body != null) {
        WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = writableScope;
        context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // Guard the null condition (incomplete code) before extracting data flow,
        // mirroring visitWhileExpression — the previous unguarded call risked an NPE.
        resultDataFlowInfo = condition == null
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/** Creates a fresh writable scope nested in {@code scope}, owned by the same declaration. */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Types a 'for' loop: resolves the range's iteration convention, declares the
 * loop parameter (checking or inferring its type), and types the body in a
 * scope containing that parameter. The loop expression itself has type Unit.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    // A loop is a statement: drop the caller's expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetParameter loopParameter = expression.getLoopParameter();
    JetExpression loopRange = expression.getLoopRange();
    JetType loopRangeType = null;
    if (loopRange != null) {
        loopRangeType = getType(context.scope, loopRange, false, context);
    }
    // Element type produced by the range's iterator()/next() convention, if it resolves.
    JetType expectedParameterType = null;
    if (loopRangeType != null) {
        expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    if (loopParameter != null) {
        JetTypeReference typeReference = loopParameter.getTypeReference();
        VariableDescriptor variableDescriptor;
        if (typeReference != null) {
            // Explicit parameter type: resolve it, then check the declared type
            // against the element type produced by the range.
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType actualParameterType = variableDescriptor.getOutType();
            if (expectedParameterType != null &&
                    actualParameterType != null &&
                    !semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
            }
        }
        else {
            // No explicit type: infer from the range; fall back to an error type
            // so resolution can continue on broken code.
            if (expectedParameterType == null) {
                expectedParameterType = ErrorUtils.createErrorType("Error");
            }
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
        }
        loopScope.addVariableDescriptor(variableDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(loopScope, body, true, context); // TODO
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Checks that {@code type} satisfies the for-loop convention: an 'iterator()'
 * function whose result type offers 'hasNext' (as a no-arg function or as a
 * property, returning Boolean) and a 'next()' function. The resolved members
 * are recorded into the trace for later phases. Returns the return type of
 * 'next()' (the loop's element type), or null when the convention is not
 * satisfied; all diagnostics are reported on the loop-range expression.
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
    ASTNode reportErrorsOn = loopRange.getNode();
    OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
    if (iteratorResolutionResult.isSuccess()) {
        FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
        context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
        JetType iteratorType = iteratorFunction.getReturnType();
        // 'hasNext' may be provided either as a function or as a property;
        // having both is ambiguous (unless the iterator type is already an error).
        FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
        boolean hasNextFunctionSupported = hasNextFunction != null;
        VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
        boolean hasNextPropertySupported = hasNextProperty != null;
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        else {
            // Exactly one flavor found: record whichever one resolved.
            context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
        }
        OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
            context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
            return nextFunction.getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
/**
 * Resolves a no-arg 'hasNext' function on the iterator type. Reports an error
 * on ambiguity or a non-Boolean return type; returns null only when no such
 * function exists at all.
 */
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult resolution = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
    } else if (resolution.isNothing()) {
        return null;
    } else {
        assert resolution.isSuccess();
        JetType returnType = resolution.getFunctionDescriptor().getReturnType();
        if (!isBoolean(returnType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
        }
    }
    return resolution.getFunctionDescriptor();
}
/**
 * Looks up a 'hasNext' property on the iterator type. Reports an error when
 * the property is unreadable or not Boolean; returns null only when no such
 * property exists.
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) {
        return null;
    }
    JetType propertyType = property.getOutType();
    if (propertyType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(propertyType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
    }
    return property;
}
/** Hash-qualified expressions ('a#b') are not supported by this inferrer. */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Types a qualified expression 'receiver.selector' (or '?.', '?').
 * Resolution is attempted first without autocasts on a temporary trace; if
 * that fails and data flow knows more precise types for the receiver
 * variable, each possible type is retried (most recent first) and the
 * successful autocast is committed to the real trace.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    // The receiver may be a namespace, so the namespace-aware visitor is used.
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, new TypeInferenceContext(context.trace, context.scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    // Clean resolution: no autocasts
    TemporaryBindingTrace cleanResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    // ErrorHandler errorHandler = context.trace.getErrorHandler();
    // errorHandler.openRegion();
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' yields a nullable result (unless the selector is Unit).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        // Clean resolution succeeded: commit its bindings.
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Clean resolution failed: retry with each data-flow-refined type of the
        // receiver variable, committing only the first successful attempt.
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            // Reversed so the most recently established (most specific) fact is tried first.
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.addAllMyDataTo(context.trace);
                    somethingFound = true;
                    break;
                }
                else {
                    // Discard the failed attempt's bindings; start a fresh trace.
                    autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // Nothing worked: commit the clean attempt's diagnostics.
            cleanResolutionTrace.addAllMyDataTo(context.trace);
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        // 'a?b' — the selector must be Boolean; the result is the nullable receiver type.
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            // Check '.' vs '?.' against the receiver's nullability.
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Extracts the function descriptor ultimately invoked by a selector
 * expression, drilling through calls, array accesses, binary and qualified
 * expressions to the underlying reference. Falls back to an error function
 * (never null) when no function target can be determined.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    final FunctionDescriptor[] result = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // 'f(...)' — the callee expression names the function.
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // The reference target was recorded during resolution; keep it if it is a function.
            DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, referenceExpression);
            if (declarationDescriptor instanceof FunctionDescriptor) {
                result[0] = (FunctionDescriptor) declarationDescriptor;
            }
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            // 'a[...]' — drill into the array part.
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            // 'a op b' — drill into the left operand.
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            // 'a.b' — drill into the receiver.
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (result[0] == null) {
        // Guarantee the @NotNull contract with a zero-arg error function.
        result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return result[0];
}
/**
 * Resolves a call expression against an optional receiver type and returns
 * the call's result type, or null when there is no callee (incomplete code).
 */
private JetType getCallExpressionType(@Nullable JetType receiverType, @NotNull JetCallExpression callExpression, TypeInferenceContext context) {
    JetExpression callee = callExpression.getCalleeExpression();
    if (callee == null) {
        return null;
    }
    OverloadDomain candidates = context.services.getOverloadDomain(receiverType, context.scope, callee, callExpression.getValueArgumentList());
    return context.services.resolveCall(context.scope, candidates, callExpression);
}
/**
 * Types the selector of a qualified expression against the given receiver
 * type: a call is resolved as a call, a simple name is looked up in a scope
 * composed with the receiver, anything else is reported as unsupported.
 */
private JetType getSelectorReturnType(JetType receiverType, JetExpression selectorExpression, TypeInferenceContext context) {
    if (selectorExpression == null) {
        return null;
    }
    if (selectorExpression instanceof JetCallExpression) {
        return getCallExpressionType(receiverType, (JetCallExpression) selectorExpression, context);
    }
    if (selectorExpression instanceof JetSimpleNameExpression) {
        JetScope scopeWithReceiver = new ScopeWithReceiver(context.scope, receiverType, semanticServices.getTypeChecker());
        return getType(scopeWithReceiver, selectorExpression, false, context);
    }
    // TODO : not a simple name -> resolve in scope, expect property type or a function type
    context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    return null;
}
/** A top-level call has no receiver: resolve it in the current scope. */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    JetType callType = getCallExpressionType(null, expression, context);
    return context.services.checkType(callType, expression, context);
}
/**
 * Types an 'is' expression: checks the pattern against the subject's type and
 * remembers, per pattern, the data-flow facts and bound variables it
 * introduces (consumed later by 'when'/'if'). The expression itself is Boolean.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression subject = expression.getLeftHandSide();
    JetType subjectType = getType(context.scope, subject, false, context);
    JetPattern pattern = expression.getPattern();
    if (pattern != null && subjectType != null) {
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        VariableDescriptor subjectVariable = context.services.getVariableDescriptorFromSimpleName(subject, context);
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariable);
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types a unary expression by resolving it to the conventional operator
 * function on the operand's type. Labels are passed through; '++'/'--' get
 * extra checks because they write back to the operand.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // TODO : Some processing for the label?
        // A label does not change the expression's type.
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    // Map the operator token to its convention function name (e.g. '-' -> "minus").
    IElementType operationType = operationSign.getReferencedNameElementType();
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(context.scope, baseExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(context.scope, expression.getOperationSign(), name, receiverType, Collections.<JetType>emptyList(), true);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        // '++'/'--' must either return Unit (pure side effect) or a value that can
        // be stored back into the operand; otherwise the reassignment is invalid.
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Types a binary expression, dispatching on the operator token: infix calls,
 * convention operators, assignments, comparisons, equality, identity, 'in',
 * boolean connectives with data-flow propagation, and Elvis.
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Infix call: 'a foo b' resolves like 'a.foo(b)'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(expression, referencedName, context.scope, true, context);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Convention operator ('+' -> "plus", etc.).
        result = getTypeForBinaryCall(expression, binaryOperationNames.get(operationType), context.scope, true, context);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' desugar to compareTo, which must return Int.
        JetType compareToReturnType = getTypeForBinaryCall(expression, "compareTo", context.scope, true, context);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // '=='/'!=' need an equals(Any?) : Boolean on the left operand; the
        // expression is Boolean regardless, so diagnostics don't cascade.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(context.scope, left, false, context);
            if (leftType != null) {
                JetType rightType = getType(context.scope, right, false, context);
                if (rightType != null) {
                    FunctionDescriptor equals = context.services.lookupFunction(
                            context.scope, operationSign, "equals",
                            leftType, Collections.singletonList(JetStandardClasses.getNullableAnyType()), false);
                    if (equals != null) {
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                    }
                }
            }
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        // Identity comparison: only sanity-check that the operand types can overlap.
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // NOTE(review): this assignment is dead — 'return null' discards it and
            // skips the checkType call below. Presumably it should return the error
            // type through checkType instead; confirm intent before changing.
            result = ErrorUtils.createErrorType("No right argument"); // TODO
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        // '&&'/'||': the right operand is typed under the data flow implied by the
        // left (for '&&', left == true), so smart casts from the left apply.
        JetType leftType = getType(context.scope, left, false, context);
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getTypeWithNewDataFlowInfo(rightScope, right, false, flowInfoLeft, context);
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        // 'a ?: b' — result is the common supertype, nullable only if 'b' is.
        JetType leftType = getType(context.scope, left, false, context);
        JetType rightType = right == null ? null : getType(context.scope, right, false, contextWithExpectedType);
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // NOTE(review): "is always returns" is ungrammatical in this user-facing
                // message; left byte-identical here since doc updates must not touch strings.
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) is always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks an 'in' expression: 'a in b' desugars to 'b.contains(a)' — note the
 * RIGHT operand is the receiver — and the result must be Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String conventionName = "contains";
    JetType containsResultType = getTypeForBinaryCall(context.scope, right, operationSign, left, conventionName, true, context);
    ensureBooleanResult(operationSign, conventionName, containsResultType, context);
}
/**
 * For (in)equality/identity comparisons: reports an error when the operand
 * types have an empty intersection, i.e. the comparison can never be true.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(context.scope, left, false, context);
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(context.scope, right, false, context);
    if (rightType == null) {
        return;
    }
    JetType intersect = TypeUtils.intersect(semanticServices.getTypeChecker(), new HashSet<JetType>(Arrays.asList(leftType, rightType)));
    if (intersect == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
/**
 * Augmented assignment ('+=' etc.) in expression position: an error by
 * default; the writable-scope subclass overrides this to allow it.
 */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * Plain assignment ('=') in expression position: an error by default; the
 * writable-scope subclass overrides this to allow it.
 */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/** Reports that an assignment appeared where an expression is required; yields no type. */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    context.trace.getErrorHandler().genericError(node, "Assignments are not expressions, and only expressions are allowed in this context");
    return null;
}
/** Convenience wrapper: checks {@code resultType} is Boolean, quoting {@code name} in the diagnostic. */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String quotedSubject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, quotedSubject, context);
}
/**
 * Returns true when {@code resultType} is Boolean (or unknown); otherwise
 * reports an error naming {@code subjectName} and returns false.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    // An unknown type is not re-reported here.
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True when {@code type} is convertible to the standard library's Boolean. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Types each index/argument expression in {@code scope} with no expected
 * type; returns the types in order, or null as soon as any one fails.
 */
@Nullable
protected List<JetType> getTypes(JetScope scope, List<JetExpression> indexExpressions, TypeInferenceContext context) {
    TypeInferenceContext indexContext = new TypeInferenceContext(context.trace, scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    List<JetType> argumentTypes = new ArrayList<JetType>();
    for (JetExpression indexExpression : indexExpressions) {
        JetType indexType = context.services.typeInferrerVisitor.getType(indexExpression, indexContext);
        if (indexType == null) {
            // One argument failed to type-check: the whole list is unusable.
            return null;
        }
        argumentTypes.add(indexType);
        context.services.typeInferrerVisitor.resetResult(); // TODO : recreate?
    }
    return argumentTypes;
}
/**
 * Types 'a[i, ...]' by resolving the 'get' convention function on the array
 * expression's type with the index expressions as arguments.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // Type the receiver first, then the indices, so diagnostics appear in source order.
    JetType receiverType = getType(context.scope, expression.getArrayExpression(), false, context);
    List<JetType> argumentTypes = getTypes(context.scope, expression.getIndexExpressions(), context);
    if (argumentTypes == null || receiverType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.lookupFunction(context.scope, expression, "get", receiverType, argumentTypes, true);
    if (getFunction == null) {
        return null;
    }
    // checkNullSafety(receiverType, expression.getIndexExpressions().get(0).getNode(), functionDescriptor);
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Convenience overload: resolves the convention function {@code name} for a
 * binary expression, or returns null when the right operand is missing
 * (incomplete code).
 */
@Nullable
protected JetType getTypeForBinaryCall(
        @NotNull JetBinaryExpression expression,
        @NotNull String name,
        @NotNull JetScope scope,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetExpression right = expression.getRight();
    if (right == null) {
        return null;
    }
    return getTypeForBinaryCall(scope, expression.getLeft(), expression.getOperationReference(), right, name, reportUnresolved, context);
}
/**
 * Resolves the infix convention call 'left.name(right)' and returns its
 * return type, or null when either operand fails to type-check or the
 * function is unresolved. A nullable left operand is an error because it is
 * the receiver of the desugared dot-qualified call.
 */
@Nullable
private JetType getTypeForBinaryCall(
        @NotNull JetScope scope,
        @NotNull JetExpression left,
        @NotNull JetSimpleNameExpression operationSign,
        @NotNull JetExpression right,
        @NotNull String name,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetType leftType = getType(scope, left, false, context);
    JetType rightType = getType(scope, right, false, context);
    if (leftType == null || rightType == null) {
        return null;
    }
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, name, leftType, Collections.singletonList(rightType), reportUnresolved);
    if (functionDescriptor != null) {
        if (leftType.isNullable()) {
            // TODO : better error message for '1 + nullableVar' case
            // Bug fix: the receiver of the desugared call is the LEFT operand;
            // this message previously printed the right operand's text.
            context.trace.getErrorHandler().genericError(operationSign.getNode(),
                    "Infix call corresponds to a dot-qualified call '" +
                    left.getText() + "." + name + "(" + right.getText() + ")'" +
                    " which is not allowed on a nullable receiver '" + left.getText() + "'." +
                    " Use '?.'-qualified call instead");
        }
        return functionDescriptor.getReturnType();
    }
    return null;
}
/** A declaration encountered in expression position is always an error here. */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    ASTNode node = dcl.getNode();
    context.trace.getErrorHandler().genericError(node, "Declarations are not allowed in this position");
    return null;
}
/** The root namespace is not a value; only the namespace-aware subclass accepts it. */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    context.trace.getErrorHandler().genericError(node, "'namespace' is not an expression");
    return null;
}
/**
 * Types a string template. Entries are typed as they are visited; purely
 * literal/escape templates are additionally folded into a compile-time
 * StringValue recorded in the trace. OUT_OF_RANGE in value[0] marks the
 * template as not a compile-time constant (interpolation or a bad escape).
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // Accumulates the constant value of the template, if it has one.
    final StringBuilder builder = new StringBuilder();
    // Sentinel slot: set to OUT_OF_RANGE when the template cannot be a constant.
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // '${...}' — type the embedded expression; the template is no longer constant.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(context.scope, entryExpression, true, context);
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        // Every entry folded to a literal: record the constant string value.
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    // Catch-all for PSI elements this inferrer does not handle yet.
    String message = "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), message);
    return null;
}
}
/**
 * A type-inferrer visitor that additionally accepts namespace references in
 * expression position (e.g. for qualified expressions whose receiver is a namespace).
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        // Namespaces are legal wherever this visitor is applied.
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // Unlike the base visitor, the root namespace is a valid expression here.
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fall back to resolving the simple name as a namespace; succeed iff one was found.
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
/**
 * A type-inferrer visitor for block bodies: local declarations (objects, properties,
 * functions) encountered while typing statements are added to the given writable scope
 * so that subsequent statements can see them.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    // Scope that local declarations are registered into as the block is processed.
    private final WritableScope scope;
    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }
    // public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
    // super(trace);
    // this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
    // }
    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        // Run top-down analysis on the local object, then expose it in the scope as a property.
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        // Object declarations are statements: they have no expression type.
        return null;
    }
    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        // Local variables may not have receivers, getters, or setters — report each if present.
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        // When both a declared type and an initializer are present, type the initializer
        // against the declared type (the checkType machinery reports any mismatch).
        if (property.getPropertyTypeRef() != null && initializer != null) {
            JetType outType = propertyDescriptor.getOutType();
            JetType initializerType = getType(scope, initializer, false, context.replaceExpectedType(outType));
            // if (outType != null &&
            // initializerType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
            // context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
            // }
        }
        // Make the variable visible to subsequent statements in the block.
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }
    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        // Resolve the local function, register it in the scope, and check its return type.
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }
    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }
    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }
    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        // Any declaration kind not handled above is unsupported in a block.
        return visitJetElement(dcl, context);
    }
    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        // Try 'a += b' first as the dedicated operation (plusAssign); if that does not
        // resolve, fall back to the plain counterpart (plus) and mark a reassignment.
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        JetType assignmentOperationType = getTypeForBinaryCall(expression, name, scope, false, context);
        if (assignmentOperationType == null) {
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(expression, counterpartName, scope, true, context);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        // Assignments are statements: they have no value.
        return null;
    }
    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        JetExpression left = expression.getLeft();
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        // 'a[i] = x' resolves to a 'set' call on the array expression.
        if (deparenthesized instanceof JetArrayAccessExpression) {
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        // Plain assignment: type the LHS freely, then the RHS against the LHS type.
        JetType leftType = getType(scope, left, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
        if (right != null) {
            JetType rightType = getType(scope, right, false, context.replaceExpectedType(leftType));
            // if (rightType != null &&
            // leftType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
            // context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
            // }
        }
        return null;
    }
    // Resolves 'array[indices] = rhs' to a call of 'set(indices..., rhs)' on the array expression.
    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext contextWithExpectedType) {
        TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
        List<JetType> argumentTypes = getTypes(scope, arrayAccessExpression.getIndexExpressions(), context);
        if (argumentTypes == null) return null;
        JetType rhsType = getType(scope, rightHandSide, false, context);
        if (rhsType == null) return null;
        // The right-hand side becomes the last argument of 'set'.
        argumentTypes.add(rhsType);
        JetType receiverType = getType(scope, arrayAccessExpression.getArrayExpression(), false, context);
        if (receiverType == null) return null;
        // TODO : nasty hack: effort is duplicated
        // Looked up twice so both the access expression and the operation sign get a reference target.
        context.services.lookupFunction(scope, arrayAccessExpression, "set", receiverType, argumentTypes, true);
        FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, "set", receiverType, argumentTypes, true);
        if (functionDescriptor == null) return null;
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, contextWithExpectedType);
    }
    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElement;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lang.types.inference.ConstraintSystem;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel type used where an expected type must never be consulted (e.g. the
// expected *return* type while typing an expression body). Every member throws,
// so any accidental use fails fast. Compared by identity, never inspected.
private static final JetType FORBIDDEN = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
};
// Sentinel meaning "no expectation": the caller does not constrain the expression's
// type. Code only ever compares against it with '=='; every member throws to catch
// accidental use as a real type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
};
// Maps unary operator tokens to the convention-method names they resolve to.
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.PLUSPLUS, "inc")
        .put(JetTokens.MINUSMINUS, "dec")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.EXCL, "not")
        .build();
// Maps binary operator tokens to the convention-method names they resolve to.
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MUL, "times")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.DIV, "div")
        .put(JetTokens.PERC, "mod")
        .put(JetTokens.ARROW, "arrow")
        .put(JetTokens.RANGE, "rangeTo")
        .build();
// Operator families that get special typing rules rather than a direct name lookup.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Maps compound-assignment tokens ('+=' etc.) to their dedicated convention methods.
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MULTEQ, "timesAssign")
        .put(JetTokens.DIVEQ, "divAssign")
        .put(JetTokens.PERCEQ, "modAssign")
        .put(JetTokens.PLUSEQ, "plusAssign")
        .put(JetTokens.MINUSEQ, "minusAssign")
        .build();
// Maps each compound-assignment token to its plain-operator counterpart,
// used as a fallback when the dedicated 'xAssign' method does not resolve.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
        .put(JetTokens.MULTEQ, JetTokens.MUL)
        .put(JetTokens.DIVEQ, JetTokens.DIV)
        .put(JetTokens.PERCEQ, JetTokens.PERC)
        .put(JetTokens.PLUSEQ, JetTokens.PLUS)
        .put(JetTokens.MINUSEQ, JetTokens.MINUS)
        .build();
private final JetSemanticServices semanticServices;
// Supplies control-flow facts (unreachable code, returned expressions).
private final JetFlowInformationProvider flowInformationProvider;
// Caches data-flow info and bound variables per pattern, filled during match analysis.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.semanticServices = semanticServices;
    this.flowInformationProvider = flowInformationProvider;
}
/**
 * Creates a per-trace facade over this inferrer; all recorded bindings and
 * reported errors go through the given trace.
 */
public Services getServices(@NotNull BindingTrace trace) {
    return new Services(trace);
}
public class Services {
// Trace that all bindings and diagnostics produced by this Services instance go to.
private final BindingTrace trace;
// Per-trace resolvers; each is constructed against the same trace in the constructor below.
private final ClassDescriptorResolver classDescriptorResolver;
private final TypeResolver typeResolver;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final AnnotationResolver annotationResolver;
// Shared stateless visitors reused across getType() calls.
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Wires all per-trace collaborators; obtained via JetTypeInferrer.getServices(trace).
private Services(BindingTrace trace) {
    this.trace = trace;
    this.annotationResolver = new AnnotationResolver(semanticServices, trace);
    this.typeResolver = new TypeResolver(semanticServices, trace, true);
    this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
    this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
    this.typeInferrerVisitor = new TypeInferrerVisitor();
    this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
}
// Factory for a block-level visitor that registers local declarations into 'scope'.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
    return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Like {@link #getType}, but never returns null: when inference fails,
 * an error type naming the expression is substituted instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, preferBlock, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Infers the type of {@code expression} in {@code scope} against {@code expectedType}.
 * Returns null when inference fails. The expected *return* type is FORBIDDEN:
 * expressions typed through this entry point must not consult it.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock, @NotNull JetType expectedType) {
    TypeInferenceContext context =
            new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN);
    return typeInferrerVisitor.getType(expression, context);
}
/**
 * Same as {@link #getType} but permits namespace references in expression position,
 * and imposes no expected type.
 */
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression, boolean preferBlock) {
    TypeInferenceContext context =
            new TypeInferenceContext(trace, scope, preferBlock, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    return typeInferrerVisitorWithNamespaces.getType(expression, context);
}
/**
 * Resolves the function {@code name} on {@code receiverType} for the given positional
 * argument types. Resolution is traced: the reference target is recorded and, when
 * {@code reportUnresolved} is set, failures are reported on {@code reference}.
 * Returns the resolved descriptor, or null when resolution fails.
 */
@Nullable
private FunctionDescriptor lookupFunction(
        @NotNull JetScope scope,
        @NotNull JetReferenceExpression reference,
        @NotNull String name,
        @NotNull JetType receiverType,
        @NotNull List<JetType> argumentTypes,
        boolean reportUnresolved) {
    // No generics. Guaranteed
    OverloadDomain domain = wrapForTracing(
            semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name),
            reference, null, reportUnresolved);
    OverloadResolutionResult result =
            domain.getFunctionDescriptorForPositionedArguments(Collections.<JetType>emptyList(), argumentTypes);
    if (!result.isSuccess()) {
        return null;
    }
    return result.getFunctionDescriptor();
}
/**
 * Resolves a zero-argument function {@code name} on {@code receiverType}.
 * Unlike {@link #lookupFunction}, the raw resolution result is returned untraced.
 */
@NotNull
private OverloadResolutionResult resolveNoParametersFunction(@NotNull JetType receiverType, @NotNull JetScope scope, @NotNull String name) {
    // No generics. Guaranteed
    List<JetType> noArguments = Collections.emptyList();
    OverloadDomain overloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, name);
    return overloadDomain.getFunctionDescriptorForPositionedArguments(noArguments, noArguments);
}
/**
 * Builds an overload domain for the callee of a call expression. Only simple-name
 * callees are currently supported; qualified and other callee shapes report errors
 * or throw. The returned domain is wrapped for tracing (reference targets recorded,
 * failures reported).
 */
private OverloadDomain getOverloadDomain(
        @Nullable final JetType receiverType,
        @NotNull final JetScope scope,
        @NotNull JetExpression calleeExpression,
        @Nullable PsiElement argumentList
) {
    // One-element arrays let the anonymous visitor write results back to this frame.
    final OverloadDomain[] result = new OverloadDomain[1];
    final JetSimpleNameExpression[] reference = new JetSimpleNameExpression[1];
    calleeExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitHashQualifiedExpression(JetHashQualifiedExpression expression) {
            // a#b -- create a domain for all overloads of b in a
            throw new UnsupportedOperationException(); // TODO
        }
        @Override
        public void visitPredicateExpression(JetPredicateExpression expression) {
            // overload lookup for checking, but the type is receiver's type + nullable
            throw new UnsupportedOperationException(); // TODO
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            // Qualified callees ('a.b(...)' / 'a?.b(...)') are not supported yet; see the
            // retained sketch below for the intended implementation.
            trace.getErrorHandler().genericError(expression.getNode(), "Unsupported [JetTypeInferrer]");
            // . or ?.
            // JetType receiverType = getType(scope, expression.getReceiverExpression(), false);
            // checkNullSafety(receiverType, expression.getOperationTokenNode());
            //
            // JetExpression selectorExpression = expression.getSelectorExpression();
            // if (selectorExpression instanceof JetSimpleNameExpression) {
            // JetSimpleNameExpression referenceExpression = (JetSimpleNameExpression) selectorExpression;
            // String referencedName = referenceExpression.getReferencedName();
            //
            // if (receiverType != null && referencedName != null) {
            // // No generics. Guaranteed
            // result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
            // reference[0] = referenceExpression;
            // }
            // } else {
            // throw new UnsupportedOperationException(); // TODO
            // }
        }
        @Override
        public void visitSimpleNameExpression(JetSimpleNameExpression expression) {
            // a -- create a hierarchical lookup domain for this.a
            String referencedName = expression.getReferencedName();
            if (referencedName != null) {
                // No generics. Guaranteed
                result[0] = semanticServices.getOverloadResolver().getOverloadDomain(receiverType, scope, referencedName);
                reference[0] = expression;
            }
        }
        @Override
        public void visitExpression(JetExpression expression) {
            // <e> create a dummy domain for the type of e
            throw new UnsupportedOperationException(expression.getText()); // TODO
        }
        @Override
        public void visitJetElement(JetElement element) {
            trace.getErrorHandler().genericError(element.getNode(), "Unsupported in call element"); // TODO : Message
        }
    });
    // Errors are always reported on the wrapped domain (reportErrors = true).
    return wrapForTracing(result[0], reference[0], argumentList, true);
}
/**
 * Validates '.' vs '?.' against the receiver's nullability: reports an error
 * when '.' is used on a nullable receiver the callee does not accept, and an
 * error/warning when '?.' is used where it is meaningless (namespace / non-null
 * receiver). Does nothing when either the receiver type or the callee is unknown.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) {
        return;
    }
    boolean namespaceType = receiverType instanceof NamespaceType;
    // Namespaces are never treated as nullable receivers.
    boolean nullableReceiver = !namespaceType && receiverType.isNullable();
    JetType calleeReceiverType = callee.getReceiverType();
    // A callee tolerates a nullable receiver only if it declares a nullable receiver type.
    boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
    IElementType operationSign = operationTokenNode.getElementType();
    if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
        if (namespaceType) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
/**
 * Decorates an overload domain so every resolution records the reference target
 * in the trace and (optionally) reports failures: unresolved references,
 * argument mismatches, and ambiguities.
 */
private OverloadDomain wrapForTracing(
        @NotNull final OverloadDomain overloadDomain,
        @NotNull final JetReferenceExpression referenceExpression,
        @Nullable final PsiElement argumentList,
        final boolean reportErrors) {
    return new OverloadDomain() {
        @NotNull
        @Override
        public OverloadResolutionResult getFunctionDescriptorForNamedArguments(@NotNull List<JetType> typeArguments, @NotNull Map<String, JetType> valueArgumentTypes, @Nullable JetType functionLiteralArgumentType) {
            OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForNamedArguments(typeArguments, valueArgumentTypes, functionLiteralArgumentType);
            report(resolutionResult);
            return resolutionResult;
        }
        @NotNull
        @Override
        public OverloadResolutionResult getFunctionDescriptorForPositionedArguments(@NotNull List<JetType> typeArguments, @NotNull List<JetType> positionedValueArgumentTypes) {
            OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, positionedValueArgumentTypes);
            report(resolutionResult);
            return resolutionResult;
        }
        // Records the target (even for single-candidate failures, to keep navigation
        // working) and reports diagnostics when reportErrors is set.
        private void report(OverloadResolutionResult resolutionResult) {
            if (resolutionResult.isSuccess() || resolutionResult.singleFunction()) {
                trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, resolutionResult.getFunctionDescriptor());
            }
            if (reportErrors) {
                switch (resolutionResult.getResultCode()) {
                    case NAME_NOT_FOUND:
                        trace.getErrorHandler().unresolvedReference(referenceExpression);
                        break;
                    case SINGLE_FUNCTION_ARGUMENT_MISMATCH:
                        if (argumentList != null) {
                            // TODO : More helpful message. NOTE: there's a separate handling for this for constructors
                            trace.getErrorHandler().genericError(argumentList.getNode(), "Arguments do not match " + DescriptorRenderer.TEXT.render(resolutionResult.getFunctionDescriptor()));
                        }
                        else {
                            trace.getErrorHandler().unresolvedReference(referenceExpression);
                        }
                        break;
                    case AMBIGUITY:
                        if (argumentList != null) {
                            // TODO : More helpful message. NOTE: there's a separate handling for this for constructors
                            trace.getErrorHandler().genericError(argumentList.getNode(), "Overload ambiguity [TODO : more helpful message]");
                        }
                        else {
                            trace.getErrorHandler().unresolvedReference(referenceExpression);
                        }
                        break;
                    default:
                        // Not a success
                }
            }
        }
        @Override
        public boolean isEmpty() {
            return overloadDomain.isEmpty();
        }
    };
}
// Convenience overload: checks the function's body against its declared return
// type starting from empty data-flow info.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
    checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
// Derives the expected return type from the descriptor (inferred bodies with no
// declared type get NO_EXPECTED_TYPE), builds the function-inner scope, and
// delegates to the inner-scope variant.
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body with an inferred return type: nothing to check against.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo);
    // Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
    // if (typeMap.isEmpty()) {
    // return; // The function returns Nothing
    // }
    // for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
    // JetType actualType = entry.castValue();
    // JetElement element = entry.getKey();
    // JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    // if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
    // if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
    // if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
    // && element.getParent() instanceof JetReturnExpression) {
    // context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
    // }
    // }
    // else {
    // if (element == function) {
    // JetExpression bodyExpression = function.getBodyExpression();
    // assert bodyExpression != null;
    // context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
    // }
    // else if (element instanceof JetExpression) {
    // JetExpression expression = (JetExpression) element;
    // context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
    // }
    // else {
    // context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
    // }
    // }
    // }
    // }
}
// Convenience overload for callers that already have the function-inner scope.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty());
}
// Type-checks the body against the expected return type and reports control-flow
// diagnostics: unreachable code, missing return value, 'return' inside an
// expression body, and statements in tail position of a block body that neither
// return nor have type Nothing.
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    final boolean blockBody = function.hasBlockBody();
    // Block bodies check returns against expectedReturnType via the return-type slot;
    // expression bodies check the body expression itself against it directly.
    final TypeInferenceContext context =
            blockBody
            ? new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
            : new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), dataFlowInfo, expectedReturnType, FORBIDDEN);
    typeInferrerVisitor.getType(bodyExpression, context);
    List<JetElement> unreachableElements = Lists.newArrayList();
    flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
    // This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
    final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
    // TODO : (return 1) || (return 2) -- only || and right of it is unreachable
    // TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
    // though it'd better be reported more specifically
    for (JetElement element : rootUnreachableElements) {
        trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
    }
    List<JetExpression> returnedExpressions = Lists.newArrayList();
    flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
    boolean nothingReturned = returnedExpressions.isEmpty();
    returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
    // A non-Unit function with a non-empty body whose only "return" was the function
    // itself must be reported as missing a return value.
    if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
        trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
    }
    for (JetExpression returnedExpression : returnedExpressions) {
        returnedExpression.accept(new JetVisitorVoid() {
            @Override
            public void visitReturnExpression(JetReturnExpression expression) {
                // Explicit 'return' is only legal inside a block body.
                if (!blockBody) {
                    trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
                }
            }
            @Override
            public void visitExpression(JetExpression expression) {
                // A tail-position statement in a non-Unit block body must either be a
                // 'return' (handled above) or have type Nothing.
                if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
                    //TODO move to pseudocode
                    JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
                    if (type == null || !JetStandardClasses.isNothing(type)) {
                        trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
                    }
                }
            }
        });
    }
}
/**
 * Infers a function's return type from its body: Nothing when no value is ever
 * returned, otherwise the common supertype of all returned expression types.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Type-checks the function body and gathers every element control flow says may be
 * returned, mapped to its type: returned expressions to their cached inferred type
 * (omitted when unknown), and elements that implicitly return Unit to the Unit type.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
        @NotNull BindingTrace trace,
        JetScope outerScope,
        JetDeclarationWithBody function,
        FunctionDescriptor functionDescriptor) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    // Type the whole body first so expression types are cached in the trace.
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    typeInferrerVisitor.getType(bodyExpression, new TypeInferenceContext(trace, functionInnerScope, function.hasBlockBody(), DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
    Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
    Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
    flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
    Map<JetElement, JetType> typeMap = new HashMap<JetElement, JetType>();
    for (JetExpression returned : returnedExpressions) {
        // A returned expression is used as a value, not a statement.
        trace.record(STATEMENT, returned, false);
        JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returned);
        if (cachedType != null) {
            typeMap.put(returned, cachedType);
        }
    }
    for (JetElement unitElement : elementsReturningUnit) {
        typeMap.put(unitElement, JetStandardClasses.getUnitType());
    }
    return typeMap;
}
// Types a block statement-by-statement, threading data-flow info between statements
// and applying the caller's expected type only to the last statement. Local
// declarations become visible via the writable scope. Returns the last statement's
// type (Unit for an empty block).
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, TypeInferenceContext context) {
    if (block.isEmpty()) {
        return JetStandardClasses.getUnitType();
    }
    TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
    // Intermediate statements carry no expected type; the expected return type is threaded through.
    TypeInferenceContext newContext = new TypeInferenceContext(trace, scope, true, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
    JetType result = null;
    for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
        JetElement statement = iterator.next();
        trace.record(STATEMENT, statement);
        JetExpression statementExpression = (JetExpression) statement;
        //TODO constructor assert context.expectedType != FORBIDDEN : ""
        // The caller's expected type applies only to the block's last statement.
        if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
            newContext = new TypeInferenceContext(trace, scope, true, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
        }
        result = blockLevelVisitor.getType(statementExpression, newContext);
        // Propagate data-flow facts (e.g. smart casts) established by this statement.
        DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
        if (newDataFlowInfo == null) {
            newDataFlowInfo = context.dataFlowInfo;
        }
        if (newDataFlowInfo != context.dataFlowInfo) {
            newContext = new TypeInferenceContext(trace, scope, true, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
        }
        blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
    }
    return result;
}
/**
 * Resolves a call with no explicit type arguments by inferring them through a constraint
 * system: each argument type must be a subtype of the corresponding parameter type, and the
 * candidate's return type must conform to {@code expectedType} (when one is given).
 *
 * Fix: removed three leftover {@code System.out.println} debug statements and dead
 * commented-out code; behavior (trace records, errors, return values) is otherwise unchanged.
 *
 * @return the substituted return type, or null if inference failed (an error is reported)
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull JetCall call,
        @NotNull JetType expectedType
) {
    if (!call.getTypeArguments().isEmpty()) {
        throw new UnsupportedOperationException("Explicit type arguments not implemented");
    }
    JetExpression calleeExpression = call.getCalleeExpression();
    Collection<FunctionDescriptor> candidates;
    if (calleeExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression expression = (JetSimpleNameExpression) calleeExpression;
        candidates = scope.getFunctionGroup(expression.getReferencedName()).getFunctionDescriptors();
    }
    else {
        throw new UnsupportedOperationException("Type argument inference not implemented");
    }
    // Inference currently supports only a single, non-overloaded candidate.
    assert candidates.size() == 1;
    FunctionDescriptor candidate = candidates.iterator().next();
    assert candidate.getTypeParameters().size() == call.getTypeArguments().size();
    // Register each type parameter of the candidate as a variable of the constraint system.
    ConstraintSystem constraintSystem = new ConstraintSystem();
    for (TypeParameterDescriptor typeParameterDescriptor : candidate.getTypeParameters()) {
        constraintSystem.registerTypeVariable(typeParameterDescriptor, Variance.INVARIANT); // TODO
    }
    // Constrain each positional argument's type to be a subtype of its parameter's type.
    Iterator<ValueParameterDescriptor> parameters = candidate.getValueParameters().iterator();
    for (JetValueArgument valueArgument : call.getValueArguments()) {
        assert !valueArgument.isNamed();
        ValueParameterDescriptor valueParameterDescriptor = parameters.next();
        JetExpression expression = valueArgument.getArgumentExpression();
        JetType type = getType(scope, expression, false, NO_EXPECTED_TYPE);
        constraintSystem.addSubtypingConstraint(type, valueParameterDescriptor.getOutType());
    }
    // The candidate's return type must conform to the expected type, if any.
    if (expectedType != NO_EXPECTED_TYPE) {
        constraintSystem.addSubtypingConstraint(candidate.getReturnType(), expectedType);
    }
    ConstraintSystem.Solution solution = constraintSystem.solve();
    if (!solution.isSuccessful()) {
        trace.getErrorHandler().genericError(calleeExpression.getNode(), "Type inference failed");
        return null;
    }
    return solution.getSubstitutor().substitute(candidate.getReturnType(), Variance.INVARIANT); // TODO
}
/**
 * Resolves a call whose callee is backed by an overload domain. Explicit type arguments
 * are resolved here; value arguments are handled by resolveCallWithTypeArguments.
 */
@Nullable
private JetType resolveCall(
        @NotNull JetScope scope,
        @NotNull OverloadDomain overloadDomain,
        @NotNull JetCall call) {
    // 1) ends with a name -> (scope, name) to look up
    // 2) ends with something else -> just check types
    List<JetTypeProjection> projections = call.getTypeArguments();
    // Variance projections make no sense on a method's type parameters — report each one.
    for (JetTypeProjection projection : projections) {
        if (projection.getProjectionKind() != JetProjectionKind.NONE) {
            trace.getErrorHandler().genericError(projection.getNode(), "Projections are not allowed on type parameters for methods"); // TODO : better positioning
        }
    }
    // Resolve every written type argument into an actual type.
    List<JetType> resolvedTypeArguments = new ArrayList<JetType>();
    for (JetTypeProjection projection : projections) {
        // TODO : check that there's no projection
        JetTypeReference reference = projection.getTypeReference();
        if (reference != null) {
            resolvedTypeArguments.add(new TypeResolver(semanticServices, trace, true).resolveType(scope, reference));
        }
    }
    return resolveCallWithTypeArguments(scope, overloadDomain, call, resolvedTypeArguments);
}
/**
 * Resolves a call, with its type arguments already resolved, against an overload domain.
 * Named arguments are not supported yet; returns null when resolution fails.
 */
private JetType resolveCallWithTypeArguments(JetScope scope, OverloadDomain overloadDomain, JetCall call, List<JetType> typeArguments) {
    List<JetValueArgument> valueArguments = call.getValueArguments();
    // Detect named arguments: positional resolution cannot handle them.
    boolean hasNamedArgument = false;
    for (JetValueArgument argument : valueArguments) {
        if (argument.isNamed()) {
            hasNamedArgument = true;
            break;
        }
    }
    List<JetExpression> functionLiteralArguments = call.getFunctionLiteralArguments();
    // TODO : must be a check
    assert functionLiteralArguments.size() <= 1;
    if (hasNamedArgument) {
        // TODO : check that all are named
        trace.getErrorHandler().genericError(call.asElement().getNode(), "Named arguments are not supported"); // TODO
        return null;
    }
    // Gather argument expressions in positional order, trailing function literals last.
    List<JetExpression> positionedArguments = new ArrayList<JetExpression>();
    for (JetValueArgument argument : valueArguments) {
        JetExpression argumentExpression = argument.getArgumentExpression();
        if (argumentExpression != null) {
            positionedArguments.add(argumentExpression);
        }
    }
    positionedArguments.addAll(functionLiteralArguments);
    // Type each argument, then ask the overload domain for a matching descriptor.
    List<JetType> argumentTypes = new ArrayList<JetType>();
    for (JetExpression argument : positionedArguments) {
        argumentTypes.add(safeGetType(scope, argument, false, NO_EXPECTED_TYPE)); // TODO
    }
    OverloadResolutionResult resolutionResult = overloadDomain.getFunctionDescriptorForPositionedArguments(typeArguments, argumentTypes);
    if (!resolutionResult.isSuccess()) {
        return null;
    }
    FunctionDescriptor functionDescriptor = resolutionResult.getFunctionDescriptor();
    checkGenericBoundsInAFunctionCall(call.getTypeArguments(), typeArguments, functionDescriptor);
    return functionDescriptor.getReturnType();
}
/**
 * Checks each explicit type argument of a call against the (substituted) declared bounds
 * of the corresponding type parameter of the called function.
 */
private void checkGenericBoundsInAFunctionCall(List<JetTypeProjection> jetTypeArguments, List<JetType> typeArguments, FunctionDescriptor functionDescriptor) {
    // Build a substitution that maps each declared type parameter to its actual argument.
    List<TypeParameterDescriptor> typeParameters = functionDescriptor.getOriginal().getTypeParameters();
    Map<TypeConstructor, TypeProjection> substitutionContext = Maps.newHashMap();
    int count = typeParameters.size();
    for (int i = 0; i < count; i++) {
        substitutionContext.put(typeParameters.get(i).getTypeConstructor(), new TypeProjection(typeArguments.get(i)));
    }
    TypeSubstitutor substitutor = TypeSubstitutor.create(substitutionContext);
    // Verify every argument against the substituted bounds of its parameter.
    for (int i = 0; i < count; i++) {
        JetTypeReference typeReference = jetTypeArguments.get(i).getTypeReference();
        assert typeReference != null;
        classDescriptorResolver.checkBounds(typeReference, typeArguments.get(i), typeParameters.get(i), substitutor);
    }
}
// Type-checks a constructor invocation written through a type reference (e.g. a supertype
// initializer). Only ordinary classes are supported; returns the constructed type or null.
@Nullable
public JetType checkTypeInitializerCall(JetScope scope, @NotNull JetTypeReference typeReference, @NotNull JetCall call) {
JetTypeElement typeElement = typeReference.getTypeElement();
if (typeElement instanceof JetUserType) {
JetUserType userType = (JetUserType) typeElement;
// TODO : to infer constructor parameters, one will need to
// 1) resolve a _class_ from the typeReference
// 2) rely on the overload domain of constructors of this class to infer type arguments
// For now we assume that the type arguments are provided, and thus the typeReference can be
// resolved into a valid type
JetType receiverType = typeResolver.resolveType(scope, typeReference);
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
// Variance projections are meaningless in a constructor's type argument list — report each.
for (JetTypeProjection typeProjection : userType.getTypeArguments()) {
switch (typeProjection.getProjectionKind()) {
case IN:
case OUT:
case STAR:
// TODO : Bug in the editor
trace.getErrorHandler().genericError(typeProjection.getProjectionNode(), "Projections are not allowed in constructor type arguments");
break;
case NONE:
break;
}
}
// Delegate the actual overload resolution to the class-constructor path.
JetSimpleNameExpression referenceExpression = userType.getReferenceExpression();
if (referenceExpression != null) {
return checkClassConstructorCall(scope, referenceExpression, classDescriptor, receiverType, call);
}
}
else {
trace.getErrorHandler().genericError(((JetElement) call).getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : review the message
}
return null;
}
else {
// Non-user types (function types, nullable types, ...) cannot be constructed.
if (typeElement != null) {
trace.getErrorHandler().genericError(typeElement.getNode(), "Calling a constructor is only supported for ordinary classes"); // TODO : Better message
}
}
return null;
}
// Resolves a constructor call for a known class. On failure, still records the reference
// target and reports an error, then falls back to the receiver type for error recovery.
@Nullable
public JetType checkClassConstructorCall(
@NotNull JetScope scope,
@NotNull JetReferenceExpression referenceExpression,
@NotNull ClassDescriptor classDescriptor,
@NotNull JetType receiverType,
@NotNull JetCall call) {
// When one writes 'new Array<in T>(...)' this does not make much sense, and an instance
// of 'Array<T>' must be created anyway.
// Thus, we should either prohibit projections in type arguments in such contexts,
// or treat them as an automatic upcast to the desired type, i.e. for the user not
// to be forced to write
// val a : Array<in T> = new Array<T>(...)
// NOTE: Array may be a bad example here, some classes may have substantial functionality
// not involving their type parameters
//
// The code below upcasts the type automatically
FunctionGroup constructors = classDescriptor.getConstructors();
OverloadDomain constructorsOverloadDomain = semanticServices.getOverloadResolver().getOverloadDomain(null, constructors);
JetType constructorReturnedType;
if (call instanceof JetDelegatorToThisCall) {
// this(...) delegation: the type arguments are those of the receiver type,
// with any projections stripped off.
List<TypeProjection> typeArguments = receiverType.getArguments();
List<JetType> projectionsStripped = Lists.newArrayList();
for (TypeProjection typeArgument : typeArguments) {
projectionsStripped.add(typeArgument.getType());
}
constructorReturnedType = resolveCallWithTypeArguments(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call, projectionsStripped);
}
else {
constructorReturnedType = resolveCall(
scope,
wrapForTracing(constructorsOverloadDomain, referenceExpression, call.getValueArgumentList(), false),
call);
}
// Resolution failed on a well-formed receiver type: report, but keep going with the
// receiver type so downstream checks still have something to work with.
if (constructorReturnedType == null && !ErrorUtils.isErrorType(receiverType)) {
DeclarationDescriptor declarationDescriptor = receiverType.getConstructor().getDeclarationDescriptor();
assert declarationDescriptor != null;
trace.record(BindingContext.REFERENCE_TARGET, referenceExpression, declarationDescriptor);
// TODO : more helpful message
JetValueArgumentList argumentList = call.getValueArgumentList();
final String errorMessage = "Cannot find a constructor overload for class " + classDescriptor.getName() + " with these arguments";
if (argumentList != null) {
trace.getErrorHandler().genericError(argumentList.getNode(), errorMessage);
}
else {
trace.getErrorHandler().genericError(call.asElement().getNode(), errorMessage);
}
constructorReturnedType = receiverType;
}
// If no upcast needed:
return constructorReturnedType;
// Automatic upcast:
// result = receiverType;
}
// TODO
// Narrows a type using data-flow facts: if the expression is a plain variable reference,
// return the variable's current "out" type from the data flow info; otherwise keep initialType.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when the expression's type fails to conform to the context's
 * expected type; always passes the given type through unchanged.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    boolean hasExpectation = expressionType != null
            && context.expectedType != null
            && context.expectedType != NO_EXPECTED_TYPE;
    if (hasExpectation && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but before reporting a mismatch consults the data flow info: if the
 * expression is a variable whose tracked (auto-cast) types include one that conforms,
 * the expected type is returned instead and an auto-cast is recorded.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    // No type, no expectation, or already conforming: nothing to do.
    if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE
            || semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        return expressionType;
    }
    VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
    boolean conformingTypeFound = false;
    if (variableDescriptor != null) {
        // Scan tracked types from the most recently established one backwards.
        List<JetType> candidates = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
        Collections.reverse(candidates);
        for (JetType candidate : candidates) {
            if (semanticServices.getTypeChecker().isSubtypeOf(candidate, context.expectedType)) {
                conformingTypeFound = true;
                break;
            }
        }
        if (!conformingTypeFound) {
            // Fall back to the variable's "out" type (e.g. not-null narrowing).
            JetType outType = context.dataFlowInfo.getOutType(variableDescriptor);
            conformingTypeFound = outType != null
                    && semanticServices.getTypeChecker().isSubtypeOf(outType, context.expectedType);
        }
    }
    if (!conformingTypeFound) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
        return expressionType;
    }
    checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
    return context.expectedType;
}
// Records an automatic cast for an immutable variable; a mutable ('var') variable cannot be
// auto-cast because its value may change after the check, so an error is reported instead.
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
/**
 * Re-checks each argument's type against the corresponding expected parameter type,
 * applying data-flow enrichment per argument. If the three lists disagree in size
 * (or there are no arguments), the input types are returned untouched.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checkedTypes = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < argumentTypes.size(); i++) {
        TypeInferenceContext argumentContext = context.replaceExpectedType(expectedArgumentTypes.get(i).getType());
        checkedTypes.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), argumentContext));
    }
    return checkedTypes;
}
/**
 * Extracts the variable a given expression refers to, if any: looks through a type
 * ascription "e : T" and then requires a simple name already resolved to a variable.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    // "e : T" merely ascribes a type; the underlying variable is on the left.
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typed = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typed.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typed.getLeft(), context);
        }
    }
    // Only a plain name whose recorded target is a variable qualifies.
    if (receiverExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
        DeclarationDescriptor target = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, nameExpression);
        if (target instanceof VariableDescriptor) {
            return (VariableDescriptor) target;
        }
    }
    return null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of everything needed to type one expression: the trace to record
 * results into, the resolution scope, data-flow facts, and the expected expression and
 * return types. The replace* methods derive a new context with one component swapped,
 * returning {@code this} when nothing would change.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    public final boolean preferBlock;
    public final DataFlowInfo dataFlowInfo;
    public final JetType expectedType;
    public final JetType expectedReturnType;

    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            boolean preferBlock,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.preferBlock = preferBlock;
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }

    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return new TypeInferenceContext(trace, scope, preferBlock, newDataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        // null means "no expectation"; normalize to the NO_EXPECTED_TYPE sentinel.
        if (newExpectedType == null) {
            return replaceExpectedType(NO_EXPECTED_TYPE);
        }
        return expectedType == newExpectedType
                ? this
                : new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) {
            return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        }
        return expectedReturnType == newExpectedReturnType
                ? this
                : new TypeInferenceContext(trace, scope, preferBlock, dataFlowInfo, expectedType, newExpectedReturnType);
    }

    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        return newTrace == trace
                ? this
                : new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        return newExpectedType == expectedType && newTrace == trace
                ? this
                : new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data flow info produced by the most recently typed expression; null when none was produced.
protected DataFlowInfo resultDataFlowInfo;
// Returns (without clearing) the data flow info left behind by the last getType call.
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Types an expression in a different resolution scope, keeping every other component
 * of the given context.
 */
@Nullable
public JetType getType(@NotNull JetScope scope, @NotNull JetExpression expression, boolean preferBlock, TypeInferenceContext context) {
    TypeInferenceContext rescoped =
            new TypeInferenceContext(context.trace, scope, preferBlock, context.dataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rescoped);
}
// Types an expression with replacement data flow info (and scope), keeping the rest of the context.
private JetType getTypeWithNewDataFlowInfo(JetScope scope, JetExpression expression, boolean preferBlock, @NotNull DataFlowInfo newDataFlowInfo, TypeInferenceContext context) {
    TypeInferenceContext updated =
            new TypeInferenceContext(context.trace, scope, preferBlock, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, updated);
}
// Central entry point: types an expression once, caching the result in the trace under
// EXPRESSION_TYPE and marking it PROCESSED; subsequent calls return the cached type.
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if ((boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily computed types before caching.
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-null value of type Nothing never completes normally: everything it dominates is dead.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
// A lazy type re-entered its own computation (recursive definition); report and give up.
context.trace.getErrorHandler().genericError(expression.getNode(), "Type inference has run into a recursive problem"); // TODO : message
result = null;
}
// Record the resolution scope only if no recursive path has already marked this expression.
if (!(boolean) context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
// Clears per-statement state so the next statement is typed from a clean slate.
public void resetResult() {
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reports "unreachable code" for everything dominated by a non-terminating expression,
// emitting one error per root of the dominated subtree forest.
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
// Types a block expression: an empty block is Unit; otherwise its statements are typed
// against a fresh writable scope so local declarations are visible to later statements.
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return context.services.checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return context.services.getBlockReturnedTypeWithWritableScope(scope, block, context);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resolves a simple name in order: field reference ($foo), then variable, then classifier
// (via its class object), then further lookup (e.g. namespaces). Records the reference
// target and reports unresolved references.
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
String referencedName = expression.getReferencedName();
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
// Backing-field reference ($name): resolve against the scope's properties.
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
if (referencedName != null) {
// 1) Plain variable in scope.
VariableDescriptor variable = context.scope.getVariable(referencedName);
if (variable != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression, variable);
JetType result = variable.getOutType();
if (result == null) {
// The variable exists but is write-only here (no out-type).
context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
// 2) Classifier used as a value: only legal via its class object.
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
context.trace.record(BindingContext.REFERENCE_TARGET, expression, classifier);
return context.services.checkEnrichedType(result, expression, context);
}
else {
// 3) Subclass hook (e.g. namespace lookup); result is passed back via a one-slot array.
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
}
// Nothing matched: report an unresolved reference.
context.trace.getErrorHandler().unresolvedReference(expression);
}
}
return null;
}
// Hook for subclasses: whether this name occurs in a position where a namespace is allowed.
// The base visitor never is.
public boolean isNamespacePosition() {
return false;
}
/**
 * Last-resort name lookup: checks whether the name denotes a namespace. A namespace is
 * not a value, so finding one is reported as an error; returns true when handled.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
// Resolves a name as a namespace; on success records the reference target and returns
// the namespace's type, otherwise null.
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        context.trace.record(BindingContext.REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
// Types an object literal by running the top-down analyzer on its declaration. The type is
// intercepted via a trace adapter: when the analyzer records the object's class descriptor,
// a DeferredType wrapping its default type is captured (and cached early, since the
// definition may be recursive).
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
if (declaration == expression.getObjectDeclaration()) {
// Defer the default-type computation: the descriptor may not be fully initialized yet.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Cache manually so recursive references to this expression see a type.
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
// Listen on every declaration-to-descriptor slice so the object's record is not missed.
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
// Types a function literal. In "prefer block" mode a parameterless literal is treated as a
// plain block. Otherwise a function descriptor is built from the declared receiver,
// parameters, and return type (the latter inferred from the body and/or the expected
// function type when not declared), yielding a function type.
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
if (context.preferBlock && !functionLiteral.hasParameterSpecification()) {
// Treated as a block, not a closure; record that decision and type the body directly.
context.trace.record(BindingContext.BLOCK, expression);
return context.services.checkType(getBlockReturnedType(context.scope, functionLiteral.getBodyExpression(), context), expression, context);
}
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// Parameter types must currently be declared explicitly; substitute an error type.
context.trace.getErrorHandler().genericError(parameter.getNode(), "Type inference for parameters is not implemented yet");
type = ErrorUtils.createErrorType("Not inferred");
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// The receiver only counts when it was written explicitly.
JetType effectiveReceiverType = receiverTypeRef == null ? null : receiverType;
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo);
}
else {
// No declared return type: borrow the expectation from an expected function type, if any,
// then infer from the body.
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(context.expectedType)) {
returnType = JetStandardClasses.getReturnType(context.expectedType);
}
returnType = getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
// The type of (e) is the type of e, re-checked against the context's expectation.
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(context.scope, expression.getExpression(), false, context);
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal by parsing its text into a compile-time constant (guided by the expected
 * type). Malformed literals come back as ErrorValue and are reported; otherwise the value
 * is recorded and its standard-library type returned.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> constant;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        constant = resolver.getIntegerValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        constant = resolver.getFloatValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        constant = resolver.getBooleanValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        constant = resolver.getCharValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        constant = resolver.getRawStringValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        constant = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (constant instanceof ErrorValue) {
        context.trace.getErrorHandler().genericError(node, ((ErrorValue) constant).getMessage());
        return null;
    }
    context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
    return context.services.checkType(constant.getType(standardLibrary), expression, context);
}
// A 'throw' never completes normally, so its type is Nothing. The thrown expression is
// still typed (for its trace side effects), with no expectation applied.
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrown = expression.getThrownExpression();
    if (thrown != null) {
        // TODO : check that it inherits Throwable
        getType(context.scope, thrown, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types a 'return' expression. The returned value (if any) is checked against the
 * function's expected return type; a bare 'return' is only legal for Unit-returning
 * functions. The 'return' expression itself has type Nothing.
 *
 * Fix: removed the dead store to a local ({@code returnedType} was assigned Unit and
 * never read); behavior is unchanged.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    // 'return' is forbidden in some contexts (signalled via the FORBIDDEN sentinel).
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Check the returned value against the declared/expected return type.
        getType(context.scope, returnedExpression, false, context.replaceExpectedType(context.expectedReturnType));
    }
    else {
        // A bare 'return' only suits Unit-returning functions.
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/** A 'break' expression never yields a value, so its type is Nothing. */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/** A 'continue' expression never yields a value, so its type is Nothing. */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/**
 * Types an expression of the form 'e : T', 'e as T' or 'e as? T'.
 * For flexible constant operands (integer/float literals) the LHS is first
 * checked against the target type; on failure it is re-checked with no
 * expected type so diagnostics are still produced.
 */
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
    JetTypeReference right = expression.getRight();
    JetType result = null;
    if (right != null) {
        JetType targetType = context.typeResolver.resolveType(context.scope, right);
        if (isTypeFlexible(expression.getLeft())) {
            TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
            boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
            if (success) {
                temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
            }
            else {
                checkLhsWithNoExpectedType(expression, context, targetType);
            }
        }
        else {
            checkLhsWithNoExpectedType(expression, context, targetType);
        }
        IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
        // 'as?' yields a nullable version of the target type; ':' and 'as' yield it as-is.
        result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
    }
    else {
        // Malformed expression with no RHS type: still check the LHS for its diagnostics.
        getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    }
    return context.services.checkType(result, expression, context);
}

// Checks the LHS with NO_EXPECTED_TYPE on a temporary trace and commits all
// recorded data unconditionally (shared by both branches above; the original
// duplicated these three lines verbatim).
private void checkLhsWithNoExpectedType(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, JetType targetType) {
    TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
    checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
    temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
/**
 * A literal whose type may adapt to the expected type: integer and float constants.
 * Compares element types directly instead of allocating a new TokenSet on every
 * call as the original did (TokenSet membership for these two types is exactly
 * identity against the listed element types).
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) return false;
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
           || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Checks the LHS of 'e : T' / 'e as T' / 'e as? T' under the given expected
 * type, recording inference data on the temporary trace. Returns false when
 * the LHS could not be typed, a ':' check failed, or the operator is unknown.
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
    TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
    JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
    if (actualType == null) return false;

    JetSimpleNameExpression operationSign = expression.getOperationSign();
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.COLON) {
        // ':' requires the actual type to conform to the target type.
        boolean conforms = targetType == NO_EXPECTED_TYPE
                || semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType);
        if (!conforms) {
            context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
        }
        return conforms;
    }
    if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
        // Casts always "succeed" for typing purposes; warn about useless/impossible ones.
        checkForCastImpossibility(expression, actualType, targetType, context);
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
    return false;
}
/**
 * Emits warnings for 'as'/'as?' casts that are either unnecessary (the static
 * type already conforms) or can never succeed (the types are unrelated).
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;

    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    ASTNode signNode = expression.getOperationSign().getNode();
    if (!typeChecker.isSubtypeOf(targetType, actualType)) {
        // Target is not a subtype of the actual type: this is not a downcast.
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        String message = typeChecker.isSubtypeOf(actualType, targetType)
                ? "No cast needed, use ':' instead"
                : "This cast can never succeed";
        context.trace.getErrorHandler().genericWarning(signNode, message);
    }
    else if (typeChecker.isSubtypeOf(actualType, targetType)) {
        // Both directions hold: the cast is a no-op.
        context.trace.getErrorHandler().genericWarning(signNode, "No cast needed");
    }
}
/**
 * Types a tuple literal. Entry types are inferred independently; when a tuple
 * type is expected, the entries are additionally checked against its arguments.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>(entries.size());
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, false, NO_EXPECTED_TYPE)); // TODO
    }
    if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
        List<JetType> enriched = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        // checkArgumentTypes returns a different list instance only when it enriched the types.
        if (enriched != entryTypes) {
            return JetStandardClasses.getTupleType(enriched);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of a 'this' expression, handling an optional label
 * ('this@label') and an optional supertype qualifier ('this<Supertype>').
 * Resolution targets are recorded in the binding trace as they are found.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
// Labeled 'this': resolve the label to a class (default type) or a
// function (its receiver type).
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// No declaration in scope carries this label; fall back to the label
// target resolved earlier.
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// The function literal has no receiver, so 'this' is undefined
// there; Nothing triggers the error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(BindingContext.REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
// More than one declaration matches the label.
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unqualified 'this': the enclosing scope's receiver type.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(BindingContext.REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
// 'this<Supertype>': the result is the declared supertype matching
// the qualifier, substituted for this type's arguments.
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
/** Delegates block typing to the shared block-typing routine. */
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
    JetType blockType = getBlockReturnedType(context.scope, expression, context);
    return blockType;
}
/**
 * Types a 'when' expression. Each entry's conditions are checked against the
 * subject; the entry body is typed under the data flow info established by its
 * conditions, and the whole expression's type is the common supertype of all
 * entry body types.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, false, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: variables it binds extend the entry body's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple comma-separated conditions: their data flow infos are OR-ed
// (any one may have matched), then AND-ed with the incoming info.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// The body is checked against the caller's expected type, not the
// NO_EXPECTED_TYPE context used for the subject/conditions.
JetType type = getTypeWithNewDataFlowInfo(scopeToExtend, bodyExpression, true, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Dispatches on the kind of a 'when' condition (call suffix, 'in' range,
 * 'is' pattern) and returns the data flow info established by matching it.
 * Pattern-bound variables are added to scopeToExtend.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor can write through the closure.
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
// Call-suffix condition: type the suffix with the subject as receiver
// and require a Boolean result.
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
// 'in'/'!in' range condition, checked like a binary 'in' expression.
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
// 'is' pattern condition: may refine data flow info for the subject variables.
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Checks a pattern against the subject's type, adds any variables the pattern
 * binds to scopeToExtend, and returns the data flow info established when the
 * pattern matches (e.g. instance-of refinements on the subject variables).
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor can write through the closure.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// Type pattern: on match, the subject variables are known to be of this type.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: arity must match the subject's tuple type, then each
// entry is checked component-wise and the infos are AND-ed.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: check the argument list against the decomposer
// call's return type (error type if the call could not be resolved).
JetType selectorReturnType = getSelectorReturnType(subjectType, pattern.getDecomposerExpression(), context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Expression pattern: the expression's type must be compatible with the subject's.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(scopeToExtend, expression, false, context);
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// Binding pattern: declare the variable (typed as the subject unless an
// explicit type is given) and check any nested condition with the new
// variable added to the subject-variable set.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the two types cannot possibly share a value.
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Types a try/catch/finally expression. Catch bodies are checked in scopes
 * extended with their catch parameter; when a finally block is present, only
 * its type contributes to the result (catch types are computed solely for
 * their diagnostics). The result is the common supertype of the contributing
 * branch types, or null if none could be typed.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    JetExpression tryBlock = expression.getTryBlock();
    List<JetCatchClause> catchClauses = expression.getCatchClauses();
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    List<JetType> branchTypes = new ArrayList<JetType>();

    for (JetCatchClause catchClause : catchClauses) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchParameter == null) {
            continue;
        }
        VariableDescriptor catchVariable = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
        if (catchBody == null) {
            continue;
        }
        WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
        catchScope.addVariableDescriptor(catchVariable);
        JetType catchType = getType(catchScope, catchBody, true, context);
        if (catchType != null) {
            branchTypes.add(catchType);
        }
    }

    if (finallyBlock != null) {
        branchTypes.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
        JetType finallyType = getType(context.scope, finallyBlock.getFinalExpression(), true, context);
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }

    JetType tryType = getType(context.scope, tryBlock, true, context);
    if (tryType != null) {
        branchTypes.add(tryType);
    }

    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Types an if-expression. Each branch is checked under the data flow info
 * extracted from the condition; when exactly one branch always jumps (type
 * Nothing), code after the 'if' inherits the other branch's data flow info.
 * With both branches present the result is their common supertype; with one
 * branch the expression is of type Unit.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// The then-scope receives variables bound by the condition (e.g. 'is' patterns).
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// 'then' always jumps, so what follows sees the condition-false info.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// 'else' always jumps, so what follows sees the condition-true info.
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Both branches present: each is checked against the caller's expected type.
JetType thenType = getTypeWithNewDataFlowInfo(thenScope, thenBranch, true, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewDataFlowInfo(context.scope, elseBranch, true, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Computes the data flow info that holds when 'condition' evaluates to
 * 'conditionValue'. Handles 'is' checks, '&&'/'||' composition, (in)equality
 * comparisons involving nullability, '!' negation, and parentheses. When
 * scopeToExtend is non-null, variables bound by successful patterns are added
 * to it.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
// One-element array so the anonymous visitor can write through the closure.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// 'x is T' (or '!is' when the condition is false) refines the info
// recorded for the pattern during earlier checking.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// For '&&' variables are only bound when the whole condition is true;
// for '||' only when it is false.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
// 'a && b' true means both held (AND); false means either failed (OR),
// and symmetrically for '||'.
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that the simple name (if any) ends up on the left.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records nullability knowledge for '=='/'===' (inverted) and '!='/'!=='.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// '!cond' is analyzed as 'cond' with the assumed value flipped.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent for this analysis.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
// patternsToDataFlowInfo may have no entry for a pattern; fall back.
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Types a loop/if condition and reports an error when its type is known and
 * is not Boolean. A null condition is silently accepted.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(scope, condition, false, context);
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Types a while-loop. The body is checked in a scope that receives variables
 * bound by the condition, under condition-true data flow info. If the loop
 * cannot be exited via 'break', code after it sees the condition-false info.
 * A while-expression always has type Unit.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);

    JetExpression body = expression.getBody();
    if (body != null) {
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyInfo = (condition == null)
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, true, bodyScope, context);
        getTypeWithNewDataFlowInfo(bodyScope, body, true, bodyInfo, context);
    }

    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Types a do-while loop. A parameterless function-literal body is treated as a
 * plain block so that variables it declares are visible in the condition; the
 * condition is then checked in that body scope. Without 'break', code after
 * the loop sees the condition-false data flow info. Always of type Unit.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
// Parameterless literal: check its statements directly in a writable
// scope and record it as a plain block.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
// NOTE(review): getBodyExpression() is dereferenced without a null
// check here — confirm it cannot be null for a parsed literal.
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), context);
context.trace.record(BindingContext.BLOCK, function);
} else {
getType(context.scope, body, true, context);
}
}
else if (body != null) {
// Ordinary body: check it as a one-statement block in a writable scope
// that the condition can see.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/** Creates a fresh writable child scope of 'scope' with the same containing declaration. */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Types a for-loop: resolves the iterable convention on the loop range,
 * declares the loop parameter (explicitly typed or inferred from the range's
 * element type) in a fresh scope, and checks the body in that scope.
 * A for-expression always has type Unit.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(context.scope, loopRange, false, context);
}
// The element type produced by the range's iterator, if the convention holds.
JetType expectedParameterType = null;
if (loopRangeType != null) {
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicitly typed parameter: its declared type must accept the element type.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// Untyped parameter: infer from the element type (error type as fallback).
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(loopScope, body, true, context); // TODO
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Checks that 'type' can be iterated by the for-loop convention:
//   iterator() : I, where I offers hasNext (as a Boolean function OR a Boolean
//   property, but not both) and next() : E.
// Returns the element type E on success, or null when the convention is not
// satisfied (errors are reported on the loop range).
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
    ASTNode reportErrorsOn = loopRange.getNode();
    OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
    if (iteratorResolutionResult.isSuccess()) {
        FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
        context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
        JetType iteratorType = iteratorFunction.getReturnType();
        // hasNext may be provided either as a function or as a property;
        // having both on a non-error type is ambiguous.
        FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
        boolean hasNextFunctionSupported = hasNextFunction != null;
        VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
        boolean hasNextPropertySupported = hasNextProperty != null;
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            // FIX: the property variant is spelled without parentheses
            // ('iterator().hasNext'), consistently with the other diagnostics
            // about this property below and in checkHasNextPropertySupport.
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        else {
            context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
        }
        OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
            context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
            // The loop element type is whatever next() returns.
            return nextFunction.getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
// Looks for a 'hasNext()' FUNCTION on the iterator type.
// Returns null only when no such function exists at all; on success its return
// type must be Boolean. On ambiguity an error is reported and the descriptor
// is still returned.
// NOTE(review): in the ambiguity branch the result of getFunctionDescriptor()
// is returned without the isSuccess() guarantee — confirm it is meaningful there.
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult hasNextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
// No hasNext() function — caller will fall back to a hasNext property.
return null;
} else {
assert hasNextResolutionResult.isSuccess();
JetType hasNextReturnType = hasNextResolutionResult.getFunctionDescriptor().getReturnType();
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextResolutionResult.getFunctionDescriptor();
}
// Looks for a 'hasNext' PROPERTY on the iterator type. Returns null when no
// such property exists; otherwise reports if the property is unreadable or
// not of type Boolean, and returns its descriptor.
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property != null) {
        JetType propertyType = property.getOutType();
        if (propertyType == null) {
            // TODO : accessibility
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
        }
        else if (!isBoolean(propertyType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
        }
    }
    return property;
}
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    // Hash-qualified expressions are not supported by the type inferrer.
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
// Types a qualified expression: 'receiver.selector', 'receiver?.selector'
// (safe access) or 'receiver?selector'. Resolution is first attempted without
// autocasts on a throwaway trace; if that fails, each data-flow-implied
// (autocast) type of the receiver is tried in turn, committing whichever
// attempt succeeds.
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
// The receiver may also be a namespace, hence the with-namespaces visitor.
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, new TypeInferenceContext(context.trace, context.scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (receiverType == null) return null;
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
// ErrorHandler errorHandler = context.trace.getErrorHandler();
// errorHandler.openRegion();
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// 'a?.b' yields a nullable result unless the selector's type is Unit.
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
// Clean resolution succeeded: commit its bindings to the real trace.
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Retry with each possible (autocast) type of the receiver variable,
// most recently established first (list is reversed).
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.addAllMyDataTo(context.trace);
somethingFound = true;
break;
}
else {
// Discard this attempt's bindings; start a fresh trace for the next candidate.
autocastResolutionTrace = new TemporaryBindingTrace(context.trace.getBindingContext());
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// No autocast helped: commit the clean attempt's diagnostics instead.
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// 'a?b': the selector must be Boolean; the whole expression has the
// receiver's type made nullable.
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Null-safety of the receiver is checked against the function actually invoked.
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
// Drills into a selector expression to find the function descriptor that will
// actually be invoked: through call parentheses, array accesses (array part),
// binary expressions (left operand) and qualified expressions (receiver).
// Used for null-safety checking of the receiver. Falls back to an error
// function so callers never see null.
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
// 'f(...)': the callee is whatever 'f' resolves to.
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
// A reference contributes only if resolution bound it to a function.
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
// Any other shape is unexpected here.
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// Guarantee a non-null result for the @NotNull contract.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
// Resolves 'callee(args)' against an optional receiver type; returns the
// call's result type, or null when there is no callee expression to resolve.
private JetType getCallExpressionType(@Nullable JetType receiverType, @NotNull JetCallExpression callExpression, TypeInferenceContext context) {
    JetExpression callee = callExpression.getCalleeExpression();
    if (callee == null) return null;
    OverloadDomain domain = context.services.getOverloadDomain(receiverType, context.scope, callee, callExpression.getValueArgumentList());
    return context.services.resolveCall(context.scope, domain, callExpression);
}
// Types the selector of a qualified expression against the given receiver
// type. Dispatches on the selector's syntactic form; other forms are reported
// as unsupported.
private JetType getSelectorReturnType(JetType receiverType, JetExpression selectorExpression, TypeInferenceContext context) {
    if (selectorExpression instanceof JetCallExpression) {
        // 'receiver.f(...)': resolve as a call with the receiver type.
        return getCallExpressionType(receiverType, (JetCallExpression) selectorExpression, context);
    }
    if (selectorExpression instanceof JetSimpleNameExpression) {
        // 'receiver.name': look the name up in a scope that also sees the receiver's members.
        JetScope scopeWithReceiver = new ScopeWithReceiver(context.scope, receiverType, semanticServices.getTypeChecker());
        return getType(scopeWithReceiver, selectorExpression, false, context);
    }
    if (selectorExpression != null) {
        // TODO : not a simple name -> resolve in scope, expect property type or a function type
        context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    }
    return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    // A plain call has no explicit receiver; resolve it, then check the
    // result against the context's expected type.
    JetType callType = getCallExpressionType(null, expression, context);
    return context.services.checkType(callType, expression, context);
}
// Types 'expr is Pattern': the result is always Boolean. As a side effect,
// records the data-flow info and the variables bound by the pattern so that
// conditions can see narrowed types.
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetType knownType = getType(context.scope, expression.getLeftHandSide(), false, context);
JetPattern pattern = expression.getPattern();
if (pattern != null && knownType != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
// Remember what the pattern established for later use by control-flow constructs.
patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
}
return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
// Types a unary expression. Labels pass the expected type through to the
// labeled expression; other operators resolve to a no-argument call of the
// conventional name on the operand. '++'/'--' additionally require the result
// to be compatible with the operand type (or Unit for in-place mutation).
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(context.scope, baseExpression, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.lookupFunction(context.scope, expression.getOperationSign(), name, receiverType, Collections.<JetType>emptyList(), true);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
// inc()/dec() returning Unit: mutation in place, whole expression is Unit.
result = JetStandardClasses.getUnitType();
}
else {
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
// The operand variable is written back with the result.
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
// Types a binary expression by dispatching on the operation token:
//  - named infix calls and conventional operators resolve to member/extension calls,
//  - '=' and compound assignments are rejected in expression position
//    (overridden in the writable-scope visitor),
//  - comparisons require compareTo to return Int; equality requires equals(Any?),
//  - '&&'/'||' propagate data-flow info from the left operand into the right,
//  - '?:' (elvis) combines the operands' nullability.
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // 'a foo b' — a named infix call.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(expression, referencedName, context.scope, true, context);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Conventional operator ('+', '-', ...): resolve its named counterpart.
        result = getTypeForBinaryCall(expression, binaryOperationNames.get(operationType), context.scope, true, context);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // Comparisons desugar to compareTo, which must return Int.
        JetType compareToReturnType = getTypeForBinaryCall(expression, "compareTo", context.scope, true, context);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(context.scope, left, false, context);
            if (leftType != null) {
                JetType rightType = getType(context.scope, right, false, context);
                if (rightType != null) {
                    // Lookup is against equals(Any?) so any two values may be compared.
                    FunctionDescriptor equals = context.services.lookupFunction(
                            context.scope, operationSign, "equals",
                            leftType, Collections.singletonList(JetStandardClasses.getNullableAnyType()), false);
                    if (equals != null) {
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                    }
                }
            }
        }
        // '=='/'!=' is Boolean even if operand typing failed above.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // FIX: removed a dead store ('result = ErrorUtils.createErrorType(...)')
            // that was immediately discarded by this return.
            // TODO: consider reporting an error type instead of bailing out silently.
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(context.scope, left, false, context);
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // For '&&' the right operand sees facts established by the left one.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getTypeWithNewDataFlowInfo(rightScope, right, false, flowInfoLeft, context);
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(context.scope, left, false, context);
        JetType rightType = right == null ? null : getType(context.scope, right, false, contextWithExpectedType);
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // FIX: corrected this warning's grammar ("is always returns ...").
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                // The result is the common supertype; it is nullable only if the
                // right operand is nullable.
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
// 'a in b' desugars to 'b.contains(a)'; the result must be Boolean.
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    final String methodName = "contains";
    // Note the operand swap: the right operand is the receiver of contains().
    JetType containsReturnType = getTypeForBinaryCall(context.scope, right, operationSign, left, methodName, true, context);
    ensureBooleanResult(operationSign, methodName, containsReturnType, context);
}
// Reports when equality-style operands can never denote the same value
// because their types have an empty intersection.
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression lhs = expression.getLeft();
    JetExpression rhs = expression.getRight();
    JetType lhsType = getType(context.scope, lhs, false, context);
    if (lhsType == null || rhs == null) {
        return;
    }
    JetType rhsType = getType(context.scope, rhs, false, context);
    if (rhsType == null) {
        return;
    }
    JetType intersection = TypeUtils.intersect(semanticServices.getTypeChecker(), new HashSet<JetType>(Arrays.asList(lhsType, rhsType)));
    if (intersection == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + lhsType + " and " + rhsType);
    }
}
// In expression position, both assignment forms are rejected. These two hooks
// are overridden by TypeInferrerVisitorWithWritableScope, where assignments
// are legal statements.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Shared diagnostic for both assignment forms; always yields no type.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
// Convenience wrapper quoting the operation name as the error subject.
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}

// Returns false (and reports) only when a known result type is not Boolean.
// A null resultType means resolution already failed elsewhere; do not pile on.
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null) {
        return true;
    }
    // TODO : Relax?
    if (isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
// Whether 'type' is convertible (not necessarily a strict subtype) to the
// standard Boolean type.
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
// Types every index expression in order, with no expected type. Aborts with
// null as soon as any expression fails to type-check; otherwise returns the
// types in the input order.
@Nullable
protected List<JetType> getTypes(JetScope scope, List<JetExpression> indexExpressions, TypeInferenceContext context) {
    // Presized: the result has exactly one entry per index expression.
    List<JetType> argumentTypes = new ArrayList<JetType>(indexExpressions.size());
    TypeInferenceContext newContext = new TypeInferenceContext(context.trace, scope, false, context.dataFlowInfo, NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    for (JetExpression indexExpression : indexExpressions) {
        JetType type = context.services.typeInferrerVisitor.getType(indexExpression, newContext);
        if (type == null) {
            return null;
        }
        argumentTypes.add(type);
        context.services.typeInferrerVisitor.resetResult(); // TODO : recreate?
    }
    return argumentTypes;
}
// 'a[i, ...]' in read position desugars to 'a.get(i, ...)'; the expression's
// type is get's return type, or null if the receiver or any index fails.
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // Type the receiver first, then the indices (same order as diagnostics expect).
    JetType receiverType = getType(context.scope, expression.getArrayExpression(), false, context);
    List<JetType> indexTypes = getTypes(context.scope, expression.getIndexExpressions(), context);
    if (indexTypes == null || receiverType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.lookupFunction(context.scope, expression, "get", receiverType, indexTypes, true);
    if (getFunction == null) {
        return null;
    }
    // checkNullSafety(receiverType, expression.getIndexExpressions().get(0).getNode(), functionDescriptor);
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
// Unpacks a binary expression and delegates to the operand-level overload.
// A missing right operand means the parser produced an incomplete expression,
// so there is nothing to call.
@Nullable
protected JetType getTypeForBinaryCall(
        @NotNull JetBinaryExpression expression,
        @NotNull String name,
        @NotNull JetScope scope,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetExpression rightOperand = expression.getRight();
    if (rightOperand == null) {
        return null;
    }
    return getTypeForBinaryCall(scope, expression.getLeft(), expression.getOperationReference(), rightOperand, name, reportUnresolved, context);
}
// Resolves the infix form 'left name right' as the member/extension call
// 'left.name(right)'. Returns the call's result type, or null when either
// operand fails to type-check or the function cannot be found. A nullable
// left operand (the call's receiver) is reported as an error.
@Nullable
private JetType getTypeForBinaryCall(
        @NotNull JetScope scope,
        @NotNull JetExpression left,
        @NotNull JetSimpleNameExpression operationSign,
        @NotNull JetExpression right,
        @NotNull String name,
        boolean reportUnresolved,
        @NotNull TypeInferenceContext context) {
    JetType leftType = getType(scope, left, false, context);
    JetType rightType = getType(scope, right, false, context);
    if (leftType == null || rightType == null) {
        return null;
    }
    FunctionDescriptor functionDescriptor = context.services.lookupFunction(scope, operationSign, name, leftType, Collections.singletonList(rightType), reportUnresolved);
    if (functionDescriptor != null) {
        if (leftType.isNullable()) {
            // TODO : better error message for '1 + nullableVar' case
            // BUG FIX: the receiver of the desugared call is the LEFT operand;
            // the original message quoted right.getText() as the nullable receiver.
            context.trace.getErrorHandler().genericError(operationSign.getNode(),
                    "Infix call corresponds to a dot-qualified call '" +
                    left.getText() + "." + name + "(" + right.getText() + ")'" +
                    " which is not allowed on a nullable receiver '" + left.getText() + "'." +
                    " Use '?.'-qualified call instead");
        }
        return functionDescriptor.getReturnType();
    }
    return null;
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    // Declarations are only meaningful in a writable (block) scope;
    // TypeInferrerVisitorWithWritableScope handles them there.
    ASTNode node = dcl.getNode();
    context.trace.getErrorHandler().genericError(node, "Declarations are not allowed in this position");
    return null;
}
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    // Only the with-namespaces visitor accepts namespaces as expressions;
    // here 'namespace' has no value.
    ASTNode node = expression.getNode();
    context.trace.getErrorHandler().genericError(node, "'namespace' is not an expression");
    return null;
}
// Types a string template ("a${b}c"): the result is always String. As a side
// effect, attempts constant folding: if the template contains only literal and
// valid escape entries, the folded value is recorded as a compile-time
// constant. The OUT_OF_RANGE sentinel in value[0] marks the template as
// non-constant.
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
// An interpolated expression: type-check it, and give up on constant folding.
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
getType(context.scope, entryExpression, true, context);
}
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
// Plain text: appended verbatim to the folded value.
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
// Unknown escape: report and mark the template as non-constant.
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
// Every entry folded: record the template's compile-time value.
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    // Fallback for syntax the inferrer has no rule for.
    String description = element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + description);
    return null;
}
}
// Variant of the base visitor that also accepts namespace expressions; used
// for the receiver side of qualified expressions.
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // In namespace position the root namespace is a legal expression.
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fall back to namespace lookup when ordinary resolution fails.
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// Scope into which local declarations (properties, functions, objects) are
// added as the enclosing block is processed top-to-bottom.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
// A local 'object' declaration: runs a nested top-down analysis to build the
// class descriptor, then exposes the object as a value by declaring a
// same-named property in the writable scope. Declarations are statements
// here, so the result is always null.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
// Expose the object instance through a property descriptor.
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// A local 'val'/'var' declaration. Receivers and custom accessors are
// rejected on locals. The initializer is checked against the declared type
// (if any) through the expected-type mechanism, and the variable is added to
// the writable scope. Declarations are statements here (null result).
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
JetType outType = propertyDescriptor.getOutType();
// Typing with expectedType == outType performs the compatibility check.
JetType initializerType = getType(scope, initializer, false, context.replaceExpectedType(outType));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
// The variable becomes visible only after its own initializer has been typed.
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
    // Declare the local function in the writable scope, then check its body
    // against its declared return type. Declarations are statements (null result).
    FunctionDescriptorImpl descriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
    scope.addFunctionDescriptor(descriptor);
    context.services.checkFunctionReturnType(context.scope, function, descriptor, context.dataFlowInfo);
    return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
// Local classes are not handled yet; defer to the base visitor.
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
// Local typedefs are not handled yet; defer to the base visitor.
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
// Any other declaration kind is unsupported in a block; report via visitJetElement.
return visitJetElement(dcl, context);
}
// 'a op= b' in statement position: first try the dedicated assignment-operation
// method; if absent, fall back to the plain binary counterpart, which makes
// this a read-modify-write and is recorded as a reassignment. Assignments are
// statements, so the result is always null.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
// reportUnresolved == false: absence of the dedicated method is not an error yet.
JetType assignmentOperationType = getTypeForBinaryCall(expression, name, scope, false, context);
if (assignmentOperationType == null) {
// Fall back to the plain operator (e.g. '+' for '+='); this time unresolved IS reported.
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(expression, counterpartName, scope, true, context);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
return null;
}
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    JetExpression lhs = expression.getLeft();
    JetExpression unwrapped = JetPsiUtil.deparenthesize(lhs);
    JetExpression rhs = expression.getRight();
    // 'a[i] = v' desugars into a call of the 'set' convention function; handle it separately.
    if (unwrapped instanceof JetArrayAccessExpression) {
        JetArrayAccessExpression arrayAccess = (JetArrayAccessExpression) unwrapped;
        return resolveArrayAccessToLValue(arrayAccess, rhs, expression.getOperationReference(), context);
    }
    // Type the left side with no expectation, then check the right side against it.
    JetType lhsType = getType(scope, lhs, false, context.replaceExpectedType(NO_EXPECTED_TYPE));
    if (rhs != null) {
        getType(scope, rhs, false, context.replaceExpectedType(lhsType));
    }
    // The assignment itself has no value.
    return null;
}
// Types 'array[indices] = rhs' as a call of the 'set' convention function on the array expression.
// Returns null as soon as any constituent expression fails to type-check.
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // The 'set' argument list is all index expressions followed by the assigned value.
    List<JetType> setArguments = getTypes(scope, arrayAccessExpression.getIndexExpressions(), context);
    if (setArguments == null) return null;
    JetType valueType = getType(scope, rightHandSide, false, context);
    if (valueType == null) return null;
    setArguments.add(valueType);
    JetType arrayType = getType(scope, arrayAccessExpression.getArrayExpression(), false, context);
    if (arrayType == null) return null;
    // TODO : nasty hack: effort is duplicated (lookup performed twice to record on both PSI nodes)
    context.services.lookupFunction(scope, arrayAccessExpression, "set", arrayType, setArguments, true);
    FunctionDescriptor setFunction = context.services.lookupFunction(scope, operationSign, "set", arrayType, setArguments, true);
    if (setFunction == null) return null;
    // The overall type of the assignment is the 'set' function's return type, checked against the caller's expectation.
    return context.services.checkType(setFunction.getReturnType(), arrayAccessExpression, contextWithExpectedType);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    // Catch-all: any element without a dedicated visit method is illegal inside a block.
    String message = "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), message);
    return null;
}
}
}
Right
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
import static org.jetbrains.jet.lang.resolve.BindingContext.STATEMENT;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel "type" used where an expected type must never be consulted; every accessor
// throws, so any attempt to inspect it fails fast. Apparently compared by reference
// (see the TODO assert note in getBlockReturnedTypeWithWritableScope) -- confirm.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel meaning "no expectation is in force"; code throughout this file compares
// against it with '==' (e.g. checkType) and must never call its accessors.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Unary operator token -> name of the convention function it desugars to.
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Binary operator token -> name of the convention function it desugars to.
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Operator groups with special typing rules rather than a plain convention call.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Compound assignment token ('+=' etc.) -> its dedicated convention function ('plusAssign' etc.).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Compound assignment token -> the plain binary token tried when the dedicated
// 'xxxAssign' function is not found (see visitAssignmentOperation).
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
// Shared services: type checker, descriptor resolvers, etc.
private final JetSemanticServices semanticServices;
// Control-flow facts: reachability, returned expressions, dominated code.
private final JetFlowInformationProvider flowInformationProvider;
// Caches keyed by pattern PSI nodes; populated during pattern matching.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
// Both collaborators are required; this object holds no binding trace itself --
// per-trace state lives in the inner Services class.
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
// Creates a Services facade bound to the given trace; a new instance is built on each call.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
// The binding trace all results and diagnostics are recorded into.
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
// Visitor used for ordinary expressions.
private final TypeInferrerVisitor typeInferrerVisitor;
// Variant that additionally accepts namespace expressions.
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Private: instances are obtained through JetTypeInferrer.getServices(trace).
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
// Creates a statement-level visitor that may add declarations to the given writable scope.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    // Never returns null: when inference fails, substitute an error type naming the expression.
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the expression's type under the given expectation; returns null on failure.
// The expected *return* type is FORBIDDEN here: no 'return' may occur in this position.
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Like getType, but namespace expressions are acceptable and no expectation is in force.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// The call resolver bound to this Services' trace.
public CallResolver getCallResolver() {
return callResolver;
}
// Validates the call operator ('.' vs '?.') against the receiver's nullability:
// '.' on a possibly-null receiver is an error; a '?.' that cannot help is flagged too.
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) return;
    boolean isNamespace = receiverType instanceof NamespaceType;
    JetType declaredReceiver = callee.getReceiverType();
    // Namespaces are never null regardless of the declared type.
    boolean receiverMayBeNull = !isNamespace && receiverType.isNullable();
    boolean calleeRejectsNull = declaredReceiver == null || !declaredReceiver.isNullable();
    IElementType callOperator = operationTokenNode.getElementType();
    boolean unsafeCombination = receiverMayBeNull && calleeRejectsNull;
    if (unsafeCombination && callOperator == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if (!unsafeCombination && callOperator == JetTokens.SAFE_ACCESS) {
        if (isNamespace) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            // '?.' is pointless when the receiver cannot be null (or the callee accepts null).
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
// Convenience overload: checks the function body with empty data-flow information.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
// Derives the expected return type from the descriptor (dropping the expectation for
// expression-bodied functions with no declared type, whose type is being inferred),
// builds the function's inner scope, and delegates to the full check.
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
JetType expectedReturnType = functionDescriptor.getReturnType();
if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
expectedReturnType = NO_EXPECTED_TYPE;
}
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
// NOTE(review): superseded implementation kept for reference; consider deleting.
// Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
// if (typeMap.isEmpty()) {
// return; // The function returns Nothing
// }
// for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
// JetType actualType = entry.castValue();
// JetElement element = entry.getKey();
// JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
// if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
// if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
// && element.getParent() instanceof JetReturnExpression) {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
// }
// }
// else {
// if (element == function) {
// JetExpression bodyExpression = function.getBodyExpression();
// assert bodyExpression != null;
// context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// else if (element instanceof JetExpression) {
// JetExpression expression = (JetExpression) element;
// context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
// }
// else {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// }
// }
// }
}
// Convenience overload: empty data-flow info and no coercion of the last expression.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
// Full body check: types the body, reports unreachable code, and validates every
// returned expression against the expected return type and the body kind
// (block body '{...}' vs expression body '= expr').
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// Block bodies carry the expectation as the *return* type; expression bodies carry it
// as the expression's own expected type (returns are then FORBIDDEN).
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// A non-Unit return type with no returned value (but some return statements) is an error.
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
// Explicit 'return' is only legal inside a block body.
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A trailing value in a block body must be an explicit 'return' (unless the
// function returns Unit or the expression is already unreachable / Nothing-typed).
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    // An empty block evaluates to Unit (still checked against the expectation).
    if (statements.isEmpty()) {
        return checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    // Declarations inside the block live in a fresh writable scope nested in the outer one.
    DeclarationDescriptor owner = outerScope.getContainingDeclaration();
    WritableScope blockScope = new WritableScopeImpl(outerScope, owner, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return getBlockReturnedTypeWithWritableScope(blockScope, statements, coercionStrategyForLastExpression, context);
}
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    // Join the types of all returned expressions; an empty set means the function
    // never completes normally, i.e. its type is Nothing.
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
// Types the body, then gathers every returned expression (and every element that
// completes by returning Unit) together with its recorded type.
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Run inference over the whole body so expression types are recorded in the trace.
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
// Use the type cached during the inference pass above; untyped expressions are skipped.
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// A returned expression is used as a value, not as a statement.
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
// Types each statement of a block in order; the block's type is the last statement's
// type. Only the last statement sees the caller's expected type. When coercion to Unit
// is requested, the last statement is first tried against Unit via a temporary trace,
// and on mismatch re-tried with no expectation, committing whichever attempt succeeded.
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
// Intermediate statements carry no expectation; only the return-type expectation flows through.
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
// First attempt: check the last statement against Unit, buffering diagnostics
// and watching for a type mismatch on exactly this expression.
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Second attempt: no expectation. Commit whichever trace has no mismatch
// (if both mismatch, prefer the Unit-expecting diagnostics).
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
// Last statement, no coercion: check it directly against the expected type.
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Propagate data-flow facts (e.g. from 'if (x == null) return') to later statements.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
// Wraps a trace so that a typeMismatch reported against exactly expressionToWatch
// sets mismatchFound[0]; all diagnostics still reach the underlying handler chain.
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
// Only flag mismatches on the expression we are watching (identity compare).
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
// When the expression is a simple variable reference, substitute the data-flow-refined
// "out" type for that variable; otherwise return the initial type unchanged.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression != null) {
        VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
        if (variable != null) {
            return context.dataFlowInfo.getOutType(variable);
        }
    }
    return initialType;
}
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    // Report a mismatch only when an expectation is actually in force;
    // the inferred type is returned unchanged either way.
    JetType expected = context.expectedType;
    boolean expectationActive = expressionType != null && expected != null && expected != NO_EXPECTED_TYPE;
    if (expectationActive && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, expected)) {
        context.trace.getErrorHandler().typeMismatch(expression, expected, expressionType);
    }
    return expressionType;
}
// Like checkType, but before reporting a mismatch it consults data-flow information:
// if the expression is a variable whose flow-refined type satisfies the expectation,
// an automatic cast is recorded and the expected type is returned instead.
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Try the most recently established flow type first (list is built oldest-first).
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType : possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the nullability-refined "out" type.
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// appropriateTypeFound implies variableDescriptor != null here.
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
// Records an automatic cast for the expression, or reports why one is impossible:
// a smart cast is only sound for values that cannot change underneath us.
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    // Only check when the three lists line up one-to-one; otherwise pass the types through untouched.
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    // Each argument is (possibly) enriched via data-flow info against its expected type.
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < argumentTypes.size(); i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checked;
}
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    // Look through an 'x : T' type ascription to the underlying expression.
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS ascription = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (ascription.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(ascription.getLeft(), context);
        }
    }
    // Only a plain name that resolves to a variable qualifies; anything else yields null.
    if (receiverExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression name = (JetSimpleNameExpression) receiverExpression;
        DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, name);
        if (target instanceof VariableDescriptor) {
            return (VariableDescriptor) target;
        }
    }
    return null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Whether the last expression of a block may be coerced to Unit when the
// expected type is Unit (used for function-literal bodies).
private enum CoercionStrategy {
NO_COERCION,
COERCION_TO_UNIT
}
// Factory for inference contexts; the TypeInferenceContext constructor is deprecated
// in favor of this method.
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
// Immutable bundle of everything a single inference step needs: trace, scope,
// data-flow facts and the two expectations (expression type and return type,
// which use the NO_EXPECTED_TYPE / FORBIDDEN sentinels). The replaceXxx methods
// derive a new context, reusing 'this' when nothing would change (reference compare,
// which is what makes the sentinels work).
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
public final DataFlowInfo dataFlowInfo;
public final JetType expectedType;
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
// null is normalized to the NO_EXPECTED_TYPE sentinel.
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
// null is normalized to the NO_EXPECTED_TYPE sentinel.
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow facts produced by the most recently visited statement, if any;
// consumed (and then reset) by getBlockReturnedTypeWithWritableScope.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
// Central entry point: infers and caches an expression's type in the trace.
// Already-PROCESSED expressions return their cached EXPRESSION_TYPE; otherwise the
// visitor runs, the result is recorded, and Nothing-typed (non-null) expressions
// mark the code they dominate as unreachable. Always marks the expression PROCESSED.
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily-computed types before caching.
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
// A lazily-computed type depended on itself; report and give up on this expression.
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
/**
 * Clears per-pass visitor state so the visitor can be reused for another
 * inference run. (Commented-out fields are kept for historical context.)
 */
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports every expression dominated by a non-terminating expression as
 * unreachable. Diagnostics are attached only to the root expressions of the
 * dominated set to avoid a cascade of redundant errors.
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    String message = "This code is unreachable, because '" + expression.getText() + "' never terminates normally";
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(), message);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple name reference. Field references ({@code $name}) are resolved
 * against the scope's backing-field namespace; all other names are delegated
 * to {@link #getSelectorReturnType} with a null receiver.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
String referencedName = expression.getReferencedName();
// Backing-field reference: '$name' resolves to a property's field.
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
// assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
// if (referencedName != null) {
// VariableDescriptor variable = context.scope.getVariable(referencedName);
// if (variable != null) {
// context.trace.record(REFERENCE_TARGET, expression, variable);
// JetType result = variable.getOutType();
// if (result == null) {
// context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
// }
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// return lookupNamespaceOrClassObject(expression, referencedName, context);
// ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
// if (classifier != null) {
// JetType classObjectType = classifier.getClassObjectType();
// JetType result = null;
// if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
// result = classObjectType;
// }
// else {
// context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
// }
// context.trace.record(REFERENCE_TARGET, expression, classifier);
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// JetType[] result = new JetType[1];
// if (furtherNameLookup(expression, referencedName, result, context)) {
// return context.services.checkEnrichedType(result[0], expression, context);
// }
//
// }
// }
// context.trace.getErrorHandler().unresolvedReference(expression);
// }
}
// Unresolved field reference falls through to here.
return null;
}
/**
 * Resolves {@code referencedName} as a classifier's class object, or failing
 * that, hands off to {@link #furtherNameLookup} (namespace lookup by default).
 * Returns null when nothing matched; callers report unresolved references.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
// A class object is usable as a value only in namespace position or when
// the classifier explicitly allows it.
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
// The reference target is recorded even when an error was reported above.
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
// Keep type checking going with an error type rather than null.
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
/**
 * Whether names are currently being resolved in namespace position.
 * Subclasses override this to allow namespace/class-object results;
 * the base visitor always resolves in expression position.
 */
public boolean isNamespacePosition() {
return false;
}
/**
 * Extension point for resolving names that are neither variables nor
 * classifiers. The base implementation only detects namespace names, which
 * are illegal in expression position, and reports an error for them.
 *
 * @param result one-element out-parameter for the resolved type; note the
 *               base implementation never writes to it — subclasses do
 * @return true if this lookup handled the name (even by reporting an error)
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
if (namespaceType != null) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
return true;
}
return false;
}
/**
 * Looks up {@code referencedName} as a namespace in the current scope.
 * On success the reference target is recorded in the trace.
 *
 * @return the namespace's type, or null if no such namespace exists
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespaceDescriptor = context.scope.getNamespace(referencedName);
    if (namespaceDescriptor != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespaceDescriptor);
        return namespaceDescriptor.getNamespaceType();
    }
    return null;
}
/**
 * Types an object literal. The object declaration is analyzed by a fresh
 * TopDownAnalyzer; a record handler on the trace intercepts the moment the
 * object's descriptor is written and captures its default type (wrapped in a
 * DeferredType because the descriptor may not be fully initialized yet).
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the descriptor of THIS object literal's declaration.
if (declaration == expression.getObjectDeclaration()) {
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Record the type eagerly so recursive references see it (see getType()).
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Listen on every declaration-to-descriptor slice; we don't know in advance
// which one the object declaration will be recorded under.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal. When a function type is expected from context,
 * missing parameter types, the receiver type and the return type are taken
 * from that expected type; otherwise unannotated parameters are an error and
 * the return type is inferred from the body.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No explicit receiver: inherit the enclosing 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// No annotation: borrow the type from the expected function type if any.
if (valueParameters != null) {
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// The effective receiver comes from the explicit annotation, else from the
// expected function type, else there is none.
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Explicit return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
// Infer from the body, propagating the expected return type if known.
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
// When Unit is expected as the return type, coerce the literal's type to
// a Unit-returning function type regardless of the body's actual type.
if (functionTypeExpected) {
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/**
 * A parenthesized expression has the type of its inner expression,
 * typed in the unchanged enclosing scope.
 */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetExpression innerExpression = expression.getExpression();
    JetType innerType = getType(innerExpression, context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal constant. The compile-time value is resolved against the
 * expected type (so, e.g., integer literals can adopt a narrower type); on
 * resolution failure an error is reported and the literal's default type is
 * used to keep type checking going.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
ASTNode node = expression.getNode();
IElementType elementType = node.getElementType();
String text = node.getText();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
CompileTimeConstantResolver compileTimeConstantResolver = context.services.compileTimeConstantResolver;
CompileTimeConstant<?> value;
if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
value = compileTimeConstantResolver.getIntegerValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
value = compileTimeConstantResolver.getFloatValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
value = compileTimeConstantResolver.getBooleanValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
value = compileTimeConstantResolver.getCharValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
value = compileTimeConstantResolver.getRawStringValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.NULL) {
value = compileTimeConstantResolver.getNullValue(context.expectedType);
}
else {
throw new IllegalArgumentException("Unsupported constant: " + expression);
}
if (value instanceof ErrorValue) {
ErrorValue errorValue = (ErrorValue) value;
context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
// Fall back to the literal kind's default type so checking can continue.
return getDefaultType(elementType);
}
else {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
return context.services.checkType(value.getType(standardLibrary), expression, context);
}
}
/**
 * Maps a literal kind to its default type, used as a fallback when the
 * compile-time value could not be resolved against the expected type.
 *
 * @throws IllegalArgumentException for a non-constant element type
 */
@NotNull
private JetType getDefaultType(IElementType elementType) {
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) return semanticServices.getStandardLibrary().getIntType();
    if (elementType == JetNodeTypes.FLOAT_CONSTANT) return semanticServices.getStandardLibrary().getDoubleType();
    if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) return semanticServices.getStandardLibrary().getBooleanType();
    if (elementType == JetNodeTypes.CHARACTER_CONSTANT) return semanticServices.getStandardLibrary().getCharType();
    if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) return semanticServices.getStandardLibrary().getStringType();
    if (elementType == JetNodeTypes.NULL) return JetStandardClasses.getNullableNothingType();
    throw new IllegalArgumentException("Unsupported constant type: " + elementType);
}
/**
 * Types a 'throw'. The thrown expression is type-checked for its side effects
 * on the trace; the whole expression has type Nothing since it never
 * completes normally.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
JetExpression thrownExpression = expression.getThrownExpression();
if (thrownExpression != null) {
// The local is currently unused, pending the Throwable-subtype check below.
JetType type = getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
// TODO : check that it inherits Throwable
}
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types a 'return'. Reports an error where 'return' is forbidden; otherwise
 * checks the returned value (if any) against the expected return type, or —
 * for a bare 'return' — verifies the function is Unit-returning. The return
 * expression itself has type Nothing since it never completes normally.
 *
 * Fix: removed the dead local 'returnedType', which was initialized to the
 * Unit type but never read.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Type-check the returned value against the function's expected return type.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else {
        // Bare 'return' is only valid when the function returns Unit.
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * 'break' never yields a value, so its type is Nothing.
 */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/**
 * 'continue' never yields a value, so its type is Nothing.
 */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
/**
 * Types 'e : T', 'e as T' and 'e as? T'. The colon form statically checks
 * that the left side is a subtype of T; 'as' yields T (with an impossibility
 * check); 'as?' yields T? for the same reason.
 */
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (operationType == JetTokens.COLON) {
// 'e : T' is a static upcast; the left side must already be a subtype.
if (actualType != null && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
}
result = targetType;
}
else if (operationType == JetTokens.AS_KEYWORD) {
checkForCastImpossibility(expression, actualType, targetType, context);
result = targetType;
}
else if (operationType == JetTokens.AS_SAFE) {
// 'as?' yields null on failure, hence the nullable target type.
checkForCastImpossibility(expression, actualType, targetType, context);
result = TypeUtils.makeNullable(targetType);
}
else {
context.trace.getErrorHandler().genericError(expression.getOperationSign().getNode(), "Unsupported binary operation");
}
}
return context.services.checkType(result, expression, context);
}
/**
 * Warns about casts that are redundant (the static type already conforms)
 * or that can never succeed (the types are unrelated). Both subtype checks
 * are evaluated on every path, exactly as in the original control flow.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    // Nothing to diagnose when the left-hand side failed to type-check.
    if (actualType == null) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    boolean downcastPossible = typeChecker.isSubtypeOf(targetType, actualType);
    boolean alreadyConforms = typeChecker.isSubtypeOf(actualType, targetType);
    ASTNode operationNode = expression.getOperationSign().getNode();
    if (downcastPossible) {
        if (alreadyConforms) {
            context.trace.getErrorHandler().genericWarning(operationNode, "No cast needed");
        }
    }
    else if (alreadyConforms) {
        context.trace.getErrorHandler().genericWarning(operationNode, "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(operationNode, "This cast can never succeed");
    }
}
/**
 * Types a tuple literal. Entries are typed independently; when a tuple type
 * is expected, entry types are checked against (and possibly enriched by)
 * the expected component types.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
List<JetExpression> entries = expression.getEntries();
List<JetType> types = new ArrayList<JetType>();
for (JetExpression entry : entries) {
types.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
}
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
List<JetType> enrichedTypes = context.services.checkArgumentTypes(types, entries, context.expectedType.getArguments(), context);
// Identity comparison: checkArgumentTypes returns the same list when no
// enrichment happened.
if (enrichedTypes != types) {
return JetStandardClasses.getTupleType(enrichedTypes);
}
}
// TODO : labels
return context.services.checkType(JetStandardClasses.getTupleType(types), expression, context);
}
/**
 * Types a 'this' expression, optionally labeled ('this@label') and optionally
 * qualified by a supertype ('this&lt;Super&gt;'). Labeled 'this' is resolved
 * against declarations carrying that label, falling back to control-flow
 * information for function literals; a supertype qualifier substitutes the
 * matching declared supertype.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Exactly one declaration carries this label: class => its default type,
// function => its receiver type.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver on the literal: 'this' is undefined there; the
// Nothing type triggers the error report below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': take the enclosing scope's this-type.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
// Find the declared supertype matching the qualifier and
// substitute this-type's arguments into it.
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
/**
 * A block expression's type is its computed returned type, with no coercion
 * applied at this level.
 */
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
    JetType blockType = context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context);
    return context.services.checkType(blockType, expression, context);
}
/**
 * Types a 'when' expression. Each entry's conditions are checked against the
 * subject; data-flow info from multiple conditions in one entry is OR-ed
 * (any condition may have matched) and then AND-ed with the incoming info.
 * The result type is the common supertype of all entry-body types.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: it may introduce bindings into the entry's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
// Multiple conditions: OR their data-flow info, since any one of them
// may be the condition that matched.
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// Bodies are typed against the caller's expected type (not the erased one).
JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Checks a single 'when' condition against the subject and returns the
 * data-flow info valid inside the matching entry. Only is-pattern conditions
 * currently refine data-flow info; call and range conditions are checked for
 * well-typedness only.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor can write through to it.
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
// JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
// The call is resolved with the subject as its receiver and must be boolean.
JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Checks a pattern against the subject type, extending {@code scopeToExtend}
 * with variables the pattern binds and returning the refined data-flow info.
 * Type patterns narrow the subject variables; tuple and decomposer patterns
 * recurse into their components; binding patterns introduce a new variable.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor can write through to it.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
// A successful 'is' check narrows the subject variables to 'type'.
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
// The subject must be exactly the TupleN type of matching arity.
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
// AND the component patterns' refinements together.
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
// The decomposer call's return type becomes the subject of the argument patterns.
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
// Unannotated binding takes the subject's type.
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The bound variable joins the subject variables for the guard condition.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
// Unrelated types can never match; an empty intersection proves it.
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType); // TODO : message
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Types a try/catch/finally. Catch bodies are typed in a scope holding the
 * catch parameter. When a finally block is present, only its type and the
 * try block's type contribute to the result (catch bodies are still
 * type-checked for diagnostics); the result is the common supertype.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchBody, context.replaceScope(catchScope));
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
}
JetType type = getType(tryBlock, context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
// Infers the type of an if-expression. Each branch is checked under the data
// flow facts implied by the condition's truth value; when exactly one branch
// cannot complete normally (type Nothing), the other branch's facts survive
// past the whole expression (via resultDataFlowInfo).
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// Variables bound by 'is' patterns in the condition become visible in 'then'.
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
// 'if' without 'else' is a statement of type Unit.
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// 'then' always jumps out, so afterwards the condition was false.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Both branches present: the result type is their common supertype, or
// whichever branch produced a type when the other failed to.
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
// Only 'then' jumps out — after the 'if', the else-branch facts hold.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
// Derives the data flow facts that hold when 'condition' evaluates to
// 'conditionValue', and (when scopeToExtend is non-null) makes variables
// bound by 'is' patterns in the condition visible in that scope.
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// 'x is T' contributes facts only when the branch implies the check
// succeeded: positive check under true, negated check under false.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
// Facts and bound variables were recorded when the 'is' expression
// itself was type-checked (see visitIsExpression on the inferrer).
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// For '&&' both operands' facts hold when the whole thing is true;
// for '||' when it is false. Only in those cases may pattern-bound
// variables leak into the surrounding scope.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
// (In)equality comparisons can refine the nullability of a simple
// variable compared against null or against a non-null value.
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
if (!(left instanceof JetSimpleNameExpression)) {
// Normalize so 'left' is the simple name, if either side is one.
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
// Symmetric case: the right-hand variable is refined instead.
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records "variable equals / does not equal null" depending on the
// comparison operator and which branch is being described.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// '!' flips the branch value for its operand.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent: analyze the inner expression.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
// A pattern without recorded facts — fall back to the incoming info.
return context.dataFlowInfo;
}
return result[0];
}
// Type-checks a condition expression in the given scope and reports an error
// unless its type is Boolean. Null conditions and unresolvable types are
// silently accepted (the error was reported elsewhere).
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
if (condition == null) {
return;
}
JetType type = getType(condition, context.replaceScope(scope));
if (type == null || isBoolean(type)) {
return;
}
context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + type);
}
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
// A while loop is a statement: its value is always Unit, so the parts are
// checked with NO_EXPECTED_TYPE and Unit is matched against the caller's
// expectation at the end.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression body = expression.getBody();
if (body != null) {
// Variables bound in the condition (e.g. by 'is' patterns) and its
// data flow facts are visible while checking the body.
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, body, conditionInfo, context);
}
if (!flowInformationProvider.isBreakable(expression)) {
// No 'break' can exit the loop, so code after it only runs when the
// condition turned false — propagate that fact.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
// do..while is a Unit-typed statement. Unlike 'while', variables declared
// in the body remain visible to the condition, so the body is checked in a
// writable scope that the condition is then resolved against.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
// A parameterless function literal body is treated as a plain block.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
// NOTE(review): getBodyExpression() is dereferenced without a null
// check — confirm the parser guarantees a body at this point.
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// Without a 'break', the loop exits only when the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Creates a child scope of 'scope', owned by the same containing declaration,
// that reports redeclarations through the trace's error handler.
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
DeclarationDescriptor owner = scope.getContainingDeclaration();
return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
// A for-loop is Unit-typed. The element type comes from the loop range's
// iterator convention; the declared loop parameter, if explicitly typed,
// must be able to hold that element type.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
JetType expectedParameterType = null;
if (loopRangeType != null) {
// Verify iterator()/hasNext/next and obtain next()'s return type.
expectedParameterType = checkIterableConvention(loopRangeType, loopRange.getNode(), context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicitly typed parameter: check it against the element type.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// Untyped parameter: infer from the range, falling back to an error
// type so resolution can still proceed.
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
// Checks that 'type' satisfies the for-loop convention: an iterator()
// method whose result supports hasNext (as a function or a property) and
// next(). Returns the element type (next()'s return type), or null on
// failure (errors are reported on 'reportErrorsOn').
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
if (iteratorResolutionResult.isSuccess()) {
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
// Having both a hasNext() function and a hasNext property is ambiguous.
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
return nextResolutionResult.getDescriptor().getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
// Returns true iff the iterator type supplies a hasNext() function. An
// ambiguous or non-Boolean hasNext() is reported as an error but still
// counts as "supported".
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> resolution = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
if (resolution.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
return true;
}
if (resolution.isNothing()) {
return false;
}
JetType returnType = resolution.getDescriptor().getReturnType();
if (!isBoolean(returnType)) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
}
return true;
}
// Returns true iff the iterator type declares a 'hasNext' property. An
// unreadable or non-Boolean property is reported but still counts as present.
private boolean checkHasNextPropertySupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
// TODO :extension properties
VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
if (property == null) {
return false;
}
JetType propertyType = property.getOutType();
if (propertyType == null) {
// TODO : accessibility
context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must be readable");
}
else if (!isBoolean(propertyType)) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
}
return true;
}
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
// Hash-qualified expressions are not supported by the inferrer: report on
// the operation token and yield no type.
ASTNode operationNode = expression.getOperationTokenNode();
context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
return null;
}
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
// Infers the type of 'receiver.selector' (also '?.'/'?' forms). First runs
// a "clean" resolution with the receiver's declared type; if that fails,
// retries against each autocast type the data flow permits, committing only
// the successful attempt's bindings.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// '?.': the result may be null when the receiver is, unless it is Unit.
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
// Clean resolution succeeded — keep its bindings.
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Retry against each possible (autocast) type of the receiver
// variable; reversed so the most recently established facts go first.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard this attempt's bindings and start a fresh trace.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Even autocasts did not help — commit the clean attempt so its
// diagnostics get reported.
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// '?' form: the selector must be Boolean; the whole expression's type
// is the receiver type made nullable.
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Flag unsafe access ('.') through a nullable receiver.
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
// Digs through a selector expression to the function it ultimately invokes,
// so null-safety checks can report against that function. Falls back to an
// error descriptor when nothing is found.
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
// f(...) — the callee expression names the function; recurse into it.
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
// Use the resolution already recorded in the binding trace.
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// Nothing resolved — return a placeholder so callers never see null.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
// Resolves the selector of a qualified expression against 'receiverType'
// and returns its type: a call, a simple name (property / namespace /
// class object), or a nested qualified expression.
if (selectorExpression instanceof JetCallExpression) {
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// First try a property; commit the bindings only if that succeeds.
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Not a property: try a namespace or class object in the receiver's scope.
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
// a.b.c — resolve the inner receiver first, then recurse on its selector.
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
// A plain call has no receiver (hence the null argument): resolution runs
// directly in the current scope, and the resolved type is then matched
// against the expected type.
return context.services.checkType(
context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType),
expression,
context);
}
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
// 'x is Pattern' is always Boolean. As a side effect, the data flow facts
// and the variables bound by the pattern are remembered in the two maps so
// that extractDataFlowInfoFromCondition can apply them later.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
JetPattern pattern = expression.getPattern();
if (pattern != null && knownType != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
}
return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
// Infers the type of a unary operation by resolving it as a call to the
// conventional operator function (looked up in unaryOperationNames).
// Labeled expressions pass straight through to the labeled body.
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
// ++/-- either mutate in place (Unit-returning operator) or must return
// a value assignable back to the operand.
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
result = JetStandardClasses.getUnitType();
}
else {
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
// The operand is written back, so record the reassignment.
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetSimpleNameExpression operationSign = expression.getOperationReference();
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
JetType result = null;
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.IDENTIFIER) {
String referencedName = operationSign.getReferencedName();
if (referencedName != null) {
result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
}
}
else if (binaryOperationNames.containsKey(operationType)) {
result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
}
else if (operationType == JetTokens.EQ) {
result = visitAssignment(expression, context);
}
else if (assignmentOperationNames.containsKey(operationType)) {
result = visitAssignmentOperation(expression, context);
}
else if (comparisonOperations.contains(operationType)) {
JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
if (compareToReturnType != null) {
TypeConstructor constructor = compareToReturnType.getConstructor();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
if (constructor.equals(intTypeConstructor)) {
result = standardLibrary.getBooleanType();
} else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
}
}
}
else if (equalsOperations.contains(operationType)) {
String name = "equals";
if (right != null) {
JetType leftType = getType(left, context.replaceScope(context.scope));
if (leftType != null) {
JetType rightType = getType(right, context.replaceScope(context.scope));
if (rightType != null) {
OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
context.scope, leftType, "equals",
Collections.singletonList(JetStandardClasses.getNullableAnyType()));
if (resolutionResult.isSuccess()) {
FunctionDescriptor equals = resolutionResult.getDescriptor();
context.trace.record(REFERENCE_TARGET, operationSign, equals);
if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
}
}
else {
if (resolutionResult.isAmbiguity()) {
StringBuilder stringBuilder = new StringBuilder();
for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
}
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
}
}
}
}
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
// TODO : Check comparison pointlessness
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (inOperations.contains(operationType)) {
if (right == null) {
result = ErrorUtils.createErrorType("No right argument"); // TODO
return null;
}
checkInExpression(operationSign, left, right, context);
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
JetType leftType = getType(left, context.replaceScope(context.scope));
WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
if (leftType != null && !isBoolean(leftType)) {
context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
}
if (rightType != null && !isBoolean(rightType)) {
context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ELVIS) {
JetType leftType = getType(left, context.replaceScope(context.scope));
JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
if (leftType != null) {
if (!leftType.isNullable()) {
context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) is always returns the left operand of non-nullable type " + leftType);
}
if (rightType != null) {
context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
}
}
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks an 'in'/'!in' expression by resolving the 'contains' convention:
 * "a in b" is resolved as a call of 'contains' with receiver type of 'b'
 * and argument 'a', and the result is required to be Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String name = "contains";
    // The receiver of 'contains' is the RIGHT operand; safeGetType never returns null
    // (it falls back to an error type), so resolution can always proceed.
    JetType receiverType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            name, receiverType, context.expectedType);
    // Resolution may fail; ensureBooleanResult tolerates a null type (error reported elsewhere).
    JetType containsType = functionDescriptor != null ? functionDescriptor.getReturnType() : null;
    ensureBooleanResult(operationSign, name, containsType, context);
}
/**
 * For equality-like comparisons, reports an error when the two operand types
 * have an empty intersection (i.e. no value could ever inhabit both), which
 * would make the comparison pointless.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(left, context.replaceScope(context.scope));
    if (leftType != null && right != null) {
        JetType rightType = getType(right, context.replaceScope(context.scope));
        if (rightType != null) {
            // intersect() returns null when the set of types has no common inhabitant.
            JetType intersect = TypeUtils.intersect(semanticServices.getTypeChecker(), new HashSet<JetType>(Arrays.asList(leftType, rightType)));
            if (intersect == null) {
                context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
            }
        }
    }
}
/**
 * In expression position, augmented assignments (+=, -=, ...) are rejected;
 * the writable-scope subclass overrides this with the real handling.
 */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * In expression position, plain assignments are rejected; the writable-scope
 * subclass overrides this with the real handling.
 */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * Reports that an assignment was used where an expression is required.
 * Always returns null (no type for an erroneous construct).
 */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
    return null;
}
/**
 * Convenience wrapper: checks that a convention function named {@code name}
 * returns Boolean, quoting the name in the error message.
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Checks that {@code resultType}, when known, is (convertible to) Boolean.
 * Reports an error at {@code operationSign} otherwise.
 *
 * @param subjectName human-readable description used in the error message
 * @return true if the type is unknown or Boolean; false if a mismatch was reported
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // An unknown type is tolerated: the failure was reported where the type was computed.
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/**
 * True when {@code type} is convertible to the standard-library Boolean type
 * (not strict equality: conversion is what the type checker decides).
 */
private boolean isBoolean(@NotNull JetType type) {
    return semanticServices.getTypeChecker().isConvertibleTo(type, semanticServices.getStandardLibrary().getBooleanType());
}
/**
 * Types an indexing expression {@code a[i, ...]} in read position by resolving
 * it as a call to the 'get' convention function on the array expression's type.
 * Returns null when either the receiver or the call cannot be resolved.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    // The expected type applies to the whole a[i] expression, not to 'a' itself.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression arrayExpression = expression.getArrayExpression();
    JetType receiverType = getType(arrayExpression, context.replaceScope(context.scope));
    if (receiverType != null) {
        // All index expressions become arguments of the 'get' call.
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                context.scope,
                CallMaker.makeCall(expression, expression.getIndexExpressions()),
                expression,
                "get",
                receiverType,
                context.expectedType);
        if (functionDescriptor != null) {
            // Check the 'get' return type against the original expected type.
            return context.services.checkType(functionDescriptor.getReturnType(), expression, contextWithExpectedType);
        }
    }
    return null;
}
/**
 * Resolves a binary expression {@code a op b} as the convention call
 * {@code a.name(b)} and returns the resolved function's return type,
 * or null when resolution fails.
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    // The left operand is the receiver of the convention call.
    JetType leftType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            leftType,
            context.expectedType);
    if (functionDescriptor != null) {
        // if (leftType.isNullable()) {
        // // TODO : better error message for '1 + nullableVar' case
        // context.trace.getErrorHandler().genericError(operationSign.getNode(),
        // "Infix call corresponds to a dot-qualified call '" +
        // left.getText() + "." + name + "(" + right.getText() + ")'" +
        // " which is not allowed on a nullable receiver '" + right.getText() + "'." +
        // " Use '?.'-qualified call instead");
        // }
        return functionDescriptor.getReturnType();
    }
    return null;
}
/**
 * Declarations are not expressions; in this (read-only) visitor they are
 * always an error. The writable-scope visitor handles them for real.
 */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
    return null;
}
/**
 * The root-namespace reference is only meaningful in namespace position;
 * TypeInferrerVisitorWithNamespaces overrides this to accept it.
 */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
    return null;
}
/**
 * Types a string template. The result is always String; additionally, when
 * every entry is a compile-time constant (literals and valid escapes only),
 * the assembled value is recorded as a COMPILE_TIME_VALUE on the trace.
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Embedded expressions are typed without any expectation.
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    // value[0] is set to OUT_OF_RANGE as soon as the template is found to be
    // non-constant; the one-element array works around the final-variable
    // restriction for the anonymous visitor below.
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // Type the embedded expression for its side effects on the trace.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                // An embedded expression makes the template non-constant.
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                // Escape entries are exactly two characters: backslash + escaped char.
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    // Invalid escape also makes the template non-constant.
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    // Only record a compile-time value when no entry marked the template non-constant.
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/**
 * Fallback for PSI elements this visitor has no handler for: report and
 * yield no type.
 */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
    return null;
}
}
/**
 * Variant of the type-inference visitor used where namespace references are
 * legal (e.g. the left part of a qualified expression): it accepts the root
 * namespace and resolves simple names to namespace types as a fallback.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }
    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // Unlike the base visitor, 'namespace' is allowed here and gets the root namespace type.
        return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
    }
    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // When ordinary lookup fails, try to resolve the name as a namespace.
        result[0] = lookupNamespaceType(expression, referencedName, context);
        return result[0] != null;
    }
}
/**
 * Visitor used for statements inside a block body: it owns a writable scope
 * into which local declarations (properties, functions, objects) are added as
 * they are encountered, and it accepts assignments (which are statements and
 * therefore yield no type — most visit methods return null).
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    // Scope accumulating local declarations seen so far in the enclosing block.
    private final WritableScope scope;
    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }
    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        // Analyze the local object declaration, then expose it to the block as a property.
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        // Declarations are statements: no expression type.
        return null;
    }
    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        // Local variables may not have receivers or custom accessors; each is reported separately.
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        if (property.getPropertyTypeRef() != null && initializer != null) {
            // Type the initializer against the declared type so mismatches are reported there.
            JetType outType = propertyDescriptor.getOutType();
            JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
            // if (outType != null &&
            // initializerType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
            // context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
            // }
        }
        // Make the variable visible to subsequent statements; done after typing the
        // initializer so the initializer cannot refer to the variable being declared.
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }
    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        // Local function: resolve its descriptor, register it in the block scope,
        // and check its body against its declared return type.
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }
    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }
    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }
    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        // Any declaration kind not handled above is unsupported in a block.
        return visitJetElement(dcl, context);
    }
    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        // a += b: first try the dedicated 'plusAssign'-style function on a temporary
        // trace; if that fails, fall back to the plain binary counterpart ('plus')
        // and mark the expression as a variable reassignment.
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
        JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
        if (assignmentOperationType == null) {
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        else {
            // The 'xAssign' resolution succeeded: commit its bindings to the real trace.
            temporaryBindingTrace.commit();
        }
        return null;
    }
    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        JetExpression left = expression.getLeft();
        // Look through parentheses to detect a[i] = x on the left-hand side.
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        if (deparenthesized instanceof JetArrayAccessExpression) {
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        // Plain assignment: type the LHS freely, then type the RHS against the LHS type.
        JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
        if (right != null) {
            JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
            // if (rightType != null &&
            // leftType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
            // context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
            // }
        }
        return null;
    }
    /**
     * Resolves {@code a[i] = x} as a call to the 'set' convention function on
     * the array expression's type, recording the resolved function as the
     * target of the operation sign.
     */
    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
        JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
        if (receiverType == null) return null;
        //
        // The call's arguments are the indices plus the right-hand side.
        Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
        // // TODO : nasty hack: effort is duplicated
        // context.services.callResolver.resolveCallWithGivenName(
        // scope,
        // call,
        // arrayAccessExpression,
        // "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                scope,
                call,
                arrayAccessExpression,
                "set", receiverType, NO_EXPECTED_TYPE);
        if (functionDescriptor == null) return null;
        context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
    }
    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
import static org.jetbrains.jet.lang.resolve.BindingContext.STATEMENT;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel JetType used where a "return" is forbidden (e.g. expression-bodied
// functions). Compared by identity only; every JetType method throws, so any
// accidental use as a real type fails fast.
private static final JetType FORBIDDEN = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "FORBIDDEN";
    }
};
// Sentinel JetType meaning "no expectation": the inferrer skips expected-type
// checks when this instance is passed. Compared by identity only; all JetType
// methods throw to catch accidental use as a real type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "NO_EXPECTED_TYPE";
    }
};
// Operator token -> convention function name for unary operators (++a => a.inc(), !a => a.not(), ...).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.PLUSPLUS, "inc")
        .put(JetTokens.MINUSMINUS, "dec")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.EXCL, "not")
        .build();
// Operator token -> convention function name for binary operators (a * b => a.times(b), ...).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MUL, "times")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.DIV, "div")
        .put(JetTokens.PERC, "mod")
        .put(JetTokens.ARROW, "arrow")
        .put(JetTokens.RANGE, "rangeTo")
        .build();
// Comparison operators resolved through 'compareTo'.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// Equality operators resolved through 'equals'.
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// Membership operators resolved through 'contains'.
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Augmented-assignment token -> dedicated convention name (a += b => a.plusAssign(b), ...).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MULTEQ, "timesAssign")
        .put(JetTokens.DIVEQ, "divAssign")
        .put(JetTokens.PERCEQ, "modAssign")
        .put(JetTokens.PLUSEQ, "plusAssign")
        .put(JetTokens.MINUSEQ, "minusAssign")
        .build();
// Augmented-assignment token -> plain binary token used as a fallback
// when no 'xAssign' function resolves (a += b => a = a.plus(b)).
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
        .put(JetTokens.MULTEQ, JetTokens.MUL)
        .put(JetTokens.DIVEQ, JetTokens.DIV)
        .put(JetTokens.PERCEQ, JetTokens.PERC)
        .put(JetTokens.PLUSEQ, JetTokens.PLUS)
        .put(JetTokens.MINUSEQ, JetTokens.MINUS)
        .build();
private final JetSemanticServices semanticServices;
// Supplies control-flow facts: unreachable elements and returned expressions.
private final JetFlowInformationProvider flowInformationProvider;
// Caches computed per JetPattern during pattern matching.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * @param flowInformationProvider supplies control-flow facts (unreachable code,
 *        returned expressions) used when checking function bodies
 * @param semanticServices access to the standard library and type checker
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.semanticServices = semanticServices;
    this.flowInformationProvider = flowInformationProvider;
}
/**
 * Creates a {@link Services} facade bound to the given trace; a fresh
 * instance is returned on every call.
 */
public Services getServices(@NotNull BindingTrace trace) {
    return new Services(trace);
}
public class Services {
// Trace all diagnostics and bindings produced by this Services instance go to.
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
// Shared stateless visitors reused across getType calls.
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
private Services(BindingTrace trace) {
    this.trace = trace;
    this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
    this.typeInferrerVisitor = new TypeInferrerVisitor();
    this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
    this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
/**
 * Creates a block-statement visitor bound to the given writable scope;
 * a fresh instance per block, since the scope is mutated as statements are seen.
 */
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
    return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Like {@link #getType}, but never returns null: when inference fails,
 * a synthetic error type naming the expression text is returned instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    // Substitute an error type so callers can proceed without null checks.
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Infers the type of {@code expression} in {@code scope} against
 * {@code expectedType}, with an empty data-flow state and returns FORBIDDEN
 * (i.e. 'return' is not allowed inside). Null when inference fails.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
/**
 * Same as {@link #getType} but in namespace position: namespace references
 * are accepted and resolved to namespace types. No expected type is imposed.
 */
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
    return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// Exposes the call resolver bound to this Services' trace.
public CallResolver getCallResolver() {
    return callResolver;
}
/**
 * Enforces null-safety of a receiver at a call site:
 * '.' on a nullable receiver (whose callee does not accept null) is an error,
 * while '?.' on a non-nullable receiver or on a namespace is flagged as
 * unnecessary (warning) or illegal (error), respectively.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType != null && callee != null) {
        // Namespaces are not values, so nullability does not apply to them.
        boolean namespaceType = receiverType instanceof NamespaceType;
        JetType calleeReceiverType = callee.getReceiverType();
        boolean nullableReceiver = !namespaceType && receiverType.isNullable();
        // A callee declared with a nullable receiver type may be called via '.' on a nullable value.
        boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
        IElementType operationSign = operationTokenNode.getElementType();
        if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
            trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
        }
        else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
            if (namespaceType) {
                trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
            }
            else {
                trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
            }
        }
    }
}
/**
 * Checks a function body against the function's return type, starting from
 * an empty data-flow state.
 */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
    checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Determines the expected return type from the descriptor (or no expectation
 * when an expression-bodied function has no declared return type, i.e. the
 * type is to be inferred) and delegates to the body-checking overload within
 * the function's inner scope.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body with inferred return type: nothing to check against.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
    // Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
    // if (typeMap.isEmpty()) {
    // return; // The function returns Nothing
    // }
    // for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
    // JetType actualType = entry.castValue();
    // JetElement element = entry.getKey();
    // JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    // if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
    // if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
    // if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
    // && element.getParent() instanceof JetReturnExpression) {
    // context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
    // }
    // }
    // else {
    // if (element == function) {
    // JetExpression bodyExpression = function.getBodyExpression();
    // assert bodyExpression != null;
    // context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
    // }
    // else if (element instanceof JetExpression) {
    // JetExpression expression = (JetExpression) element;
    // context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
    // }
    // else {
    // context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
    // }
    // }
    // }
    // }
}
/**
 * Checks a function body against an explicitly given expected return type,
 * starting from an empty data-flow state and no coercion of the last expression.
 */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
/**
 * Core body check: types the function body, then uses control-flow results to
 * report unreachable code, a missing return value, returns inside an
 * expression body, and block-body tail expressions that are not returns.
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    final boolean blockBody = function.hasBlockBody();
    // Block bodies get no expected type for the body itself (individual 'return's
    // are checked against expectedReturnType); expression bodies are typed against
    // the expected return type directly, with 'return' forbidden inside.
    final TypeInferenceContext context =
            blockBody
            ? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
            : newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
    if (function instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
        getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
    }
    else {
        typeInferrerVisitor.getType(bodyExpression, context);
    }
    List<JetElement> unreachableElements = Lists.newArrayList();
    flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
    // This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
    final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
    // TODO : (return 1) || (return 2) -- only || and right of it is unreachable
    // TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
    // though it'd better be reported more specifically
    for (JetElement element : rootUnreachableElements) {
        trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
    }
    List<JetExpression> returnedExpressions = Lists.newArrayList();
    flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
    boolean nothingReturned = returnedExpressions.isEmpty();
    returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
    // A non-Unit function whose only "return" was the empty-body placeholder must return a value.
    if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
        trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
    }
    for (JetExpression returnedExpression : returnedExpressions) {
        returnedExpression.accept(new JetVisitorVoid() {
            @Override
            public void visitReturnExpression(JetReturnExpression expression) {
                if (!blockBody) {
                    trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
                }
            }
            @Override
            public void visitExpression(JetExpression expression) {
                // A block-body tail expression that is not 'return' and not Nothing-typed
                // cannot serve as the function's result.
                if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
                    //TODO move to pseudocode
                    JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
                    if (type == null || !JetStandardClasses.isNothing(type)) {
                        trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
                    }
                }
            }
        });
    }
}
/**
 * Types a block expression: an empty block is Unit; otherwise a fresh
 * writable scope is created for the block's local declarations and the
 * statement-by-statement overload computes the result.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
    List<JetElement> block = expression.getStatements();
    if (block.isEmpty()) {
        return checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
    WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of all returned
 * expressions' types; a function that returns nothing gets the Nothing type.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
    Collection<JetType> types = typeMap.values();
    return types.isEmpty()
            ? JetStandardClasses.getNothingType()
            : semanticServices.getTypeChecker().commonSupertype(types);
}
/**
 * Types the whole body (for trace side effects), then maps every returned
 * expression to its recorded type and every Unit-returning exit point to the
 * Unit type. Expressions whose type was never recorded are omitted.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
        @NotNull BindingTrace trace,
        JetScope outerScope,
        JetDeclarationWithBody function,
        FunctionDescriptor functionDescriptor) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    // Typing the body populates EXPRESSION_TYPE entries consulted below.
    typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
    Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
    Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
    flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
    Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
    for (JetExpression returnedExpression : returnedExpressions) {
        JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
        // A returned expression is used as a value, not as a statement.
        trace.record(STATEMENT, returnedExpression, false);
        if (cachedType != null) {
            typeMap.put(returnedExpression, cachedType);
        }
    }
    for (JetElement jetElement : elementsReturningUnit) {
        typeMap.put(jetElement, JetStandardClasses.getUnitType());
    }
    return typeMap;
}
/**
 * Types the statements of a block in order, threading data-flow information
 * from each statement to the next. The last statement is checked against the
 * block's expected type (optionally coercing to Unit); intermediate
 * statements are typed with no expected type. Returns the last statement's
 * type, or Unit for an empty statement list.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
// Only the final statement is checked against the block's expected type.
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
// Try with Unit expected first; on a type mismatch retry with no
// expected type, and commit the diagnostics of whichever attempt
// succeeded (or the Unit attempt's if both mismatched).
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Both attempts mismatched: report the Unit-expecting attempt's errors.
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Carry data-flow facts established by this statement into the next one.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Wraps {@code trace} so that a type-mismatch error reported against exactly
 * {@code expressionToWatch} sets {@code mismatchFound[0]} in addition to
 * being delivered to the underlying error handler.
 */
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
// Identity comparison: only the watched expression flips the flag.
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
// Refines a type using data-flow facts: when the expression names a variable,
// returns the variable's data-flow "out" type instead of the declared one.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression != null) {
        VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
        if (variable != null) {
            return context.dataFlowInfo.getOutType(variable);
        }
    }
    return initialType;
}
/**
 * Reports a type mismatch when {@code expressionType} is known, an expected
 * type is set, and the former is not a subtype of the latter. Always returns
 * the original expression type unchanged.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE) {
        return expressionType;
    }
    if (!semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like {@link #checkType}, but before reporting a mismatch consults
 * data-flow info: if some data-flow-known type of the variable behind
 * {@code expression} satisfies the expected type, an automatic cast is
 * recorded instead and the expected type is returned.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Reversed so the most recently established data-flow facts are tried first.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType : possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the data-flow "out" (e.g. not-null) type of the variable.
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast for {@code expression}, or reports an error when
 * the variable is mutable (its value could change after the flow check).
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
/**
 * Checks (and possibly enriches) each argument type against the matching
 * expected type projection. When the lists are not aligned one-to-one with
 * the arguments, the input types are returned untouched.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checkedTypes = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        TypeInferenceContext argumentContext = context.replaceExpectedType(expectedArgumentTypes.get(i).getType());
        checkedTypes.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), argumentContext));
    }
    return checkedTypes;
}
/**
 * Resolves the variable referenced by a simple-name expression, looking
 * through a ':' type-annotation on the left-hand side. Returns null when the
 * expression does not (resolvably) name a variable.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typed = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typed.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            // "e : T" refers to the same variable as "e".
            return getVariableDescriptorFromSimpleName(typed.getLeft(), context);
        }
    }
    if (receiverExpression instanceof JetSimpleNameExpression) {
        DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, (JetSimpleNameExpression) receiverExpression);
        if (target instanceof VariableDescriptor) {
            return (VariableDescriptor) target;
        }
    }
    return null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Controls how a block's last expression is treated when the block's
// expected type is Unit: either coerced to Unit or type-checked as usual.
private enum CoercionStrategy {
NO_COERCION,
COERCION_TO_UNIT
}
/**
 * Factory for {@link TypeInferenceContext}; call this instead of the
 * (deprecated) constructor.
 */
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
/**
 * Immutable bundle of everything needed to type-check one expression: the
 * trace to report into, resolvers, the lexical scope, data-flow facts, and
 * the expected expression/return types. The replace* methods derive modified
 * copies, returning {@code this} when nothing would change (where checked).
 */
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
public final DataFlowInfo dataFlowInfo;
// Expected type of the expression being checked; NO_EXPECTED_TYPE when unconstrained.
public final JetType expectedType;
// Expected type for 'return' expressions; FORBIDDEN disallows returns entirely.
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
// null is normalized to NO_EXPECTED_TYPE.
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
// null is normalized to NO_EXPECTED_TYPE.
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow facts produced by the most recently typed expression; null when
// that expression did not refine data-flow information.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Central entry point for typing an expression: consults the PROCESSED
 * cache, visits the expression, materializes deferred types, records the
 * resulting type and resolution scope in the trace, and marks code after a
 * non-null Nothing-typed expression as unreachable. Returns null when the
 * type could not be inferred.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
// Record the scope only on the first pass; then mark the expression as done.
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
// Convenience overload: types the expression in the given scope with updated
// data-flow info while preserving the context's expected types.
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
// Clears per-expression state between block statements so stale data-flow
// info does not leak into the next statement.
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports an "unreachable code" error on every root expression dominated by
 * {@code expression} (which was found to have non-null type Nothing).
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple-name reference. A '$field' reference is resolved against
 * the backing-field properties of the scope; all other names are delegated
 * to the selector-resolution machinery. (A large block of superseded,
 * commented-out resolution code was removed; see lookupNamespaceOrClassObject
 * for the surviving logic.)
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    boolean isFieldReference = expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER;
    if (!isFieldReference || referencedName == null) {
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
    }
    PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
    if (property == null) {
        context.trace.getErrorHandler().unresolvedReference(expression);
        return null;
    }
    context.trace.record(REFERENCE_TARGET, expression, property);
    return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
/**
 * Resolves a name as a classifier's class object, or falls back to
 * {@link #furtherNameLookup} (namespaces etc.). Returns an error type when
 * the classifier exists but has no usable class object, and null when the
 * name could not be resolved at all.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
    ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
    if (classifier == null) {
        JetType[] result = new JetType[1];
        return furtherNameLookup(expression, referencedName, result, context)
               ? context.services.checkEnrichedType(result[0], expression, context)
               : null;
    }
    JetType classObjectType = classifier.getClassObjectType();
    JetType result = null;
    if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
        result = classObjectType;
    }
    else {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
    }
    context.trace.record(REFERENCE_TARGET, expression, classifier);
    if (result == null) {
        return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
    }
    return context.services.checkEnrichedType(result, expression, context);
}
// Whether names are being resolved in a namespace position; subclasses
// override this to return true where namespace references are legal.
public boolean isNamespacePosition() {
return false;
}
/**
 * Hook for resolving names that are neither variables nor classifiers. The
 * base implementation only detects namespaces — and using a namespace name
 * as an expression is an error. Returns true when the name was handled.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
/**
 * Resolves {@code referencedName} as a namespace in the current scope,
 * recording the reference target on success; returns null otherwise.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
/**
 * Types an object literal. The object's descriptor is produced by a
 * TopDownAnalyzer pass; a record handler on the trace intercepts the moment
 * the object declaration is bound to its descriptor and captures a deferred
 * default type, caching it eagerly so recursive references resolve.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
if (declaration == expression.getObjectDeclaration()) {
// Deferred: the descriptor's default type may not be computable yet.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Watch every declaration->descriptor slice for the object declaration.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal. Resolves the receiver and parameter types
 * (falling back to the expected function type's components where omitted),
 * builds and initializes an anonymous FunctionDescriptor, infers the return
 * type from the declared return-type reference or from the body, and
 * produces the resulting function type.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No declared receiver: inherit the enclosing scope's 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
// The expected function type can supply omitted parameter types.
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
if (valueParameters != null) {
type = valueParameters.get(i).getOutType();
}
else {
// Neither declared nor inferable from the expected type.
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
// Infer the return type from the body, coercing the last expression to Unit when expected.
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
if (functionTypeExpected) {
// A Unit-returning function type is expected: any body result is acceptable as Unit.
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/**
 * A parenthesized expression has the type of its inner expression, checked
 * against this context's expected type.
 */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal constant by resolving its compile-time value against the
 * expected type. On success the value is recorded in the trace; on failure
 * the resolver's error is reported and a per-kind default type is returned
 * so checking can continue.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> value;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        value = resolver.getIntegerValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        value = resolver.getFloatValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        value = resolver.getBooleanValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        value = resolver.getCharValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        value = resolver.getRawStringValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        value = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (!(value instanceof ErrorValue)) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
        return context.services.checkType(value.getType(standardLibrary), expression, context);
    }
    context.trace.getErrorHandler().genericError(node, ((ErrorValue) value).getMessage());
    return getDefaultType(elementType);
}
/**
 * Fallback type for a constant whose compile-time value could not be
 * resolved, so that type checking can proceed after the error is reported.
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
return semanticServices.getStandardLibrary().getIntType();
}
else if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
return semanticServices.getStandardLibrary().getDoubleType();
}
else if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
return semanticServices.getStandardLibrary().getBooleanType();
}
else if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
return semanticServices.getStandardLibrary().getCharType();
}
else if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
return semanticServices.getStandardLibrary().getStringType();
}
else if (constantType == JetNodeTypes.NULL) {
return JetStandardClasses.getNullableNothingType();
}
else {
throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
}
/**
 * A 'throw' expression never completes normally, so its type is Nothing.
 * The thrown expression is still typed for its side effects on the trace.
 * (Fix: removed the unused local that captured the thrown expression's type.)
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // Result intentionally ignored; typing records EXPRESSION_TYPE in the trace.
        getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
        // TODO : check that it inherits Throwable
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types a 'return' expression: reports an error when returns are forbidden
 * in this context, checks a returned value against the expected return type,
 * and flags a bare 'return' in a function that must return a non-Unit value.
 * The 'return' expression itself has type Nothing.
 * (Fix: removed the unused local 'returnedType' that was assigned Unit and
 * never read.)
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Check the returned value against the function's declared return type.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// 'break' transfers control and never produces a value, hence type Nothing.
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// 'continue' transfers control and never produces a value, hence type Nothing.
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types "e : T", "e as T", and "e as? T". The ':' form checks that the
 * left-hand side already has the target type; the cast forms warn when the
 * cast is redundant or can never succeed, with "as?" yielding a nullable
 * result type.
 */
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
    IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
    JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    JetTypeReference rightTypeRef = expression.getRight();
    JetType result = null;
    if (rightTypeRef != null) {
        JetType targetType = context.typeResolver.resolveType(context.scope, rightTypeRef);
        if (operationType == JetTokens.COLON) {
            // "e : T" — the actual type must already be a subtype of T.
            if (actualType != null && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
                context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
            }
            result = targetType;
        }
        else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
            checkForCastImpossibility(expression, actualType, targetType, context);
            // "as?" returns null on failure, so its result type is nullable.
            result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
        }
        else {
            context.trace.getErrorHandler().genericError(expression.getOperationSign().getNode(), "Unsupported binary operation");
        }
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Warns about useless or impossible casts: an upcast needs no 'as' at all,
 * and a cast between unrelated types can never succeed.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    boolean downcastPossible = typeChecker.isSubtypeOf(targetType, actualType);
    boolean alreadyTargetType = typeChecker.isSubtypeOf(actualType, targetType);
    if (downcastPossible) {
        if (alreadyTargetType) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (alreadyTargetType) {
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
/**
 * Types a tuple literal from its entries' types. When a tuple type is
 * expected, entries are re-checked against the expected component types,
 * which may enrich them.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
    if (tupleExpected) {
        List<JetType> enriched = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        // checkArgumentTypes returns the same list instance when nothing changed.
        if (enriched != entryTypes) {
            return JetStandardClasses.getTupleType(enriched);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of a 'this' expression.
 * Handles three forms: plain 'this' (receiver type from the scope), labeled 'this@label'
 * (resolved against the class or function literal the label names), and super-qualified
 * 'this&lt;Supertype&gt;' (yields the matching declared supertype with type arguments
 * substituted). Returns null when 'this' cannot be resolved; errors go to the trace.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
// Labeled form: look up what declarations this label can refer to in the current scope.
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Unique label target: 'this' is the class instance or the function's receiver.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// No declaration carries this label in scope; fall back to control-flow data.
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver on the literal: Nothing is used as a marker, reported as an error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
// More than one declaration matches the label.
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': take the implicit receiver type from the enclosing scope.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
// Nothing marks "no receiver available" (see the function-literal branch above).
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
// Super-qualified form: find the declared supertype matching the qualifier and
// substitute type arguments coming from the receiver type.
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
/**
 * A block's type is the type returned by its body (no coercion applied here),
 * checked against the expected type.
 */
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
    JetType blockType = context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context);
    return context.services.checkType(blockType, expression, context);
}
/**
 * Infers the type of a 'when' expression as the common supertype of all entry body types.
 * The expected type is not propagated to the subject or the conditions — only to the
 * entry bodies. Returns null when no entry produced a type.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
// A subject-less 'when' gets an error type as its subject type.
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: variables bound by the pattern may extend the entry body's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple comma-separated conditions: bindings are not propagated (each condition
// gets a throwaway scope); data flow facts are OR-ed across the conditions, then
// AND-ed with the incoming info.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
// The body is checked against the expected type of the whole 'when' expression.
JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not be an error
}
return null;
}
/**
 * Checks one when-entry condition against the subject and returns the data flow info
 * that holds when the condition matches. Pattern bindings are added to scopeToExtend.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    // One-element array lets the anonymous visitor below write back its result.
    final DataFlowInfo[] resultingInfo = new DataFlowInfo[] { context.dataFlowInfo };
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
            if (callSuffixExpression == null) return;
            // The call is resolved with the when-subject as receiver and must yield Boolean.
            JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);
            ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
            context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression == null) return;
            // 'in'/'!in' conditions require an explicit subject.
            assert subjectExpression != null;
            checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            JetPattern pattern = condition.getPattern();
            if (pattern == null) return;
            // Pattern checks refine data flow info and may bind new variables.
            resultingInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return resultingInfo[0];
}
/**
 * Checks a pattern against the subject type and computes the data flow info that holds
 * when the pattern matches. Variables bound by the pattern are added to scopeToExtend.
 * subjectVariables are the variables the subject is known as, used for is-instance facts.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// 'is Type': record that the subject variables are instances of the type.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: arity must match the subject's tuple type; entries are checked
// component-wise and their data flow facts are AND-ed together.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: the argument list is matched against the decomposer's return type.
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Constant/expression pattern: only type compatibility with the subject is checked here.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// Binding pattern: introduce a variable for the subject, optionally with a declared
// type (which must be a supertype of the subject), then check the guard condition.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The guard sees the newly bound variable as an additional subject variable.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
// Incompatible when the two types have no non-empty intersection.
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType); // TODO : message
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Infers the type of a try/catch/finally expression. Catch bodies are always typechecked;
 * when a finally section is present its type supersedes the catch types. The result is
 * the common supertype of the contributing branch types, or null when none produced one.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    List<JetType> branchTypes = new ArrayList<JetType>();
    // Typecheck every catch body in its own scope containing the catch parameter.
    for (JetCatchClause catchClause : expression.getCatchClauses()) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        if (catchParameter == null) continue;
        VariableDescriptor catchVariable = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchBody == null) continue;
        WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
        catchScope.addVariableDescriptor(catchVariable);
        JetType catchType = getType(catchBody, context.replaceScope(catchScope));
        if (catchType != null) {
            branchTypes.add(catchType);
        }
    }
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    if (finallyBlock != null) {
        // The catch types are only needed to typecheck the bodies above; with a finally
        // section present, the finally expression's type takes their place.
        branchTypes.clear();
        JetType finallyType = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }
    JetType tryType = getType(expression.getTryBlock(), context.replaceScope(context.scope));
    if (tryType != null) {
        branchTypes.add(tryType);
    }
    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Infers the type of an if-expression. One-armed ifs have type Unit; two-armed ifs yield
 * the common supertype of the branch types. When exactly one branch always jumps (has
 * type Nothing), the other branch's condition facts survive the if via resultDataFlowInfo.
 * Note the deliberate asymmetry: branch bodies of one-armed ifs are checked without the
 * caller's expected type, while two-armed branches are checked directly against it.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// The then-branch may see variables bound by 'is' patterns in the condition.
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
// One-armed if: the expression itself is Unit, checked against the expected type.
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// The then-branch never completes, so after the if the condition was false.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// The else-branch never completes, so after the if the condition was true.
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Two-armed if: each branch is checked against the caller's expected type directly.
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
// Propagate condition facts past the if when exactly one branch always jumps.
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Computes the data flow facts that hold when 'condition' evaluates to 'conditionValue'.
 * Understands is-patterns, '&amp;&amp;' and '||' (with the appropriate AND/OR composition),
 * (in)equality comparisons against null or non-nullable values, '!' negation, and
 * parentheses. When scopeToExtend is non-null, variables bound by 'is' patterns are
 * added to it. Never returns null; falls back to the incoming data flow info.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// 'x is P' yields facts only when the expression's value matches the sought
// condition value (accounting for negated '!is').
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
// Facts and bound variables were precomputed when the is-expression was typechecked.
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// When 'a && b' is known true (or 'a || b' known false) both operands' facts
// hold, so bindings may extend the scope; otherwise bindings are suppressed.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that the simple-name operand (if any) ends up on the left.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records a nullability fact for the variable; 'equalsToNull' says whether the
// comparison, combined with the sought condition value, proves the variable is null.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// '!a': recurse with the sought condition value flipped.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// '(a)': transparent for data flow extraction.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
// patternsToDataFlowInfo may have no entry for a pattern; fall back to the incoming info.
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Typechecks a loop/if condition in the given scope; it must be of type Boolean.
 * A null condition or an unresolvable condition type is silently accepted.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(condition, context.replaceScope(scope));
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Typechecks a while-loop. The loop itself has type Unit; the expected type only
 * matters for the final checkType. After a loop with no 'break', the condition is
 * known to be false, which is published via resultDataFlowInfo.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        // Inside the body, facts established by the condition being true are in effect.
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyInfo = condition == null
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, true, bodyScope, context);
        getTypeWithNewScopeAndDataFlowInfo(bodyScope, body, bodyInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Typechecks a do-while loop. Declarations made in the body remain visible to the
 * condition, so the body is evaluated into a writable scope that the condition then
 * uses. Like all loops, the expression has type Unit.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (function.getFunctionLiteral().hasParameterSpecification()) {
            // A literal with declared parameters is just an ordinary expression here.
            getType(body, context.replaceScope(context.scope));
        } else {
            // A parameterless literal is treated as the loop body itself; its declarations
            // stay visible in the condition scope.
            WritableScope bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = bodyScope;
            context.services.getBlockReturnedTypeWithWritableScope(bodyScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
            context.trace.record(BindingContext.BLOCK, function);
        }
    }
    else if (body != null) {
        WritableScope bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = bodyScope;
        context.services.getBlockReturnedTypeWithWritableScope(bodyScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Creates a fresh writable scope nested in the given one, owned by the same
// containing declaration and reporting through the trace's error handler.
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Typechecks a for-loop. The element type comes from the range's iterator()/next()
 * convention; the loop parameter either declares a compatible type or has it inferred.
 * A for-loop is a statement, so the expression's own type is Unit.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression loopRange = expression.getLoopRange();
    JetType elementType = null;
    if (loopRange != null) {
        JetType loopRangeType = getType(loopRange, context.replaceScope(context.scope));
        if (loopRangeType != null) {
            elementType = checkIterableConvention(loopRangeType, loopRange.getNode(), context);
        }
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    JetParameter loopParameter = expression.getLoopParameter();
    if (loopParameter != null) {
        JetTypeReference typeReference = loopParameter.getTypeReference();
        VariableDescriptor parameterDescriptor;
        if (typeReference != null) {
            // Explicitly typed parameter: the declared type must accept the element type.
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType declaredType = parameterDescriptor.getOutType();
            if (elementType != null &&
                declaredType != null &&
                !semanticServices.getTypeChecker().isSubtypeOf(elementType, declaredType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + elementType + " but the parameter is declared to be " + declaredType);
            }
        }
        else {
            // Untyped parameter: infer the type from the range, falling back to an error type.
            JetType inferredType = elementType == null ? ErrorUtils.createErrorType("Error") : elementType;
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, inferredType);
        }
        loopScope.addVariableDescriptor(parameterDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(body, context.replaceScope(loopScope));
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Checks that {@code type} satisfies the for-loop convention: an iterator() member whose
 * result provides hasNext (either as a function or as a property) and next().
 *
 * @param type           the type of the loop range expression
 * @param reportErrorsOn AST node to attach diagnostics to
 * @return the element type (the return type of iterator().next()), or null when the
 *         convention is not satisfied; errors are reported on the trace
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
if (iteratorResolutionResult.isSuccess()) {
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
// Message fix: the property is 'hasNext' (no parentheses), consistent with the
// other diagnostics below and with the member lookup in checkHasNextPropertySupport.
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
return nextResolutionResult.getDescriptor().getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
/**
 * Returns false only when the iterator type has no hasNext() function at all.
 * An ambiguous resolution or a non-Boolean return type is reported as an error
 * but still counts as "supported" (returns true).
 */
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
    if (resolutionResult.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
        return true;
    }
    if (resolutionResult.isNothing()) {
        return false;
    }
    JetType returnType = resolutionResult.getDescriptor().getReturnType();
    if (!isBoolean(returnType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
    }
    return true;
}
/**
 * Returns false only when the iterator type declares no 'hasNext' member property.
 * An unreadable property or a non-Boolean type is reported as an error but still
 * counts as "supported" (returns true).
 */
private boolean checkHasNextPropertySupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
    if (hasNextProperty == null) {
        return false;
    }
    JetType propertyType = hasNextProperty.getOutType();
    if (propertyType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(propertyType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
    }
    return true;
}
/**
 * '#'-qualified expressions are not supported by the type inferrer; report and bail out.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Infers the type of a qualified expression ('a.b', 'a?.b', 'a?').
 * Resolution strategy: first try a "clean" resolution (no autocasts) on a temporary
 * trace; if that fails and the receiver is a simple variable, retry with each
 * data-flow-implied possible type of the variable, committing the first temporary
 * trace that succeeds. Exactly one temporary trace is committed into context.trace.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
// Receiver is resolved with the namespace-aware visitor: 'a' may denote a namespace or class object
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
// Use an error type as the receiver so selector resolution can still report useful errors
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
// '?.' makes the result nullable, unless it already is or is Unit
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
// Clean resolution succeeded: publish its bindings
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Clean resolution failed: try autocast candidates for a simple-variable receiver
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
// Reversed so the most recently established (most specific) type is tried first
// NOTE(review): presumably — ordering semantics of getPossibleTypes not visible here
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
// Successful autocast: record it and commit this candidate's trace
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard the failed candidate's bindings; start a fresh trace for the next one
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Nothing worked: commit the clean trace so its diagnostics are reported
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// 'a?' - the selector must be Boolean; the whole expression has type 'a's type, nullable
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Null-safety check: '.' on a nullable receiver should be flagged
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function descriptor a selector expression ultimately calls by walking down
 * through callee, array, left-operand and receiver subexpressions until a reference
 * expression with a recorded function target is reached. Never returns null: when
 * nothing is found, an error function descriptor is returned instead.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    final FunctionDescriptor[] found = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // f(args) -> descend into f
            JetExpression callee = callExpression.getCalleeExpression();
            if (callee != null) {
                callee.accept(this);
            }
        }

        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // Terminal case: take the previously resolved target if it is a function
            DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
            if (target instanceof FunctionDescriptor) {
                found[0] = (FunctionDescriptor) target;
            }
        }

        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            expression.getArrayExpression().accept(this);
        }

        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            expression.getLeft().accept(this);
        }

        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            expression.getReceiverExpression().accept(this);
        }

        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (found[0] == null) {
        found[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return found[0];
}
/**
 * Resolves the selector of a qualified expression against the given receiver type and
 * returns the resulting type, or null when the selector cannot be resolved or is of an
 * unsupported kind. Handles calls, simple names (properties, then namespaces/class
 * objects) and nested qualified selectors.
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
if (selectorExpression instanceof JetCallExpression) {
// receiver.f(args): delegate entirely to the call resolver
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// Try as a property first, on a temporary trace so a failed attempt leaves no bindings
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Not a property: try a namespace or class object in the receiver's member scope
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
// receiver.(a.b): resolve 'a' against the receiver, then 'b' against the result
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
/**
 * Infers the type of a plain (receiver-less) call expression via the call resolver
 * and validates it against the expected type.
 */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    JetType resolvedType = context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
/**
 * Infers the type of an 'is' expression: the subject's type is checked against the
 * pattern, the data-flow info and bound variables implied by a match are recorded for
 * later use (e.g. in 'when' branches), and the expression itself is always Boolean.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType subjectType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (pattern != null && subjectType != null) {
        WritableScopeImpl extendedScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        VariableDescriptor subjectVariable = context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context);
        DataFlowInfo matchedFlowInfo = checkPatternType(pattern, subjectType, extendedScope, context, subjectVariable);
        patternsToDataFlowInfo.put(pattern, matchedFlowInfo);
        patternsToBoundVariableLists.put(pattern, extendedScope.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Infers the type of a unary expression. Labels pass through to the base expression;
 * other operators are resolved via the operator-convention name (inc/dec/plus/minus/not).
 * For '++'/'--' the resolved function must return either Unit or a subtype of the
 * receiver's type; in the latter case the expression counts as a variable reassignment.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
// A label does not change the type of the labeled expression
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
// Resolve 'base.name()' by convention
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
// inc()/dec() returning Unit: the whole expression is Unit (no value produced)
result = JetStandardClasses.getUnitType();
}
else {
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
// inc()/dec() produces a new value assigned back to the variable
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
/**
 * Infers the type of a binary expression by dispatching on the operation token:
 * named infix calls, arithmetic operator conventions, assignments, comparisons
 * (via compareTo), equality (via equals), identity, 'in'/'!in' (via contains),
 * short-circuit '&&'/'||' (with data-flow propagation into the right operand),
 * and the elvis operator '?:'.
 *
 * Fixes relative to the previous revision:
 *  - removed a dead assignment to 'result' in the 'in' branch (the value was
 *    discarded by the immediately following 'return null');
 *  - corrected the grammar of the elvis warning message ("is always returns"
 *    -> "always returns").
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Named infix call: 'a foo b' resolves as a.foo(b)
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Operator convention: 'a + b' resolves as a.plus(b), etc.
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' resolve via compareTo, which must return Int
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    // '==' requires the exact signature equals(Any?) on the left operand's type
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, name,
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Equality is Boolean even when resolution above failed
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // TODO : report a missing-right-argument error here instead of silently bailing out
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // For '&&' the right operand sees the left operand's smart casts; for '||' it does not
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                // Result nullability follows the right operand: 'a ?: b' is null only if b can be
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Type-checks 'left in right' / 'left !in right' by resolving right.contains(left)
 * via the operator convention and verifying the result type is Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String name = "contains";
    JetType rangeType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor contains = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            name, rangeType, context.expectedType);
    ensureBooleanResult(operationSign, name, contains == null ? null : contains.getReturnType(), context);
}
/**
 * Reports an error when the operand types of an (in)equality have no common subtype,
 * i.e. the comparison can never relate two values of both types.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression right = expression.getRight();
    JetType leftType = getType(expression.getLeft(), context.replaceScope(context.scope));
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(right, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    HashSet<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
// Assignments are statements, not expressions. These defaults report an error;
// TypeInferrerVisitorWithWritableScope overrides both to handle real assignments.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Shared error report for assignments appearing in expression position.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
/**
 * Verifies that an operator-convention function named {@code name} returns Boolean.
 * A null result type is tolerated (resolution already failed elsewhere).
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}

/**
 * Same as {@link #ensureBooleanResult} but with a caller-supplied subject for the
 * error message. Returns false only when the type is known and is not Boolean.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True iff the given type is convertible to the standard library's Boolean type. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Infers the type of an array-access read 'a[i, ...]' by resolving it as the
 * convention call a.get(i, ...). Yields null when the array expression has no
 * type or no matching 'get' is found.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType arrayType = getType(expression.getArrayExpression(), context.replaceScope(context.scope));
    if (arrayType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            arrayType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves 'left op right' as the convention call left.name(right) and returns the
 * resolved function's return type, or null when resolution fails.
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    JetType receiverType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor descriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            receiverType,
            context.expectedType);
    return descriptor == null ? null : descriptor.getReturnType();
}
// Declarations may not appear in expression position (see the writable-scope
// visitor subclass, which does allow local declarations).
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
// 'namespace' has no value here; TypeInferrerVisitorWithNamespaces overrides this.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
/**
 * Infers the type of a string template (always String) and, when every entry is a
 * compile-time constant, records the concatenated value as a compile-time String.
 * value[0] is used as a sentinel: it is set to OUT_OF_RANGE as soon as any entry
 * prevents compile-time evaluation (an interpolated expression or a bad escape).
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
// Still type-check the interpolated expression for diagnostics
getType(entryExpression, context.replaceScope(context.scope));
}
// Interpolation makes the template non-constant
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
// All entries were literal/escape text: the template is a compile-time String constant
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/** Fallback for any element without a dedicated visit method: report as unsupported. */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    String description = "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), description);
    return null;
}
}
/**
 * A type-inferrer visitor for positions where namespace names are legal: the root
 * namespace expression gets a type, and simple-name lookup additionally tries
 * namespaces.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Try resolving the name as a namespace; report success via the out-parameter
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
/**
 * A type-inferrer visitor for block bodies: declarations (objects, properties,
 * functions) are legal and are added to the given writable scope as they are seen,
 * and assignments ('=', '+=', ...) are handled rather than reported as errors.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// Scope that accumulates local declarations as statements are processed in order.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// Local object declaration: analyze it top-down and expose it as a property in scope.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// Local variable declaration: reject receiver/getter/setter forms, type-check the
// initializer against the declared type, then add the variable to the scope.
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
// The declared type becomes the expected type of the initializer
JetType outType = propertyDescriptor.getOutType();
JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
// Local function: resolve, add to scope, and check its body's return type.
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
// 'a += b': first try a.plusAssign(b) on a temporary trace; if that fails, fall back
// to 'a = a.plus(b)' (a reassignment). Only the successful attempt's bindings are kept.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
else {
temporaryBindingTrace.commit();
}
return null;
}
// 'a = b': array-access targets become a.set(...) calls; otherwise the right-hand
// side is checked against the left-hand side's type. Assignments have no value.
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
if (right != null) {
JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
// 'a[i] = v' resolves as the convention call a.set(i, v).
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
if (receiverType == null) return null;
//
Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
// // TODO : nasty hack: effort is duplicated
// context.services.callResolver.resolveCallWithGivenName(
// scope,
// call,
// arrayAccessExpression,
// "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
call,
arrayAccessExpression,
"set", receiverType, NO_EXPECTED_TYPE);
if (functionDescriptor == null) return null;
context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
MergeMethods
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel type marking positions where an expected type must never be consulted.
// Every method throws: any accidental use of this sentinel fails fast.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel JetType meaning "no expectation": callers pass it when an expression's type
// should be inferred freely. Checked by identity (== NO_EXPECTED_TYPE) throughout this
// file; all JetType methods intentionally throw so it can never leak into real type math.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Maps unary operator tokens to the names of their convention functions (a++ -> a.inc(), !a -> a.not(), ...).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Maps binary operator tokens to their convention function names (a + b -> a.plus(b), a..b -> a.rangeTo(b), ...).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Comparison operators (<, >, <=, >=): resolved via compareTo rather than a per-operator function.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// Equality operators (==, !=): resolved via equals.
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// Containment operators (in, !in): resolved via contains.
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Maps compound-assignment tokens to their dedicated convention names (a += b -> a.plusAssign(b), ...).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Maps each compound-assignment token to its plain binary counterpart (+= -> +), used as
// the fallback when no *Assign convention function is available.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
// Central semantic facilities (type checker, descriptor resolvers) shared by all passes.
private final JetSemanticServices semanticServices;
// Supplies control-flow facts: unreachable expressions, return expressions, dominated expressions.
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern caches populated during pattern matching — not read in the visible portion of this file.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * Creates a type inferrer.
 *
 * @param flowInformationProvider source of control-flow facts for the analyzed code
 * @param semanticServices        shared semantic facilities (type checker, resolvers)
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.flowInformationProvider = flowInformationProvider;
    this.semanticServices = semanticServices;
}
/** Creates a {@link Services} facade whose results are recorded into the given trace. */
public Services getServices(@NotNull BindingTrace trace) {
    final Services services = new Services(trace);
    return services;
}
public class Services {
// Trace all diagnostics and binding records produced by this facade are written to.
private final BindingTrace trace;
// Evaluates literal expressions to compile-time constants.
private final CompileTimeConstantResolver compileTimeConstantResolver;
// Resolves function/constructor calls against candidate descriptors.
private final CallResolver callResolver;
// Default visitor: infers expression types without namespace awareness.
private final TypeInferrerVisitor typeInferrerVisitor;
// Variant that additionally allows namespace names in expression position.
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Construct via JetTypeInferrer.getServices(trace) only.
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
/** Builds a visitor that may declare local variables into the given writable scope. */
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
    TypeInferrerVisitorWithWritableScope visitor = new TypeInferrerVisitorWithWritableScope(scope);
    return visitor;
}
/**
 * Infers the type of {@code expression}; never returns null — when inference fails,
 * an error type mentioning the expression text is substituted instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null
           ? inferred
           : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Infers the type of {@code expression} in {@code scope} against {@code expectedType},
 * or returns null when inference fails. The expected *return* type is FORBIDDEN here:
 * no 'return' handling is legal in this entry point.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    TypeInferenceContext context = newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN);
    return typeInferrerVisitor.getType(expression, context);
}
/** Like getType, but allows namespace names in expression position; no type expectation is imposed. */
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
    TypeInferenceContext context = newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    return typeInferrerVisitorWithNamespaces.getType(expression, context);
}
// Exposes the resolver used for function/constructor call resolution against this trace.
public CallResolver getCallResolver() {
return callResolver;
}
// Validates '.' vs '?.' against the receiver's nullability:
//  - '.' on a nullable receiver (that the callee does not accept as nullable) is an error;
//  - '?.' on a non-null receiver is a warning, and on a namespace an error.
// No-op when either the receiver type or the callee is unknown.
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
// Namespaces are never treated as nullable receivers.
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
// A callee with no receiver type, or a non-nullable receiver type, forbids nullable receivers.
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
/** Checks the function's body against its declared return type, starting from empty data-flow info. */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
    DataFlowInfo emptyInfo = DataFlowInfo.getEmpty();
    checkFunctionReturnType(outerScope, function, functionDescriptor, emptyInfo);
}
/**
 * Checks the function body against the declared return type of {@code functionDescriptor},
 * after building the function's inner scope (parameters, receiver, type parameters).
 * An expression-bodied function with no declared return type has its type inferred,
 * so no expectation is imposed in that case.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Inferred return type: nothing to check the body against.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
}
/** Checks the body against {@code expectedReturnType} with empty data-flow info and no last-expression coercion. */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
    DataFlowInfo emptyInfo = DataFlowInfo.getEmpty();
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, emptyInfo, CoercionStrategy.NO_COERCION);
}
// Core return-type check: infers the body's type, reports unreachable code, and validates
// every returned expression against the expected return type.
// For a block body the *return* expectation carries expectedReturnType and the expression
// expectation is free; for an expression body it is the other way around and 'return' is illegal.
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
// Function literal bodies may coerce their last expression to Unit.
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// A non-Unit function whose only "returned expression" was the empty body must return a value.
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A block-bodied non-Unit function may only "fall off the end" via a Nothing-typed expression.
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Infers the type of a block expression: Unit for an empty block, otherwise the type of the
 * last statement, computed in a fresh writable scope nested inside {@code outerScope}.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    if (statements.isEmpty()) {
        // An empty block evaluates to Unit.
        return checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor owner = outerScope.getContainingDeclaration();
    WritableScope blockScope = new WritableScopeImpl(outerScope, owner, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return getBlockReturnedTypeWithWritableScope(blockScope, statements, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of all returned expression types;
 * a function that returns nothing anywhere gets the Nothing type.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
    Collection<JetType> returnedTypes = typeMap.values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
// Runs inference over the function body (with no type expectation), then maps every
// returned expression to its cached inferred type; elements known to return Unit are
// mapped to the Unit type. Expressions with no cached type are omitted.
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Populates EXPRESSION_TYPE entries read back below.
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// Returned expressions are values, not statements.
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
// Types each statement of a block in order, threading data-flow info forward, and returns
// the type of the last statement. Only the last statement sees the block's expected type;
// for COERCION_TO_UNIT the last statement is first typed against the expectation in a
// temporary trace and, on mismatch, retried without expectation — whichever attempt
// succeeds (or the first, if both fail) is committed to the real trace.
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
// Last statement with an expectation in force.
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// First attempt mismatched: retry with no expectation into a second temporary trace.
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Both attempts failed: keep the diagnostics from the expectation-bearing run.
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
// Non-final statements are typed without any expectation.
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Thread data-flow info from this statement into the next.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
// Wraps a trace so that a typeMismatch diagnostic on exactly expressionToWatch sets
// mismatchFound[0] = true; the diagnostic itself is still forwarded to the wrapped handler.
// Used by getBlockReturnedTypeWithWritableScope to detect a failed Unit coercion.
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
/**
 * If the expression is (or wraps) a simple variable reference, returns the variable's
 * data-flow-refined "out" type; otherwise returns {@code initialType} unchanged.
 */
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when a known expression type fails to satisfy a real expectation;
 * always passes the expression type through unchanged.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    boolean expectationInForce =
            expressionType != null && context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE;
    if (expectationInForce && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
// Like checkType, but before reporting a mismatch it consults data-flow info: if the
// expression is a variable whose possible (auto-cast) types or refined out-type satisfy
// the expectation, an AUTOCAST is recorded and the expected type is returned instead.
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
// Nothing to check, or the static type already satisfies the expectation.
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established flow types first.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the nullability-refined out-type.
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// Flow analysis justifies the cast; record it (or complain if the variable is mutable).
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast of {@code expression} to {@code type}, or reports an error
 * when the variable is mutable ('var') and the cast therefore cannot be trusted.
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
        return;
    }
    trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
}
/**
 * Checks each argument type against the corresponding expected projection via
 * checkEnrichedType. When the three lists disagree in size (or there are no arguments),
 * the input list is returned untouched.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    boolean sizesAgree = count != 0 && argumentTypes.size() == count && expectedArgumentTypes.size() == count;
    if (!sizesAgree) {
        return argumentTypes;
    }
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < argumentTypes.size(); i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checked;
}
/**
 * Extracts the variable a simple-name expression refers to, looking through
 * "expr : Type" annotations on the left-hand side; returns null for anything else.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typed = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typed.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            // 'x : T' — inspect the annotated expression itself.
            return getVariableDescriptorFromSimpleName(typed.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** How the last expression of a block is treated against the block's expected type. */
private enum CoercionStrategy {
    /** The last expression must satisfy the expected type as-is. */
    NO_COERCION,
    /** The last expression may be coerced to Unit (e.g. function-literal bodies). */
    COERCION_TO_UNIT
}
// Sole factory for TypeInferenceContext (its constructor is @Deprecated for direct use).
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of state threaded through type inference: the trace to record into,
 * the resolution scope, accumulated data-flow info, the expected type of the current
 * expression, and the expected return type of the enclosing function (either may be the
 * NO_EXPECTED_TYPE / FORBIDDEN sentinels). The replace* methods derive a new context,
 * returning {@code this} when nothing would change.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    public final DataFlowInfo dataFlowInfo;
    public final JetType expectedType;
    public final JetType expectedReturnType;
    @Deprecated // Only factory methods
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }
    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
        if (expectedType == newExpectedType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        if (expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
    }
    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        if (newTrace == trace) return this;
        return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        if (newExpectedType == expectedType && newTrace == trace) return this;
        // BUG FIX: this previously referenced an undefined 'preferBlock' variable and invoked
        // the 5-parameter constructor with six arguments (a compile error). Route through the
        // newContext factory like every other replace* method.
        return newContext(newTrace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }
    @NotNull
    public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
        if (newScope == scope) return this;
        return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
    }
    @NotNull
    public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
        if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recently visited statement; null when the statement
// contributed none. Cleared by resetResult() between statements.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
// Central inference entry point for a single expression. Returns the cached type when the
// expression was already PROCESSED; otherwise visits it, caches the result, records the
// resolution scope, and marks unreachable code dominated by a Nothing-typed expression.
// Recursion into lazy values is caught and reported rather than allowed to overflow.
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
if (result instanceof DeferredType) {
// Force deferred types before caching so callers never observe the wrapper.
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
// A non-null Nothing never completes normally: everything it dominates is dead.
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
/** Infers the expression's type in a different scope and data-flow state, keeping the context's expectations. */
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
    TypeInferenceContext rebased = newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rebased);
}
// Clears per-statement state so this visitor instance can be reused for the next statement.
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports every root expression dominated by {@code expression} as unreachable;
 * only roots are reported so sub-expressions are not flagged individually.
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple name. Backing-field references ({@code $name}) are resolved against the
 * scope's field namespace; all other names are delegated to getSelectorReturnType with a
 * null receiver. Returns null when the reference cannot be resolved.
 *
 * Cleanup: removed ~35 lines of commented-out legacy resolution code that duplicated
 * lookupNamespaceOrClassObject below; behavior is unchanged.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
    }
    return null;
}
// Resolves a simple name that is not a variable: first as a classifier (yielding its class
// object's type when usable in this position), then via furtherNameLookup (namespaces).
// Returns null when neither resolution applies.
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
// In namespace position any class object is acceptable; otherwise it must be a value.
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
// The reference target is recorded even when the class object is unusable.
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
// Overridden by the namespace-aware visitor; the plain visitor never treats an
// expression as being in namespace position.
public boolean isNamespacePosition() {
return false;
}
/**
 * Fallback name lookup used when no classifier matches. The base implementation
 * only checks for a namespace with this name; finding one in expression position
 * is an error, so {@code result} is never populated here.
 *
 * @return true if the lookup handled the name (even with an error), false otherwise
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    NamespaceType namespace = lookupNamespaceType(expression, referencedName, context);
    if (namespace == null) {
        return false;
    }
    // A namespace name alone is not an expression.
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
/**
 * Looks up a namespace by name in the current scope. Records the reference
 * target when found.
 *
 * @return the namespace's type, or null if no such namespace is in scope
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
/**
 * Infers the type of an object literal ({@code object { ... }}).
 *
 * Runs a top-down analysis of the object declaration through a trace adapter
 * that intercepts descriptor-recording: when the descriptor for this very
 * object declaration is recorded, its default type is captured (lazily, via
 * DeferredType, since the descriptor may not be fully initialized yet).
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
// Written by the record handler below; array cell used as a mutable capture.
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the declaration belonging to this expression.
if (declaration == expression.getObjectDeclaration()) {
// Defer the default-type computation until the descriptor is usable.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Record the expression type once; PROCESSED guards against double recording.
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Hook the handler on every declaration-to-descriptor slice.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Infers the type of a function literal ({@code { (p: T) -> ... }}).
 *
 * Builds an anonymous FunctionDescriptor, resolving parameter types either from
 * explicit annotations or from the expected function type (when one is known),
 * then infers the return type from the declared return type or the body. The
 * result is a function type over the receiver, parameters and return type.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No explicit receiver: inherit the enclosing scope's `this` type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
// When the expected type is a function type, its value parameters can supply
// types for parameters declared without annotations.
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
if (valueParameters != null) {
// Borrow the parameter type from the expected function type.
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// The effective receiver comes from the expected function type when no receiver
// was declared; otherwise the declared/inherited one is used.
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Explicit return type: resolve it and check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
// No explicit return type: use the expected one (if any) as the expected
// type while inferring the body's returned type.
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
// When Unit is expected as the return type, report the function type with Unit
// even if the body returns something else (coercion to Unit).
if (functionTypeExpected) {
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/**
 * A parenthesized expression has the type of its inner expression,
 * checked against the current expectations.
 */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Infers the type of a literal constant (integer, float, boolean, character,
 * raw string or null) by delegating to the compile-time constant resolver.
 * On a resolver error, the error is reported and a node-type-based default
 * type is returned; otherwise the constant value is recorded in the trace.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType constantKind = node.getElementType();
    String literalText = node.getText();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> constant;
    if (constantKind == JetNodeTypes.INTEGER_CONSTANT) {
        constant = resolver.getIntegerValue(literalText, context.expectedType);
    }
    else if (constantKind == JetNodeTypes.FLOAT_CONSTANT) {
        constant = resolver.getFloatValue(literalText, context.expectedType);
    }
    else if (constantKind == JetNodeTypes.BOOLEAN_CONSTANT) {
        constant = resolver.getBooleanValue(literalText, context.expectedType);
    }
    else if (constantKind == JetNodeTypes.CHARACTER_CONSTANT) {
        constant = resolver.getCharValue(literalText, context.expectedType);
    }
    else if (constantKind == JetNodeTypes.RAW_STRING_CONSTANT) {
        constant = resolver.getRawStringValue(literalText, context.expectedType);
    }
    else if (constantKind == JetNodeTypes.NULL) {
        constant = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (!(constant instanceof ErrorValue)) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
        return context.services.checkType(constant.getType(semanticServices.getStandardLibrary()), expression, context);
    }
    // Resolution failed (e.g. literal out of range): report and fall back.
    context.trace.getErrorHandler().genericError(node, ((ErrorValue) constant).getMessage());
    return getDefaultType(constantKind);
}
/**
 * Fallback type for a constant node kind, used when the compile-time constant
 * could not be resolved: Int, Double, Boolean, Char, String or Nothing?.
 *
 * @throws IllegalArgumentException for any other node kind
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
    if (constantType == JetNodeTypes.NULL) {
        return JetStandardClasses.getNullableNothingType();
    }
    JetStandardLibrary stdlib = semanticServices.getStandardLibrary();
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return stdlib.getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return stdlib.getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return stdlib.getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return stdlib.getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return stdlib.getStringType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
/**
 * A throw expression never completes normally, so its type is Nothing.
 * The thrown expression is still type-checked for its own diagnostics.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrown = expression.getThrownExpression();
    if (thrown != null) {
        // TODO : check that it inherits Throwable
        getType(thrown, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Type-checks a return expression. Returns Nothing as the expression's own
 * type (a return never completes normally), after checking the returned value
 * (if any) against the expected return type of the enclosing function.
 *
 * Fix: removed the dead local `returnedType`, which was initialized to the
 * Unit type and never read.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
if (context.expectedReturnType == FORBIDDEN) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
return null;
}
JetExpression returnedExpression = expression.getReturnedExpression();
if (returnedExpression != null) {
// Check the returned value against the function's expected return type.
getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
}
else {
// Bare `return` only type-checks when Unit (or nothing) is expected.
if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
}
}
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// A break never completes normally, so its type is Nothing.
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// A continue never completes normally, so its type is Nothing.
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * True for expressions whose type is "flexible" — numeric literals, whose
 * exact type can adapt to the expected type (e.g. 1 may be Int, Long, ...).
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) {
        return false;
    }
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
           || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Checks the left operand of `e : T`, `e as T` or `e as? T` against the target
 * type, writing diagnostics into the given temporary trace.
 *
 * For `:` the actual type must be a subtype of the target type; for `as`/`as?`
 * only cast plausibility is checked. Returns false when the check failed (so
 * the caller may retry with different expectations) or the operation is
 * unsupported.
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
if (actualType == null) return false;
JetSimpleNameExpression operationSign = expression.getOperationSign();
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.COLON) {
// `e : T` — a type ascription: the actual type must conform to T.
if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
return false;
}
return true;
}
else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
// Casts are allowed but may draw "never succeeds"/"not needed" warnings.
checkForCastImpossibility(expression, actualType, targetType, context);
return true;
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
return false;
}
}
/**
 * Emits warnings for useless or impossible casts:
 * - target not a subtype of actual, but actual a subtype of target: upcast,
 *   ':' would suffice;
 * - neither direction holds: the cast can never succeed (see JET-58);
 * - types mutually subtypes (i.e. equal): the cast is a no-op.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
JetTypeChecker typeChecker = semanticServices.getTypeChecker();
if (!typeChecker.isSubtypeOf(targetType, actualType)) {
if (typeChecker.isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
}
else {
// See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
}
}
else {
if (typeChecker.isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
}
}
}
/**
 * Infers the type of a tuple literal as the tuple of its entry types.
 * When a tuple type is expected, entry types are additionally checked
 * against the expected type arguments and may be enriched by that check.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>(entries.size());
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
        List<JetType> enriched = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        // Identity comparison: a different list means the check enriched the types.
        if (enriched != entryTypes) {
            return JetStandardClasses.getTupleType(enriched);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of a `this` expression, with optional label
 * (`this@label`) and optional supertype qualifier (`this<Supertype>`).
 *
 * Labeled `this` resolves through declarations bound to the label, falling
 * back to control-flow information for function literals; unlabeled `this`
 * uses the scope's `this` type. A supertype qualifier narrows the result to
 * the named (substituted) supertype.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Unique label target: class -> its default type, function -> its receiver.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver: `this` is undefined here; Nothing triggers the error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled `this`: take the scope's this-type and record the implicit target.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
// Find the declared supertype matching the qualifier and substitute
// this-type's arguments into it.
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Infers the type of a `when` expression as the common supertype of its
 * entries' body types.
 *
 * Each entry's conditions are checked against the subject; pattern conditions
 * may extend the entry's scope with bound variables and refine dataflow info.
 * Multiple conditions in one entry are OR-ed (so no single condition's
 * bindings can be trusted and a throwaway scope is used).
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: its pattern bindings extend the body's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
// OR the dataflow info of all conditions, then AND with the incoming info.
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Checks a single `when` condition against the subject and returns the
 * dataflow info it establishes.
 *
 * Call conditions (`.foo()`) must yield Boolean; range conditions check
 * `in`-expression applicability; `is`-pattern conditions delegate to
 * checkPatternType, which may bind variables into scopeToExtend and is the
 * only case that refines the returned dataflow info.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
// Mutable capture: only the is-pattern case overwrites the incoming info.
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
// JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Checks a pattern against the subject type and returns the dataflow info the
 * match establishes.
 *
 * Handles: type patterns (`is T`, refines dataflow for the subject variables),
 * tuple patterns (arity must match, entries checked recursively), decomposer
 * patterns (argument list checked against the decomposer's return type),
 * wildcards (no-op), expression patterns (compatibility check only) and
 * binding patterns (declares a variable into scopeToExtend, optionally with a
 * guard condition re-checked via checkWhenCondition).
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// Mutable capture for the visitor; starts from the incoming dataflow info.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
// A successful `is T` lets dataflow treat the subject as T.
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
// Check each entry pattern against the corresponding tuple component type.
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
// The decomposer's return type becomes the subject for the argument list.
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
// Without an explicit type the bound variable takes the subject's type.
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The guard sees the newly-bound variable as an additional subject variable.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the pattern type and the subject type cannot intersect.
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Infers the type of a try/catch/finally expression.
 *
 * With no finally block the result is the common supertype of the try block's
 * type and all catch bodies' types. With a finally block, catch bodies are
 * still type-checked but only the finally and try types contribute to the
 * result.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
// The catch parameter is visible only inside the catch body.
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchBody, context.replaceScope(catchScope));
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
}
JetType type = getType(tryBlock, context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
/**
 * Infers the type of an if expression.
 *
 * One-armed ifs are statements of type Unit; two-armed ifs yield the common
 * supertype of the branches. Dataflow info extracted from the condition is
 * applied per branch, and when exactly one branch is Nothing (a jump), the
 * other branch's dataflow info survives past the if.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
// Condition facts specialized per branch (e.g. `x is T` holds in the then-branch).
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
// If the then-branch always jumps, only the else-facts hold afterwards.
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
// A branch of type Nothing is a jump; the surviving branch's facts propagate.
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Computes the data flow facts that hold when {@code condition} evaluates to
 * {@code conditionValue}. Recognizes 'is' patterns, '&&'/'||', (in)equality
 * comparisons (against null, non-null expressions, and variables), negation
 * ('!'), and parentheses. When {@code scopeToExtend} is non-null, variables
 * bound by positively-matched patterns are added to it so they are visible in
 * the corresponding branch.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
// One-element array lets the anonymous visitor below write the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// Only a positive match ('is' when true, '!is' when false) yields facts.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
// May yield null if the pattern was not pre-analyzed; the null check
// at the end of the method compensates.
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// Pattern variables may only extend the scope when both operands are
// known to hold: '&&' being true, or '||' being false.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that the simple name (if any) ends up on the left.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
// NOTE(review): after the swap above, 'right' may be the original left
// operand; the constant-null check below assumes it is the non-name
// side — looks intended, but confirm against swapped-operand cases.
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records nullability facts for 'v == null' / 'v != null' style comparisons;
// equalsToNull is true when the comparison asserts the variable IS null.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
// '!e' holding for conditionValue is 'e' holding for !conditionValue.
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent: recurse into the inner expression.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
// A pattern lookup above may have produced null; fall back to incoming info.
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Type-checks a condition expression in the given scope and reports an error
 * unless its type is Boolean (no error when the type could not be computed).
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType conditionType = getType(condition, context.replaceScope(scope));
    if (conditionType == null || isBoolean(conditionType)) {
        return;
    }
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Types a 'while' loop: checks the condition is Boolean, types the body in a
 * scope possibly extended by smart-cast facts from the condition, and yields
 * Unit. When no 'break' targets the loop, the condition is known false after it.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
// The loop itself is Unit; the expected type only matters for the final check.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression body = expression.getBody();
if (body != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
// Facts that hold while the condition is true are visible in the body.
DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, body, conditionInfo, context);
}
if (!flowInformationProvider.isBreakable(expression)) {
// No 'break' escapes this loop, so afterwards the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Types a 'do..while' loop. Declarations made in the body stay visible in the
 * condition, so the body is typed into a writable scope that is then used as
 * the condition's scope. Always of type Unit.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
// A parameterless function literal used as the body is treated as a plain
// block (recorded via BindingContext.BLOCK), not as a closure value.
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// No 'break' escapes this loop: afterwards the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope chained onto {@code scope}, owned by the same
 * containing declaration and reporting through the trace's error handler.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Types a 'for' loop: resolves the iteration convention on the loop range,
 * checks a declared loop-parameter type against the element type (or infers
 * it), and types the body in a scope containing the loop parameter. Type: Unit.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
JetType expectedParameterType = null;
if (loopRangeType != null) {
// Element type produced by iterator().next(), if the convention holds.
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicitly typed parameter: the element type must be assignable to it.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// No declared type: infer from the range, or fall back to an error type.
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Looks up a 'hasNext' property on the iterator type for the for-loop
 * convention. Reports an error on {@code loopRange} when the property exists
 * but is unreadable or does not return Boolean.
 *
 * @return the property descriptor, or null when no such property exists
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
    if (hasNextProperty == null) {
        return null;
    }
    JetType hasNextReturnType = hasNextProperty.getOutType();
    if (hasNextReturnType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(hasNextReturnType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + hasNextReturnType);
    }
    return hasNextProperty;
}
/**
 * Checks the for-loop iteration convention on the loop-range type: the type
 * must provide iterator(), whose result must support hasNext (as a function or
 * a property, but not ambiguously both) and next().
 *
 * Fixes: the previous revision referenced an undefined symbol 'reportErrorsOn'
 * (errors are reported on {@code loopRange}), and assigned the property-check's
 * VariableDescriptor result directly to a boolean.
 *
 * @return the element type (return type of iterator().next()), or null when
 *         the convention is not satisfied
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(loopRange, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(loopRange, iteratorType, context) != null;
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(loopRange.getNode(), errorMessage);
    }
    return null;
}
/**
 * Checks that the iterator type provides a hasNext() function returning
 * Boolean, reporting errors on {@code loopRange}.
 *
 * Fixes: the previous revision declared a FunctionDescriptor return type but
 * returned boolean literals (and its only caller treats the result as a
 * boolean), and referenced the undefined symbol 'reportErrorsOn'.
 *
 * @return false only when no hasNext() function exists at all; an ambiguity or
 *         a wrong return type is reported as an error but still counts as support
 */
private boolean checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
    if (hasNextResolutionResult.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
    } else if (hasNextResolutionResult.isNothing()) {
        return false;
    } else {
        JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
        if (!isBoolean(hasNextReturnType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
        }
    }
    return true;
}
/**
 * Hash-qualified expressions are not supported by this inferrer: reports an
 * error on the operation token and yields no type.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getOperationTokenNode(), "Unsupported");
return null;
}
/**
 * Types a qualified expression (dot access, safe access 'a?.b', or predicate
 * 'a?'). First resolves the selector against the receiver's declared type with
 * autocasts disabled; if that fails, retries against each data-flow-implied
 * type of the receiver, committing the first successful autocast resolution.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
// The receiver may be a namespace, hence the with-namespaces visitor.
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// 'a?.b' yields null when 'a' is null, so the result becomes nullable
// (Unit is exempted).
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Clean resolution failed: retry with each possible (smart-cast) type.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
// Reversed so the most recently established facts are tried first
// — presumably; TODO confirm the ordering contract of getPossibleTypes.
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard the failed attempt; start from a fresh temporary trace.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Commit clean-resolution diagnostics so the failure is reported.
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// 'a?': the selector must be Boolean; the result is the receiver made nullable.
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function descriptor ultimately invoked by the selector expression,
 * drilling through call, array-access, binary and qualified expressions. Used
 * for null-safety checking of the receiver. Never returns null: when nothing
 * can be determined, a parameterless error function is returned.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
// One-element array lets the anonymous visitor write the result.
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
// f(...): recurse into the callee.
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
// A reference resolves directly through the binding context.
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
// Fallback for element kinds this resolver does not handle.
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// Nothing resolved: substitute a parameterless error function.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
/**
 * Resolves the selector of a qualified expression against the given receiver
 * type and returns the selector's type, or null when resolution fails.
 * Handles calls, simple names (property first, then namespace/class object),
 * and nested qualified selectors.
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
if (selectorExpression instanceof JetCallExpression) {
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// Try a property first; commit the temporary trace only on success.
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Not a property: try a namespace or class object in the receiver's member scope.
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
// a.(b.c): resolve the inner receiver first, then recurse on its selector.
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
/**
 * Infers the type of a call expression with no explicit receiver by delegating
 * to the call resolver, then checks the result against the expected type.
 */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    return context.services.checkType(
            context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType),
            expression,
            context);
}
/**
 * Types an 'is' expression: types the left-hand side, checks the pattern
 * against it, and records the resulting data flow info and bound variables so
 * that extractDataFlowInfoFromCondition can retrieve them later.
 * Always of type Boolean.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
JetPattern pattern = expression.getPattern();
if (pattern != null && knownType != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
// Stash the pattern's results for conditions that use this 'is'.
patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
}
return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types a unary expression. Labels pass the expected type straight through to
 * the base expression; other operators are resolved by convention (a named
 * function on the operand type). For ++/-- the resolved function's return type
 * is validated against the operand type and a reassignment is recorded.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
// Conventional function name for this operator token.
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
// A Unit-returning inc/dec makes the whole expression Unit.
result = JetStandardClasses.getUnitType();
}
else {
// The convention function must produce a value assignable back to the operand.
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
/**
 * Types a binary expression by dispatching on the operation token: named infix
 * calls, operator conventions, assignments, comparisons (compareTo must return
 * Int), equality (equals(Any?) : Boolean), identity (===/!==), 'in'/'!in'
 * (contains), boolean '&&'/'||', and the elvis operator '?:'.
 *
 * Fixes: removed a dead store to 'result' in the 'in' branch (it was
 * immediately followed by 'return null'), and corrected the grammar of the
 * elvis-operator warning message.
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Named infix call: 'a foo b'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Operator convention: resolve the corresponding named function.
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' resolve to compareTo, which must return Int.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // '=='/'!=' require an equals(Any?) : Boolean on the left operand's type.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, "equals",
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Equality is Boolean even when resolution above reported an error.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // Missing right operand: nothing to resolve 'contains' against.
            // (A dead store of an error type to 'result' was removed here — the
            // method returned null immediately anyway.)
            return null; // TODO
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        // Facts from the left operand are visible while typing the right one.
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        // Only the right operand sees the expected type: it is the fallback value.
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks the 'in'/'!in' convention: resolves a "contains" function on the
 * right-hand operand's type (the container) with the left operand as the
 * argument, then verifies the resolved function returns Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String conventionName = "contains";
    // The receiver of the convention call is the right-hand operand.
    JetType containerType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor containsFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            conventionName, containerType, context.expectedType);
    JetType containsType = containsFunction == null ? null : containsFunction.getReturnType();
    ensureBooleanResult(operationSign, conventionName, containsType, context);
}
/**
 * For equality-like operators, reports an error when the two operand types
 * have an empty intersection (the comparison can never be meaningful).
 * Silently returns when either operand's type cannot be determined.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(left, context.replaceScope(context.scope));
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(right, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    JetType intersect = TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes);
    if (intersect == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
// Compound assignment (+=, -=, ...) used where an expression is expected:
// always an error in this visitor; block-level handling lives in
// TypeInferrerVisitorWithWritableScope.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Plain assignment used where an expression is expected: always an error
// in this visitor; block-level handling lives in TypeInferrerVisitorWithWritableScope.
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Reports the shared "assignment is not an expression" diagnostic and yields
// no type (null) so the caller treats the construct as untyped.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
/**
 * Convenience wrapper: checks that {@code resultType} is Boolean, reporting
 * the convention function's name (quoted) as the subject of the error.
 * Returns false iff an error was reported.
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String quotedSubject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, quotedSubject, context);
}
/**
 * Verifies that a convention call's result type is Boolean. A null
 * {@code resultType} is treated as "nothing to check" (resolution already
 * failed and was reported elsewhere). Returns false iff an error was reported.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null) {
        return true;
    }
    // TODO : Relax?
    if (isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
// True when the given type is convertible to the standard library's Boolean
// (convertibility, not strict equality, per the type checker).
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Types an indexing expression a[i, ...] by resolving the "get" convention
 * function on the array expression's type. Returns null when either the
 * receiver type or the "get" function cannot be resolved.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression arrayExpression = expression.getArrayExpression();
    JetType receiverType = getType(arrayExpression, context.replaceScope(context.scope));
    if (receiverType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            receiverType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    // Check the 'get' result against the expected type of the whole expression.
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves a binary-operator convention call (e.g. "plus" for '+') using the
 * left operand's type as the receiver, and returns the resolved function's
 * return type, or null when resolution fails.
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    JetType receiverType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor conventionFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            receiverType,
            context.expectedType);
    return conventionFunction == null ? null : conventionFunction.getReturnType();
}
// Declarations (classes, functions, properties) are not expressions; this
// visitor only types expression positions, so report and yield no type.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
// In plain expression position a namespace reference is an error; the
// namespace-aware subclass (TypeInferrerVisitorWithNamespaces) overrides this.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
/**
 * Types a string template. Side effects: types each interpolated expression,
 * and, when the whole template consists only of literal/escape entries,
 * records the concatenated text as a compile-time constant. Always yields
 * the standard String type (checked against the expected type).
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// Accumulates the literal text; only meaningful if the template stays constant.
final StringBuilder builder = new StringBuilder();
// value[0] == OUT_OF_RANGE marks the template as NOT a compile-time constant
// (it contains an interpolated expression or an illegal escape).
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
// Type the interpolated expression for its side effects on the trace.
getType(entryExpression, context.replaceScope(context.scope));
}
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
// Escape entries are exactly a backslash plus one character.
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
// Record the constant value only when no entry disqualified the template.
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
// Fallback for any PSI element this visitor has no dedicated handler for:
// report it with the concrete class name to aid debugging.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
/**
 * Variant of TypeInferrerVisitor that permits namespace references in
 * expression position: root-namespace expressions type-check, and simple
 * names may additionally resolve to namespace types.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
    }

    // After ordinary lookup fails, also try resolving the name as a namespace.
    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
/**
 * Visitor for statements inside a block body. Local declarations encountered
 * while typing (properties, named functions, object declarations) are added
 * to the given writable scope so later statements can see them, and
 * assignments/compound assignments are handled as statements (they yield no
 * type, hence the null returns throughout).
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// Scope shared by all statements of the block; declarations accumulate here.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
// Local object declaration: analyze it and, if a class descriptor was bound,
// expose the object as a value (property) in the block scope.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// Local 'val'/'var': receivers and custom accessors are rejected on locals;
// the initializer is typed against the declared type when both are present.
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
// Typing the initializer with the declared type as the expected type
// performs the type check as a side effect.
JetType outType = propertyDescriptor.getOutType();
JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
// Register the variable AFTER typing the initializer so it is not visible there.
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
// Local named function: resolve its descriptor, add it to the scope, and
// check its return type against its body.
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
// Compound assignment (a += b): first try the 'plusAssign'-style convention
// on a temporary trace; if that fails, fall back to the binary counterpart
// ('plus') and mark the statement as a variable reassignment. Only the
// successful attempt's bindings are committed to the real trace.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
else {
temporaryBindingTrace.commit();
}
return null;
}
// Plain assignment: 'a[i] = x' is rewritten as a 'set' convention call;
// otherwise the right-hand side is typed against the left-hand side's type.
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
if (right != null) {
// Typing with leftType as the expected type performs the check as a side effect.
JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
// Resolves 'a[i] = x' to the 'set' convention function on a's type and
// records the operation sign as a reference to the resolved function.
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
if (receiverType == null) return null;
//
Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
// // TODO : nasty hack: effort is duplicated
// context.services.callResolver.resolveCallWithGivenName(
// scope,
// call,
// arrayAccessExpression,
// "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
call,
arrayAccessExpression,
"set", receiverType, NO_EXPECTED_TYPE);
if (functionDescriptor == null) return null;
context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
}
// Fallback for unsupported statements in a block.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel "expected return type" used where a 'return' is forbidden
// (e.g. while typing an expression body). Every JetType method throws so
// any accidental use of it as a real type fails fast; it is only ever
// compared by identity.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel meaning "no expected type constraint" for an expression position.
// Like FORBIDDEN, every JetType method throws; callers compare it by
// identity (expectedType != NO_EXPECTED_TYPE) and never inspect it as a type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Maps unary operator tokens to the names of their convention functions
// (e.g. '++' resolves to a function called "inc" on the operand's type).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Maps binary operator tokens to their convention function names
// (e.g. 'a + b' resolves to a.plus(b)).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Operator token groups that share one typing rule each.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Maps compound-assignment tokens to their convention names
// (e.g. 'a += b' first tries a.plusAssign(b)).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Fallback: if no 'plusAssign'-style function exists, a compound assignment
// is retried as the corresponding binary operator ('+=' falls back to '+').
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
// Core services and flow information shared by all typing operations.
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Caches for pattern-matching analysis results, keyed by pattern PSI node.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * Creates a type inferrer backed by the given flow-information provider and
 * semantic services (type checker, standard library, etc.).
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.flowInformationProvider = flowInformationProvider;
    this.semanticServices = semanticServices;
}
/**
 * Creates a Services facade bound to the given binding trace; all typing
 * performed through it records its results into that trace.
 */
public Services getServices(@NotNull BindingTrace trace) {
    Services boundServices = new Services(trace);
    return boundServices;
}
public class Services {
// Trace into which all typing results produced via this facade are recorded.
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
// Plain visitor for expression positions, and the namespace-tolerant variant.
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Binds all collaborating resolvers and visitors to the given trace.
private Services(BindingTrace trace) {
    this.trace = trace;
    this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
    this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
    this.typeInferrerVisitor = new TypeInferrerVisitor();
    this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
}
// Factory for the block-statement visitor bound to the given writable scope.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
    TypeInferrerVisitorWithWritableScope visitor = new TypeInferrerVisitorWithWritableScope(scope);
    return visitor;
}
/**
 * Like {@link #getType}, but never returns null: when no type can be
 * inferred, a synthetic error type is returned instead so callers need
 * no null checks.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferredType = getType(scope, expression, expectedType);
    return inferredType != null ? inferredType : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Infers the type of an expression in the given scope against an expected
 * type; 'return' is forbidden inside (expected return type is FORBIDDEN).
 * Returns null when the type cannot be determined.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    TypeInferenceContext context = newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN);
    return typeInferrerVisitor.getType(expression, context);
}
// Same as getType but using the namespace-tolerant visitor, with no
// expected-type constraint.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
    TypeInferenceContext context = newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE);
    return typeInferrerVisitorWithNamespaces.getType(expression, context);
}
// Exposes the call resolver bound to this facade's trace.
public CallResolver getCallResolver() {
return callResolver;
}
/**
 * Diagnoses '.'/'?.' usage against the receiver's nullability: a plain dot
 * on a nullable receiver (when the callee does not accept null receivers) is
 * an error, and '?.' on a non-null receiver gets a warning (or an error on
 * namespaces). No-op when either the receiver type or the callee is unknown.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) {
        return;
    }
    boolean isNamespace = receiverType instanceof NamespaceType;
    JetType calleeReceiverType = callee.getReceiverType();
    // Namespaces are never nullable, regardless of what isNullable() would say.
    boolean receiverMayBeNull = !isNamespace && receiverType.isNullable();
    boolean calleeRejectsNullReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
    IElementType operation = operationTokenNode.getElementType();
    if (receiverMayBeNull && calleeRejectsNullReceiver && operation == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if ((!receiverMayBeNull || !calleeRejectsNullReceiver) && operation == JetTokens.SAFE_ACCESS) {
        if (isNamespace) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
// Convenience overload: checks a function's return type starting from empty
// data-flow information.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks the function's body against its declared/resolved return type.
 * For an expression-bodied function with no declared return type the
 * expected type is left open (NO_EXPECTED_TYPE) so the body determines it
 * instead of being checked.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body without a declared type: infer, don't check.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    // Type parameters, value parameters and the receiver become visible inside.
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
}
// Public entry point with an explicit expected return type: starts from empty
// data-flow information and performs no coercion of the last expression.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
/**
 * Core return-type check: types the body, reports unreachable code, and
 * validates each return point against the expected return type. Block bodies
 * check returns against expectedReturnType while the body itself has no
 * expected type; expression bodies check the body expression directly (and
 * 'return' inside them is an error).
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
// Function literals may coerce their last expression to Unit.
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// Had return points but all were the empty body itself: a value is required.
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A non-'return' return point of a block body: only acceptable when
// the function returns Unit, the expression has type Nothing, or the
// expression is already reported as unreachable.
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Computes the type of a block expression: Unit for an empty block, otherwise
 * the type of its last statement, typing all statements in a fresh writable
 * scope so local declarations are visible to later statements.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
    List<JetElement> statements = expression.getStatements();
    if (statements.isEmpty()) {
        return checkType(JetStandardClasses.getUnitType(), expression, context);
    }
    DeclarationDescriptor owner = outerScope.getContainingDeclaration();
    WritableScope blockScope = new WritableScopeImpl(outerScope, owner, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
    return getBlockReturnedTypeWithWritableScope(blockScope, statements, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of all its return
 * points; a function with no return points at all gets type Nothing.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
    Collection<JetType> returnedTypes = typeMap.values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Types the function body, then maps each return point to its type: returned
 * expressions get the type recorded in the trace (skipped when unknown), and
 * elements returning implicitly get Unit. Also marks each returned expression
 * as a non-statement in the trace.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
        @NotNull BindingTrace trace,
        JetScope outerScope,
        JetDeclarationWithBody function,
        FunctionDescriptor functionDescriptor) {
    JetExpression bodyExpression = function.getBodyExpression();
    assert bodyExpression != null;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    // Type the whole body first so expression types are recorded in the trace.
    typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
    Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
    Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
    flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
    Map<JetElement, JetType> result = new HashMap<JetElement, JetType>();
    for (JetExpression returned : returnedExpressions) {
        JetType recordedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returned);
        trace.record(STATEMENT, returned, false);
        if (recordedType != null) {
            result.put(returned, recordedType);
        }
    }
    for (JetElement unitElement : elementsReturningUnit) {
        result.put(unitElement, JetStandardClasses.getUnitType());
    }
    return result;
}
/**
 * Types each statement of a block in order, threading data-flow information
 * from one statement to the next, and returns the type of the last statement.
 * When the last statement is checked against an expected type and coercion to
 * Unit is allowed, two typing attempts are made on temporary traces (with and
 * without the expected type) and only the successful one is committed.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
// Statements are typed with no expected type; only the last one may get one.
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
// Last statement with an expected type for the whole block.
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
// First attempt: type against the expected (Unit) type, intercepting
// any type-mismatch diagnostic on a temporary trace.
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Second attempt: retype with no expected type; commit whichever
// attempt did NOT produce a mismatch (the first if both did).
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Thread data-flow info gathered by this statement into the next one.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Wraps a trace so that a typeMismatch error reported on exactly {@code expressionToWatch}
 * sets {@code mismatchFound[0]} instead of being observable only as a diagnostic.
 * Other errors pass through to the underlying handler unchanged.
 *
 * @param trace the trace to delegate to
 * @param expressionToWatch only mismatches on this exact expression instance are flagged
 * @param mismatchFound single-element out-parameter; [0] is set to true on a watched mismatch
 */
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
// Compose the original handler with one that only records the watched mismatch.
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
/**
 * Refines {@code initialType} using data-flow information: when the expression is a simple
 * name bound to a known variable, returns the variable's data-flow "out" type; otherwise
 * (including a null expression) returns the initial type unchanged.
 */
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor descriptor = getVariableDescriptorFromSimpleName(expression, context);
    return descriptor == null ? initialType : context.dataFlowInfo.getOutType(descriptor);
}
/**
 * Reports a type mismatch if {@code expressionType} is known, an expected type is present,
 * and the expression type is not a subtype of it. Always returns the expression type as-is.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    boolean hasExpectation = context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE;
    if (expressionType != null && hasExpectation
            && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but before reporting a mismatch it consults data-flow information:
 * if the expression is a simple name whose possible (auto-cast) types include a subtype
 * of the expected type, the mismatch is suppressed, an AUTOCAST is recorded (for vals),
 * and the EXPECTED type is returned. Otherwise a mismatch is reported and the original
 * expression type is returned.
 *
 * @return the expected type when an appropriate data-flow type was found, else the
 *         (possibly null) expression type
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
// Fast path: nothing to check, or already a subtype of the expectation.
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Possible types are checked most-recent first (hence the reverse).
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the data-flow "out" (e.g. non-null-refined) type.
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// appropriateTypeFound implies variableDescriptor != null here.
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast of {@code expression} to {@code type}, or reports an error:
 * mutable ({@code var}) variables may change between the check and the use, so they can
 * never be auto-cast.
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
        return;
    }
    trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
}
/**
 * Checks each argument's type against the corresponding expected type projection,
 * enriching via data-flow info where possible. If the three lists do not line up
 * (or there are no arguments), the input list is returned unchanged — callers use
 * identity comparison to detect that no enrichment happened.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checked;
}
/**
 * If the expression is (possibly behind an "e : T" type annotation) a simple name that
 * resolves to a variable, returns that variable's descriptor; otherwise null.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    // "e : T" does not change the identity of the value — look through it.
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typeRHS = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typeRHS.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typeRHS.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** How the last expression of a block is matched against the expected type. */
private enum CoercionStrategy {
    /** No special treatment of the last expression. */
    NO_COERCION,
    /** The last expression may be coerced to Unit when Unit is expected. */
    COERCION_TO_UNIT
}
/**
 * Factory for {@link TypeInferenceContext}; use this (or the context's replace* methods)
 * rather than the deprecated constructor.
 */
@NotNull
private TypeInferenceContext newContext(@NotNull BindingTrace trace, @NotNull JetScope scope,
        @NotNull DataFlowInfo dataFlowInfo, @NotNull JetType expectedType, @NotNull JetType expectedReturnType) {
    return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of everything a single type-inference step needs: the binding trace to
 * write results/diagnostics to, the resolution scope, the incoming data-flow info, and the
 * expected type / expected return type. The replace* methods derive a new context sharing
 * all other fields; each returns {@code this} when nothing would change.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    public final DataFlowInfo dataFlowInfo;
    public final JetType expectedType;
    public final JetType expectedReturnType;

    @Deprecated // Only factory methods
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        // Resolver and services are trace-bound, so they are re-derived for every context.
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }

    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
        if (expectedType == newExpectedType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        if (expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
    }

    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        if (newTrace == trace) return this;
        return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        if (newExpectedType == expectedType && newTrace == trace) return this;
        // BUG FIX: previously invoked the constructor with a stray 'preferBlock' argument
        // that does not match any constructor parameter (would not compile); go through
        // the factory like every other replace* method.
        return newContext(newTrace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }

    @NotNull
    public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
        if (newScope == scope) return this;
        return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
    }

    @NotNull
    public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
        if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recently analyzed statement; cleared by resetResult().
protected DataFlowInfo resultDataFlowInfo;
/** Returns the data-flow info produced by the last analyzed statement, or null if none. */
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Entry point for inferring an expression's type. Results are cached in the trace under
 * EXPRESSION_TYPE and guarded by the PROCESSED flag: an already-processed expression is
 * answered from the cache, and every expression is marked PROCESSED on the way out.
 * A non-nullable Nothing result marks all dominated code as unreachable.
 *
 * @return the inferred type, or null if it could not be determined
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily-computed types before caching.
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-null Nothing-typed expression never completes normally.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
// Recursive type dependency (e.g. a value whose type depends on itself).
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
/**
 * Infers {@code expression}'s type under the given scope and data-flow info while keeping
 * the expected type and expected return type from {@code context}.
 */
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
    TypeInferenceContext updated = newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, updated);
}
/** Clears the per-statement result state so this visitor can be reused for the next statement. */
public void resetResult() {
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports an "unreachable code" error on every root expression dominated by
 * {@code expression} (which has been determined to never terminate normally).
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    // Only report on roots so nested dominated expressions don't produce duplicate errors.
    for (JetElement root : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Types a simple name reference. Field references ($foo, FIELD_IDENTIFIER) are resolved
 * against the scope's backing-field view; everything else is delegated to
 * getSelectorReturnType with a null receiver. Returns null when resolution fails.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
String referencedName = expression.getReferencedName();
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
// Backing-field reference: resolve directly against the scope's properties.
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
// assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
// if (referencedName != null) {
// VariableDescriptor variable = context.scope.getVariable(referencedName);
// if (variable != null) {
// context.trace.record(REFERENCE_TARGET, expression, variable);
// JetType result = variable.getOutType();
// if (result == null) {
// context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
// }
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// return lookupNamespaceOrClassObject(expression, referencedName, context);
// ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
// if (classifier != null) {
// JetType classObjectType = classifier.getClassObjectType();
// JetType result = null;
// if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
// result = classObjectType;
// }
// else {
// context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
// }
// context.trace.record(REFERENCE_TARGET, expression, classifier);
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// JetType[] result = new JetType[1];
// if (furtherNameLookup(expression, referencedName, result, context)) {
// return context.services.checkEnrichedType(result[0], expression, context);
// }
//
// }
// }
// context.trace.getErrorHandler().unresolvedReference(expression);
// }
}
// Reached when a field reference could not be resolved.
return null;
}
/**
 * Resolves a simple name as a classifier's class object, falling back to further name
 * lookup (namespaces) when no classifier is in scope. Returns an error type when the
 * classifier exists but has no usable class object, and null when nothing resolved.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
    ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
    if (classifier == null) {
        // No classifier: try namespaces etc.
        JetType[] holder = new JetType[1];
        if (furtherNameLookup(expression, referencedName, holder, context)) {
            return context.services.checkEnrichedType(holder[0], expression, context);
        }
        return null;
    }
    JetType classObjectType = classifier.getClassObjectType();
    JetType result = null;
    if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
        result = classObjectType;
    }
    else {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
    }
    // The reference is recorded even when the class object is unusable.
    context.trace.record(REFERENCE_TARGET, expression, classifier);
    if (result == null) {
        return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
    }
    return context.services.checkEnrichedType(result, expression, context);
}
/**
 * Whether names are currently being resolved in namespace position (which permits using a
 * class object as a value). Base implementation always answers false; presumably overridden
 * elsewhere — the override is not visible in this part of the file.
 */
public boolean isNamespacePosition() {
return false;
}
/**
 * Fallback lookup after variable/classifier resolution failed. Finding a namespace here is
 * an error ("expression expected"), but it still counts as a successful resolution so the
 * caller does not report "unresolved reference" on top. Note: {@code result[0]} is left
 * untouched by this base implementation.
 *
 * @return true when the name resolved (even if erroneously, as a namespace)
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
/**
 * Looks the name up as a namespace in the current scope; on success records the reference
 * target and returns the namespace's type, otherwise returns null.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
/**
 * Types an object literal ("object { ... }"). The object's descriptor is produced by a
 * TopDownAnalyzer pass; a record handler intercepts the declaration-to-descriptor write for
 * exactly this literal and captures its default type (wrapped in a DeferredType, since the
 * descriptor may not be fully initialized yet). The type is cached in the trace eagerly so
 * recursive references from inside the object body can see it.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the descriptor write for THIS object literal's declaration.
if (declaration == expression.getObjectDeclaration()) {
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
// Watch every declaration->descriptor slice for the write we care about.
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal ({...}). Builds an anonymous FunctionDescriptor, resolving each
 * value parameter's type from its declaration or — when a function type is expected — from
 * the corresponding expected parameter. The receiver type comes from the declared receiver
 * type reference, the expected function type, or is absent. The return type is either the
 * declared one (with the body then checked against it) or inferred from the body with
 * coercion-to-Unit allowed for the last expression.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
// An expected function type supplies parameter types for undeclared parameters.
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
if (valueParameters != null) {
// Take the type from the expected function type's i-th parameter.
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// An undeclared receiver is taken from the expected function type (if any).
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
// No declared return type: infer from the body, seeded by the expected return type if any.
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
if (functionTypeExpected) {
// Coercion to Unit: the literal may be used where a Unit-returning function is expected.
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/** A parenthesized expression has the type of its inner expression, checked against the expectation. */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal constant. The compile-time value is resolved against the expected type
 * (so e.g. integer literals can take their expected width); error values produce a
 * diagnostic plus the constant kind's default type, successful values are recorded as
 * COMPILE_TIME_VALUE and type-checked normally.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;

    CompileTimeConstant<?> value;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        value = resolver.getIntegerValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        value = resolver.getFloatValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        value = resolver.getBooleanValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        value = resolver.getCharValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        value = resolver.getRawStringValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        value = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }

    if (value instanceof ErrorValue) {
        context.trace.getErrorHandler().genericError(node, ((ErrorValue) value).getMessage());
        return getDefaultType(elementType);
    }
    context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
    return context.services.checkType(value.getType(standardLibrary), expression, context);
}
/**
 * Default type for a constant node kind, used when the literal's value could not be
 * resolved against the expected type (Int for integers, Double for floats, etc.).
 *
 * @throws IllegalArgumentException for a node type that is not a constant kind
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) return semanticServices.getStandardLibrary().getIntType();
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) return semanticServices.getStandardLibrary().getDoubleType();
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) return semanticServices.getStandardLibrary().getBooleanType();
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) return semanticServices.getStandardLibrary().getCharType();
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) return semanticServices.getStandardLibrary().getStringType();
    if (constantType == JetNodeTypes.NULL) return JetStandardClasses.getNullableNothingType();
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
/** A throw expression types as Nothing; its operand is inferred for trace side effects only. */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrown = expression.getThrownExpression();
    if (thrown != null) {
        // Inferred without expectation so any expression is accepted here.
        // TODO : check that it inherits Throwable
        getType(thrown, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types a return expression (always Nothing). The returned value, if present, is checked
 * against the context's expected return type; a bare {@code return} is an error when a
 * non-Unit value is expected. FORBIDDEN as the expected return type means 'return' is not
 * syntactically allowed here at all.
 *
 * Fix: removed the unused local {@code returnedType} (dead code in the original).
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/** A break expression never yields a value: it types as Nothing. */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/** A continue expression never yields a value: it types as Nothing. */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * True for literal constants whose type can flex to the context (integer and float
 * literals); false for anything else, including a null expression.
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) {
        return false;
    }
    IElementType elementType = expression.getNode().getElementType();
    return TokenSet.create(JetNodeTypes.INTEGER_CONSTANT, JetNodeTypes.FLOAT_CONSTANT).contains(elementType);
}
/**
 * Checks the left side of "e : T" / "e as T" / "e as? T" against the target type, writing
 * results into a temporary trace so the caller can decide whether to commit them.
 *
 * @param targetType the resolved right-hand type T
 * @param expectedType expected type to infer the left side under (may be NO_EXPECTED_TYPE)
 * @param temporaryTrace receives bindings and diagnostics from this attempt
 * @return true when the construct checked out (left type known and, for ':', a subtype of T)
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
if (actualType == null) return false;
JetSimpleNameExpression operationSign = expression.getOperationSign();
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.COLON) {
// ':' is an upcast annotation: the actual type must already be a subtype.
if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
return false;
}
return true;
}
else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
// Casts are allowed to narrow; only warn when the cast is useless or impossible.
checkForCastImpossibility(expression, actualType, targetType, context);
return true;
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
return false;
}
}
/**
 * Emits warnings for casts that are pointless (the value is already of the target type)
 * or can never succeed (the types are unrelated). Silently accepts genuine downcasts.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) {
        return;
    }
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    boolean downcastPossible = typeChecker.isSubtypeOf(targetType, actualType);
    boolean alreadyTarget = typeChecker.isSubtypeOf(actualType, targetType);
    if (!downcastPossible) {
        if (alreadyTarget) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
        }
        else {
            // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
        }
    }
    else if (alreadyTarget) {
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
    }
}
/**
 * Types a tuple literal as the tuple of its entry types. When a tuple type is expected,
 * entry types are checked/enriched componentwise; an enriched (identity-distinct) list
 * short-circuits into the result without the final checkType pass.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
    if (tupleExpected) {
        List<JetType> enriched = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        if (enriched != entryTypes) {
            return JetStandardClasses.getTupleType(enriched);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of a 'this' expression, covering the label-qualified form
 * (this@label) and the supertype-qualified form. Records the reference target
 * and the expression type in the binding trace; returns null when 'this'
 * cannot be resolved in the current context.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
    JetType result = null;
    JetType thisType = null;
    String labelName = expression.getLabelName();
    if (labelName != null) {
        // Label-qualified 'this': resolve the label against the scope's declarations.
        Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
        int size = declarationsByLabel.size();
        final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
        assert targetLabel != null;
        if (size == 1) {
            // Exactly one declaration carries this label: a class or a function.
            DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
            if (declarationDescriptor instanceof ClassDescriptor) {
                ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
                thisType = classDescriptor.getDefaultType();
            }
            else if (declarationDescriptor instanceof FunctionDescriptor) {
                FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
                thisType = functionDescriptor.getReceiverType();
            }
            else {
                throw new UnsupportedOperationException(); // TODO
            }
            context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
        }
        else if (size == 0) {
            // This uses the info written by the control flow processor
            PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
            if (psiElement instanceof JetFunctionLiteralExpression) {
                DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
                if (declarationDescriptor instanceof FunctionDescriptor) {
                    thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
                    if (thisType == null) {
                        // The literal has no receiver: 'this' is undefined here
                        // (reported below via the isNothing check).
                        thisType = JetStandardClasses.getNothingType();
                    }
                    else {
                        context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
                        context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
                    }
                }
                else {
                    context.trace.getErrorHandler().unresolvedReference(targetLabel);
                }
            }
            else {
                context.trace.getErrorHandler().unresolvedReference(targetLabel);
            }
        }
        else {
            context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
        }
    }
    else {
        // Unqualified 'this': the receiver type comes from the enclosing scope.
        thisType = context.scope.getThisType();
        DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
        if (declarationDescriptorForUnqualifiedThis != null) {
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
        }
    }
    if (thisType != null) {
        if (JetStandardClasses.isNothing(thisType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
        }
        else {
            JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
            if (superTypeQualifier != null) {
                // Supertype-qualified 'this': the result is the matching declared
                // supertype with this-type's arguments substituted in.
                JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
                // Errors are reported by the parser
                if (superTypeElement instanceof JetUserType) {
                    JetUserType typeElement = (JetUserType) superTypeElement;
                    ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
                    if (classifierCandidate instanceof ClassDescriptor) {
                        ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
                        Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
                        TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
                        for (JetType declaredSupertype : supertypes) {
                            if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
                                result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
                                break;
                            }
                        }
                        if (result == null) {
                            context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
                        }
                    }
                }
            } else {
                result = thisType;
            }
            if (result != null) {
                context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
            }
        }
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Infers the type of a 'when' expression: each entry's conditions are checked
 * against the subject (if any), entry bodies are typed in scopes possibly
 * extended by pattern-bound variables, and the overall type is the common
 * supertype of the body types. Returns null when no entry produced a type.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO :change scope according to the bound value in the when header
    final JetExpression subjectExpression = expression.getSubjectExpression();
    final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
    final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
    // TODO : exhaustive patterns
    Set<JetType> expressionTypes = Sets.newHashSet();
    for (JetWhenEntry whenEntry : expression.getEntries()) {
        JetWhenCondition[] conditions = whenEntry.getConditions();
        DataFlowInfo newDataFlowInfo;
        WritableScope scopeToExtend;
        if (conditions.length == 1) {
            // A single condition may bind pattern variables into the entry's scope.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
            newDataFlowInfo = context.dataFlowInfo;
            JetWhenCondition condition = conditions[0];
            if (condition != null) {
                newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
            }
        }
        else {
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
            newDataFlowInfo = null;
            // With several conditions the entry matches when ANY of them does,
            // so the per-condition flow facts are OR-ed together.
            for (JetWhenCondition condition : conditions) {
                DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
                if (newDataFlowInfo == null) {
                    newDataFlowInfo = dataFlowInfo;
                }
                else {
                    newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
                }
            }
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            else {
                newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
            }
        }
        JetExpression bodyExpression = whenEntry.getExpression();
        if (bodyExpression != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
            if (type != null) {
                expressionTypes.add(type);
            }
        }
    }
    if (!expressionTypes.isEmpty()) {
        return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
    }
    else if (expression.getEntries().isEmpty()) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not be an error
    }
    return null;
}
/**
 * Checks one 'when' condition against the subject, extending
 * {@code scopeToExtend} with pattern-bound variables where applicable, and
 * returns the data flow info established when the condition holds.
 * Unsupported condition kinds are reported via the error handler.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
            // JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
            if (callSuffixExpression != null) {
                // JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
                // The call is resolved with the subject as receiver and must yield Boolean.
                JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
                ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
                context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
            }
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            // Delegates to the 'in' convention check: subject in range.
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression != null) {
                assert subjectExpression != null;
                checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
            }
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            // Pattern checks may refine the flow facts (e.g. instanceof narrowing).
            JetPattern pattern = condition.getPattern();
            if (pattern != null) {
                newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return newDataFlowInfo[0];
}
/**
 * Checks {@code pattern} against {@code subjectType}, adding any variables the
 * pattern binds to {@code scopeToExtend}, and returns the data flow info
 * established when the pattern matches (e.g. instanceof facts about the
 * subject variables).
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    pattern.accept(new JetVisitorVoid() {
        @Override
        public void visitTypePattern(JetTypePattern typePattern) {
            JetTypeReference typeReference = typePattern.getTypeReference();
            if (typeReference != null) {
                JetType type = context.typeResolver.resolveType(context.scope, typeReference);
                checkTypeCompatibility(type, subjectType, typePattern);
                // A successful type match narrows the subject variables to 'type'.
                result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
            }
        }
        @Override
        public void visitTuplePattern(JetTuplePattern pattern) {
            List<JetTuplePatternEntry> entries = pattern.getEntries();
            TypeConstructor typeConstructor = subjectType.getConstructor();
            if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
                || typeConstructor.getParameters().size() != entries.size()) {
                context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
            }
            else {
                // Match each entry pattern against the corresponding tuple component type.
                for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
                    JetTuplePatternEntry entry = entries.get(i);
                    JetType type = subjectType.getArguments().get(i).getType();
                    // TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
                    ASTNode nameLabelNode = entry.getNameLabelNode();
                    if (nameLabelNode != null) {
                        context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
                    }
                    JetPattern entryPattern = entry.getPattern();
                    if (entryPattern != null) {
                        result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
                    }
                }
            }
        }
        @Override
        public void visitDecomposerPattern(JetDecomposerPattern pattern) {
            JetExpression decomposerExpression = pattern.getDecomposerExpression();
            if (decomposerExpression != null) {
                // The decomposer call's return type becomes the subject of the argument list.
                JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
                result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
            }
        }
        @Override
        public void visitWildcardPattern(JetWildcardPattern pattern) {
            // Nothing
        }
        @Override
        public void visitExpressionPattern(JetExpressionPattern pattern) {
            // The expression is typed in the extended scope so it can refer to
            // variables bound earlier in the same pattern.
            JetExpression expression = pattern.getExpression();
            if (expression != null) {
                JetType type = getType(expression, context.replaceScope(scopeToExtend));
                checkTypeCompatibility(type, subjectType, pattern);
            }
        }
        @Override
        public void visitBindingPattern(JetBindingPattern pattern) {
            // Binding pattern: declares a fresh variable bound to the subject,
            // optionally with an explicit type and a nested condition.
            JetProperty variableDeclaration = pattern.getVariableDeclaration();
            JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
            JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
            scopeToExtend.addVariableDescriptor(variableDescriptor);
            if (propertyTypeRef != null) {
                if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
                    context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
                }
            }
            JetWhenCondition condition = pattern.getCondition();
            if (condition != null) {
                // The nested condition also sees the newly bound variable as a subject.
                int oldLength = subjectVariables.length;
                VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
                System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
                newSubjectVariables[oldLength] = variableDescriptor;
                result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
            }
        }
        // Reports when the pattern type and the subject type have no values in common.
        private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
            // TODO : Take auto casts into account?
            if (type == null) {
                return;
            }
            if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
                context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
        }
    });
    return result[0];
}
/**
 * Types a try/catch/finally expression. Every catch body is typechecked in a
 * scope containing its catch parameter; with a 'finally' present only the
 * finally expression's type (plus the try block's) contributes to the result,
 * which is the common supertype of the contributing branch types.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    JetExpression tryBlock = expression.getTryBlock();
    JetFinallySection finallySection = expression.getFinallyBlock();
    List<JetType> branchTypes = new ArrayList<JetType>();
    for (JetCatchClause clause : expression.getCatchClauses()) {
        JetParameter parameter = clause.getCatchParameter();
        JetExpression catchBody = clause.getCatchBody();
        if (parameter == null) continue;
        // The parameter descriptor is resolved even when the body is absent.
        VariableDescriptor descriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, parameter);
        if (catchBody == null) continue;
        WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
        catchScope.addVariableDescriptor(descriptor);
        JetType bodyType = getType(catchBody, context.replaceScope(catchScope));
        if (bodyType != null) {
            branchTypes.add(bodyType);
        }
    }
    if (finallySection != null) {
        // The catch types above were only gathered to typecheck the bodies;
        // with 'finally' present they do not influence the result.
        branchTypes.clear();
        JetType finallyType = getType(finallySection.getFinalExpression(), context.replaceScope(context.scope));
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }
    JetType tryType = getType(tryBlock, context.replaceScope(context.scope));
    if (tryType != null) {
        branchTypes.add(tryType);
    }
    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Types an if-expression. Data flow facts derived from the condition are
 * propagated into each branch; when exactly one branch always jumps
 * (type Nothing), the opposite branch's facts survive past the expression
 * (stored in resultDataFlowInfo). With a missing branch the whole expression
 * has type Unit; otherwise its type is the common supertype of both branches.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression elseBranch = expression.getElse();
    JetExpression thenBranch = expression.getThen();
    WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
    // Facts assuming the condition is true / false respectively.
    DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
    DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    if (elseBranch == null) {
        if (thenBranch != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
            if (type != null && JetStandardClasses.isNothing(type)) {
                // 'then' never completes normally, so only the else-facts remain.
                resultDataFlowInfo = elseInfo;
                // resultScope = elseScope;
            }
            return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
        }
        return null;
    }
    if (thenBranch == null) {
        JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
        if (type != null && JetStandardClasses.isNothing(type)) {
            resultDataFlowInfo = thenInfo;
            // resultScope = thenScope;
        }
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
    }
    JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
    JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
    JetType result;
    if (thenType == null) {
        result = elseType;
    }
    else if (elseType == null) {
        result = thenType;
    }
    else {
        // Both branches yield values: the if-expression is their common supertype.
        result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
    }
    boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
    boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
    if (jumpInThen && !jumpInElse) {
        resultDataFlowInfo = elseInfo;
        // resultScope = elseScope;
    }
    else if (jumpInElse && !jumpInThen) {
        resultDataFlowInfo = thenInfo;
        // resultScope = thenScope;
    }
    return result;
}
/**
 * Computes the data flow facts that hold when {@code condition} evaluates to
 * {@code conditionValue}. Variables bound by 'is' patterns are added to
 * {@code scopeToExtend} (when non-null). Returns the incoming info unchanged
 * when the condition contributes nothing.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
    if (condition == null) return context.dataFlowInfo;
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitIsExpression(JetIsExpression expression) {
            // 'x is T' contributes facts on a positive match:
            // plain 'is' under true, negated 'is' under false.
            if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
                JetPattern pattern = expression.getPattern();
                result[0] = patternsToDataFlowInfo.get(pattern);
                if (scopeToExtend != null) {
                    List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
                    if (descriptors != null) {
                        for (VariableDescriptor variableDescriptor : descriptors) {
                            scopeToExtend.addVariableDescriptor(variableDescriptor);
                        }
                    }
                }
            }
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            IElementType operationToken = expression.getOperationToken();
            if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
                // Both operands' facts are guaranteed only for '&&'==true or
                // '||'==false; only then may the scope be extended.
                WritableScope actualScopeToExtend;
                if (operationToken == JetTokens.ANDAND) {
                    actualScopeToExtend = conditionValue ? scopeToExtend : null;
                }
                else {
                    actualScopeToExtend = conditionValue ? null : scopeToExtend;
                }
                DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
                JetExpression expressionRight = expression.getRight();
                if (expressionRight != null) {
                    DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
                    DataFlowInfo.CompositionOperator operator;
                    if (operationToken == JetTokens.ANDAND) {
                        operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
                    }
                    else {
                        operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
                    }
                    dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
                }
                result[0] = dataFlowInfo;
            }
            else if (operationToken == JetTokens.EQEQ
                     || operationToken == JetTokens.EXCLEQ
                     || operationToken == JetTokens.EQEQEQ
                     || operationToken == JetTokens.EXCLEQEQEQ) {
                JetExpression left = expression.getLeft();
                JetExpression right = expression.getRight();
                if (right == null) return;
                // Normalize so the simple-name operand (if any) ends up on the left.
                if (!(left instanceof JetSimpleNameExpression)) {
                    JetExpression tmp = left;
                    left = right;
                    right = tmp;
                    if (!(left instanceof JetSimpleNameExpression)) {
                        return;
                    }
                }
                VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
                if (variableDescriptor == null) return;
                // TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
                // Comparison to a non-null expression
                JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
                if (rhsType != null && !rhsType.isNullable()) {
                    extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
                    return;
                }
                VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
                if (rightVariable != null) {
                    JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
                    if (lhsType != null && !lhsType.isNullable()) {
                        extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
                        return;
                    }
                }
                // Comparison to 'null'
                if (!(right instanceof JetConstantExpression)) {
                    return;
                }
                JetConstantExpression constantExpression = (JetConstantExpression) right;
                if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
                    return;
                }
                extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
            }
        }
        // Records that 'variableDescriptor' is (or is not) null after the comparison.
        private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
            if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
            }
            else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
            }
        }
        @Override
        public void visitUnaryExpression(JetUnaryExpression expression) {
            // '!x' holds under conditionValue iff 'x' holds under !conditionValue.
            IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
            if (operationTokenType == JetTokens.EXCL) {
                JetExpression baseExpression = expression.getBaseExpression();
                if (baseExpression != null) {
                    result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
                }
            }
        }
        @Override
        public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
            // Parentheses are transparent for data flow extraction.
            JetExpression body = expression.getExpression();
            if (body != null) {
                body.accept(this);
            }
        }
    });
    if (result[0] == null) {
        // patternsToDataFlowInfo.get(...) may yield null for an unknown pattern.
        return context.dataFlowInfo;
    }
    return result[0];
}
/**
 * Types an optional loop/if condition in the given scope and reports an error
 * when the condition's type is known and is not Boolean.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(condition, context.replaceScope(scope));
    // An unknown type is not reported here — the failure was diagnosed during inference.
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Types a while-loop. The body sees data flow facts from the condition being
 * true; when the loop contains no 'break', the code after it inherits the
 * facts from the condition being false. The expression itself has type Unit.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyInfo = (condition == null)
                ? context.dataFlowInfo
                : extractDataFlowInfoFromCondition(condition, true, bodyScope, context);
        getTypeWithNewScopeAndDataFlowInfo(bodyScope, body, bodyInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        // Unbreakable loop: it only exits when the condition turns false.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Types a do..while loop. A body written as a parameterless function literal
 * is treated as a plain block so that variables it declares stay visible to
 * the loop condition; other bodies are typed in their own writable scope that
 * the condition can also see. The expression itself has type Unit.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (!function.getFunctionLiteral().hasParameterSpecification()) {
            // Parameterless literal: type its statements directly in a writable
            // scope and record it as an ordinary block, not a closure.
            WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = writableScope;
            context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
            context.trace.record(BindingContext.BLOCK, function);
        } else {
            getType(body, context.replaceScope(context.scope));
        }
    }
    else if (body != null) {
        WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = writableScope;
        context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        // Unbreakable loop: afterwards the condition is known to be false.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope nested inside {@code scope}, owned by the
 * same containing declaration and reporting errors through {@code trace}.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Types a for-loop: resolves the loop range's iteration convention
 * (iterator()/hasNext/next), declares the loop parameter in a fresh scope,
 * and types the body there. The for-expression itself has type Unit.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetParameter loopParameter = expression.getLoopParameter();
    JetExpression loopRange = expression.getLoopRange();
    JetType loopRangeType = null;
    if (loopRange != null) {
        loopRangeType = getType(loopRange, context.replaceScope(context.scope));
    }
    JetType expectedParameterType = null;
    if (loopRangeType != null) {
        // The element type comes from the range's iterator().next() return type.
        expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    if (loopParameter != null) {
        JetTypeReference typeReference = loopParameter.getTypeReference();
        VariableDescriptor variableDescriptor;
        if (typeReference != null) {
            // Explicitly typed parameter: the declared type must accept the
            // element type produced by the range.
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType actualParameterType = variableDescriptor.getOutType();
            if (expectedParameterType != null &&
                actualParameterType != null &&
                !semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
            }
        }
        else {
            // No declared type: infer it from the range, falling back to an error type.
            if (expectedParameterType == null) {
                expectedParameterType = ErrorUtils.createErrorType("Error");
            }
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
        }
        loopScope.addVariableDescriptor(variableDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(body, context.replaceScope(loopScope));
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Looks up a 'hasNext' property on the iterator type for the for-loop
 * convention, reporting errors on the loop range expression.
 *
 * @return the property's descriptor when one exists (even if its type is
 *         wrong, which is reported separately), or null when absent
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) return null;
    JetType returnType = property.getOutType();
    if (returnType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(returnType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + returnType);
    }
    return property;
}
/**
 * Checks that {@code type} (the for-loop range type) obeys the iteration
 * convention: an 'iterator()' method whose result provides 'hasNext'
 * (function or property) and a 'next()' method.
 *
 * Fixed: the body referenced an undeclared variable 'reportErrorsOn' — all
 * diagnostics here are reported on {@code loopRange} (via getNode(), matching
 * every other genericError call in this file) — and assigned the
 * VariableDescriptor result of checkHasNextPropertySupport directly to a boolean.
 *
 * @param type      the type of the loop range expression
 * @param loopRange the loop range expression, used for error reporting
 * @return the element type (return type of 'iterator().next()'), or null
 *         when the convention is not satisfied
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(loopRange, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(loopRange, iteratorType, context) != null;
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(loopRange.getNode(), errorMessage);
    }
    return null;
}
/**
 * Checks whether {@code iteratorType} has a 'hasNext()' function for the
 * for-loop convention, reporting errors on the loop range expression.
 *
 * Fixed: the method was declared to return {@code @Nullable FunctionDescriptor}
 * while its body returned the boolean literals {@code false}/{@code true}
 * (a compile error), and it reported errors through an undeclared
 * 'reportErrorsOn' variable. The return type is now {@code boolean}, matching
 * the body and the only call site (which assigns the result to a boolean),
 * and diagnostics are reported on {@code loopRange.getNode()}.
 *
 * @return true when a 'hasNext()' function exists (even if its return type is
 *         not Boolean, which is reported separately); false when none is found
 */
private boolean checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
    if (hasNextResolutionResult.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
    } else if (hasNextResolutionResult.isNothing()) {
        return false;
    } else {
        JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
        if (!isBoolean(hasNextReturnType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
        }
    }
    return true;
}
/**
 * Hash-qualified expressions are not supported by the type inferrer yet:
 * report an error on the operation token and yield no type.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Infers the type of 'a.b' / 'a?.b' / 'a?' expressions. Resolution is first attempted
    // without autocasts; only if that fails are data-flow-implied receiver types tried.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    // The receiver may also denote a namespace, hence the namespace-aware visitor.
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (selectorExpression == null) return null;
    if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
    // Clean resolution: no autocasts
    TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' yields a nullable result (except for Unit).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Clean resolution failed: retry with each data-flow-implied (autocast) type of
        // the receiver variable, most recently established type first.
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.commit();
                    somethingFound = true;
                    break;
                }
                else {
                    // Discard the failed attempt's bindings and start a fresh temporary trace.
                    autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // Keep the diagnostics produced by the clean (no-autocast) attempt.
            cleanResolutionTrace.commit();
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        // 'a?' requires a Boolean selector and yields the receiver type made nullable.
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            // Reject '.' access on a nullable receiver for the resolved callee.
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    // Digs through the selector expression to find the function being invoked, so that
    // null-safety of the receiver can be checked against that function.
    final FunctionDescriptor[] result = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // f(...) : recurse into the callee expression.
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // Plain reference: take its resolved target if it is a function.
            DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
            if (declarationDescriptor instanceof FunctionDescriptor) {
                result[0] = (FunctionDescriptor) declarationDescriptor;
            }
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            // a[i] : descend into the array expression.
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            // Binary op: descend into the left operand.
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            // a.b : descend into the receiver.
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (result[0] == null) {
        // Fall back to an error descriptor so callers never see null (@NotNull contract).
        result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return result[0];
}
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
    // Computes the type of the selector part of a qualified expression, given the
    // receiver's type. A null receiverType means resolution in the current scope.
    if (selectorExpression instanceof JetCallExpression) {
        return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
    }
    else if (selectorExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
        // Try a simple property first, on a temporary trace committed only on success...
        TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
        VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
        if (variableDescriptor != null) {
            temporaryTrace.commit();
            return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
        }
        // ...then fall back to a namespace / class-object lookup in the receiver's member scope.
        TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
        JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
        if (jetType == null) {
            context.trace.getErrorHandler().unresolvedReference(nameExpression);
        }
        return context.services.checkEnrichedType(jetType, nameExpression, context);
        // JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
        // return getType(selectorExpression, context.replaceScope(scope));
    }
    else if (selectorExpression instanceof JetQualifiedExpression) {
        // Nested qualification: resolve the inner receiver first, then recurse on its selector.
        JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
        JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
        JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
        if (newReceiverType != null && newSelectorExpression != null) {
            return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
        }
    }
    else {
        // TODO : not a simple name -> resolve in scope, expect property type or a function type
        context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    }
    return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    // Delegate to the call resolver (no receiver), then validate against the expected type.
    CallResolver resolver = context.services.callResolver;
    JetType resolvedType = resolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    // 'a is Pattern' is always Boolean; additionally records the data-flow info and the
    // variables bound by the pattern for later control-flow-sensitive typing.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (pattern != null && knownType != null) {
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    // Resolves unary operators ('-a', '!a', 'a++', ...) to their convention functions
    // (minus/not/inc/...) on the operand's type.
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // Labeled expression: its type is just the base expression's type.
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    IElementType operationType = operationSign.getReferencedNameElementType();
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression),
            expression.getOperationSign(),
            name,
            receiverType,
            context.expectedType);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        // inc()/dec(): a Unit-returning convention yields Unit; otherwise the convention
        // must return a subtype of the operand's type, which is then marked as reassigned.
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Infers the type of a binary expression by dispatching on the operator token:
 * named infix calls, convention operators (plus/minus/...), assignments, comparisons,
 * equality, identity, 'in', boolean connectives, and elvis each get dedicated handling.
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Infix call by name: 'a foo b' resolves as 'a.foo(b)'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // 'a < b' resolves via 'a.compareTo(b)', which must return Int; the comparison
        // expression itself is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    // Equality requires exactly 'equals(Any?) : Boolean' on the left operand.
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, "equals",
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Even when resolution failed the expression's type is still Boolean.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // FIX(review): the original assigned an error type to 'result' and then
            // immediately returned null, making the assignment dead code; the dead
            // assignment is removed and the early null return kept.
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // '&&' lets the right operand see facts established by the left; '||' does not.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // FIX(review): corrected the grammar of this user-facing warning
                // ("is always returns" -> "always returns").
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                // The elvis result is the common supertype, nullable only if the right side is.
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
// 'a in b' desugars to 'b.contains(a)': resolve 'contains' on the right operand's type
// and require the result to be Boolean.
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    final String name = "contains";
    JetType receiverType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    Call containsCall = CallMaker.makeCall(operationSign, Collections.singletonList(left));
    FunctionDescriptor containsFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            containsCall,
            operationSign,
            name, receiverType, context.expectedType);
    JetType containsReturnType = containsFunction == null ? null : containsFunction.getReturnType();
    ensureBooleanResult(operationSign, name, containsReturnType, context);
}
// Reports an error when the two operand types of an (in)equality comparison cannot
// overlap, i.e. their intersection is empty — such a comparison can never be true.
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetExpression leftOperand = expression.getLeft();
    JetExpression rightOperand = expression.getRight();
    JetType leftType = getType(leftOperand, context.replaceScope(context.scope));
    if (leftType == null || rightOperand == null) {
        return;
    }
    JetType rightType = getType(rightOperand, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        JetSimpleNameExpression operationSign = expression.getOperationReference();
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
// In expression position, compound assignments ('+=' etc.) are illegal; the
// writable-scope subclass overrides this with statement semantics.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
// In expression position, plain assignment ('=') is illegal; the writable-scope
// subclass overrides this with statement semantics.
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
// Reports the common "assignment used as an expression" error and yields no type.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    context.trace.getErrorHandler().genericError(node, "Assignments are not expressions, and only expressions are allowed in this context");
    return null;
}
// Convenience wrapper: quotes the operation name and delegates to the generic check.
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String subject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, subject, context);
}
// Checks that a convention function's result type is Boolean; an unknown (null) result
// type is tolerated, since an error was already reported elsewhere.
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    // TODO : Relax?
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
// Convertibility (not strict equality) to the standard Boolean type counts as Boolean.
private boolean isBoolean(@NotNull JetType type) {
    JetStandardLibrary library = semanticServices.getStandardLibrary();
    return semanticServices.getTypeChecker().isConvertibleTo(type, library.getBooleanType());
}
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    // In read position 'a[i, ...]' resolves to 'a.get(i, ...)'; the write ('set') case
    // is handled by the writable-scope visitor's assignment handling.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression arrayExpression = expression.getArrayExpression();
    JetType receiverType = getType(arrayExpression, context.replaceScope(context.scope));
    if (receiverType != null) {
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                context.scope,
                CallMaker.makeCall(expression, expression.getIndexExpressions()),
                expression,
                "get",
                receiverType,
                context.expectedType);
        if (functionDescriptor != null) {
            return context.services.checkType(functionDescriptor.getReturnType(), expression, contextWithExpectedType);
        }
    }
    return null;
}
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    // Resolves 'left <op> right' as the convention call 'left.<name>(right)' and returns
    // its return type, or null when resolution fails.
    JetType leftType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            leftType,
            context.expectedType);
    if (functionDescriptor != null) {
        // if (leftType.isNullable()) {
        // // TODO : better error message for '1 + nullableVar' case
        // context.trace.getErrorHandler().genericError(operationSign.getNode(),
        // "Infix call corresponds to a dot-qualified call '" +
        // left.getText() + "." + name + "(" + right.getText() + ")'" +
        // " which is not allowed on a nullable receiver '" + right.getText() + "'." +
        // " Use '?.'-qualified call instead");
        // }
        return functionDescriptor.getReturnType();
    }
    return null;
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    // Plain expression position: declarations are rejected outright.
    ASTNode node = dcl.getNode();
    context.trace.getErrorHandler().genericError(node, "Declarations are not allowed in this position");
    return null;
}
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    // In plain expression position the root namespace is not a value; the
    // namespace-aware subclass overrides this.
    ASTNode node = expression.getNode();
    context.trace.getErrorHandler().genericError(node, "'namespace' is not an expression");
    return null;
}
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Type-checks each entry of a string template; the result type is always String.
    // If every entry is compile-time constant text, the concatenated value is recorded;
    // the OUT_OF_RANGE sentinel in value[0] marks the template as non-constant.
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // ${expr}: infer its type; the template is no longer a compile-time constant.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                // Plain text contributes directly to the constant value.
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    // Catch-all: anything without a dedicated visit method is unsupported here.
    String description = element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + description);
    return null;
}
}
// Variant of TypeInferrerVisitor that additionally resolves namespace-level names;
// used where an expression may legally denote a namespace (e.g. receiver position).
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // Here the root namespace is a legal expression; check it against the expected type.
        return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fallback after ordinary resolution fails; result[0] is an out-parameter.
        result[0] = lookupNamespaceType(expression, referencedName, context);
        return result[0] != null;
    }
}
// Visitor used for block bodies: declarations encountered while inferring types are
// added to the given writable scope, and assignments are treated as statements.
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    private final WritableScope scope;

    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }

    // public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
    // super(trace);
    // this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
    // }

    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        // Analyze a local object declaration top-down, then register it in the scope
        // as a property so later statements can reference it.
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        return null;
    }

    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        // Local 'val'/'var': receivers and custom accessors are not allowed here.
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        if (property.getPropertyTypeRef() != null && initializer != null) {
            // The declared type becomes the expected type of the initializer.
            JetType outType = propertyDescriptor.getOutType();
            JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
            // if (outType != null &&
            // initializerType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
            // context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
            // }
        }
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }

    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        // Local function: resolve its descriptor, make it visible in the scope, and
        // check its return type against the body.
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }

    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }

    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }

    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        return visitJetElement(dcl, context);
    }

    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        // 'a += b': try the dedicated assignment convention first (on a temporary trace,
        // committed only on success); otherwise fall back to the plain counterpart
        // operation ('a = a + b') and mark the variable as reassigned.
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
        JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
        if (assignmentOperationType == null) {
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        else {
            temporaryBindingTrace.commit();
        }
        return null;
    }

    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        // 'a = b' as a statement; 'a[i] = b' is rewritten into a 'set' convention call.
        JetExpression left = expression.getLeft();
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        if (deparenthesized instanceof JetArrayAccessExpression) {
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
        if (right != null) {
            // The left-hand type is the expected type of the right-hand side.
            JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
            // if (rightType != null &&
            // leftType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
            // context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
            // }
        }
        return null;
    }

    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
        // 'a[i] = b' resolves to 'a.set(i, b)'.
        JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
        if (receiverType == null) return null;
        //
        Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
        // // TODO : nasty hack: effort is duplicated
        // context.services.callResolver.resolveCallWithGivenName(
        // scope,
        // call,
        // arrayAccessExpression,
        // "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                scope,
                call,
                arrayAccessExpression,
                "set", receiverType, NO_EXPECTED_TYPE);
        if (functionDescriptor == null) return null;
        context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
    }

    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}
KeepBothMethods
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
/**
 * Sentinel "expected return type" for positions where consulting the expected
 * return type would be a programming error. Compared by identity only; every
 * JetType method therefore throws.
 */
private static final JetType FORBIDDEN = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "FORBIDDEN";
    }
};
/**
 * Sentinel meaning "no expected type is known": checks against it are skipped
 * (see checkType). Compared by identity (== / !=) throughout, never inspected,
 * hence every JetType method throws.
 */
public static final JetType NO_EXPECTED_TYPE = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "NO_EXPECTED_TYPE";
    }
};
// Convention-method names invoked for unary operators (e.g. '!' resolves to not()).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
    .put(JetTokens.PLUSPLUS, "inc")
    .put(JetTokens.MINUSMINUS, "dec")
    .put(JetTokens.PLUS, "plus")
    .put(JetTokens.MINUS, "minus")
    .put(JetTokens.EXCL, "not")
    .build();
// Convention-method names for binary operators (e.g. '+' resolves to plus()).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
    .put(JetTokens.MUL, "times")
    .put(JetTokens.PLUS, "plus")
    .put(JetTokens.MINUS, "minus")
    .put(JetTokens.DIV, "div")
    .put(JetTokens.PERC, "mod")
    .put(JetTokens.ARROW, "arrow")
    .put(JetTokens.RANGE, "rangeTo")
    .build();
// Operator groups that get special resolution rather than a direct name lookup.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Compound assignments (+=, -= ...) resolved to their xxxAssign() convention methods.
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
    .put(JetTokens.MULTEQ, "timesAssign")
    .put(JetTokens.DIVEQ, "divAssign")
    .put(JetTokens.PERCEQ, "modAssign")
    .put(JetTokens.PLUSEQ, "plusAssign")
    .put(JetTokens.MINUSEQ, "minusAssign")
    .build();
// Maps each compound-assignment token to its plain binary counterpart (+= -> +),
// used when a += b is rewritten as a = a + b.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
    .put(JetTokens.MULTEQ, JetTokens.MUL)
    .put(JetTokens.DIVEQ, JetTokens.DIV)
    .put(JetTokens.PERCEQ, JetTokens.PERC)
    .put(JetTokens.PLUSEQ, JetTokens.PLUS)
    .put(JetTokens.MINUSEQ, JetTokens.MINUS)
    .build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern caches. NOTE(review): neither map is read or written in the visible
// portion of this file — confirm they are used elsewhere before relying on them.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * @param flowInformationProvider supplies control-flow facts (returns, reachability, dominance)
 * @param semanticServices        core resolution services (type checker, resolvers)
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.semanticServices = semanticServices;
    this.flowInformationProvider = flowInformationProvider;
}
/** Creates a per-trace facade over this inferrer; all results are recorded into {@code trace}. */
public Services getServices(@NotNull BindingTrace trace) {
    return new Services(trace);
}
/**
 * Trace-bound entry point to type inference. Bundles the helpers that must share
 * one {@link BindingTrace}: the call resolver, the compile-time constant resolver,
 * and the expression visitors.
 */
public class Services {
    private final BindingTrace trace;
    private final CompileTimeConstantResolver compileTimeConstantResolver;
    private final CallResolver callResolver;
    private final TypeInferrerVisitor typeInferrerVisitor;
    private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
    private Services(BindingTrace trace) {
        this.trace = trace;
        this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
        this.typeInferrerVisitor = new TypeInferrerVisitor();
        this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
        this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
    }
    /** Fresh statement-level visitor that writes declarations into the given scope. */
    public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
        return new TypeInferrerVisitorWithWritableScope(scope);
    }
    /**
     * Like {@link #getType} but never returns null: inference failures yield an
     * error type so callers need no null checks.
     */
    @NotNull
    public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
        JetType type = getType(scope, expression, expectedType);
        if (type != null) {
            return type;
        }
        return ErrorUtils.createErrorType("Type for " + expression.getText());
    }
    /**
     * Infers the type of {@code expression} against {@code expectedType};
     * returns null when inference fails. Return-expressions are forbidden here
     * (expected return type is FORBIDDEN).
     */
    @Nullable
    public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
        return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
    }
    // Variant for positions where a bare namespace name is a legal result (e.g. qualifiers).
    public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
        return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    }
    public CallResolver getCallResolver() {
        return callResolver;
    }
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
    /** Checks a function body against the declared return type with empty data-flow info. */
    public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
        checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
    }
    private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
        JetType expectedReturnType = functionDescriptor.getReturnType();
        // Expression-bodied functions without a declared return type have their
        // type inferred, so there is nothing to check the body against.
        if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
            expectedReturnType = NO_EXPECTED_TYPE;
        }
        JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
        checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
    }
    public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
        checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
    }
    /**
     * Type-checks a function body and reports: unreachable code, a missing return
     * value for a non-Unit block body, 'return' inside an expression body, and
     * block-body tails that are neither 'return' nor of type Nothing.
     */
    private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
        JetExpression bodyExpression = function.getBodyExpression();
        assert bodyExpression != null;
        final boolean blockBody = function.hasBlockBody();
        // For a block body the body itself has no expected type; only 'return'
        // expressions are checked (against expectedReturnType). For an expression
        // body the body IS the returned value, so it gets the expected type directly.
        final TypeInferenceContext context =
            blockBody
            ? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
            : newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
        if (function instanceof JetFunctionLiteralExpression) {
            JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
            getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
        }
        else {
            typeInferrerVisitor.getType(bodyExpression, context);
        }
        List<JetElement> unreachableElements = Lists.newArrayList();
        flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
        // This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
        final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
        // TODO : (return 1) || (return 2) -- only || and right of it is unreachable
        // TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
        // though it'd better be reported more specifically
        for (JetElement element : rootUnreachableElements) {
            trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
        }
        List<JetExpression> returnedExpressions = Lists.newArrayList();
        flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
        boolean nothingReturned = returnedExpressions.isEmpty();
        returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
        // Empty list after removal + a non-Unit expected type means the body falls
        // off the end without producing a value (unless nothing is returned at all,
        // i.e. the body never completes normally).
        if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
            trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
        }
        for (JetExpression returnedExpression : returnedExpressions) {
            returnedExpression.accept(new JetVisitorVoid() {
                @Override
                public void visitReturnExpression(JetReturnExpression expression) {
                    if (!blockBody) {
                        trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
                    }
                }
                @Override
                public void visitExpression(JetExpression expression) {
                    // A block-body tail expression must be an explicit 'return'
                    // unless its type is Nothing (it never completes normally).
                    if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
                        //TODO move to pseudocode
                        JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
                        if (type == null || !JetStandardClasses.isNothing(type)) {
                            trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
                        }
                    }
                }
            });
        }
    }
    /**
     * Infers the type of a block: Unit for an empty block, otherwise the type of
     * its statements evaluated in a fresh writable scope.
     */
    @Nullable
    private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
        List<JetElement> block = expression.getStatements();
        if (block.isEmpty()) {
            return checkType(JetStandardClasses.getUnitType(), expression, context);
        }
        DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
        WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
        return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
    }
    /**
     * Infers a function's return type as the common supertype of all returned
     * expressions; Nothing when the function never returns a value.
     */
    @NotNull
    public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
        Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
        Collection<JetType> types = typeMap.values();
        return types.isEmpty()
            ? JetStandardClasses.getNothingType()
            : semanticServices.getTypeChecker().commonSupertype(types);
    }
    /**
     * Runs inference over the body, then maps every returned expression to its
     * cached type (and every implicit-Unit exit point to Unit).
     */
    private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
            @NotNull BindingTrace trace,
            JetScope outerScope,
            JetDeclarationWithBody function,
            FunctionDescriptor functionDescriptor) {
        JetExpression bodyExpression = function.getBodyExpression();
        assert bodyExpression != null;
        JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
        // Populates EXPRESSION_TYPE for everything in the body; the result itself is not needed.
        typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
        Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
        Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
        flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
        Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
        for (JetExpression returnedExpression : returnedExpressions) {
            JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
            // A returned expression is used as a value, not as a statement.
            trace.record(STATEMENT, returnedExpression, false);
            if (cachedType != null) {
                typeMap.put(returnedExpression, cachedType);
            }
        }
        for (JetElement jetElement : elementsReturningUnit) {
            typeMap.put(jetElement, JetStandardClasses.getUnitType());
        }
        return typeMap;
    }
    /**
     * Types each statement of a block in order, threading data-flow info forward;
     * the result is the type of the last statement. When coercion-to-Unit applies
     * to the last statement, it is first typed against Unit on a temporary trace,
     * and only if that mismatches is it re-typed without an expected type — the
     * trace whose attempt "won" is committed.
     */
    private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
        if (block.isEmpty()) {
            return JetStandardClasses.getUnitType();
        }
        TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
        // Statements inside the block carry no expected type; only the last one may.
        TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
        JetType result = null;
        for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
            final JetElement statement = iterator.next();
            trace.record(STATEMENT, statement);
            final JetExpression statementExpression = (JetExpression) statement;
            //TODO constructor assert context.expectedType != FORBIDDEN : ""
            if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
                // Last statement with a known expected type.
                if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
                    // This implements coercion to Unit
                    TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
                    final boolean[] mismatch = new boolean[1];
                    BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
                    newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
                    result = blockLevelVisitor.getType(statementExpression, newContext);
                    if (mismatch[0]) {
                        // Unit didn't fit; retry with no expected type on a second temporary trace.
                        TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
                        mismatch[0] = false;
                        BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
                        newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
                        result = blockLevelVisitor.getType(statementExpression, newContext);
                        if (mismatch[0]) {
                            // Both attempts failed; keep the diagnostics from the Unit attempt.
                            temporaryTraceExpectingUnit.commit();
                        }
                        else {
                            temporaryTraceNoExpectedType.commit();
                        }
                    }
                    else {
                        temporaryTraceExpectingUnit.commit();
                    }
                }
                else {
                    newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
                    result = blockLevelVisitor.getType(statementExpression, newContext);
                }
            }
            else {
                result = blockLevelVisitor.getType(statementExpression, newContext);
            }
            // Propagate data-flow facts (e.g. null checks) to subsequent statements.
            DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            if (newDataFlowInfo != context.dataFlowInfo) {
                newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
            }
            blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
        }
        return result;
    }
    /**
     * Wraps a trace so that a typeMismatch reported against exactly
     * {@code expressionToWatch} also sets {@code mismatchFound[0]}; all
     * diagnostics still flow through to the wrapped trace's handler.
     */
    private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
        return new BindingTraceAdapter(trace) {
            @NotNull
            @Override
            public ErrorHandler getErrorHandler() {
                return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
                    @Override
                    public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
                        if (expression == expressionToWatch) {
                            mismatchFound[0] = true;
                        }
                    }
                });
            }
        };
    }
//TODO
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
if (expression == null) return initialType;
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
if (variableDescriptor != null) {
return context.dataFlowInfo.getOutType(variableDescriptor);
}
return initialType;
}
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType != null && context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE) {
if (!semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
}
}
return expressionType;
}
    /**
     * Like checkType, but before reporting a mismatch it consults data-flow
     * facts about a simple-name receiver: if any possible (auto-cast) type fits
     * the expected type, the mismatch is suppressed, an AUTOCAST is recorded,
     * and the expected type is returned instead of the actual one.
     */
    @Nullable
    private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
        if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
                semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
            return expressionType;
        }
        VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
        boolean appropriateTypeFound = false;
        if (variableDescriptor != null) {
            // Most recently established facts are preferred, hence the reverse.
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            Collections.reverse(possibleTypes);
            for (JetType possibleType: possibleTypes) {
                if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
                    appropriateTypeFound = true;
                    break;
                }
            }
            if (!appropriateTypeFound) {
                // Fall back to the nullability-narrowed ("out") type.
                JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
                if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
                    appropriateTypeFound = true;
                }
            }
        }
        if (!appropriateTypeFound) {
            context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
            return expressionType;
        }
        // appropriateTypeFound implies variableDescriptor != null here.
        checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
        return context.expectedType;
    }
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
if (variableDescriptor.isVar()) {
trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
} else {
trace.record(BindingContext.AUTOCAST, expression, type);
}
}
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
if (arguments.size() == 0 || argumentTypes.size() != arguments.size() || expectedArgumentTypes.size() != arguments.size()) {
return argumentTypes;
}
List<JetType> result = Lists.newArrayListWithCapacity(arguments.size());
for (int i = 0, argumentTypesSize = argumentTypes.size(); i < argumentTypesSize; i++) {
result.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expectedArgumentTypes.get(i).getType())));
}
return result;
}
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
JetBinaryExpressionWithTypeRHS expression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
if (expression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
return getVariableDescriptorFromSimpleName(expression.getLeft(), context);
}
}
VariableDescriptor variableDescriptor = null;
if (receiverExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
if (declarationDescriptor instanceof VariableDescriptor) {
variableDescriptor = (VariableDescriptor) declarationDescriptor;
}
}
return variableDescriptor;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
private enum CoercionStrategy {
NO_COERCION ,
COERCION_TO_UNIT}
    /** Sole factory for inference contexts (the constructor is deprecated for direct use). */
    @NotNull
    private TypeInferenceContext newContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
    }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
public final DataFlowInfo dataFlowInfo;
public final JetType expectedType;
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
if (newExpectedType == expectedType && newTrace == trace) return this;
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
}
}
    /**
     * Expression-level inference visitor. getType() is the single entry point:
     * it caches results under EXPRESSION_TYPE, marks expressions PROCESSED, and
     * flags code dominated by non-null Nothing-typed expressions as unreachable.
     */
    private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
        // Data-flow facts established by the last statement typed (e.g. a null check);
        // consumed and cleared by the block-level driver via resetResult().
        protected DataFlowInfo resultDataFlowInfo;
        @Nullable
        public DataFlowInfo getResultingDataFlowInfo() {
            return resultDataFlowInfo;
        }
        /**
         * Infers and caches the expression's type; null when inference fails.
         * Idempotent: an already-PROCESSED expression returns its cached type.
         */
        @Nullable
        public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
            if (context.trace.get(BindingContext.PROCESSED, expression)) {
                return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
            }
            JetType result;
            try {
                result = expression.visit(this, context);
                // Some recursive definitions (object expressions) must put their types in the cache manually:
                if (context.trace.get(BindingContext.PROCESSED, expression)) {
                    return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
                }
                if (result instanceof DeferredType) {
                    result = ((DeferredType) result).getActualType();
                }
                if (result != null) {
                    context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
                    // A non-null expression of type Nothing never completes normally,
                    // so everything it dominates is unreachable.
                    if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
                        markDominatedExpressionsAsUnreachable(expression, context);
                    }
                }
            }
            catch (ReenteringLazyValueComputationException e) {
                context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
                result = null;
            }
            if (!context.trace.get(BindingContext.PROCESSED, expression)) {
                context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
            }
            context.trace.record(BindingContext.PROCESSED, expression);
            return result;
        }
        private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
            return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
        }
        /** Clears per-statement state so the visitor can be reused for the next statement. */
        public void resetResult() {
            // result = null;
            resultDataFlowInfo = null;
            // resultScope = null;
        }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
List<JetElement> dominated = new ArrayList<JetElement>();
flowInformationProvider.collectDominatedExpressions(expression, dominated);
Set<JetElement> rootExpressions = JetPsiUtil.findRootExpressions(dominated);
for (JetElement rootExpression : rootExpressions) {
context.trace.getErrorHandler().genericError(rootExpression.getNode(),
"This code is unreachable, because '" + expression.getText() + "' never terminates normally");
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        /**
         * Types a simple name: '$field' references resolve through the enclosing
         * scope's backing-field lookup; everything else is delegated to selector
         * resolution with no receiver.
         */
        @Override
        public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
            // TODO : other members
            // TODO : type substitutions???
            String referencedName = expression.getReferencedName();
            if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
                    && referencedName != null) {
                PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
                if (property == null) {
                    context.trace.getErrorHandler().unresolvedReference(expression);
                }
                else {
                    context.trace.record(REFERENCE_TARGET, expression, property);
                    return context.services.checkEnrichedType(property.getOutType(), expression, context);
                }
            }
            else {
                return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
            }
            // Reached only on an unresolved field reference.
            return null;
        }
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
/**
 * Whether the name being resolved occurs in namespace position (e.g. as a qualifier).
 * This base implementation says no; subclasses may override to change how class
 * objects are treated in {@code lookupNamespaceOrClassObject}.
 */
public boolean isNamespacePosition() {
    return false;
}
/**
 * Fallback lookup after classifier resolution fails. This base implementation only
 * checks for a namespace of that name and, if found, reports that an expression was
 * expected. Subclasses may override to resolve further and write into {@code result}.
 *
 * @return true if the name was handled (even if only by reporting an error)
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    // A bare namespace name is not a value.
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
/**
 * Looks up a namespace by name in the current scope; records the reference target
 * and returns the namespace's type, or null when no such namespace exists.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor found = context.scope.getNamespace(referencedName);
    if (found != null) {
        context.trace.record(REFERENCE_TARGET, expression, found);
        return found.getNamespaceType();
    }
    return null;
}
/**
 * Types an object literal used as an expression. The object declaration is analyzed
 * by a fresh TopDownAnalyzer; a record handler is hooked into the trace so that, at
 * the moment the analyzer records the descriptor for this declaration, the object's
 * default type is captured (lazily, via DeferredType) and written back as this
 * expression's type.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
    final JetType[] result = new JetType[1];
    BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
        @Override
        public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
            // Only react to the record for *this* object literal's declaration.
            if (declaration == expression.getObjectDeclaration()) {
                // Defer the type computation: the class descriptor may not be fully
                // initialized at the time this handler fires.
                JetType defaultType = new DeferredType(new LazyValue<JetType>() {
                    @Override
                    protected JetType compute() {
                        return ((ClassDescriptor) descriptor).getDefaultType();
                    }
                });
                result[0] = defaultType;
                // Guard against double-recording when the expression was already processed.
                if (!context.trace.get(BindingContext.PROCESSED, expression)) {
                    context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
                    context.trace.record(BindingContext.PROCESSED, expression);
                }
            }
        }
    };
    BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
    // Subscribe on every declaration-to-descriptor slice; we cannot know in advance which one fires.
    for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
        //noinspection unchecked
        traceAdapter.addHandler(slice, handler);
    }
    TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
    topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
    return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal, building and initializing an anonymous FunctionDescriptor
 * from the literal's receiver, parameters and body. When a function type is expected
 * from context, omitted parameter types and the return type are taken from that
 * expected type.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
    JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
    JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
    final JetType receiverType;
    if (receiverTypeRef != null) {
        receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
    } else {
        // No explicit receiver annotation: inherit the enclosing scope's "this" type.
        receiverType = context.scope.getThisType();
    }
    FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
            context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
    List<JetType> parameterTypes = new ArrayList<JetType>();
    List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
    List<JetParameter> parameters = functionLiteral.getValueParameters();
    JetType expectedType = context.expectedType;
    List<ValueParameterDescriptor> valueParameters = null;
    boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
    if (functionTypeExpected) {
        // Parameter descriptors implied by the expected function type,
        // used below to fill in omitted parameter types.
        valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
    }
    for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
        JetParameter parameter = parameters.get(i);
        JetTypeReference typeReference = parameter.getTypeReference();
        JetType type;
        if (typeReference != null) {
            type = context.typeResolver.resolveType(context.scope, typeReference);
        }
        else {
            if (valueParameters != null) {
                // Untyped parameter: take the type from the expected function type.
                // NOTE(review): assumes the expected function type declares at least as many
                // parameters as the literal - confirm, otherwise this indexes out of bounds.
                type = valueParameters.get(i).getOutType();
            }
            else {
                context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
                type = ErrorUtils.createErrorType("Cannot be inferred");
            }
        }
        ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
        parameterTypes.add(valueParameterDescriptor.getOutType());
        valueParameterDescriptors.add(valueParameterDescriptor);
    }
    // Receiver written into the descriptor: an explicit annotation wins; otherwise
    // fall back to the expected function type's receiver (or none).
    JetType effectiveReceiverType;
    if (receiverTypeRef == null) {
        if (functionTypeExpected) {
            effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
        }
        else {
            effectiveReceiverType = null;
        }
    }
    else {
        effectiveReceiverType = receiverType;
    }
    functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
    context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
    JetType returnType = NO_EXPECTED_TYPE;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
    JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
    if (returnTypeRef != null) {
        // Explicit return type: resolve it and check the body against it.
        returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
        context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
    }
    else {
        if (functionTypeExpected) {
            returnType = JetStandardClasses.getReturnType(expectedType);
        }
        // Infer the return type from the body, using the expected return type (if any) as a hint.
        returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
    }
    JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
    functionDescriptor.setReturnType(safeReturnType);
    if (functionTypeExpected) {
        // An expected Unit return coerces the literal's type to return Unit regardless of the body's type.
        JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
        if (JetStandardClasses.isUnit(expectedReturnType)) {
            return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
        }
    }
    return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
/**
 * A parenthesized expression has exactly the type of its inner expression,
 * checked against the expected type of the whole.
 */
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal constant (integer, float, boolean, character, raw string, null)
 * by delegating to the compile-time constant resolver. On success the resolved
 * value is recorded on the trace; on failure an error is reported and a default
 * type for the literal kind is returned so inference can proceed.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> constant;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        constant = resolver.getIntegerValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        constant = resolver.getFloatValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        constant = resolver.getBooleanValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        constant = resolver.getCharValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        constant = resolver.getRawStringValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        constant = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (!(constant instanceof ErrorValue)) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
        return context.services.checkType(constant.getType(standardLibrary), expression, context);
    }
    // Resolution failed (e.g. out-of-range literal): report and fall back to the default type.
    context.trace.getErrorHandler().genericError(node, ((ErrorValue) constant).getMessage());
    return getDefaultType(elementType);
}
/**
 * Default type for a literal kind, used when compile-time constant resolution
 * fails so that type inference can still continue with a plausible type.
 *
 * @throws IllegalArgumentException for a node type that is not a literal constant
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
    // null gets the nullable Nothing type; every other kind maps to a standard-library type.
    if (constantType == JetNodeTypes.NULL) {
        return JetStandardClasses.getNullableNothingType();
    }
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return semanticServices.getStandardLibrary().getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return semanticServices.getStandardLibrary().getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return semanticServices.getStandardLibrary().getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return semanticServices.getStandardLibrary().getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return semanticServices.getStandardLibrary().getStringType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
/**
 * A throw expression never produces a value, so its type is Nothing.
 * The thrown operand is still type-checked for its side effects on the trace.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrown = expression.getThrownExpression();
    if (thrown != null) {
        // TODO : check that it inherits Throwable
        getType(thrown, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Types a return expression. Returns are forbidden in some contexts (expected
 * return type FORBIDDEN); otherwise the returned operand, if any, is checked
 * against the enclosing function's expected return type. A bare {@code return}
 * where a non-Unit value is expected is an error. The expression itself has
 * type Nothing.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression == null) {
        // Bare "return": only valid when the function returns Unit (or nothing is expected).
        boolean unitAcceptable = context.expectedReturnType == NO_EXPECTED_TYPE || JetStandardClasses.isUnit(context.expectedReturnType);
        if (!unitAcceptable) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    else {
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * A break expression transfers control and produces no value: its type is Nothing.
 */
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * A continue expression transfers control and produces no value: its type is Nothing.
 */
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * Whether the expression's type is "flexible", i.e. it is a numeric literal whose
 * type can adapt to an expected type (used by visitBinaryWithTypeRHSExpression).
 *
 * Improvement: the original allocated a fresh TokenSet on every call; the direct
 * identity comparisons below are behaviorally identical (IElementType instances
 * are singletons compared by reference) and allocation-free.
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) return false;
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
            || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Checks the left operand of a {@code :} / {@code as} / {@code as?} expression,
 * writing all binding data into the supplied temporary trace so the caller can
 * decide whether to commit the attempt.
 *
 * @param targetType   the resolved right-hand-side type
 * @param expectedType the expected type for typing the left operand (may be NO_EXPECTED_TYPE)
 * @return true if the operand typed successfully under this expected type
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
    TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
    JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
    if (actualType == null) return false;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.COLON) {
        // ":" is an upcast annotation: the operand must already be a subtype of the target.
        if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
            return false;
        }
        return true;
    }
    else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
        // Casts are always "successful" for typing purposes; only warn when they cannot succeed.
        checkForCastImpossibility(expression, actualType, targetType, context);
        return true;
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
        return false;
    }
}
/**
 * Emits warnings for a cast that is redundant (operand already of the target type,
 * or a pure upcast better written with ':') or that can never succeed (types are
 * unrelated). Silent when the cast is a genuine downcast.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    if (typeChecker.isSubtypeOf(targetType, actualType)) {
        // Downcast is possible; warn only if it is also a no-op.
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (typeChecker.isSubtypeOf(actualType, targetType)) {
        // Pure upcast: ':' expresses this without a runtime check.
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
/**
 * Types a tuple literal as TupleN of its entry types. When a tuple type is expected
 * from context, entry types are additionally checked (and possibly enriched) against
 * the expected type's arguments.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> entries = expression.getEntries();
    List<JetType> types = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        types.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
        List<JetType> enrichedTypes = context.services.checkArgumentTypes(types, entries, context.expectedType.getArguments(), context);
        // Identity comparison is deliberate: checkArgumentTypes returns the same list when nothing changed.
        if (enrichedTypes != types) {
            return JetStandardClasses.getTupleType(enrichedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(types), expression, context);
}
/**
 * Types a 'this' expression, possibly labeled ({@code this@label}) and possibly
 * qualified by a supertype ({@code this<Supertype>}). A label is resolved through
 * the scope's label declarations, falling back to control-flow data for function
 * literals. A supertype qualifier selects (and substitutes) the matching declared
 * supertype of the receiver's type.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
    JetType result = null;
    JetType thisType = null;
    String labelName = expression.getLabelName();
    if (labelName != null) {
        Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
        int size = declarationsByLabel.size();
        final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
        assert targetLabel != null;
        if (size == 1) {
            // Exactly one declaration carries this label: take its receiver/default type.
            DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
            if (declarationDescriptor instanceof ClassDescriptor) {
                ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
                thisType = classDescriptor.getDefaultType();
            }
            else if (declarationDescriptor instanceof FunctionDescriptor) {
                FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
                thisType = functionDescriptor.getReceiverType();
            }
            else {
                throw new UnsupportedOperationException(); // TODO
            }
            context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
        }
        else if (size == 0) {
            // This uses the info written by the control flow processor
            PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
            if (psiElement instanceof JetFunctionLiteralExpression) {
                DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
                if (declarationDescriptor instanceof FunctionDescriptor) {
                    thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
                    if (thisType == null) {
                        // No receiver on the literal: Nothing marks "no 'this' here" (reported below).
                        thisType = JetStandardClasses.getNothingType();
                    }
                    else {
                        context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
                        context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
                    }
                }
                else {
                    context.trace.getErrorHandler().unresolvedReference(targetLabel);
                }
            }
            else {
                context.trace.getErrorHandler().unresolvedReference(targetLabel);
            }
        }
        else {
            // More than one declaration carries this label.
            context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
        }
    }
    else {
        // Unlabeled 'this': use the enclosing scope's receiver.
        thisType = context.scope.getThisType();
        DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
        if (declarationDescriptorForUnqualifiedThis != null) {
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
        }
    }
    if (thisType != null) {
        if (JetStandardClasses.isNothing(thisType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
        }
        else {
            JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
            if (superTypeQualifier != null) {
                JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
                // Errors are reported by the parser
                if (superTypeElement instanceof JetUserType) {
                    JetUserType typeElement = (JetUserType) superTypeElement;
                    ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
                    if (classifierCandidate instanceof ClassDescriptor) {
                        ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
                        // Find the declared supertype whose constructor matches the qualifier,
                        // and substitute it with this type's arguments.
                        Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
                        TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
                        for (JetType declaredSupertype : supertypes) {
                            if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
                                result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
                                break;
                            }
                        }
                        if (result == null) {
                            context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
                        }
                    }
                }
            } else {
                result = thisType;
            }
            if (result != null) {
                context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
            }
        }
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Types a when-expression: each entry's conditions are checked against the subject,
 * per-entry dataflow info and scope extensions (from is-patterns) are computed, each
 * entry body is typed under them, and the result is the common supertype of all
 * body types. Multiple conditions in one entry OR their dataflow together.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO :change scope according to the bound value in the when header
    final JetExpression subjectExpression = expression.getSubjectExpression();
    final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
    final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
    // TODO : exhaustive patterns
    Set<JetType> expressionTypes = Sets.newHashSet();
    for (JetWhenEntry whenEntry : expression.getEntries()) {
        JetWhenCondition[] conditions = whenEntry.getConditions();
        DataFlowInfo newDataFlowInfo;
        WritableScope scopeToExtend;
        if (conditions.length == 1) {
            // Single condition: its pattern bindings may extend the body's scope.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
            newDataFlowInfo = context.dataFlowInfo;
            JetWhenCondition condition = conditions[0];
            if (condition != null) {
                newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
            }
        }
        else {
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
            newDataFlowInfo = null;
            // OR the dataflow of each alternative condition; any one of them may have matched.
            for (JetWhenCondition condition : conditions) {
                DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
                if (newDataFlowInfo == null) {
                    newDataFlowInfo = dataFlowInfo;
                }
                else {
                    newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
                }
            }
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            else {
                newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
            }
        }
        JetExpression bodyExpression = whenEntry.getExpression();
        if (bodyExpression != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
            if (type != null) {
                expressionTypes.add(type);
            }
        }
    }
    if (!expressionTypes.isEmpty()) {
        return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
    }
    else if (expression.getEntries().isEmpty()) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
    }
    return null;
}
/**
 * Checks one when-entry condition (call suffix, in-range, or is-pattern) against
 * the subject, extending the given scope with pattern bindings where applicable.
 *
 * @param subjectVariables variables the condition may refine via dataflow (is-checks)
 * @return dataflow info in effect when this condition has matched
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    // One-element array: the visitor below needs to write the result from inner classes.
    final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
            // JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
            if (callSuffixExpression != null) {
                // The suffix is resolved as a call on the subject; it must yield Boolean.
                // JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
                JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
                ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
                context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
            }
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression != null) {
                assert subjectExpression != null;
                checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
            }
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            JetPattern pattern = condition.getPattern();
            if (pattern != null) {
                // Pattern checks can refine dataflow (e.g. smart casts after an is-check).
                newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return newDataFlowInfo[0];
}
/**
 * Checks a pattern (type, tuple, decomposer, wildcard, expression, or binding
 * pattern) against the subject type, extending the given scope with any variables
 * the pattern binds.
 *
 * @return dataflow info in effect when the pattern has matched
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
    // One-element array: the visitor below needs to write the result from inner classes.
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    pattern.accept(new JetVisitorVoid() {
        @Override
        public void visitTypePattern(JetTypePattern typePattern) {
            JetTypeReference typeReference = typePattern.getTypeReference();
            if (typeReference != null) {
                JetType type = context.typeResolver.resolveType(context.scope, typeReference);
                checkTypeCompatibility(type, subjectType, typePattern);
                // A matched is-check narrows the subject variables to this type.
                result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
            }
        }
        @Override
        public void visitTuplePattern(JetTuplePattern pattern) {
            List<JetTuplePatternEntry> entries = pattern.getEntries();
            TypeConstructor typeConstructor = subjectType.getConstructor();
            if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
                    || typeConstructor.getParameters().size() != entries.size()) {
                context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
            }
            else {
                // Check each entry's pattern against the corresponding tuple component type,
                // AND-ing the resulting dataflow together.
                for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
                    JetTuplePatternEntry entry = entries.get(i);
                    JetType type = subjectType.getArguments().get(i).getType();
                    // TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
                    ASTNode nameLabelNode = entry.getNameLabelNode();
                    if (nameLabelNode != null) {
                        context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
                    }
                    JetPattern entryPattern = entry.getPattern();
                    if (entryPattern != null) {
                        result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
                    }
                }
            }
        }
        @Override
        public void visitDecomposerPattern(JetDecomposerPattern pattern) {
            JetExpression decomposerExpression = pattern.getDecomposerExpression();
            if (decomposerExpression != null) {
                // The decomposer is a selector on the subject; its result becomes the new subject.
                JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
                result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
            }
        }
        @Override
        public void visitWildcardPattern(JetWildcardPattern pattern) {
            // Nothing
        }
        @Override
        public void visitExpressionPattern(JetExpressionPattern pattern) {
            JetExpression expression = pattern.getExpression();
            if (expression != null) {
                JetType type = getType(expression, context.replaceScope(scopeToExtend));
                checkTypeCompatibility(type, subjectType, pattern);
            }
        }
        @Override
        public void visitBindingPattern(JetBindingPattern pattern) {
            JetProperty variableDeclaration = pattern.getVariableDeclaration();
            JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
            // Without an explicit type, the binding takes the subject's type.
            JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
            scopeToExtend.addVariableDescriptor(variableDescriptor);
            if (propertyTypeRef != null) {
                // An explicit binding type must widen (not narrow) the subject; narrowing is 'is'.
                if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
                    context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
                }
            }
            JetWhenCondition condition = pattern.getCondition();
            if (condition != null) {
                // A guard condition sees the new binding as an additional subject variable.
                int oldLength = subjectVariables.length;
                VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
                System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
                newSubjectVariables[oldLength] = variableDescriptor;
                result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
            }
        }
        // Reports when a pattern's static type can never match the subject (empty intersection).
        private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
            // TODO : Take auto casts into account?
            if (type == null) {
                return;
            }
            if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
                context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
        }
    });
    return result[0];
}
/**
 * Types a try/catch/finally expression. Catch bodies are typed in a scope extended
 * with the catch parameter. Without a finally block the result is the common
 * supertype of the try body and all catch bodies; with a finally block the catch
 * types are discarded and the result is the common supertype of the finally and
 * try types.
 * NOTE(review): including the finally block's type in the result (rather than only
 * the try/catch types) looks deliberate here but is unusual - confirm intent.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    JetExpression tryBlock = expression.getTryBlock();
    List<JetCatchClause> catchClauses = expression.getCatchClauses();
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    List<JetType> types = new ArrayList<JetType>();
    for (JetCatchClause catchClause : catchClauses) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchParameter != null) {
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
            if (catchBody != null) {
                // The catch body sees the exception variable.
                WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
                catchScope.addVariableDescriptor(variableDescriptor);
                JetType type = getType(catchBody, context.replaceScope(catchScope));
                if (type != null) {
                    types.add(type);
                }
            }
        }
    }
    if (finallyBlock != null) {
        types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
        JetType type = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (type != null) {
            types.add(type);
        }
    }
    JetType type = getType(tryBlock, context.replaceScope(context.scope));
    if (type != null) {
        types.add(type);
    }
    if (types.isEmpty()) {
        return null;
    }
    else {
        return semanticServices.getTypeChecker().commonSupertype(types);
    }
}
/**
 * Types an if-expression. Dataflow info is extracted from the condition separately
 * for the then-branch (condition true, with possible scope extension from patterns)
 * and the else-branch (condition false). A one-armed if has type Unit; a two-armed
 * if has the common supertype of its branches. When exactly one branch has type
 * Nothing (it jumps), the other branch's dataflow survives the expression.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression elseBranch = expression.getElse();
    JetExpression thenBranch = expression.getThen();
    WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
    DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
    DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    if (elseBranch == null) {
        if (thenBranch != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
            // A jumping then-branch means control after the if implies the condition was false.
            if (type != null && JetStandardClasses.isNothing(type)) {
                resultDataFlowInfo = elseInfo;
                // resultScope = elseScope;
            }
            return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
        }
        return null;
    }
    if (thenBranch == null) {
        JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
        // A jumping else-branch means control after the if implies the condition was true.
        if (type != null && JetStandardClasses.isNothing(type)) {
            resultDataFlowInfo = thenInfo;
            // resultScope = thenScope;
        }
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
    }
    JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
    JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
    JetType result;
    if (thenType == null) {
        result = elseType;
    }
    else if (elseType == null) {
        result = thenType;
    }
    else {
        result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
    }
    // If exactly one branch jumps (type Nothing), only the other branch's dataflow
    // can reach the code after the if.
    boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
    boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
    if (jumpInThen && !jumpInElse) {
        resultDataFlowInfo = elseInfo;
        // resultScope = elseScope;
    }
    else if (jumpInElse && !jumpInThen) {
        resultDataFlowInfo = thenInfo;
        // resultScope = thenScope;
    }
    return result;
}
/**
 * Derives the {@link DataFlowInfo} that holds when the given condition
 * evaluates to {@code conditionValue}, optionally adding pattern-bound
 * variables to {@code scopeToExtend}.
 * <p>
 * Handles: 'is' checks (looks up previously computed pattern info),
 * {@code &&}/{@code ||} (composes left/right info with AND/OR depending on
 * the assumed outcome), (in)equality comparisons against null or against
 * provably non-null expressions, negation, and parentheses. Any other
 * expression leaves the incoming dataflow info unchanged.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
// Single-element array so the anonymous visitor can write the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// Only a positive match conveys information: ('is' and assumed true) or ('!is' and assumed false).
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
// Pattern info was recorded earlier by visitIsExpression; may be null (handled below).
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// 'a && b' being true implies both sides; 'a || b' being false implies both sides false.
// Only in those cases may bound variables be exported to the scope.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
// true '&&' = conjunction of facts; false '&&' = disjunction (either side may have failed), and vice versa for '||'.
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so the simple name (the variable being compared) is on the left.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
// x == <non-null> being true implies x != null, hence the inverted flag.
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
// Symmetric case: <non-null> == y tells us about y.
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records 'variable == null' / 'variable != null' facts into result[0].
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
// '!cond' true under assumption == 'cond' false under assumption.
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent for dataflow purposes.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
// Guard against a null entry from patternsToDataFlowInfo.get(...) above.
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Type-checks an optional condition expression in the given scope and reports
 * an error if it is resolvable but not of type Boolean.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType actualType = getType(condition, context.replaceScope(scope));
    if (actualType == null || isBoolean(actualType)) {
        return;
    }
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + actualType);
}
/**
 * Infers the type of a while-loop. The loop itself always has type Unit;
 * the body is typed under the dataflow assumptions that hold when the
 * condition is true. If the loop contains no 'break', code after the loop
 * may assume the condition is false.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression loopCondition = expression.getCondition();
    checkCondition(context.scope, loopCondition, context);
    JetExpression loopBody = expression.getBody();
    if (loopBody != null) {
        // Variables bound inside the condition (e.g. 'is' patterns) are visible in the body.
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyInfo = loopCondition != null
                ? extractDataFlowInfoFromCondition(loopCondition, true, bodyScope, context)
                : context.dataFlowInfo;
        getTypeWithNewScopeAndDataFlowInfo(bodyScope, loopBody, bodyInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // Normal exit only happens when the condition turned false.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(loopCondition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Infers the type of a do-while loop (always Unit).
 * <p>
 * The body's declarations must be visible in the condition, so the body is
 * typed in a writable scope which then serves as the condition's scope.
 * A parameterless function-literal body is treated as a plain block
 * (recorded via BindingContext.BLOCK) rather than a closure.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
// '{ ... }' with no parameters: treat as an inline block sharing a scope with the condition.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
// NOTE(review): getBodyExpression() is dereferenced without a null check here — confirm it cannot be null for a parameterless literal.
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
// A literal WITH parameters is an ordinary closure value; its scope does not leak.
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
// Condition sees variables declared in the body (conditionScope).
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// resultScope = newWritableScopeImpl();
// Without 'break', the loop exits only when the condition is false.
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope chained to the given outer scope, owned by
 * that scope's containing declaration and reporting through the trace's
 * error handler.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Infers the type of a for-loop (always Unit).
 * <p>
 * Resolves the loop range, checks the iterable convention
 * (iterator()/hasNext/next) to obtain the element type, then declares the
 * loop parameter in a fresh scope: either checking its explicit type
 * annotation against the element type, or inferring it from the element type
 * (error type if the range yielded none). The body is typed in that scope.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
JetType expectedParameterType = null;
if (loopRangeType != null) {
// Element type produced by iterator().next(), or null if the convention fails.
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicit parameter type: declared type wins, but must accept the element type.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// No annotation: infer from the element type; fall back to an error type
// so the parameter is still declared and the body can be checked.
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Checks that the loop-range type satisfies the iteration convention:
 * an iterator() function whose result provides hasNext (as a function or a
 * property) and next(). Resolved members are recorded in the trace
 * (LOOP_RANGE_ITERATOR / LOOP_RANGE_HAS_NEXT / LOOP_RANGE_NEXT) for later use.
 *
 * @param type      resolved type of the loop-range expression
 * @param loopRange the range expression, used for error reporting and trace keys
 * @return the element type produced by iterator().next(), or null when the
 *         convention is not satisfied (errors have been reported)
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
    ASTNode reportErrorsOn = loopRange.getNode();
    OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
    if (iteratorResolutionResult.isSuccess()) {
        FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
        context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
        JetType iteratorType = iteratorFunction.getReturnType();
        FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
        boolean hasNextFunctionSupported = hasNextFunction != null;
        VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
        boolean hasNextPropertySupported = hasNextProperty != null;
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            // Fixed message: the second alternative is a property ('hasNext'), not a call, so no parentheses.
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        else {
            context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
        }
        OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
            context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
            return nextFunction.getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
/**
 * Resolves 'hasNext()' on the iterator type. Returns null only when no such
 * function exists at all; otherwise reports diagnostics (ambiguity, or a
 * non-Boolean return type on success) and returns the resolved descriptor.
 */
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult resolution = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
    if (resolution.isNothing()) {
        // Not found: caller falls back to the 'hasNext' property.
        return null;
    }
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
    }
    else {
        assert resolution.isSuccess();
        JetType returnType = resolution.getFunctionDescriptor().getReturnType();
        if (!isBoolean(returnType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
        }
    }
    return resolution.getFunctionDescriptor();
}
/**
 * Looks up a 'hasNext' property on the iterator type. Returns null when the
 * member is absent; otherwise reports diagnostics (unreadable property, or a
 * non-Boolean type) and returns the descriptor.
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
    if (hasNextProperty == null) {
        return null;
    }
    JetType propertyType = hasNextProperty.getOutType();
    if (propertyType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(propertyType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
    }
    return hasNextProperty;
}
/**
 * ASTNode-based variant of the iterable-convention check, using exact-signature
 * resolution through the call resolver. Unlike the JetExpression variant this
 * one records nothing in the trace; it only reports errors and returns the
 * element type.
 *
 * @param type           resolved type of the loop-range expression
 * @param reportErrorsOn node to attach diagnostics to
 * @return the element type produced by iterator().next(), or null when the
 *         convention is not satisfied (errors have been reported)
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            // Fixed message: the second alternative is a property ('hasNext'), not a call, so no parentheses.
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
/**
 * ASTNode-based variant: resolves 'hasNext()' via exact signature on the
 * iterator type. Returns false only when no such function exists; on
 * ambiguity or a non-Boolean return type an error is reported but the
 * function still counts as supported.
 */
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> resolution = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
    if (resolution.isNothing()) {
        return false;
    }
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
    }
    else {
        JetType returnType = resolution.getDescriptor().getReturnType();
        if (!isBoolean(returnType)) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
        }
    }
    return true;
}
/**
 * Hash-qualified expressions are not supported by this inferrer: always
 * reports an error on the operation token and yields no type.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Infers the type of a qualified expression (a.b, a?.b, a?b).
 * <p>
 * First attempts a "clean" resolution of the selector against the receiver's
 * declared type on a temporary trace. If that fails and the receiver is a
 * simple variable, retries against each autocast candidate type from the
 * dataflow info (most specific first), committing only the trace of the
 * attempt that succeeds. Safe access (?.) makes a non-Unit result nullable;
 * the '?' operator yields the nullable receiver type and requires a Boolean
 * selector.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
// Receiver may be a namespace, hence the namespace-aware visitor.
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// 'a?.b' is null when 'a' is null, so the result type becomes nullable (Unit stays Unit).
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
// Clean resolution worked: publish its bindings to the real trace.
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Retry under each autocast candidate type for the receiver variable.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
// Reversed so more recently established (more specific) facts are tried first.
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Failed attempt: discard its trace and start a fresh one for the next candidate.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// No autocast helped: commit the clean attempt so its diagnostics are reported.
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Verify '.' vs '?.' matches the (possibly enriched) receiver nullability.
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function descriptor ultimately invoked by a selector expression,
 * drilling through call, array-access, binary and qualified expressions down
 * to a reference whose target was recorded in the binding context. Never
 * returns null: falls back to a zero-argument error function.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    final FunctionDescriptor[] callee = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
            if (target instanceof FunctionDescriptor) {
                callee[0] = (FunctionDescriptor) target;
            }
        }
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression == null) return;
            calleeExpression.accept(this);
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            // Any other element kind is unexpected here.
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    return callee[0] != null ? callee[0] : ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
/**
 * Resolves a selector against a receiver type and returns the selector's type.
 * <p>
 * Dispatches on the selector's syntactic form: a call goes to the call
 * resolver; a simple name is tried first as a property (on a temporary trace,
 * committed only on success) and then as a namespace/class object; a nested
 * qualified expression is resolved recursively. Other forms are reported as
 * unsupported. Returns null when resolution fails.
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
if (selectorExpression instanceof JetCallExpression) {
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// Property attempt is speculative: bindings are committed only if it succeeds.
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Not a property: try the name as a namespace or class object member.
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
// a.(b.c): resolve the inner receiver first (no expected type), then recurse on its selector.
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
/**
 * Infers the type of a plain call (no explicit receiver) by delegating to the
 * call resolver, then validates the result against the expected type.
 */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    JetType resolvedType = context.services.callResolver.resolveCall(
            context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
/**
 * Infers the type of an 'is' expression (always Boolean). As a side effect,
 * checks the pattern against the subject's type and caches the resulting
 * dataflow info and pattern-bound variables for later use by
 * extractDataFlowInfoFromCondition.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression subject = expression.getLeftHandSide();
    JetType subjectType = getType(subject, context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (subjectType != null && pattern != null) {
        // Variables bound by the pattern are collected in a dedicated scope.
        WritableScopeImpl boundVariablesScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        DataFlowInfo patternInfo = checkPatternType(pattern, subjectType, boundVariablesScope, context,
                context.services.getVariableDescriptorFromSimpleName(subject, context));
        patternsToDataFlowInfo.put(pattern, patternInfo);
        patternsToBoundVariableLists.put(pattern, boundVariablesScope.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Infers the type of a unary expression.
 * <p>
 * Labels are passed through to the base expression. Other operators are
 * mapped to their convention function name and resolved as a call on the
 * base expression's type. For ++/-- the convention function must return
 * either Unit or a subtype of the receiver type (since the result is
 * assigned back); in the latter case the expression's type is the receiver
 * type and a variable reassignment is recorded.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
// Labeled expression: type is that of the labeled body, with the expected type threaded through as the return type.
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
// Map the operator token to its convention function name (e.g. '-' -> "minus").
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
// inc()/dec() returning Unit: the whole expression is Unit (no assignment happens).
result = JetStandardClasses.getUnitType();
}
else {
// Result is assigned back to the receiver, so it must be a subtype of the receiver's type.
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetSimpleNameExpression operationSign = expression.getOperationReference();
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
JetType result = null;
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.IDENTIFIER) {
String referencedName = operationSign.getReferencedName();
if (referencedName != null) {
result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
}
}
else if (binaryOperationNames.containsKey(operationType)) {
result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
}
else if (operationType == JetTokens.EQ) {
result = visitAssignment(expression, context);
}
else if (assignmentOperationNames.containsKey(operationType)) {
result = visitAssignmentOperation(expression, context);
}
else if (comparisonOperations.contains(operationType)) {
JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
if (compareToReturnType != null) {
TypeConstructor constructor = compareToReturnType.getConstructor();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
if (constructor.equals(intTypeConstructor)) {
result = standardLibrary.getBooleanType();
} else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
}
}
}
else if (equalsOperations.contains(operationType)) {
String name = "equals";
if (right != null) {
JetType leftType = getType(left, context.replaceScope(context.scope));
if (leftType != null) {
JetType rightType = getType(right, context.replaceScope(context.scope));
if (rightType != null) {
OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
context.scope, leftType, "equals",
Collections.singletonList(JetStandardClasses.getNullableAnyType()));
if (resolutionResult.isSuccess()) {
FunctionDescriptor equals = resolutionResult.getDescriptor();
context.trace.record(REFERENCE_TARGET, operationSign, equals);
if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
}
}
else {
if (resolutionResult.isAmbiguity()) {
StringBuilder stringBuilder = new StringBuilder();
for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
}
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
}
}
}
}
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
ensureNonemptyIntersectionOfOperandTypes(expression, context);
// TODO : Check comparison pointlessness
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (inOperations.contains(operationType)) {
if (right == null) {
result = ErrorUtils.createErrorType("No right argument"); // TODO
return null;
}
checkInExpression(operationSign, left, right, context);
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
JetType leftType = getType(left, context.replaceScope(context.scope));
WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
if (leftType != null && !isBoolean(leftType)) {
context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
}
if (rightType != null && !isBoolean(rightType)) {
context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
}
result = semanticServices.getStandardLibrary().getBooleanType();
}
else if (operationType == JetTokens.ELVIS) {
JetType leftType = getType(left, context.replaceScope(context.scope));
JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
if (leftType != null) {
if (!leftType.isNullable()) {
context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) is always returns the left operand of non-nullable type " + leftType);
}
if (rightType != null) {
context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
}
}
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Type-checks an 'in'/'!in' expression by resolving a call to the 'contains'
 * convention method on the right-hand operand and verifying that the resolved
 * function returns Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String containsName = "contains";
    // The right operand is the receiver of the 'contains' call; 'left' is its single argument.
    JetType containerType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor resolved = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            containsName, containerType, context.expectedType);
    JetType returnType = null;
    if (resolved != null) {
        returnType = resolved.getReturnType();
    }
    ensureBooleanResult(operationSign, containsName, returnType, context);
}
/**
 * Reports an error on '=='-like operators whose operand types have no common
 * (intersectable) type, i.e. the comparison could never relate both sides.
 * Silently does nothing when either operand's type is unknown.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression leftOperand = expression.getLeft();
    JetExpression rightOperand = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(leftOperand, context.replaceScope(context.scope));
    if (leftType == null || rightOperand == null) {
        return;
    }
    JetType rightType = getType(rightOperand, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    JetType intersection = TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes);
    if (intersection == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
// In ordinary expression position, compound assignments (+=, -=, ...) are
// rejected: assignments are statements, not expressions. Overridden in
// TypeInferrerVisitorWithWritableScope for block bodies.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Plain assignment (a = b) in expression position: rejected for the same reason
// as compound assignments; block bodies override this in the writable-scope visitor.
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
// Shared error reporting for assignments found in expression position.
// Always returns null: the assignment contributes no type.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
/**
 * Convenience overload: checks that {@code resultType} is Boolean, reporting
 * the offending function as 'name' (in quotes) in the diagnostic.
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String quotedSubject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, quotedSubject, context);
}
/**
 * Verifies that a result type, when known, is convertible to Boolean; reports
 * an error and returns false otherwise. A null type is treated as "unknown"
 * (an error was reported elsewhere) and passes the check.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null) {
        // Unknown type: do not cascade diagnostics.
        return true;
    }
    // TODO : Relax?
    if (isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True iff {@code type} is convertible to the standard library's Boolean type. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Types an indexing expression a[i, ...] by resolving it to a call of the
 * 'get' convention method on the array expression's type. Returns null when
 * either the receiver type or the 'get' call cannot be resolved.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Sub-expressions are typed without an expected type; only the overall result is checked.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType arrayType = getType(expression.getArrayExpression(), context.replaceScope(context.scope));
    if (arrayType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            arrayType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves a binary operation as a call of the convention method {@code name}
 * on the left operand's type, returning the call's return type, or null when
 * resolution fails (diagnostics go to the trace).
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    JetType receiverType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor conventionFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            receiverType,
            context.expectedType);
    // TODO : a better error message for infix calls like '1 + nullableVar' on a nullable receiver
    return conventionFunction == null ? null : conventionFunction.getReturnType();
}
// Declarations (class, fun, val, ...) are not expressions; only block bodies
// (handled by TypeInferrerVisitorWithWritableScope) may contain them.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
// In the base visitor a bare 'namespace' reference is not a value;
// TypeInferrerVisitorWithNamespaces overrides this to allow it.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
/**
 * Types a string template ("..." with embedded entries). While typing, an
 * attempt is made to fold the template into a compile-time String constant:
 * literal and valid escape entries are appended to a builder, while any
 * expression entry or illegal escape marks the template as non-constant via
 * the OUT_OF_RANGE sentinel. The expression's type is always String.
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
// value[0] set to OUT_OF_RANGE means "not a compile-time constant".
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
// Still type-check the embedded expression (records types/diagnostics on the trace).
getType(entryExpression, context.replaceScope(context.scope));
}
// An embedded expression makes the whole template non-constant.
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
// No expression entries and no illegal escapes: record the folded constant.
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
// Fallback for any PSI element no other visit method handles: report it and
// contribute no type.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
/**
 * Variant of the type-inferrer visitor for positions where namespace
 * references are legal as (sub-)expressions.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
@Override
public boolean isNamespacePosition() {
return true;
}
// A root 'namespace' reference is a valid expression here: give it the root namespace type.
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
}
// Extra lookup hook from the base visitor: additionally try to resolve the
// name as a namespace (presumably after ordinary lookup — confirm in base class).
@Override
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
result[0] = lookupNamespaceType(expression, referencedName, context);
return result[0] != null;
}
}
/**
 * Type-inferrer visitor used for the statements of a block body. Unlike the
 * base visitor it owns a writable scope: local declarations encountered while
 * typing (properties, functions, objects) are added to it, and assignments
 * are handled as statements rather than rejected.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// Accumulates declarations introduced by earlier statements of the block.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
// A local 'object' declaration: analyze it top-down, then expose it to the
// rest of the block as a property in the writable scope.
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null;
}
// A local 'val'/'var': reject receiver/getter/setter forms (not allowed on
// locals), type-check the initializer against the declared type, and register
// the variable in the block scope.
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
// The declared type becomes the expected type of the initializer.
JetType outType = propertyDescriptor.getOutType();
JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
// A local function: resolve its descriptor, add it to the block scope, and
// check its body against its declared return type.
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
// Any other declaration kind is unsupported in a block; reported via visitJetElement.
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
// Compound assignment (a += b, ...): first try the assign-form convention
// (plusAssign, ...) on a temporary trace; if it does not resolve, fall back to
// the plain binary counterpart (plus, ...) and mark the statement as a
// variable reassignment. Only the successful attempt's bindings are kept.
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
else {
// The assign-form convention resolved: commit its bindings to the real trace.
temporaryBindingTrace.commit();
}
return null;
}
// Plain assignment (a = b) as a statement: an indexed left-hand side
// (a[i] = b) resolves to the 'set' convention; otherwise the right-hand side
// is checked against the left-hand side's type. Assignments yield no value.
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
if (right != null) {
JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
// Resolves a[i, ...] = rhs to a call of the 'set' convention method on the
// array expression's type, recording the operation sign's resolution target.
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
if (receiverType == null) return null;
//
Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
// // TODO : nasty hack: effort is duplicated
// context.services.callResolver.resolveCallWithGivenName(
// scope,
// call,
// arrayAccessExpression,
// "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
call,
arrayAccessExpression,
"set", receiverType, NO_EXPECTED_TYPE);
if (functionDescriptor == null) return null;
context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
}
// Fallback for elements not valid as block statements: report and yield no type.
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel JetType marking contexts where an expected (return) type must never
// be consulted: every member throws if the sentinel is ever inspected, so it is
// only ever compared by identity.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel JetType meaning "no expected-type constraint". Like FORBIDDEN it is
// compared by identity (see the '!= NO_EXPECTED_TYPE' checks elsewhere in this
// file); all members throw because the sentinel must never be inspected as a type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Convention-method names for unary operators (a++ -> inc, !a -> not, ...).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Convention-method names for binary operators (a * b -> times, a..b -> rangeTo, ...).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Operators checked via the compareTo convention (<, >, <=, >=).
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// Operators checked via equals (==, !=).
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// Membership operators resolved via the 'contains' convention (in, !in).
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Convention-method names for compound assignments (a += b -> plusAssign, ...).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Maps each compound-assignment token to its plain binary counterpart (+= -> +),
// used as a fallback when the assign-form convention does not resolve.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Pattern-analysis caches; populated/read elsewhere in this class (not visible
// in this chunk — confirm usage in the pattern-matching code).
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
// Entry point: all inference for a given trace goes through a Services instance.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Binds a trace to fresh per-trace machinery: the compile-time constant
// resolver, the two inferrer visitors, and a call resolver sharing this inferrer.
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
// Creates a block-body visitor whose statements may add declarations to 'scope'.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Like {@code getType} but never returns null: when inference fails, a
 * synthetic error type naming the expression is substituted instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the type of an expression in the given scope against an expected type.
// Returns null when inference fails (diagnostics are reported on the trace).
// FORBIDDEN: no expected *return* type may be consulted from this entry point.
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Like getType, but namespace references are allowed as (sub-)expressions and
// no expected type constrains the result.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// Exposes the call resolver bound to this Services' trace.
public CallResolver getCallResolver() {
return callResolver;
}
/**
 * Validates '.' vs '?.' receiver access for a resolved call:
 * a plain '.' call on a nullable receiver is an error unless the callee itself
 * declares a nullable receiver type; a '?.' call on a namespace is an error,
 * and on a non-nullable receiver it is only an "unnecessary safe call" warning.
 * No-op when the receiver type or callee is unknown.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
// Namespaces are not values, so they are never treated as nullable receivers.
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
// A callee whose own declared receiver type is nullable accepts nullable receivers.
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
// Entry point without data-flow information: starts from the empty DataFlowInfo.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks a function body against the declared return type. When the function
 * has an expression body with no declared return type, there is nothing to
 * check against, so the expected type is relaxed to NO_EXPECTED_TYPE.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
JetType expectedReturnType = functionDescriptor.getReturnType();
if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
// Expression body without an explicit return type: the type is inferred, not checked.
expectedReturnType = NO_EXPECTED_TYPE;
}
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
// Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
// if (typeMap.isEmpty()) {
// return; // The function returns Nothing
// }
// for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
// JetType actualType = entry.castValue();
// JetElement element = entry.getKey();
// JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
// if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
// if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
// && element.getParent() instanceof JetReturnExpression) {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
// }
// }
// else {
// if (element == function) {
// JetExpression bodyExpression = function.getBodyExpression();
// assert bodyExpression != null;
// context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// else if (element instanceof JetExpression) {
// JetExpression expression = (JetExpression) element;
// context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
// }
// else {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// }
// }
// }
}
// Public entry with an explicit expected return type; the last expression of
// the body is not coerced.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
/**
 * Core return-type check for a function body: types the body, reports
 * unreachable code, and validates 'return' usage — expression bodies may not
 * contain 'return', and every exit path of a non-Unit block body must 'return'.
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// For a block body the body itself has no expected type — the declared return
// type constrains 'return' expressions instead; for an expression body the
// expected type applies to the body and an expected return type is forbidden.
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
// Function literals may coerce their last expression to Unit.
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
// Expression bodies may not contain explicit 'return'.
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Types a block expression: an empty block has type Unit; otherwise the
 * statements are typed in a fresh writable scope layered over the outer scope.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of every returned
 * expression's type; a body that never returns a value yields Nothing.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        // Nothing is ever returned: the function cannot complete normally.
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Infers the function body, then maps each returned expression to the type cached on the
 * trace during that inference, and each Unit-returning element to the Unit type.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Side effect: records EXPRESSION_TYPE entries that are read back below.
// NOTE(review): FORBIDDEN is passed as the expected return type here — confirm intent.
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
// Flow analysis splits return points into explicit returned expressions and Unit-returning elements.
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// A returned expression is used as a value, not a statement.
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
/**
 * Types every statement of a block in order, threading data-flow info from one statement
 * to the next, and returns the type of the last statement.
 * For the last statement, when Unit is expected and coercion-to-Unit is allowed, the
 * statement is first checked against Unit on a temporary trace; on a type mismatch it is
 * re-checked with no expected type, and whichever temporary trace corresponds to the
 * successful (or least bad) attempt is committed to the real trace.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
// Only the last statement is checked against the block's expected type.
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// First attempt mismatched: retry with no expected type on a second temporary trace.
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Both attempts failed: commit the Unit-expecting attempt (its diagnostics win).
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Thread the data-flow info produced by this statement into the context for the next one.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Wraps a trace so that a typeMismatch reported on exactly {@code expressionToWatch} sets
 * {@code mismatchFound[0]} instead of being forwarded alone; other diagnostics pass through
 * via the composite handler's first (super) component.
 */
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
// Only flag mismatches on the watched expression itself, not its sub-expressions.
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    // Refine the type via data-flow info when the expression resolves to a simple variable.
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    // Report a mismatch only when a real expectation exists and the actual type violates it;
    // the (possibly null) actual type is always passed through.
    boolean hasExpectation = context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE;
    if (expressionType != null && hasExpectation
            && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
        context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but for a simple-variable expression it first tries to satisfy the
 * expectation via data-flow knowledge (possible smart-cast types, then the non-null out
 * type). If a suitable data-flow type exists, an auto-cast is recorded and the expected
 * type is returned; otherwise a type mismatch is reported.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
// Fast path: nothing to check, or the plain type already satisfies the expectation.
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established data-flow types are tried first (hence the reverse).
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the variable's non-null out type.
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// variableDescriptor is non-null here: appropriateTypeFound is only set inside the null check above.
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    // An automatic cast is only sound for immutable variables; flag mutable ones.
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    // Only check element-wise when all three lists line up; otherwise pass types through unchanged.
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        TypeInferenceContext argumentContext = context.replaceExpectedType(expectedArgumentTypes.get(i).getType());
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), argumentContext));
    }
    return checked;
}
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    // Look through a type ascription "e : T" to the underlying expression.
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typeRHS = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typeRHS.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typeRHS.getLeft(), context);
        }
    }
    // A simple name resolving to a variable yields that variable's descriptor.
    if (receiverExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
        DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
        if (target instanceof VariableDescriptor) {
            return (VariableDescriptor) target;
        }
    }
    return null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// How the last expression of a block may be coerced toward the expected type.
private enum CoercionStrategy {
    NO_COERCION,
    COERCION_TO_UNIT
}
/**
 * Factory for inference contexts; use this instead of the deprecated constructor.
 */
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of everything one inference step needs: the trace to write to, the
 * resolvers derived from it, the lexical scope, data-flow info, and the expected
 * expression/return types. Create instances via the newContext(...) factory; the
 * replaceXxx methods derive modified copies, returning {@code this} when nothing changes.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    public final Services services;
    public final DataFlowInfo dataFlowInfo;
    public final JetType expectedType;
    public final JetType expectedReturnType;
    @Deprecated // Only factory methods
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }
    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        // null means "no expectation".
        if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
        if (expectedType == newExpectedType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        if (expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
    }
    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        if (newTrace == trace) return this;
        return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
    }
    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        if (newExpectedType == expectedType && newTrace == trace) return this;
        // FIX: previously invoked the 5-arg constructor with a stray 'preferBlock' argument
        // (6 args, and no such name exists in this class), which did not compile.
        return newContext(newTrace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }
    @NotNull
    public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
        if (newScope == scope) return this;
        return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
    }
    @NotNull
    public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
        if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recent statement analyzed; cleared by resetResult().
protected DataFlowInfo resultDataFlowInfo;
/** Returns the data-flow info accumulated by the last getType() call, or null if none. */
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Central entry point: computes (or returns the cached) type of an expression.
 * Caches results under EXPRESSION_TYPE, marks expressions PROCESSED, forces DeferredType,
 * and flags code dominated by a non-null Nothing-typed expression as unreachable.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
// Already analyzed: return the cached type.
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
// Force lazily computed types before caching.
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-nullable Nothing never completes normally, so what follows is dead.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
// Recursive type dependency (e.g. a value defined in terms of itself).
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
    // Re-infer under a different scope and data-flow state, keeping the context's expectations.
    TypeInferenceContext rebased = newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rebased);
}
// Clears per-statement state so this visitor can be reused for the next statement.
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    // Everything dominated by a non-terminating expression is dead; report once per root expression.
    List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
// TODO : other members
// TODO : type substitutions???
// Backing-field references ($field) are resolved against the scope's field table;
// everything else is delegated to the generic selector resolution path.
String referencedName = expression.getReferencedName();
if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
&& referencedName != null) {
PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
if (property == null) {
context.trace.getErrorHandler().unresolvedReference(expression);
}
else {
context.trace.record(REFERENCE_TARGET, expression, property);
return context.services.checkEnrichedType(property.getOutType(), expression, context);
}
}
else {
return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
// assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
// if (referencedName != null) {
// VariableDescriptor variable = context.scope.getVariable(referencedName);
// if (variable != null) {
// context.trace.record(REFERENCE_TARGET, expression, variable);
// JetType result = variable.getOutType();
// if (result == null) {
// context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
// }
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// return lookupNamespaceOrClassObject(expression, referencedName, context);
// ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
// if (classifier != null) {
// JetType classObjectType = classifier.getClassObjectType();
// JetType result = null;
// if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
// result = classObjectType;
// }
// else {
// context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
// }
// context.trace.record(REFERENCE_TARGET, expression, classifier);
// return context.services.checkEnrichedType(result, expression, context);
// }
// else {
// JetType[] result = new JetType[1];
// if (furtherNameLookup(expression, referencedName, result, context)) {
// return context.services.checkEnrichedType(result[0], expression, context);
// }
//
// }
// }
// context.trace.getErrorHandler().unresolvedReference(expression);
// }
}
// Unresolved field reference falls through to here.
return null;
}
/**
 * Resolves a simple name first as a classifier (yielding its class-object type when usable
 * in this position), and otherwise via furtherNameLookup (e.g. namespaces).
 * Returns null when nothing matched.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
// A class object is usable as a value only in namespace position or when declared as one.
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
// The reference target is recorded even on error, so navigation still works.
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
// Overridden by subclasses that visit names in namespace position; false by default.
public boolean isNamespacePosition() {
return false;
}
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    // A bare namespace name is not a value; report it, but treat the lookup as handled.
    NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
    if (namespaceType == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    // Resolve the name as a namespace, recording the reference target on success.
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace == null) {
        return null;
    }
    context.trace.record(REFERENCE_TARGET, expression, namespace);
    return namespace.getNamespaceType();
}
/**
 * Types an object literal by running the top-down analyzer on its declaration. A record
 * handler intercepts the declaration-to-descriptor binding and publishes the object's
 * default type as a DeferredType, so recursive references inside the object can resolve
 * before analysis finishes.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the binding of this literal's own object declaration.
if (declaration == expression.getObjectDeclaration()) {
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Cache eagerly so recursive uses see the (deferred) type; see getType()'s manual-cache note.
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Listen on every declaration-to-descriptor slice.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Types a function literal: resolves the receiver, parameters (inferring their types from
 * an expected function type when omitted) and return type (declared, expected, or inferred
 * from the body), builds its FunctionDescriptor, and returns the corresponding function type.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No declared receiver: inherit the enclosing 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
List<ValueParameterDescriptor> valueParameters = null;
// An expected function type supplies parameter/receiver/return types for omitted declarations.
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
// No declared type: take it from the expected function type, or report.
if (valueParameters != null) {
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
// Choose the receiver type for the descriptor: declared > expected > none.
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Declared return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
// Otherwise infer it from the body, seeded with the expected return type if any.
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
// When Unit is the expected return type, advertise a Unit-returning function type.
if (functionTypeExpected) {
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    // The type of (e) is the type of e, checked against the expectation on the parenthesized node.
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Types a literal constant by dispatching on its node type to the compile-time constant
 * resolver (which may adapt the value to the expected type). ErrorValue results are
 * reported and fall back to the node type's default type; otherwise the value is recorded
 * as a compile-time constant.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
ASTNode node = expression.getNode();
IElementType elementType = node.getElementType();
String text = node.getText();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
CompileTimeConstantResolver compileTimeConstantResolver = context.services.compileTimeConstantResolver;
CompileTimeConstant<?> value;
if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
value = compileTimeConstantResolver.getIntegerValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
value = compileTimeConstantResolver.getFloatValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
value = compileTimeConstantResolver.getBooleanValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
value = compileTimeConstantResolver.getCharValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
value = compileTimeConstantResolver.getRawStringValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.NULL) {
value = compileTimeConstantResolver.getNullValue(context.expectedType);
}
else {
throw new IllegalArgumentException("Unsupported constant: " + expression);
}
if (value instanceof ErrorValue) {
ErrorValue errorValue = (ErrorValue) value;
context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
// Recover with the literal kind's default type so inference can proceed.
return getDefaultType(elementType);
}
else {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
return context.services.checkType(value.getType(standardLibrary), expression, context);
}
}
/**
 * Maps a literal node type to its default type (Int, Double, Boolean, Char, String, or
 * nullable Nothing for null); used to recover from unparsable constants.
 * @throws IllegalArgumentException for any other node type
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
return semanticServices.getStandardLibrary().getIntType();
}
else if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
return semanticServices.getStandardLibrary().getDoubleType();
}
else if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
return semanticServices.getStandardLibrary().getBooleanType();
}
else if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
return semanticServices.getStandardLibrary().getCharType();
}
else if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
return semanticServices.getStandardLibrary().getStringType();
}
else if (constantType == JetNodeTypes.NULL) {
return JetStandardClasses.getNullableNothingType();
}
else {
throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
}
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    // A 'throw' never yields a value, so its type is Nothing.
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // Infer the thrown expression for its trace side effects; the result itself is
        // deliberately unused (was previously bound to a dead local).
        getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
        // TODO : check that it inherits Throwable
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    // 'return' is illegal in contexts with a FORBIDDEN expected return type.
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Check the returned value against the function's expected return type.
        // (Removed the dead local 'returnedType' that was initialized to Unit but never used.)
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
        // Bare 'return' is only valid when the function returns Unit (or nothing is expected).
        context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
    }
    // A return expression itself never produces a value at its own position.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// 'break' transfers control and never yields a value, so its type is Nothing.
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
// 'continue' transfers control and never yields a value, so its type is Nothing.
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    // Integer and floating-point literals may adapt their type to the surrounding context.
    if (expression == null) {
        return false;
    }
    IElementType elementType = expression.getNode().getElementType();
    return TokenSet.create(JetNodeTypes.INTEGER_CONSTANT, JetNodeTypes.FLOAT_CONSTANT).contains(elementType);
}
/**
 * Type-checks the LHS of "e : T" / "e as T" / "e as? T" into the given temporary trace and
 * validates the operation itself.
 *
 * @param targetType     the type written on the RHS
 * @param expectedType   expected type to use while inferring the LHS; pass NO_EXPECTED_TYPE to disable
 * @param temporaryTrace diagnostics sink; the caller decides whether to commit it to the real trace
 * @return true when the check succeeded (":" with a conforming LHS, or any "as"/"as?")
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
// If the LHS type could not be inferred, there is nothing to check against.
if (actualType == null) return false;
JetSimpleNameExpression operationSign = expression.getOperationSign();
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.COLON) {
// ":" is an upcast/ascription — the LHS must already be a subtype of the target.
if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
return false;
}
return true;
}
else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
// Casts are always "successful" here; impossibility/redundancy only produces warnings.
checkForCastImpossibility(expression, actualType, targetType, context);
return true;
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
return false;
}
}
/**
 * Issues warnings for casts that are redundant or can never succeed.
 * No warning is produced for genuine downcasts (target is a proper subtype of the actual type).
 *
 * @param actualType inferred type of the LHS; null disables the check
 * @param targetType type written after "as"/"as?"
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
JetTypeChecker typeChecker = semanticServices.getTypeChecker();
if (!typeChecker.isSubtypeOf(targetType, actualType)) {
// Not a downcast. Either it is an upcast (use ":" instead) or the types are unrelated.
if (typeChecker.isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
}
else {
// See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
}
}
else {
// target <: actual AND actual <: target means the types are equivalent — cast is a no-op.
if (typeChecker.isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
}
}
}
/**
 * Infers the type of a tuple literal: the tuple type built from each entry's inferred type,
 * possibly refined component-wise against an expected tuple type.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    List<JetExpression> tupleEntries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>();
    for (JetExpression tupleEntry : tupleEntries) {
        entryTypes.add(context.services.safeGetType(context.scope, tupleEntry, NO_EXPECTED_TYPE)); // TODO
    }
    // If a tuple type is expected, let the argument checker refine the entry types against the
    // expected components; a distinct list signals that refinement actually happened.
    boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
    if (tupleExpected) {
        List<JetType> refinedTypes = context.services.checkArgumentTypes(entryTypes, tupleEntries, context.expectedType.getArguments(), context);
        if (refinedTypes != entryTypes) {
            return JetStandardClasses.getTupleType(refinedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
/**
 * Infers the type of a 'this' expression, in three flavors:
 * plain 'this' (enclosing receiver from the scope), labeled 'this@label' (resolved through the
 * scope's label declarations, falling back to control-flow info for function literals), and
 * qualified 'this&lt;SuperType&gt;' (substituted supertype of the receiver).
 * Records REFERENCE_TARGET / EXPRESSION_TYPE bindings into the trace as it resolves.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
// Exactly one declaration carries this label: class -> its default type,
// function -> its receiver type.
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// No receiver on the labeled function literal: mark as Nothing so the
// "'this' is not defined" error fires below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
// More than one declaration matches the label.
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unlabeled 'this': take the receiver type directly from the enclosing scope.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
// 'this<SuperType>': find the declared supertype with a matching constructor and
// substitute the receiver's type arguments into it.
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Infers the type of a 'when' expression: type-checks the subject, derives per-entry data-flow
 * info from the conditions (AND across a single condition, OR across comma-separated ones),
 * types every entry body in an accordingly-extended scope, and returns the common supertype
 * of all body types (or null if none could be inferred).
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// Single condition: pattern bindings may extend the scope of this entry's body.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple comma-separated conditions: bindings are entry-local only, and the
// resulting data-flow info is the OR of the branches, ANDed with the incoming info.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Type-checks one 'when' condition and returns the data-flow info that holds when it matches.
 * Dispatches on the condition kind via an anonymous visitor; only is-pattern conditions can
 * refine data-flow info (through {@link #checkPatternType}) and extend {@code scopeToExtend}
 * with pattern-bound variables.
 *
 * @param subjectExpression the 'when' subject, or null for condition-only checks
 * @param subjectVariables  variables the subject is known to denote, for instanceof refinement
 * @return refined data-flow info; defaults to {@code context.dataFlowInfo} when unchanged
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor below can write the result.
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
// Condition of the form ".foo(...)" / "?.foo(...)" applied to the subject.
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
// JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
// Condition of the form "in range" / "!in range".
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
// "is Pattern" — the only condition kind that refines data-flow info.
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Type-checks a pattern against the subject type and returns the resulting data-flow info.
 * Handles type patterns (instanceof refinement), tuple patterns (recursive, component-wise),
 * decomposer patterns, wildcards, expression patterns, and binding patterns (which add a
 * variable to {@code scopeToExtend} and may carry a nested guard condition).
 *
 * @param subjectVariables variables known to hold the subject value, refined on type patterns
 * @return data-flow info after a successful match; defaults to {@code context.dataFlowInfo}
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor below can write the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// "is T": record that the subject variables are instances of T downstream.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: arity must match the subject's tuple type; entries checked recursively.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
// Data-flow info from all components is ANDed together.
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: type the decomposer call on the subject, then match its
// argument list against the decomposer's return type.
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Constant/expression pattern: type it in the extended scope and check compatibility.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// "val x [: T] [when ...]": declare x in the extended scope; an explicit type must be a
// supertype of the subject, and a nested condition sees x as an extra subject variable.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the pattern type and subject type have no common values.
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Infers the type of a try/catch/finally expression. Bodies are type-checked in source-relevant
 * order: every catch body first, then the finally section, then the try block. The result is
 * the common supertype of the collected branch types (catch types are discarded when a finally
 * section is present), or null if no branch produced a type.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    JetExpression tryBlock = expression.getTryBlock();
    List<JetCatchClause> catchClauses = expression.getCatchClauses();
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    List<JetType> branchTypes = new ArrayList<JetType>();
    for (JetCatchClause catchClause : catchClauses) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchParameter == null) {
            continue;
        }
        // The catch parameter is resolved even when the body is absent, so it gets a descriptor.
        VariableDescriptor catchParameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
        if (catchBody != null) {
            WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
            catchScope.addVariableDescriptor(catchParameterDescriptor);
            JetType catchType = getType(catchBody, context.replaceScope(catchScope));
            if (catchType != null) {
                branchTypes.add(catchType);
            }
        }
    }
    if (finallyBlock != null) {
        // Catch types are irrelevant once a finally exists, but the bodies above still had to
        // be type-checked for diagnostics.
        branchTypes.clear();
        JetType finallyType = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }
    JetType tryType = getType(tryBlock, context.replaceScope(context.scope));
    if (tryType != null) {
        branchTypes.add(tryType);
    }
    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Infers the type of an if-expression. The condition contributes data-flow info to each branch
 * (positive to 'then', negative to 'else'); a one-armed 'if' has type Unit. When exactly one
 * branch is Nothing (a jump), the other branch's data-flow info is propagated to the statements
 * following the 'if' via the {@code resultDataFlowInfo} field.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// The 'then' scope may receive variables bound by is-patterns in the condition.
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
// 'if' without 'else' is a statement of type Unit.
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// 'then' always jumps, so only the negative condition info survives the 'if'.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Two-armed 'if': each branch is checked against the original expected type.
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
// If exactly one branch jumps, the other branch's info holds after the 'if'.
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Derives the data-flow info that holds when {@code condition} evaluates to
 * {@code conditionValue}. Understands is-expressions (pattern info written earlier by
 * visitIsExpression), short-circuit &amp;&amp;/|| (composed with AND/OR according to the
 * branch taken), equality comparisons against null or known-non-null values, negation (!),
 * and parenthesized conditions.
 *
 * @param scopeToExtend scope to receive pattern-bound variables, or null if none should escape
 * @return refined info; never null (falls back to {@code context.dataFlowInfo})
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
// One-element array so the anonymous visitor below can write the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// "x is P" contributes info only when the match is known to succeed on this branch.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// Bindings escape a "&&" only on the true branch and a "||" only on the false branch,
// because only then are both operands known to have been evaluated.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
// De Morgan: a true "&&" ANDs both sides; a false "&&" only guarantees one side, so OR.
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so the simple-name operand (the trackable variable) is on the left.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
// "x == nonNull" implies x is non-null, hence the inverted flag.
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records "variable is (not) null" knowledge for ==/=== and !=/!== comparisons.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// "!c" is c analyzed with the opposite condition value.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// "(c)" is transparent: analyze the inner expression in place.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
// patternsToDataFlowInfo may contain null for unprocessed patterns.
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Type-checks a loop/if condition in the given scope and reports an error unless its
 * inferred type is Boolean. A null condition (syntax error) is ignored here; the parser
 * has already reported it.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType conditionType = getType(condition, context.replaceScope(scope));
    // Only complain when a type was actually inferred and it is not Boolean.
    if (conditionType == null || isBoolean(conditionType)) {
        return;
    }
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Type-checks a 'while' loop (always of type Unit). The body sees the positive condition's
 * data-flow info and any pattern-bound variables; if the loop has no 'break', the negative
 * condition info is propagated to the following statements via {@code resultDataFlowInfo}.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression body = expression.getBody();
if (body != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, body, conditionInfo, context);
}
if (!flowInformationProvider.isBreakable(expression)) {
// No 'break': the loop can only be left when the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Type-checks a 'do..while' loop (always of type Unit). Declarations made in the body remain
 * visible to the condition, so the body is checked with a writable scope that the condition
 * check then reuses. A parameterless function-literal body is treated as a plain block.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
// "do { ... } while" with a braces-only body: treat as a block so its declarations
// are visible in the condition, and record it as a block (not a closure).
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
// Single-expression body: still check it in a writable scope shared with the condition.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// No 'break': the loop only exits when the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable child scope of {@code scope}, owned by the same containing
 * declaration and reporting errors through the trace's error handler.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Type-checks a 'for' loop (always of type Unit): types the loop range, derives the element
 * type via the iterator convention, resolves the loop parameter (checking an explicit type
 * annotation against the element type), and checks the body in a scope containing the
 * loop parameter.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
JetType expectedParameterType = null;
if (loopRangeType != null) {
// Element type produced by iterator().next(), per the iterable convention.
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicit parameter type: the iterated element type must be assignable to it.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// No annotation: infer the parameter type from the range (error type as a fallback).
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Validates the iterable convention on a for-loop range type: the type must provide
 * iterator(), whose result must provide exactly one of hasNext()/hasNext and a next() method.
 * Resolved members are recorded into the trace (LOOP_RANGE_ITERATOR / _HAS_NEXT / _NEXT).
 *
 * @return the element type (return type of iterator().next()), or null on any failure
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
ASTNode reportErrorsOn = loopRange.getNode();
OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
if (iteratorResolutionResult.isSuccess()) {
FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
JetType iteratorType = iteratorFunction.getReturnType();
// hasNext may be either a function or a property — having both is ambiguous,
// having neither is an error (skipped for error types to avoid noise).
FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
boolean hasNextFunctionSupported = hasNextFunction != null;
VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
boolean hasNextPropertySupported = hasNextProperty != null;
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
else {
context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
}
OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
// The element type of the loop is next()'s return type.
FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
return nextFunction.getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
/**
 * Looks for a 'hasNext()' function on the iterator type of a for-loop range.
 * Reports an error if the function is ambiguous or does not return Boolean.
 *
 * @return the resolved descriptor, or null when no 'hasNext()' function exists
 *         (the caller may then fall back to a 'hasNext' property)
 */
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult resolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
    if (resolutionResult.isNothing()) {
        // No hasNext() function at all — signal the caller to try the property form.
        return null;
    }
    if (resolutionResult.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
    }
    else {
        assert resolutionResult.isSuccess();
        JetType returnType = resolutionResult.getFunctionDescriptor().getReturnType();
        if (!isBoolean(returnType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
        }
    }
    return resolutionResult.getFunctionDescriptor();
}
/**
 * Looks for a 'hasNext' property on the iterator type of a for-loop range.
 * Reports an error if the property is write-only or is not of type Boolean.
 *
 * @return the resolved property descriptor, or null when no such property exists
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) {
        return null;
    }
    JetType propertyType = property.getOutType();
    if (propertyType == null) {
        // TODO : accessibility
        // No out-type means the property cannot be read.
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(propertyType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
    }
    return property;
}
/**
 * Variant of the iterable-convention check that resolves 'iterator()' / 'next()'
 * through the call resolver's exact-signature lookup (rather than
 * resolveNoParametersFunction) and reports errors on the given AST node.
 *
 * @return the element type (return type of iterator().next()), or null if the
 *         convention is not satisfied
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
    if (iteratorResolutionResult.isSuccess()) {
        JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
        boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
        boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
        if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
            // TODO : overload resolution rules impose priorities here???
            context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
        }
        else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
        }
        OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
        if (nextResolutionResult.isAmbiguity()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
        } else if (nextResolutionResult.isNothing()) {
            context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
        } else {
            return nextResolutionResult.getDescriptor().getReturnType();
        }
    }
    else {
        String errorMessage = "For-loop range must have an iterator() method";
        if (iteratorResolutionResult.isAmbiguity()) {
            errorMessage = "Method 'iterator()' is ambiguous for this expression";
        }
        context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
    }
    return null;
}
/**
 * Exact-signature variant: checks whether the iterator type has a no-argument
 * 'hasNext()' function, reporting errors for ambiguity or a non-Boolean return.
 *
 * @return true when some 'hasNext()' candidate exists (even an ambiguous or
 *         wrongly-typed one — the error has already been reported), false otherwise
 */
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
    OverloadResolutionResult<FunctionDescriptor> resolution = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
    if (resolution.isNothing()) {
        return false;
    }
    if (resolution.isAmbiguity()) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
        return true;
    }
    JetType returnType = resolution.getDescriptor().getReturnType();
    if (!isBoolean(returnType)) {
        context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + returnType);
    }
    return true;
}
/**
 * Hash-qualified expressions ('a#b') are not supported by this inferrer;
 * always reports an error and yields no type.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getOperationTokenNode(), "Unsupported");
    return null;
}
/**
 * Types a qualified expression: 'a.b', 'a?.b' or 'a?'.
 *
 * Resolution is first attempted on a temporary ("clean") trace without
 * autocasts; if that fails, each autocast candidate type for the receiver
 * is tried in turn, and only the successful attempt's trace is committed.
 * For '?.' the selector type is made nullable (unless it is Unit); for '?'
 * the result is the nullable receiver type and the selector must be Boolean.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    // The receiver is typed with the namespace-aware visitor: 'a' may be a package.
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (selectorExpression == null) return null;
    if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
    // Clean resolution: no autocasts
    TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' may yield null when 'a' is null, so the result is nullable (Unit stays Unit).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        // Clean resolution succeeded; publish its bindings into the real trace.
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Clean resolution failed: retry against each autocast candidate type of the receiver.
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            // Reversed so the most recently established (most specific) candidate is tried first.
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.commit();
                    somethingFound = true;
                    break;
                }
                else {
                    // Discard the failed attempt's bindings; start fresh for the next candidate.
                    autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // No autocast helped: commit the clean attempt so its diagnostics are reported.
            cleanResolutionTrace.commit();
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            // Verify that '.' is not used on a nullable receiver (suggesting '?.' instead).
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function invoked by a selector expression (the 'b' part of 'a.b(...)'),
 * drilling through call, array-access, binary and qualified expressions down to the
 * underlying reference. Used for null-safety diagnostics on the receiver.
 *
 * @return the resolved FunctionDescriptor, or a synthetic error function if none
 *         can be determined (never null)
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    // One-element array works around Java's effectively-final capture rule for the visitor below.
    final FunctionDescriptor[] result = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // 'f(...)' — the callee is whatever 'f' refers to.
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // Look up the previously resolved target of the reference.
            DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
            if (declarationDescriptor instanceof FunctionDescriptor) {
                result[0] = (FunctionDescriptor) declarationDescriptor;
            }
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            // 'a[i]' — descend into the array part.
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            // 'a op b' — descend into the left operand.
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            // 'a.b' — descend into the receiver.
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (result[0] == null) {
        // Fall back to a synthetic error function so callers never see null.
        result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return result[0];
}
/**
 * Computes the type of a selector ('b' in 'a.b') against the given receiver type.
 * Call selectors go to the call resolver; simple names are first tried as
 * properties (committed only on success), then as namespaces/class objects;
 * nested qualified selectors are resolved recursively.
 *
 * @param receiverType the receiver's type, or null when there is no receiver
 * @return the selector's type, or null on failure (diagnostics already reported)
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
    if (selectorExpression instanceof JetCallExpression) {
        return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
    }
    else if (selectorExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
        // Try the name as a property on a temporary trace; commit only if it resolves.
        TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
        VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
        if (variableDescriptor != null) {
            temporaryTrace.commit();
            return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
        }
        // Not a property: it may still be a namespace or a class object reference.
        TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
        JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
        if (jetType == null) {
            context.trace.getErrorHandler().unresolvedReference(nameExpression);
        }
        return context.services.checkEnrichedType(jetType, nameExpression, context);
        // JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
        // return getType(selectorExpression, context.replaceScope(scope));
    }
    else if (selectorExpression instanceof JetQualifiedExpression) {
        // 'a.(b.c)' — resolve the inner receiver first, then recurse on the inner selector.
        JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
        JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
        JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
        if (newReceiverType != null && newSelectorExpression != null) {
            return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
        }
    }
    else {
        // TODO : not a simple name -> resolve in scope, expect property type or a function type
        context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    }
    return null;
}
/**
 * Types a plain call 'f(...)': resolution is delegated entirely to the call
 * resolver (with no receiver), and the resulting type is checked against the
 * expected type.
 */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    return context.services.checkType(
            context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType),
            expression,
            context);
}
/**
 * Types an 'is' expression: the subject is typed without an expected type, the
 * pattern is checked against it, and the data-flow facts plus bound variables
 * implied by the pattern are remembered for later branches. 'is' itself is
 * always Boolean.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression subject = expression.getLeftHandSide();
    JetType subjectType = getType(subject, context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (subjectType != null && pattern != null) {
        WritableScopeImpl extendedScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        VariableDescriptor subjectVariable = context.services.getVariableDescriptorFromSimpleName(subject, context);
        DataFlowInfo patternFlowInfo = checkPatternType(pattern, subjectType, extendedScope, context, subjectVariable);
        // Stash what this pattern implies so surrounding 'if'/'when' branches can use it.
        patternsToDataFlowInfo.put(pattern, patternFlowInfo);
        patternsToBoundVariableLists.put(pattern, extendedScope.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types a unary expression. Labels pass the expected type straight through;
 * other prefix operators are resolved via their conventional method name on
 * the operand type. For '++'/'--' the resolved method must return either Unit
 * (whole expression is Unit) or a subtype of the operand type (variable is
 * reassigned, expression keeps the operand type).
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    IElementType operationType = operationSign.getReferencedNameElementType();
    // Map the operator token to its conventional method name (e.g. '-' -> "minus").
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression),
            expression.getOperationSign(),
            name,
            receiverType,
            context.expectedType);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            // inc()/dec() returning Unit: no value is written back, expression is Unit.
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                // 'a++' assigns back into 'a'; record the reassignment for flow analysis.
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Types a binary expression by dispatching on the operator token:
 * named infix calls and overloadable operators resolve to their conventional
 * methods; '=' and augmented assignments delegate to the (overridable)
 * assignment visitors; comparisons require compareTo returning Int; '=='/'!='
 * require an 'equals(Any?) : Boolean'; 'in' requires a Boolean 'contains';
 * '&&'/'||' require Boolean operands; '?:' produces the common supertype of
 * its operands with the right operand's nullability.
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Named infix call: 'a foo b' resolves as 'a.foo(b)'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Overloadable operator (+, -, *, ...): resolve its conventional method name.
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // <, >, <=, >= desugar to compareTo, which must return Int; the comparison is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    // FIX: use the 'name' local instead of a duplicated "equals" literal.
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, name,
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Even if resolution failed, '==' / '!=' is still Boolean.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // FIX: removed a dead store ('result = ErrorUtils.createErrorType(...)')
            // that was immediately discarded by this return. TODO: recover with an
            // error type instead of bailing out.
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // '&&' lets the right operand see facts (and bindings) established by the left.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // FIX: corrected the grammar of this warning message.
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                // Result is the common supertype; it is nullable only if the right side is.
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks an 'in'/'!in' expression: 'a in b' desugars to 'b.contains(a)', and
 * the resolved 'contains' must return Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String name = "contains";
    // The right operand is the receiver of the contains() call.
    JetType receiverType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor containsFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            name,
            receiverType,
            context.expectedType);
    JetType containsType = containsFunction == null ? null : containsFunction.getReturnType();
    ensureBooleanResult(operationSign, name, containsType, context);
}
/**
 * For (in)equality comparisons, reports an error when the two operand types
 * have no common values (their intersection is empty), since the comparison
 * could never be meaningful.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType leftType = getType(left, context.replaceScope(context.scope));
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(right, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    JetType intersection = TypeUtils.intersect(semanticServices.getTypeChecker(), new HashSet<JetType>(Arrays.asList(leftType, rightType)));
    if (intersection == null) {
        JetSimpleNameExpression operationSign = expression.getOperationReference();
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
// Augmented assignment ('a += b'). In expression position this is an error;
// TypeInferrerVisitorWithWritableScope overrides this to allow it as a statement.
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
// Plain assignment ('a = b'). In expression position this is an error;
// TypeInferrerVisitorWithWritableScope overrides this to allow it as a statement.
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
// Shared diagnostic for both assignment forms in expression position; yields no type.
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
    return null;
}
// Convenience wrapper: reports "'name' must return Boolean ..." when resultType is non-Boolean.
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Verifies that a convention method's result type is Boolean, reporting an
 * error (with the given subject name) when it is not.
 *
 * @return false only when a non-Boolean type was found and reported; a null
 *         resultType (error reported upstream) counts as ok
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null) {
        return true;
    }
    // TODO : Relax?
    if (isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
// True if 'type' is convertible to the standard library's Boolean type.
private boolean isBoolean(@NotNull JetType type) {
    return semanticServices.getTypeChecker().isConvertibleTo(type, semanticServices.getStandardLibrary().getBooleanType());
}
/**
 * Types an array-access expression: 'a[i, j]' desugars to 'a.get(i, j)'.
 * The result is the resolved get() function's return type.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType arrayType = getType(expression.getArrayExpression(), context.replaceScope(context.scope));
    if (arrayType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            arrayType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves an infix/binary call 'left name right' as 'left.name(right)' and
 * returns the resolved function's return type.
 *
 * @return the return type of the resolved method, or null when resolution fails
 *         (diagnostics are reported by the call resolver)
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    // The left operand is the receiver of the conventional method call.
    JetType receiverType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor resolvedFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            receiverType,
            context.expectedType);
    return resolvedFunction == null ? null : resolvedFunction.getReturnType();
}
/**
 * Fallback for declarations in expression position: declarations are not
 * expressions, so this always reports an error and yields no type.
 * (TypeInferrerVisitorWithWritableScope overrides specific declaration kinds.)
 */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
    return null;
}
/**
 * In plain expression position the root 'namespace' keyword is not a value;
 * always reports an error. Overridden in TypeInferrerVisitorWithNamespaces,
 * where namespace references are legal.
 */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
    return null;
}
/**
 * Types a string template; the result is always String. Additionally tries to
 * fold the template into a compile-time String constant: literal and escape
 * entries are appended to a builder, while an embedded expression (or an
 * illegal escape) marks the template as non-constant via the OUT_OF_RANGE
 * sentinel in value[0].
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    // value[0] becomes OUT_OF_RANGE as soon as the template cannot be a compile-time constant.
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    // Still type-check the embedded expression so diagnostics are reported.
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        // Every entry was constant-foldable: record the concatenated value.
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/**
 * Catch-all for element kinds this inferrer does not handle: reports an error
 * naming the element's class and yields no type.
 */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
    return null;
}
}
/**
 * Variant of the expression type inferrer used where a simple name may legally
 * denote a namespace (e.g. the receiver position of a qualified expression),
 * not only a value.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }
    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // In namespace position, 'namespace' denotes the root namespace's type.
        return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
    }
    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fall back to resolving the name as a namespace reference.
        result[0] = lookupNamespaceType(expression, referencedName, context);
        return result[0] != null;
    }
}
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
// The scope that local declarations encountered while typing a block are added to.
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
    this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
/**
 * A local object declaration introduces both a class and a value: the object
 * is analyzed top-down, then (if the class resolved) registered in the
 * writable scope as a property.
 */
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
    TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
    topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
    ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
    if (classDescriptor != null) {
        PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
        scope.addVariableDescriptor(propertyDescriptor);
    }
    return null;
}
/**
 * A local property (variable) declaration: receiver types and custom accessors
 * are rejected for locals, the initializer is type-checked against the declared
 * type when both are present, and the variable is added to the writable scope.
 * A local declaration is a statement, so this yields no type.
 */
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
    JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
    if (receiverTypeRef != null) {
        context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
    }
    JetPropertyAccessor getter = property.getGetter();
    if (getter != null) {
        context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
    }
    JetPropertyAccessor setter = property.getSetter();
    if (setter != null) {
        context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
    }
    VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
    JetExpression initializer = property.getInitializer();
    if (property.getPropertyTypeRef() != null && initializer != null) {
        JetType outType = propertyDescriptor.getOutType();
        // The declared type becomes the expected type for the initializer;
        // the mismatch check happens inside getType via the expected-type mechanism.
        JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
        // if (outType != null &&
        // initializerType != null &&
        // !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
        // context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
        // }
    }
    // Declare the variable only after its initializer is typed (no self-reference).
    scope.addVariableDescriptor(propertyDescriptor);
    return null;
}
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
return visitJetElement(dcl, context);
}
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
else {
temporaryBindingTrace.commit();
}
return null;
}
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
if (right != null) {
JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
if (receiverType == null) return null;
//
Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
// // TODO : nasty hack: effort is duplicated
// context.services.callResolver.resolveCallWithGivenName(
// scope,
// call,
// arrayAccessExpression,
// "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
call,
arrayAccessExpression,
"set", receiverType, NO_EXPECTED_TYPE);
if (functionDescriptor == null) return null;
context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
Safe
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel "expected type" marking positions where no expression is allowed
// (compared by identity elsewhere). Every JetType operation deliberately
// fails, so any attempt to actually use it as a type is a bug.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel "expected type" meaning "no particular type is expected here"
// (compared by identity via == throughout the inferrer). Like FORBIDDEN, it
// is never a real type: every JetType operation fails.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Maps unary operator tokens to the convention function names they resolve to.
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Maps binary operator tokens to the convention function names they resolve to.
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Comparison operator tokens: <, >, <=, >=.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// Equality operator tokens: ==, !=.
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// Containment operator tokens: in, !in.
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Maps augmented-assignment tokens (+=, -=, ...) to their convention function names.
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Maps each augmented-assignment token to its plain binary counterpart
// (e.g. += to +), used as a fallback when no 'xxxAssign' function resolves.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Caches pattern-matching results keyed by pattern PSI element.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * Creates a type inferrer backed by the given control-flow information
 * provider and semantic services.
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.flowInformationProvider = flowInformationProvider;
    this.semanticServices = semanticServices;
}
// Entry point: returns an inference façade bound to the given trace.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
/**
 * Per-trace façade over the type inferrer: binds a BindingTrace to the shared
 * inference machinery and exposes the entry points used by clients
 * (expression typing, function-body return-type checking, block typing).
 */
public class Services {
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
// Visitor for ordinary expression positions.
private final TypeInferrerVisitor typeInferrerVisitor;
// Visitor for positions where namespace references are allowed.
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
// Creates a block-level visitor that records declarations into the given scope.
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
// Like getType(), but never returns null: substitutes an error type on failure.
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
JetType type = getType(scope, expression, expectedType);
if (type != null) {
return type;
}
return ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the type of an expression against an expected type; null on failure.
// Return positions are FORBIDDEN: 'return' is not allowed in this context.
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Variant of getType() that also accepts namespace-valued expressions.
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
public CallResolver getCallResolver() {
return callResolver;
}
// Reports '.' calls on nullable receivers and unnecessary '?.' calls on
// receivers that cannot be null (or on namespaces, where '?.' is illegal).
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
// Checks a function body against its declared return type, with empty data-flow info.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
JetType expectedReturnType = functionDescriptor.getReturnType();
if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
// Expression body without a declared return type: the type is inferred, not checked.
expectedReturnType = NO_EXPECTED_TYPE;
}
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
// Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
// if (typeMap.isEmpty()) {
// return; // The function returns Nothing
// }
// for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
// JetType actualType = entry.castValue();
// JetElement element = entry.getKey();
// JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
// if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
// if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
// && element.getParent() instanceof JetReturnExpression) {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
// }
// }
// else {
// if (element == function) {
// JetExpression bodyExpression = function.getBodyExpression();
// assert bodyExpression != null;
// context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// else if (element instanceof JetExpression) {
// JetExpression expression = (JetExpression) element;
// context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
// }
// else {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// }
// }
// }
}
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
// Core return-type check: types the body, reports unreachable code, verifies
// that a value-returning function actually returns, and flags 'return' in
// expression bodies / missing 'return' in block bodies.
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// Block bodies check the return type via 'return' expressions; expression
// bodies check the body expression itself against the expected type.
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
// Types a block expression; an empty block has type Unit.
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
// Infers a function's return type as the common supertype of everything it
// returns; Nothing if no return points were found.
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
Collection<JetType> types = typeMap.values();
return types.isEmpty()
? JetStandardClasses.getNothingType()
: semanticServices.getTypeChecker().commonSupertype(types);
}
// Types the body, then gathers every returned expression with its cached type
// plus the elements that implicitly return Unit.
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
// Types each statement of a block in order, threading data-flow info forward.
// The last statement may be checked against the expected type; with
// COERCION_TO_UNIT and an expected Unit type, a failed check is retried with
// no expected type (on temporary traces) so that coercion to Unit succeeds
// without spurious mismatch errors. Returns the type of the last statement.
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Mismatch against Unit: retry without an expected type; commit
// whichever attempt produced no mismatch (or the first if both did).
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
// Wraps a trace so that a type mismatch reported on exactly the watched
// expression is captured into mismatchFound instead of only being reported.
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
// Refines a type using data-flow facts when the expression is a simple
// variable reference; otherwise returns the initial type unchanged.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
if (expression == null) return initialType;
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
if (variableDescriptor != null) {
return context.dataFlowInfo.getOutType(variableDescriptor);
}
return initialType;
}
// Reports a type mismatch if the expression's type is not a subtype of the
// context's expected type; returns the expression type either way.
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType != null && context.expectedType != null && context.expectedType != NO_EXPECTED_TYPE) {
if (!semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
}
}
return expressionType;
}
// Like checkType(), but additionally consults data-flow facts: if any known
// possible type of the referenced variable satisfies the expectation, an
// automatic cast is recorded and the expected type is returned.
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established facts are checked first.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
// Records an automatic cast, or reports an error when the variable is
// mutable (its value could change between the check and the use).
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
if (variableDescriptor.isVar()) {
trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
} else {
trace.record(BindingContext.AUTOCAST, expression, type);
}
}
// Checks each argument's type against the corresponding expected parameter
// type; skipped when the list sizes disagree.
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
if (arguments.size() == 0 || argumentTypes.size() != arguments.size() || expectedArgumentTypes.size() != arguments.size()) {
return argumentTypes;
}
List<JetType> result = Lists.newArrayListWithCapacity(arguments.size());
for (int i = 0, argumentTypesSize = argumentTypes.size(); i < argumentTypesSize; i++) {
result.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expectedArgumentTypes.get(i).getType())));
}
return result;
}
// Returns the variable a simple-name expression (possibly behind a ':' type
// annotation) resolves to, or null if it is not a variable reference.
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
JetBinaryExpressionWithTypeRHS expression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
if (expression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
return getVariableDescriptorFromSimpleName(expression.getLeft(), context);
}
}
VariableDescriptor variableDescriptor = null;
if (receiverExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
if (declarationDescriptor instanceof VariableDescriptor) {
variableDescriptor = (VariableDescriptor) declarationDescriptor;
}
}
return variableDescriptor;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** How the last expression of a block is treated when checking a function body. */
private enum CoercionStrategy {
    /** Check the last expression against the expected type as-is. */
    NO_COERCION,
    /** Allow the last expression to be coerced to Unit. */
    COERCION_TO_UNIT
}
// Factory for inference contexts; the sole sanctioned way to build one
// (the constructor itself is @Deprecated for direct use).
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Immutable bundle of the state a single inference step needs: the trace to
 * record bindings into, the resolution scope, data-flow facts, and the
 * expected type of the current expression / enclosing function's return.
 * The replace*() methods derive a new context with one component changed,
 * returning {@code this} when nothing would change.
 */
private class TypeInferenceContext {
    public final BindingTrace trace;
    public final TypeResolver typeResolver;
    public final ClassDescriptorResolver classDescriptorResolver;
    public final JetScope scope;
    // Trace-bound façade used to reach the shared inference machinery.
    public final Services services;
    public final DataFlowInfo dataFlowInfo;
    // Expected type of the expression being inferred; NO_EXPECTED_TYPE/FORBIDDEN sentinels apply.
    public final JetType expectedType;
    // Expected return type of the enclosing function, for 'return' expressions.
    public final JetType expectedReturnType;

    @Deprecated // Only factory methods
    private TypeInferenceContext(
            @NotNull BindingTrace trace,
            @NotNull JetScope scope,
            @NotNull DataFlowInfo dataFlowInfo,
            @NotNull JetType expectedType,
            @NotNull JetType expectedReturnType) {
        this.trace = trace;
        this.typeResolver = new TypeResolver(semanticServices, trace, true);
        this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
        this.scope = scope;
        this.services = getServices(trace);
        this.dataFlowInfo = dataFlowInfo;
        this.expectedType = expectedType;
        this.expectedReturnType = expectedReturnType;
    }

    public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
        return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
        if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
        if (expectedType == newExpectedType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
        if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
        if (expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
    }

    public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
        if (newTrace == trace) return this;
        return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
    }

    public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
        if (newExpectedType == expectedType && newTrace == trace) return this;
        // BUG FIX: the original called the 5-arg constructor with six arguments,
        // including an undefined identifier 'preferBlock' (a compile error).
        // Delegate to the newContext() factory like every other replace* method.
        return newContext(newTrace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
    }

    @NotNull
    public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
        if (newScope == scope) return this;
        return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
    }

    @NotNull
    public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
        if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
        return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
    }
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data flow info produced by the most recently typed expression, if any;
// null when no extra flow information was derived. Cleared by resetResult().
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
    return resultDataFlowInfo;
}
/**
 * Central entry point for typing an expression. Results are cached in the
 * trace under EXPRESSION_TYPE; the PROCESSED slice guards against re-typing.
 * Returns null when no type could be computed (errors are reported via the
 * trace's error handler).
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
    // Already typed: return the cached result.
    if (context.trace.get(BindingContext.PROCESSED, expression)) {
        return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
    }
    JetType result;
    try {
        result = expression.visit(this, context);
        // Some recursive definitions (object expressions) must put their types in the cache manually:
        if (context.trace.get(BindingContext.PROCESSED, expression)) {
            return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
        }
        // Force lazily computed types before recording them.
        if (result instanceof DeferredType) {
            result = ((DeferredType) result).getActualType();
        }
        if (result != null) {
            context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
            // A non-nullable Nothing-typed expression never completes normally,
            // so everything it dominates is unreachable.
            if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
                markDominatedExpressionsAsUnreachable(expression, context);
            }
        }
    }
    catch (ReenteringLazyValueComputationException e) {
        // Recursive type dependency (e.g. a property whose type depends on itself).
        context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
        result = null;
    }
    if (!context.trace.get(BindingContext.PROCESSED, expression)) {
        context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
    }
    context.trace.record(BindingContext.PROCESSED, expression);
    return result;
}
// Convenience wrapper: type 'expression' under a different scope and data flow
// info while keeping the current trace and expected types.
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
    return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
// Clears per-visit state so a stale data flow result does not leak into the
// next expression processed by this visitor. (Other result fields that used
// to be reset here have since been removed.)
public void resetResult() {
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Everything dominated by a non-terminating expression can never execute.
// Report one error per root of the dominated forest to avoid flooding the
// user with an error for each nested sub-expression.
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    // '$field' references resolve against the backing-field namespace of the scope.
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        // Ordinary identifiers are resolved through the selector machinery
        // (no receiver here); the commented-out block below is the previous
        // direct-lookup implementation kept for reference.
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
//                assert JetTokens.IDENTIFIER == expression.getReferencedNameElementType();
//                if (referencedName != null) {
//                    VariableDescriptor variable = context.scope.getVariable(referencedName);
//                    if (variable != null) {
//                        context.trace.record(REFERENCE_TARGET, expression, variable);
//                        JetType result = variable.getOutType();
//                        if (result == null) {
//                            context.trace.getErrorHandler().genericError(expression.getNode(), "This variable is not readable in this context");
//                        }
//                        return context.services.checkEnrichedType(result, expression, context);
//                    }
//                    else {
//                        return lookupNamespaceOrClassObject(expression, referencedName, context);
//                        ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
//                        if (classifier != null) {
//                            JetType classObjectType = classifier.getClassObjectType();
//                            JetType result = null;
//                            if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
//                                result = classObjectType;
//                            }
//                            else {
//                                context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
//                            }
//                            context.trace.record(REFERENCE_TARGET, expression, classifier);
//                            return context.services.checkEnrichedType(result, expression, context);
//                        }
//                        else {
//                            JetType[] result = new JetType[1];
//                            if (furtherNameLookup(expression, referencedName, result, context)) {
//                                return context.services.checkEnrichedType(result[0], expression, context);
//                            }
//
//                        }
//                    }
//                    context.trace.getErrorHandler().unresolvedReference(expression);
//                }
    }
    // Reached when the field reference was unresolved (or had no name).
    return null;
}
/**
 * Resolves a simple name as a classifier's class object or, failing that, as a
 * namespace (via furtherNameLookup). Records the reference target on the trace
 * and reports errors for classifiers without a usable class object.
 * Returns null when nothing was found.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
    ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
    if (classifier != null) {
        JetType classObjectType = classifier.getClassObjectType();
        JetType result = null;
        // A class object is usable as a value only in namespace position or
        // when the classifier explicitly allows it.
        if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
            result = classObjectType;
        }
        else {
            context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
        }
        // Record the target even on error so navigation still works.
        context.trace.record(REFERENCE_TARGET, expression, classifier);
        if (result == null) {
            return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
        }
        return context.services.checkEnrichedType(result, expression, context);
    }
    else {
        JetType[] result = new JetType[1];
        if (furtherNameLookup(expression, referencedName, result, context)) {
            return context.services.checkEnrichedType(result[0], expression, context);
        }
    }
    return null;
}
// Whether the current name is being resolved in namespace position. The base
// visitor types ordinary expressions, so this is false; presumably overridden
// by a namespace-typing subclass — TODO confirm against the rest of the file.
public boolean isNamespacePosition() {
    return false;
}
/**
 * Fallback lookup when a name resolved to neither a variable nor a classifier.
 * Returns true when the lookup handled the name (even if only by reporting an
 * error). NOTE(review): 'result' is never written here — a bare namespace name
 * is an error in expression position, so no type is produced; subclasses
 * presumably fill it in. TODO confirm against overriders.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
    if (namespaceType != null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
        return true;
    }
    return false;
}
// Resolves the simple name as a namespace in the current scope. On success the
// reference target is recorded on the trace and the namespace type returned;
// otherwise null.
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespaceDescriptor = context.scope.getNamespace(referencedName);
    if (namespaceDescriptor != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespaceDescriptor);
        return namespaceDescriptor.getNamespaceType();
    }
    return null;
}
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
    // The object's type only becomes known when the top-down analyzer records
    // the declaration's descriptor, so we intercept that record via a trace
    // adapter and capture a DeferredType for it.
    final JetType[] result = new JetType[1];
    BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
        @Override
        public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
            if (declaration == expression.getObjectDeclaration()) {
                // Defer: the descriptor's default type may not be computable yet.
                JetType defaultType = new DeferredType(new LazyValue<JetType>() {
                    @Override
                    protected JetType compute() {
                        return ((ClassDescriptor) descriptor).getDefaultType();
                    }
                });
                result[0] = defaultType;
                // Cache manually (see getType): recursive definitions must
                // populate EXPRESSION_TYPE/PROCESSED themselves.
                if (!context.trace.get(BindingContext.PROCESSED, expression)) {
                    context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
                    context.trace.record(BindingContext.PROCESSED, expression);
                }
            }
        }
    };
    BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
    for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
        //noinspection unchecked
        traceAdapter.addHandler(slice, handler);
    }
    // Analyze the object declaration; this fires the handler above.
    TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
    topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
    return context.services.checkType(result[0], expression, context);
}
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
    // Types a function literal: builds an anonymous FunctionDescriptor, infers
    // parameter and return types (using the expected function type when one is
    // available), and yields the corresponding function type.
    JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
    JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
    final JetType receiverType;
    if (receiverTypeRef != null) {
        receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
    } else {
        receiverType = context.scope.getThisType();
    }
    FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
            context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
    List<JetType> parameterTypes = new ArrayList<JetType>();
    List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
    List<JetParameter> parameters = functionLiteral.getValueParameters();
    JetType expectedType = context.expectedType;
    List<ValueParameterDescriptor> valueParameters = null;
    // When a function type is expected, its parameter/receiver/return types
    // supply defaults for anything the literal leaves unannotated.
    boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
    if (functionTypeExpected) {
        valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
    }
    for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
        JetParameter parameter = parameters.get(i);
        JetTypeReference typeReference = parameter.getTypeReference();
        JetType type;
        if (typeReference != null) {
            // Explicit annotation wins over the expected type.
            type = context.typeResolver.resolveType(context.scope, typeReference);
        }
        else {
            if (valueParameters != null) {
                type = valueParameters.get(i).getOutType();
            }
            else {
                // No annotation and no expected function type: unrecoverable.
                context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
                type = ErrorUtils.createErrorType("Cannot be inferred");
            }
        }
        ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
        parameterTypes.add(valueParameterDescriptor.getOutType());
        valueParameterDescriptors.add(valueParameterDescriptor);
    }
    JetType effectiveReceiverType;
    if (receiverTypeRef == null) {
        // No declared receiver: inherit one from the expected extension
        // function type, if any.
        if (functionTypeExpected) {
            effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
        }
        else {
            effectiveReceiverType = null;
        }
    }
    else {
        effectiveReceiverType = receiverType;
    }
    functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
    context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
    JetType returnType = NO_EXPECTED_TYPE;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
    JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
    if (returnTypeRef != null) {
        // Declared return type: check the body against it.
        returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
        context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
    }
    else {
        // Infer the return type from the body, seeded with the expected one.
        if (functionTypeExpected) {
            returnType = JetStandardClasses.getReturnType(expectedType);
        }
        returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
    }
    JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
    functionDescriptor.setReturnType(safeReturnType);
    // If Unit is expected, the literal's result is coerced to Unit regardless
    // of what the body actually returns.
    if (functionTypeExpected) {
        JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
        if (JetStandardClasses.isUnit(expectedReturnType)) {
            return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
        }
    }
    return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    // A parenthesized expression has the type of its inner expression, checked
    // against the expected type. The previous replaceScope(context.scope) call
    // was a no-op (replaceScope returns 'this' when the scope is unchanged).
    return context.services.checkType(getType(expression.getExpression(), context), expression, context);
}
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    // Dispatches on the literal kind, resolves the compile-time value (which
    // may be influenced by the expected type, e.g. integer literal typing),
    // and records it on the trace.
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver compileTimeConstantResolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> value;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        value = compileTimeConstantResolver.getIntegerValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        value = compileTimeConstantResolver.getFloatValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        value = compileTimeConstantResolver.getBooleanValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        value = compileTimeConstantResolver.getCharValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        value = compileTimeConstantResolver.getRawStringValue(text, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        value = compileTimeConstantResolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (value instanceof ErrorValue) {
        // Report the resolution error but still produce the literal's natural
        // default type so downstream checking can continue.
        ErrorValue errorValue = (ErrorValue) value;
        context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
        return getDefaultType(elementType);
    }
    else {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
        return context.services.checkType(value.getType(standardLibrary), expression, context);
    }
}
// Fallback type for a literal whose compile-time value failed to resolve:
// each literal kind maps to its natural default type.
@NotNull
private JetType getDefaultType(IElementType constantType) {
    if (constantType == JetNodeTypes.NULL) {
        return JetStandardClasses.getNullableNothingType();
    }
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return semanticServices.getStandardLibrary().getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return semanticServices.getStandardLibrary().getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return semanticServices.getStandardLibrary().getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return semanticServices.getStandardLibrary().getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return semanticServices.getStandardLibrary().getStringType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // Type the thrown value for its side effects on the trace; the
        // resulting type is not used yet (see TODO below).
        JetType type = getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
        // TODO : check that it inherits Throwable
    }
    // 'throw' never completes normally, hence type Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    // The FORBIDDEN sentinel marks positions where 'return' is illegal.
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Type-check the returned value against the function's expected return type.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else {
        // Bare 'return' is only legal when the function returns Unit (or no
        // expectation was established).
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    // Removed dead local 'returnedType' (assigned Unit, never read).
    // A return expression never completes normally, hence type Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    // 'break' never completes normally, hence type Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    // 'continue' never completes normally, hence type Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
// True for literals whose type is "flexible" (an integer or floating-point
// literal can be typed differently depending on the expected type).
// Compares element types directly instead of allocating a fresh TokenSet on
// every call, as the original did; the result is identical.
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) return false;
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
            || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Types the left-hand side of 'expr : T' / 'expr as T' / 'expr as? T' into
 * the given temporary trace and performs the operator-specific check.
 * Returns true when the check succeeded (the caller then commits the
 * temporary trace), false when the attempt should be discarded.
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
    TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
    JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
    if (actualType == null) return false;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.COLON) {
        // ':' is an upcast — the actual type must be a subtype of the target.
        if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
            return false;
        }
        return true;
    }
    else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
        // Casts only warn (needless / can-never-succeed); they never fail here.
        checkForCastImpossibility(expression, actualType, targetType, context);
        return true;
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
        return false;
    }
}
/**
 * Emits warnings for casts that are needless (actual is already a subtype of
 * the target) or can never succeed (the types are unrelated). Silent when the
 * cast is a legitimate downcast (target is a subtype of actual).
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    if (!typeChecker.isSubtypeOf(targetType, actualType)) {
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            // Upcast via 'as': ':' would do and cannot fail.
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
        }
        else {
            // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
        }
    }
    else {
        // target <: actual and actual <: target means the types are equivalent.
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
}
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    // Types each tuple entry independently, then — when a tuple type is
    // expected — lets the services enrich the entry types against the
    // expected component types.
    List<JetExpression> entries = expression.getEntries();
    List<JetType> types = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        types.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
        List<JetType> enrichedTypes = context.services.checkArgumentTypes(types, entries, context.expectedType.getArguments(), context);
        // Identity comparison: enrichment returns the same list when nothing changed.
        if (enrichedTypes != types) {
            return JetStandardClasses.getTupleType(enrichedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(types), expression, context);
}
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
    // Types 'this', 'this@label' and 'this<SuperType>'. The receiver type is
    // taken from the labeled declaration (class or function) or, unqualified,
    // from the current scope.
    JetType result = null;
    JetType thisType = null;
    String labelName = expression.getLabelName();
    if (labelName != null) {
        Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
        int size = declarationsByLabel.size();
        final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
        assert targetLabel != null;
        if (size == 1) {
            // Exactly one declaration carries this label.
            DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
            if (declarationDescriptor instanceof ClassDescriptor) {
                ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
                thisType = classDescriptor.getDefaultType();
            }
            else if (declarationDescriptor instanceof FunctionDescriptor) {
                FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
                thisType = functionDescriptor.getReceiverType();
            }
            else {
                throw new UnsupportedOperationException(); // TODO
            }
            context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
        }
        else if (size == 0) {
            // This uses the info written by the control flow processor
            PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
            if (psiElement instanceof JetFunctionLiteralExpression) {
                DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
                if (declarationDescriptor instanceof FunctionDescriptor) {
                    thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
                    if (thisType == null) {
                        // No receiver on the labeled literal: fall through to
                        // the "'this' is not defined" error below via Nothing.
                        thisType = JetStandardClasses.getNothingType();
                    }
                    else {
                        context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
                        context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
                    }
                }
                else {
                    context.trace.getErrorHandler().unresolvedReference(targetLabel);
                }
            }
            else {
                context.trace.getErrorHandler().unresolvedReference(targetLabel);
            }
        }
        else {
            // More than one declaration with this label.
            context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
        }
    }
    else {
        // Unqualified 'this': the scope supplies the receiver type.
        thisType = context.scope.getThisType();
        DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
        if (declarationDescriptorForUnqualifiedThis != null) {
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
        }
    }
    if (thisType != null) {
        if (JetStandardClasses.isNothing(thisType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
        }
        else {
            JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
            if (superTypeQualifier != null) {
                // 'this<SuperType>': the result is the named supertype of the
                // receiver, substituted for the receiver's type arguments.
                JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
                // Errors are reported by the parser
                if (superTypeElement instanceof JetUserType) {
                    JetUserType typeElement = (JetUserType) superTypeElement;
                    ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
                    if (classifierCandidate instanceof ClassDescriptor) {
                        ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
                        Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
                        TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
                        for (JetType declaredSupertype : supertypes) {
                            if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
                                result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
                                break;
                            }
                        }
                        if (result == null) {
                            context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
                        }
                    }
                }
            } else {
                result = thisType;
            }
            if (result != null) {
                context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
            }
        }
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Types a 'when' expression: each entry's conditions refine the data flow
    // info and may extend the scope (e.g. smart casts from 'is' patterns);
    // the result is the common supertype of all entry body types.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO :change scope according to the bound value in the when header
    final JetExpression subjectExpression = expression.getSubjectExpression();
    final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
    final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
    // TODO : exhaustive patterns
    Set<JetType> expressionTypes = Sets.newHashSet();
    for (JetWhenEntry whenEntry : expression.getEntries()) {
        JetWhenCondition[] conditions = whenEntry.getConditions();
        DataFlowInfo newDataFlowInfo;
        WritableScope scopeToExtend;
        if (conditions.length == 1) {
            // Single condition: its refinements apply directly to the body.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
            newDataFlowInfo = context.dataFlowInfo;
            JetWhenCondition condition = conditions[0];
            if (condition != null) {
                newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
            }
        }
        else {
            // Multiple comma-separated conditions: only flow info common to
            // all alternatives (OR of each branch) may flow into the body.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
            newDataFlowInfo = null;
            for (JetWhenCondition condition : conditions) {
                DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
                if (newDataFlowInfo == null) {
                    newDataFlowInfo = dataFlowInfo;
                }
                else {
                    newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
                }
            }
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            else {
                newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
            }
        }
        JetExpression bodyExpression = whenEntry.getExpression();
        if (bodyExpression != null) {
            // Bodies are typed against the original expected type.
            JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
            if (type != null) {
                expressionTypes.add(type);
            }
        }
    }
    if (!expressionTypes.isEmpty()) {
        return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
    }
    else if (expression.getEntries().isEmpty()) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
    }
    return null;
}
/**
 * Checks a single 'when' condition (call suffix, 'in' range, or 'is' pattern)
 * against the subject, extending 'scopeToExtend' with pattern bindings where
 * applicable, and returns the data flow info refined by the condition.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    // Single-element array: the anonymous visitor below must write to it.
    final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
//                    JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
            if (callSuffixExpression != null) {
//                        JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
                // The call is resolved with the subject as an implicit receiver
                // and must produce a Boolean.
                JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
                ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
                context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
            }
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            // 'in range' delegates to the shared in-expression check.
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression != null) {
                assert subjectExpression != null;
                checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
            }
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            // 'is Pattern' may refine the subject's type (smart cast).
            JetPattern pattern = condition.getPattern();
            if (pattern != null) {
                newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return newDataFlowInfo[0];
}
/**
 * Type-checks a pattern against the subject's static type, extending the given scope
 * with variables the pattern binds and refining data-flow info where possible.
 *
 * @param subjectType static type of the value being matched
 * @param scopeToExtend scope receiving variables bound by binding patterns
 * @param subjectVariables variables known to hold the matched value; type patterns
 *        narrow their types in the resulting data-flow info
 * @return the data-flow info in effect when the pattern matches
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
// One-element array so the anonymous visitor below can update the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// 'is Type': check compatibility and record the instanceof fact for the subject variables.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: the subject must be a TupleN of matching arity; each entry is
// then matched against the corresponding component type.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
// Data-flow facts from all entries combine conjunctively.
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: apply the decomposer to the subject and match the
// argument list against the decomposer's return type.
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Expression pattern: only checked for type compatibility with the subject.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// 'val x [: T] [condition]': introduce the variable into the scope; an explicit
// type must be a supertype of the subject, otherwise 'is' should be used instead.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The nested condition sees the freshly bound variable as an extra subject variable.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
// Reports an error when the two types have no common values (empty intersection).
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Computes the type of a try-expression: the common supertype of all branches that can
 * produce its value. With a finally-block present, only the finally- and try-block types
 * contribute; catch bodies are still type-checked for error reporting.
 *
 * @return the common supertype of the contributing branch types, or null when none typed
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    List<JetType> branchTypes = new ArrayList<JetType>();
    // Type-check every catch clause; a clause contributes its body's type only when
    // both the catch parameter and the body are present.
    for (JetCatchClause catchClause : expression.getCatchClauses()) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        if (catchParameter == null) continue;
        VariableDescriptor catchVariable = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchBody == null) continue;
        // The catch parameter is visible only inside its own body scope.
        WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
        catchScope.addVariableDescriptor(catchVariable);
        JetType catchType = getType(catchBody, context.replaceScope(catchScope));
        if (catchType != null) {
            branchTypes.add(catchType);
        }
    }
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    if (finallyBlock != null) {
        // Catch types were only needed for type-checking above; with a finally-block
        // they do not contribute to the expression's type.
        branchTypes.clear();
        JetType finallyType = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }
    JetType tryType = getType(expression.getTryBlock(), context.replaceScope(context.scope));
    if (tryType != null) {
        branchTypes.add(tryType);
    }
    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Computes the type of an if-expression. With both branches present the result is their
 * common supertype; with one branch the expression is a statement of type Unit. Data-flow
 * facts from the condition are propagated into each branch, and when exactly one branch
 * has type Nothing (never completes normally) the other branch's facts survive the if.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
// Branches are checked against the expected type; the condition itself is not.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
// Variables bound by patterns in the condition become visible in the then-branch.
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// The then-branch never completes normally, so only the else-info survives.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
// An if without an else is a statement of type Unit.
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// The else-branch never completes normally, so only the then-info survives.
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Both branches present: each is checked against the caller's expected type.
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
// When exactly one branch jumps (has type Nothing), the data-flow info of the
// other branch is what holds after the whole if-expression.
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
/**
 * Derives data-flow facts (null comparisons, 'is' facts) that hold when {@code condition}
 * evaluates to {@code conditionValue}.
 *
 * @param condition the condition expression; when null the current info is returned unchanged
 * @param conditionValue the assumed outcome of the condition
 * @param scopeToExtend scope receiving variables bound by 'is' patterns, or null when such
 *        bindings must not become visible in the branch being analyzed
 * @return the refined data-flow info; never null
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
// One-element array so the anonymous visitor below can update the result.
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// 'x is P' contributes facts only in the branch where the (possibly negated) check succeeds.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
// Make the pattern's bound variables visible in the branch.
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// Operand bindings may leak into the branch only when that branch implies
// the operand held: '&&' true-branch, or '||' false-branch.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
// De Morgan: '&&' combines facts with AND when assumed true, OR when false;
// '||' is the dual.
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
// (In)equality: derive nullability facts for a variable compared to 'null'
// or to an expression whose type is provably non-null.
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
if (!(left instanceof JetSimpleNameExpression)) {
// Normalize so the simple-name operand (the tracked variable) is on the left.
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records x == null / x != null knowledge for the variable, taking the assumed
// branch outcome into account via the 'equalsToNull' flag.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
// '!expr': extract facts from the operand with the assumed outcome flipped.
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent for data-flow extraction.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
// The pattern maps may yield null for a pattern; fall back to the incoming info.
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Verifies that a condition expression, when present, has type Boolean;
 * reports an error on the condition node otherwise.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(condition, context.replaceScope(scope));
    // A null type means the condition itself failed to type-check; no extra error then.
    boolean wellTyped = conditionType == null || isBoolean(conditionType);
    if (!wellTyped) {
        context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
    }
}
/**
 * Type-checks a while-loop. The loop itself always has type Unit. Variables bound by
 * patterns in the condition are visible inside the body; when the loop has no breaks,
 * the negated condition's data-flow info holds after the loop.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression loopCondition = expression.getCondition();
    checkCondition(context.scope, loopCondition, context);
    JetExpression loopBody = expression.getBody();
    if (loopBody != null) {
        // Bindings introduced by the condition (e.g. 'is' patterns) extend the body scope.
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo bodyInfo = loopCondition != null
                ? extractDataFlowInfoFromCondition(loopCondition, true, bodyScope, context)
                : context.dataFlowInfo;
        getTypeWithNewScopeAndDataFlowInfo(bodyScope, loopBody, bodyInfo, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        // Without breaks, the loop exits only via a failing condition.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(loopCondition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Type-checks a do..while loop (always of type Unit). The body is checked in a writable
 * scope that the condition can see, so variables declared in the body are visible to the
 * condition. When the loop has no breaks, the negated condition's data-flow info holds
 * after the loop.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
// Special case: a parameterless function-literal body is treated as a plain block,
// so declarations inside it stay visible to the loop condition.
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
// NOTE(review): getBodyExpression() looks like it could be null for incomplete code — confirm upstream guarantees
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
// A function literal with declared parameters is an ordinary expression body.
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
// Any other body is checked as a single-statement block in a scope the condition sees.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// resultScope = newWritableScopeImpl();
// Without breaks, the loop exits only via a failing condition.
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope nested in {@code scope}, owned by the same containing
 * declaration and reporting problems through the trace's error handler.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
return new WritableScopeImpl(scope, scope.getContainingDeclaration(), trace.getErrorHandler());
}
/**
 * Type-checks a for-loop (always of type Unit). The loop-parameter's type is either
 * declared explicitly — and must then accept every element produced by the range's
 * iterator convention — or inferred from that convention.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression loopRange = expression.getLoopRange();
    JetType loopRangeType = loopRange == null ? null : getType(loopRange, context.replaceScope(context.scope));
    // Element type produced by the range, per the iterator()/hasNext()/next() convention.
    JetType expectedParameterType = loopRangeType == null ? null : checkIterableConvention(loopRangeType, loopRange, context);
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    JetParameter loopParameter = expression.getLoopParameter();
    if (loopParameter != null) {
        VariableDescriptor parameterDescriptor;
        JetTypeReference typeReference = loopParameter.getTypeReference();
        if (typeReference == null) {
            // No declared type: infer it from the range, falling back to an error type.
            JetType parameterType = expectedParameterType != null ? expectedParameterType : ErrorUtils.createErrorType("Error");
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, parameterType);
        }
        else {
            parameterDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType declaredType = parameterDescriptor.getOutType();
            // The declared type must accept every element the range produces.
            if (expectedParameterType != null
                    && declaredType != null
                    && !semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, declaredType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + declaredType);
            }
        }
        loopScope.addVariableDescriptor(parameterDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(body, context.replaceScope(loopScope));
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Verifies the for-loop iteration convention on the loop-range type: it must provide an
 * 'iterator()' function whose result has either a 'hasNext()' function or a 'hasNext'
 * property (ambiguity between the two is an error) and a 'next()' function. Successfully
 * resolved members are recorded in the binding trace for later use.
 *
 * @param type type of the loop-range expression
 * @param loopRange the range expression, used for error reporting and trace records
 * @return the loop element type, i.e. the return type of 'iterator().next()', or null on error
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
ASTNode reportErrorsOn = loopRange.getNode();
OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
if (iteratorResolutionResult.isSuccess()) {
FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
JetType iteratorType = iteratorFunction.getReturnType();
FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
boolean hasNextFunctionSupported = hasNextFunction != null;
VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
boolean hasNextPropertySupported = hasNextProperty != null;
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
else {
// Exactly one of the two is available; remember which for later stages.
context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
}
OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
return nextFunction.getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
/**
 * Looks up a 'hasNext' property on the iterator type and validates it: when present it
 * must be readable and of type Boolean, otherwise an error is reported on the loop range.
 *
 * @return the property descriptor, or null when the iterator type has no 'hasNext' property
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
    // TODO :extension properties
    if (hasNextProperty != null) {
        JetType propertyType = hasNextProperty.getOutType();
        if (propertyType == null) {
            // TODO : accessibility
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
        }
        else if (!isBoolean(propertyType)) {
            context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + propertyType);
        }
    }
    return hasNextProperty;
}
/**
 * ASTNode-based variant of the iterable-convention check: resolves 'iterator()',
 * 'hasNext()'/'hasNext' and 'next()' by exact signature through the call resolver and
 * reports errors on the given node. Unlike the expression-based variant, this one does
 * not record the resolved members in the binding trace.
 *
 * @return the loop element type ('iterator().next()' return type), or null on error
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
if (iteratorResolutionResult.isSuccess()) {
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
return nextResolutionResult.getDescriptor().getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
<<<<<<< MINE
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return false;
} else {
JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return true;
}
=======
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult hasNextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return null;
} else {
assert hasNextResolutionResult.isSuccess();
JetType hasNextReturnType = hasNextResolutionResult.getFunctionDescriptor().getReturnType();
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextResolutionResult.getFunctionDescriptor();
}
>>>>>>> YOURS
/**
 * Hash-qualified expressions are not supported by this inferrer: always reports an
 * "Unsupported" error on the operation token and yields no type.
 */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Computes the type of a qualified expression ('a.b', 'a?.b', 'a?b'). Resolution is first
 * attempted without autocasts on a temporary trace; on failure, each data-flow-derived
 * (autocast) type of the receiver is tried in turn, committing the first attempt that
 * succeeds. Safe access makes the result nullable; the '?' form checks the selector is
 * Boolean and yields the receiver's type made nullable.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
// The receiver may be a namespace/class reference, hence the namespace-aware visitor.
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// A safe call may produce null when the receiver is null (Unit stays non-nullable).
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Clean resolution failed: retry against each autocast type of the receiver,
// most recently established first, committing the first successful attempt.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard the failed attempt; fresh trace for the next candidate type.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Nothing resolved: commit the clean-resolution diagnostics (e.g. unresolved reference).
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// 'receiver?selector': selector must be Boolean; result is receiver's type made nullable.
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Check '.' vs '?.' usage against the (possibly enriched) receiver nullability.
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function descriptor that a selector expression ultimately calls, drilling
 * through call, array-access, binary and qualified expressions down to the resolved
 * reference. Used for null-safety checking of call receivers.
 *
 * @return the resolved function descriptor, or a fresh error function when none was found
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
// One-element array so the anonymous visitor below can report its finding.
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
// 'f(...)': recurse into the callee expression.
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
// Resolved reference: take its target when it is a function.
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
// 'a[i]': look at the array part.
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
// Binary operation: the left operand carries the receiver.
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// Fall back to a synthetic error function so callers never see null.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
/**
 * Computes the type of the selector part of a qualified expression ('b' in 'a.b'),
 * given the receiver's type ('a', may be null). Handles call selectors, simple
 * names (property, then namespace/class-object lookup) and nested qualified
 * selectors; anything else is reported as unsupported.
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
    if (selectorExpression instanceof JetCallExpression) {
        // a.f(...) -- full call resolution against the receiver type
        return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
    }
    else if (selectorExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
        // Try a property first on a temporary trace; commit only if it resolves,
        // so a failed attempt leaves no bindings or diagnostics behind.
        TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
        VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
        if (variableDescriptor != null) {
            temporaryTrace.commit();
            return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
        }
        // Not a property: look for a namespace or class object, searching the
        // receiver's member scope when there is a receiver.
        TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
        JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
        if (jetType == null) {
            context.trace.getErrorHandler().unresolvedReference(nameExpression);
        }
        return context.services.checkEnrichedType(jetType, nameExpression, context);
//        JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
//        return getType(selectorExpression, context.replaceScope(scope));
    }
    else if (selectorExpression instanceof JetQualifiedExpression) {
        // a.(b.c): type the inner receiver first (no expected type), then recurse
        // into its selector with the original context.
        JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
        JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
        JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
        if (newReceiverType != null && newSelectorExpression != null) {
            return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
        }
    }
    else {
        // TODO : not a simple name -> resolve in scope, expect property type or a function type
        context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    }
    return null;
}
/** Types a plain (receiver-less) call; the call resolver reports its own errors. */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    JetType resolvedType = context.services.callResolver.resolveCall(
            context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
/**
 * Types 'a is Pattern': types the left-hand side, checks the pattern against
 * that type, and caches the resulting data-flow info and pattern-bound
 * variables for later lookup. The expression itself is always Boolean.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (pattern != null && knownType != null) {
        // Variables bound by the pattern are declared into this dedicated scope.
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
        // Cached per-pattern so later analysis can retrieve them.
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types unary expressions. Labels pass straight through to the base expression;
 * other operators resolve by convention (inc/dec/plus/minus/not, see
 * unaryOperationNames). For ++/-- the resolved function must return either Unit
 * or a subtype of the operand's type, and a reassignment is recorded.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    IElementType operationType = operationSign.getReferencedNameElementType();
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    // Type the operand without an expected type, then resolve e.g. 'inc' on it.
    JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression),
            expression.getOperationSign(),
            name,
            receiverType,
            context.expectedType);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            // A Unit-returning inc()/dec() makes the whole expression Unit.
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                // ++/-- writes back into its operand.
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Types binary expressions, dispatching on the operation token:
 * named infix calls, operator conventions (plus/times/... and compareTo),
 * assignments (delegated, rejected in expression position), equality via
 * 'equals(Any?)', identity comparison, 'in'/'!in' via 'contains', the boolean
 * connectives, and the elvis operator.
 *
 * Fixes over the previous revision: removed a dead store to 'result' before
 * 'return null' in the 'in' branch, fixed the grammar of the elvis warning
 * message, and used the existing 'name' local instead of a duplicated
 * "equals" literal.
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Named infix call: a foo b
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Operator convention: a + b  =>  a.plus(b), etc.
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // a < b  =>  a.compareTo(b), which must return Int; the comparison is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    // Look for an exact 'equals(Any?)' on the left operand's type.
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, name,
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // == / != are Boolean regardless of whether resolution succeeded.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // Previous revision stored an error type in 'result' and then returned
            // null anyway, making the store dead. Return null directly.
            // TODO : report a proper "no right argument" diagnostic here.
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // '&&' lets the right operand see variables bound while checking the left.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // Message grammar fixed ("is always returns" -> "always returns").
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/** Checks 'a in b' / 'a !in b': resolves as b.contains(a) and requires a Boolean result. */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String methodName = "contains";
    JetType containerType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor containsFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            methodName,
            containerType,
            context.expectedType);
    JetType resultType = null;
    if (containsFunction != null) {
        resultType = containsFunction.getReturnType();
    }
    ensureBooleanResult(operationSign, methodName, resultType, context);
}
/**
 * For (in)equality comparisons, reports an error when the operand types have an
 * empty intersection, i.e. no value could ever inhabit both sides.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(left, context.replaceScope(context.scope));
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(right, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    JetType intersection = TypeUtils.intersect(semanticServices.getTypeChecker(), new HashSet<JetType>(Arrays.asList(leftType, rightType)));
    if (intersection == null) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
/** Compound assignment (+=, -=, ...) in expression position: always an error here. */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}

/** Plain assignment (=) in expression position: always an error here. */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}

/** Reports the shared "assignments are statements" diagnostic and yields no type. */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    String message = "Assignments are not expressions, and only expressions are allowed in this context";
    context.trace.getErrorHandler().genericError(expression.getNode(), message);
    return null;
}
/** Convenience wrapper: quotes the operation name before delegating. */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String subject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, subject, context);
}

/**
 * Checks that resultType is (convertible to) Boolean; reports an error on the
 * operation sign otherwise. Returns true when acceptable.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // A null result type means resolution already failed elsewhere; accept silently.
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}

/** True when the type is convertible (not necessarily a subtype) to Boolean. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/** Types 'a[i, ...]' reads by resolving a 'get' call on the array expression's type. */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType receiverType = getType(expression.getArrayExpression(), context.replaceScope(context.scope));
    if (receiverType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            receiverType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Shared helper for operator conventions: types the left operand, then resolves
 * 'name' as a call on it with the binary expression's arguments. Returns the
 * resolved function's return type, or null when resolution fails.
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    JetType leftOperandType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor resolved = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            leftOperandType,
            context.expectedType);
    return resolved == null ? null : resolved.getReturnType();
}
/** Declarations are rejected outright in expression position. */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    String message = "Declarations are not allowed in this position";
    context.trace.getErrorHandler().genericError(dcl.getNode(), message);
    return null;
}

/** The bare 'namespace' keyword is not an expression in this visitor. */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    String message = "'namespace' is not an expression";
    context.trace.getErrorHandler().genericError(expression.getNode(), message);
    return null;
}
/**
 * Types string templates. Always yields String; additionally, when every entry
 * is a literal or a valid escape (no interpolated expressions, no bad escapes),
 * records the concatenated value as a compile-time constant. The one-element
 * 'value' array acts as a sentinel: OUT_OF_RANGE marks "not a constant".
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // ${...}: still typed for diagnostics, but the template is no
                // longer a compile-time constant.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        // All entries were constant: record the concatenation.
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/** Fallback for element kinds this visitor does not handle. */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    String description = element + " " + element.getClass().getCanonicalName();
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + description);
    return null;
}
}
/** Variant of the base visitor that permits namespace names in expression position. */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // Unlike the base visitor, the bare 'namespace' keyword is legal here.
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fall back to namespace lookup; success iff a namespace type was found.
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
/**
 * Visitor used for block bodies: statements may introduce declarations (local
 * properties, functions, objects) into the enclosing writable scope, and
 * assignments are legal here, unlike in plain expression position.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    // Scope that visited declarations are added to.
    private final WritableScope scope;

    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }

//    public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
//        super(trace);
//        this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
//    }

    /** Local object declaration: analyze it, then expose it as a property in the scope. */
    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        return null;
    }

    /**
     * Local variable declaration: rejects receiver/getter/setter forms, types the
     * initializer against the declared type (when both exist), and declares the
     * variable into the scope.
     */
    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        if (property.getPropertyTypeRef() != null && initializer != null) {
            // The declared type becomes the initializer's expected type; the
            // explicit check below was superseded (kept for reference).
            JetType outType = propertyDescriptor.getOutType();
            JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
//            if (outType != null &&
//                initializerType != null &&
//                !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
//                context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
//            }
        }
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }

    /** Local function: resolve its descriptor, declare it, and check its return type. */
    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }

    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }

    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }

    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        // Unhandled declaration kinds fall through to the generic error.
        return visitJetElement(dcl, context);
    }

    /**
     * a += b: first try an 'opAssign' convention call on a temporary trace; only
     * commit it if resolution succeeds. Otherwise fall back to the plain binary
     * counterpart (a = a + b) and record a reassignment of 'a'.
     */
    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
        JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
        if (assignmentOperationType == null) {
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        else {
            temporaryBindingTrace.commit();
        }
        return null;
    }

    /**
     * a = b: array-access targets become 'set' calls; otherwise the left-hand
     * side's type is used as the expected type of the right-hand side.
     */
    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        JetExpression left = expression.getLeft();
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        if (deparenthesized instanceof JetArrayAccessExpression) {
            // (a[i]) = b  -->  a.set(i, b)
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
        if (right != null) {
            // Expected-type machinery handles the compatibility check (see below).
            JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
//            if (rightType != null &&
//                leftType != null &&
//                !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
//                context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
//            }
        }
        return null;
    }

    /** a[i] = b as an l-value: resolves a 'set' call on the array's type. */
    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
        JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
        if (receiverType == null) return null;
        //
        Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
//        // TODO : nasty hack: effort is duplicated
//        context.services.callResolver.resolveCallWithGivenName(
//                scope,
//                call,
//                arrayAccessExpression,
//                "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                scope,
                call,
                arrayAccessExpression,
                "set", receiverType, NO_EXPECTED_TYPE);
        if (functionDescriptor == null) return null;
        // The assignment's '=' sign points at the resolved 'set' function.
        context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
    }

    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.REFERENCE_TARGET;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel passed where an expected type is required but must never be consulted;
// every accessor throws, so any accidental use fails fast.
private static final JetType FORBIDDEN = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "FORBIDDEN";
    }
};

// Sentinel meaning "no expected-type constraint"; compared by identity and,
// like FORBIDDEN, unusable as an actual type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
    @NotNull
    @Override
    public TypeConstructor getConstructor() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public List<TypeProjection> getArguments() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public boolean isNullable() {
        throw new UnsupportedOperationException(); // TODO
    }
    @NotNull
    @Override
    public JetScope getMemberScope() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public List<AnnotationDescriptor> getAnnotations() {
        throw new UnsupportedOperationException(); // TODO
    }
    @Override
    public String toString() {
        return "NO_EXPECTED_TYPE";
    }
};
// Unary operator token -> convention method name (a++ => a.inc(), !a => a.not(), ...).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.PLUSPLUS, "inc")
        .put(JetTokens.MINUSMINUS, "dec")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.EXCL, "not")
        .build();

// Binary operator token -> convention method name (a + b => a.plus(b), ...).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MUL, "times")
        .put(JetTokens.PLUS, "plus")
        .put(JetTokens.MINUS, "minus")
        .put(JetTokens.DIV, "div")
        .put(JetTokens.PERC, "mod")
        .put(JetTokens.ARROW, "arrow")
        .put(JetTokens.RANGE, "rangeTo")
        .build();

// Comparison tokens resolved via compareTo (result must be Int).
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
// == / != resolved via equals(Any?).
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
// in / !in resolved via contains.
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);

// Compound-assignment token -> 'opAssign' convention method name.
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
        .put(JetTokens.MULTEQ, "timesAssign")
        .put(JetTokens.DIVEQ, "divAssign")
        .put(JetTokens.PERCEQ, "modAssign")
        .put(JetTokens.PLUSEQ, "plusAssign")
        .put(JetTokens.MINUSEQ, "minusAssign")
        .build();

// Compound-assignment token -> its plain binary counterpart (a += b falls back to a = a + b).
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
        .put(JetTokens.MULTEQ, JetTokens.MUL)
        .put(JetTokens.DIVEQ, JetTokens.DIV)
        .put(JetTokens.PERCEQ, JetTokens.PERC)
        .put(JetTokens.PLUSEQ, JetTokens.PLUS)
        .put(JetTokens.MINUSEQ, JetTokens.MINUS)
        .build();

private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Filled by visitIsExpression / pattern checking; read back by later analysis.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/**
 * @param flowInformationProvider supplies control/data-flow facts for inference
 * @param semanticServices shared type checker, standard library and friends
 */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
    this.flowInformationProvider = flowInformationProvider;
    this.semanticServices = semanticServices;
}
/** Creates a Services facade whose operations record into the given trace. */
public Services getServices(@NotNull BindingTrace trace) {
    Services services = new Services(trace);
    return services;
}
public class Services {
// Trace that this Services instance records bindings and diagnostics into.
private final BindingTrace trace;
// Evaluates constant expressions against this trace.
private final CompileTimeConstantResolver compileTimeConstantResolver;
// Resolves calls; constructed with a back-reference to the enclosing inferrer.
private final CallResolver callResolver;
// Visitor for ordinary expression positions.
private final TypeInferrerVisitor typeInferrerVisitor;
// Visitor for positions where namespace names are allowed.
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;

private Services(BindingTrace trace) {
    this.trace = trace;
    this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
    this.typeInferrerVisitor = new TypeInferrerVisitor();
    this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
    this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}

/** Creates a visitor that types block statements into the given writable scope. */
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
    return new TypeInferrerVisitorWithWritableScope(scope);
}
/** Like getType(), but substitutes an error type for a null result so callers never see null. */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Infers the type of an expression in the given scope against an expected type.
 * Returns null when no type can be determined. Namespace expressions are forbidden here.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
/**
 * Variant of getType that additionally permits namespace-valued expressions;
 * no expected type is imposed.
 */
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// Exposes the call resolver bound to this trace.
public CallResolver getCallResolver() {
return callResolver;
}
/**
 * Reports receiver-nullability diagnostics for '.' vs '?.' calls: an error when '.' is
 * used on a nullable receiver the callee cannot accept, an error when '?.' is used on a
 * namespace, and a warning when '?.' is used where it is unnecessary.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) return;
    final boolean namespaceType = receiverType instanceof NamespaceType;
    final JetType calleeReceiverType = callee.getReceiverType();
    final boolean nullableReceiver = !namespaceType && receiverType.isNullable();
    final boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
    // A '.' call is unsafe exactly when the receiver may be null and the callee requires non-null.
    final boolean unsafeDotCall = nullableReceiver && calleeForbidsNullableReceiver;
    final IElementType operationSign = operationTokenNode.getElementType();
    if (unsafeDotCall && operationSign == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
    }
    else if (!unsafeDotCall && operationSign == JetTokens.SAFE_ACCESS) {
        if (namespaceType) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
/**
 * Checks the function body against the descriptor's declared return type, starting
 * from empty data-flow information.
 */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks the body of {@code function} against {@code functionDescriptor}'s return type
 * using the given data-flow info. For an expression body with no declared return type
 * the return type is inferred, so no expectation is imposed.
 *
 * Fix: removed a 31-line block of dead commented-out code (superseded by the
 * delegated 5-arg overload) that obscured the method's actual body.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body without an explicit return type: nothing to check against.
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
}
/**
 * Checks the function body against an explicit expected return type, starting from
 * empty data-flow information and without coercing the last expression.
 */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
/**
 * Core return-type check: types the body, reports unreachable code, and validates that
 * return statements are consistent with the body kind (block vs. expression) and the
 * expected return type.
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// Block bodies produce no value themselves; the expectation applies to return statements instead.
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
// Function literals may coerce a trailing expression to Unit.
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// A non-Unit function whose only "returned expression" was the empty body must report a missing return.
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
// Explicit 'return' is only legal inside a block body.
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// In a block body, a trailing non-Nothing expression does not count as a return value.
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Types a block expression: an empty block is Unit; otherwise statements are typed in a
 * fresh writable child scope and the last statement's type is the block's type.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of all returned expression
 * types; yields Nothing when the body never produces a value.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Types the function body, then maps every returned expression to its cached inferred
 * type; elements that implicitly return Unit map to the Unit type.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Populate EXPRESSION_TYPE entries in the trace before reading them back below.
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// A returned expression is used as a value, not a statement.
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
/**
 * Types each statement of a block in order, threading data-flow info forward; the last
 * statement is typed against the context's expected type. When Unit-coercion is allowed,
 * a temporary-trace dance tries the expected type first and falls back to no expectation,
 * committing whichever attempt produced no type mismatch.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
// Only the LAST statement carries the block's expected type.
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
// Attempt 1: type the last statement against Unit on a temporary trace.
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Attempt 2: retry with no expected type; commit whichever attempt had no mismatch.
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Propagate data-flow facts established by this statement into the next one.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Wraps a trace so that a typeMismatch reported against {@code expressionToWatch} sets
 * {@code mismatchFound[0]} — lets callers detect (and retry after) a failed expectation.
 */
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
// Only flag mismatches on the watched expression; others pass through unchanged.
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
/**
 * Refines {@code initialType} with the data-flow "out type" when {@code expression}
 * resolves to a known variable; otherwise returns the initial type unchanged.
 */
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) return initialType;
    final VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
    return variable == null ? initialType : context.dataFlowInfo.getOutType(variable);
}
/**
 * Reports a type mismatch when {@code expressionType} fails to satisfy the context's
 * expected type; the inferred type is always passed through unchanged.
 */
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    final JetType expected = context.expectedType;
    final boolean expectationPresent = expressionType != null && expected != null && expected != NO_EXPECTED_TYPE;
    if (expectationPresent && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, expected)) {
        context.trace.getErrorHandler().typeMismatch(expression, expected, expressionType);
    }
    return expressionType;
}
/**
 * Like checkType, but before reporting a mismatch it consults data-flow information:
 * if any data-flow-implied type of the underlying variable satisfies the expectation,
 * an automatic cast is recorded and the expected type is returned instead.
 */
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established data-flow types are checked first.
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType: possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the not-null "out type" (e.g. after a null check).
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// appropriateTypeFound implies variableDescriptor != null here, so checkAutoCast is safe.
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast of {@code expression} to {@code type}, or reports an error
 * when the underlying variable is mutable (its value could change after the check).
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
/**
 * Checks each argument's inferred type against the matching expected projection and
 * returns the (possibly enriched) types. Falls back to the input list unchanged when
 * the three lists do not line up element-for-element.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    final int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    final List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int index = 0; index < count; index++) {
        final JetType expected = expectedArgumentTypes.get(index).getType();
        checked.add(checkEnrichedType(argumentTypes.get(index), arguments.get(index), context.replaceExpectedType(expected)));
    }
    return checked;
}
/**
 * Resolves {@code receiverExpression} to a variable descriptor when it is a simple
 * name reference (possibly wrapped in a ':' type annotation); returns null otherwise.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        final JetBinaryExpressionWithTypeRHS typeAnnotated = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typeAnnotated.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            // 'expr : Type' does not change the underlying variable — look through it.
            return getVariableDescriptorFromSimpleName(typeAnnotated.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    final JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    final DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** Whether the last expression of a block may be coerced to Unit. */
private enum CoercionStrategy {
    NO_COERCION,
    COERCION_TO_UNIT
}
/**
 * Factory for TypeInferenceContext — the only sanctioned way to build one
 * (the constructor is @Deprecated for direct use).
 */
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
private class TypeInferenceContext {
// Trace receiving results/diagnostics for this inference pass.
public final BindingTrace trace;
// Resolver for type references, bound to this trace.
public final TypeResolver typeResolver;
// Resolver for class/value-parameter descriptors, bound to this trace.
public final ClassDescriptorResolver classDescriptorResolver;
// Lexical scope the current expression is typed in.
public final JetScope scope;
// Services facade bound to the same trace.
public final Services services;
// Data-flow facts in effect at the current expression.
public final DataFlowInfo dataFlowInfo;
// Type expected of the current expression (or NO_EXPECTED_TYPE / FORBIDDEN).
public final JetType expectedType;
// Return type expected of 'return' statements in the enclosing function.
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
// Returns a context identical to this one except for the data-flow info.
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
// Returns a context with the expected type replaced; null maps to NO_EXPECTED_TYPE,
// and this instance is reused when nothing changes.
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
// Returns a context with the expected return type replaced; null maps to NO_EXPECTED_TYPE.
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
// Returns a context writing to a different trace; reuses this instance when unchanged.
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
/**
 * Returns a context with both the expected type and the trace replaced, reusing this
 * instance when neither changes.
 *
 * Fix: the previous body invoked the constructor with six arguments including an
 * undefined identifier 'preferBlock' (the constructor takes five) — a compile error.
 * Route through the newContext(...) factory like the sibling replace* methods.
 */
public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
    if (newExpectedType == expectedType && newTrace == trace) return this;
    return newContext(newTrace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
// Returns a context identical to this one except for the lexical scope.
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
// Returns a context with both expectations replaced; reuses this instance when unchanged.
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow info produced by the most recently typed statement; null if none.
protected DataFlowInfo resultDataFlowInfo;
// Accessor used by block typing to thread data-flow facts between statements.
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
/**
 * Types an expression with caching: already-PROCESSED expressions return their cached
 * EXPRESSION_TYPE; otherwise the visitor runs, the result is recorded, Nothing-typed
 * results mark dominated code unreachable, and recursion failures become errors.
 */
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
if (result instanceof DeferredType) {
// Force lazily-computed types before caching.
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-null Nothing means the expression never terminates normally.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
// Convenience: types the expression in a different scope/data-flow context while
// keeping the current expectations.
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
// Clears per-statement state so the visitor can be reused for the next statement.
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Flags code dominated by a non-terminating (Nothing-typed) expression as unreachable,
 * reporting only root expressions to avoid duplicate highlighting of sub-expressions.
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    final List<JetElement> dominated = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominated);
    for (JetElement rootExpression : JetPsiUtil.findRootExpressions(dominated)) {
        context.trace.getErrorHandler().genericError(rootExpression.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Resolves a simple name reference. Field references ($foo) are looked up through the
 * scope's property-by-field mechanism; every other name is delegated to
 * getSelectorReturnType with a null receiver.
 *
 * Fix: removed a 36-line block of dead commented-out code (the old lookup path,
 * superseded by getSelectorReturnType / lookupNamespaceOrClassObject) that buried
 * the live control flow.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
    }
    // Unresolved field reference: no type.
    return null;
}
/**
 * Resolves a simple name as a classifier's class object; when no classifier matches,
 * defers to furtherNameLookup (namespaces). Returns null when nothing resolves.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
// A class object may only be used as a value in namespace position or when explicitly allowed.
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
// Whether names here may legally denote namespaces; overridden by the namespaces visitor.
public boolean isNamespacePosition() {
return false;
}
/**
 * Fallback lookup after variables/classifiers fail. In this base visitor a namespace
 * match is an error (expressions may not denote namespaces); returns true when the name
 * was handled. NOTE(review): result[0] is left unset on the namespace path — the caller
 * then sees a null type; presumably intentional since an error was already reported.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
NamespaceType namespaceType = lookupNamespaceType(expression, referencedName, context);
if (namespaceType != null) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
return true;
}
return false;
}
/**
 * Resolves {@code referencedName} as a namespace in the current scope, recording the
 * reference target on success; returns null when no such namespace exists.
 */
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    final NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace == null) return null;
    context.trace.record(REFERENCE_TARGET, expression, namespace);
    return namespace.getNamespaceType();
}
/**
 * Types an object literal by running the top-down analyzer on its declaration and
 * intercepting the descriptor recording to capture the object's (deferred) default type.
 */
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
if (declaration == expression.getObjectDeclaration()) {
// Defer the type: the class descriptor may not be fully initialized yet.
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Pre-populate the cache so recursive references see the type (see getType()'s manual-cache note).
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
/**
 * Infers the type of a function literal expression (an anonymous function),
 * producing a function type built from the receiver, parameter and return types.
 * When a function type is expected from the context, missing parameter types,
 * the receiver type and the return type are borrowed from that expected type.
 */
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
// No explicit receiver annotation: fall back to the enclosing scope's 'this' type.
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
// The context expects a function type: its value parameters can supply
// types for literal parameters declared without an explicit type.
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
if (valueParameters != null) {
// Borrow the parameter type from the expected function type.
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
// Extension-function type expected: take the receiver from the expected type.
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Explicit return type annotation: resolve it and check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
// Infer the return type from the body, coercing the last statement to Unit when allowed.
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
if (functionTypeExpected) {
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
// Unit-returning function expected: report the literal's type with Unit as
// the return type regardless of what the body actually produced.
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    // A parenthesized expression has exactly the type of the expression it wraps.
    JetExpression inner = expression.getExpression();
    JetType innerType = getType(inner, context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
/**
 * Infers the type of a constant literal (integer, float, boolean, character,
 * raw string or null) by resolving its compile-time value; on a resolution
 * error, reports it and falls back to the literal kind's default type.
 */
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String text = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> value = resolveConstantValue(resolver, elementType, text, context.expectedType, expression);
    if (value instanceof ErrorValue) {
        ErrorValue errorValue = (ErrorValue) value;
        context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
        return getDefaultType(elementType);
    }
    context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
    return context.services.checkType(value.getType(standardLibrary), expression, context);
}

// Maps a constant literal's node type to its resolved compile-time value.
private CompileTimeConstant<?> resolveConstantValue(CompileTimeConstantResolver resolver, IElementType elementType, String text, JetType expectedType, JetConstantExpression expression) {
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        return resolver.getIntegerValue(text, expectedType);
    }
    if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        return resolver.getFloatValue(text, expectedType);
    }
    if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return resolver.getBooleanValue(text, expectedType);
    }
    if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        return resolver.getCharValue(text, expectedType);
    }
    if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return resolver.getRawStringValue(text, expectedType);
    }
    if (elementType == JetNodeTypes.NULL) {
        return resolver.getNullValue(expectedType);
    }
    throw new IllegalArgumentException("Unsupported constant: " + expression);
}
/**
 * Fallback type for a constant literal whose compile-time value failed to
 * resolve: the natural type of that literal kind.
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return semanticServices.getStandardLibrary().getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return semanticServices.getStandardLibrary().getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return semanticServices.getStandardLibrary().getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return semanticServices.getStandardLibrary().getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return semanticServices.getStandardLibrary().getStringType();
    }
    if (constantType == JetNodeTypes.NULL) {
        // 'null' has the nullable Nothing type.
        return JetStandardClasses.getNullableNothingType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
/**
 * Infers the type of a 'throw' expression. The thrown expression is
 * type-checked for its side effects on the trace; since a 'throw' never
 * completes normally, the whole expression has type Nothing.
 */
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // Fixed: the inferred type was assigned to a local that was never read;
        // the call is kept for its trace-recording side effects.
        // TODO : check that it inherits Throwable
        getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
/**
 * Infers the type of a 'return' expression.
 * Reports an error where 'return' is forbidden; otherwise checks the returned
 * value (if any) against the enclosing function's expected return type, or
 * verifies that a bare 'return' is allowed (Unit-returning function).
 * The 'return' expression itself has type Nothing.
 */
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Type-check the returned value against the function's return type.
        // Fixed: removed a dead local ('returnedType' initialized to Unit) that
        // was assigned but never read.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else {
        // Bare 'return' is only valid when the function returns Unit
        // (or when there is no expectation at all).
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    // 'break' never yields a value, so its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    // 'continue' never yields a value, so its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * Returns true for expressions whose type is "flexible", i.e. can adapt to a
 * cast/ascription target: integer and float constant literals.
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) {
        return false;
    }
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
           || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Type-checks the left-hand side of a ':' / 'as' / 'as?' expression against
 * the resolved target type, recording results into the given temporary trace.
 *
 * @param targetType   the resolved right-hand-side type
 * @param expectedType expected type used while inferring the LHS (may be NO_EXPECTED_TYPE)
 * @return true if the operation type-checks; false on a type mismatch, an
 *         unresolvable LHS, or an unsupported operation sign
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
if (actualType == null) return false;
JetSimpleNameExpression operationSign = expression.getOperationSign();
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.COLON) {
// Ascription: the LHS must already be a subtype of the ascribed type.
if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
return false;
}
return true;
}
else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
// Casts are allowed even when unprovable; only warn on impossible/redundant ones.
checkForCastImpossibility(expression, actualType, targetType, context);
return true;
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
return false;
}
}
/**
 * Warns about casts that are either unnecessary (the target is already a
 * supertype of the actual type) or can never succeed (the types are unrelated).
 * Genuine downcasts pass silently.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    boolean targetIsSubtypeOfActual = typeChecker.isSubtypeOf(targetType, actualType);
    boolean actualIsSubtypeOfTarget = typeChecker.isSubtypeOf(actualType, targetType);
    if (targetIsSubtypeOfActual) {
        if (actualIsSubtypeOfTarget) {
            // The two types are mutually subtypes: the cast changes nothing.
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (actualIsSubtypeOfTarget) {
        // Pure upcast: ascription would do.
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
/**
 * Infers the type of a tuple literal as TupleN of its entry types.
 * When a tuple type is expected from the context, entry types are checked
 * (and possibly enriched) against the expected component types.
 */
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
List<JetExpression> entries = expression.getEntries();
List<JetType> types = new ArrayList<JetType>();
for (JetExpression entry : entries) {
types.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
}
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
List<JetType> enrichedTypes = context.services.checkArgumentTypes(types, entries, context.expectedType.getArguments(), context);
// Identity comparison: checkArgumentTypes returns the same list object when nothing changed.
if (enrichedTypes != types) {
return JetStandardClasses.getTupleType(enrichedTypes);
}
}
// TODO : labels
return context.services.checkType(JetStandardClasses.getTupleType(types), expression, context);
}
/**
 * Infers the type of a 'this' expression, optionally qualified by a label
 * (this@label) and/or a supertype qualifier (super-call style).
 * Resolves the receiver, records the reference target in the trace, and
 * reports errors for undefined 'this', ambiguous labels and bad supertype
 * qualifiers.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
JetType result = null;
JetType thisType = null;
String labelName = expression.getLabelName();
if (labelName != null) {
// Labelled 'this': find the class or function the label refers to.
Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
int size = declarationsByLabel.size();
final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
assert targetLabel != null;
if (size == 1) {
DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
if (declarationDescriptor instanceof ClassDescriptor) {
ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
thisType = classDescriptor.getDefaultType();
}
else if (declarationDescriptor instanceof FunctionDescriptor) {
// Labelled function: 'this' is its (extension) receiver, if any.
FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
thisType = functionDescriptor.getReceiverType();
}
else {
throw new UnsupportedOperationException(); // TODO
}
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
else if (size == 0) {
// No declaration in scope carries this label; fall back to the
// control-flow processor's label resolution.
// This uses the info written by the control flow processor
PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
if (psiElement instanceof JetFunctionLiteralExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
if (declarationDescriptor instanceof FunctionDescriptor) {
thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
if (thisType == null) {
// The function literal has no receiver: 'this' is undefined there;
// Nothing triggers the "'this' is not defined" error below.
thisType = JetStandardClasses.getNothingType();
}
else {
context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().unresolvedReference(targetLabel);
}
}
else {
context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
}
}
else {
// Unqualified 'this': use the enclosing scope's receiver type.
thisType = context.scope.getThisType();
DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
if (declarationDescriptorForUnqualifiedThis != null) {
context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
}
}
if (thisType != null) {
if (JetStandardClasses.isNothing(thisType)) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
}
else {
JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
if (superTypeQualifier != null) {
// Supertype-qualified 'this': the result is the matching declared
// supertype, substituted with this type's own arguments.
JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
// Errors are reported by the parser
if (superTypeElement instanceof JetUserType) {
JetUserType typeElement = (JetUserType) superTypeElement;
ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
if (classifierCandidate instanceof ClassDescriptor) {
ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
for (JetType declaredSupertype : supertypes) {
if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
break;
}
}
if (result == null) {
context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
}
}
}
} else {
result = thisType;
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
}
}
}
return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Infers the type of a 'when' expression as the common supertype of its entry
 * bodies. Each entry's conditions contribute data flow info and possibly new
 * variables (from binding patterns) visible inside that entry's body.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO :change scope according to the bound value in the when header
final JetExpression subjectExpression = expression.getSubjectExpression();
final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
// TODO : exhaustive patterns
Set<JetType> expressionTypes = Sets.newHashSet();
for (JetWhenEntry whenEntry : expression.getEntries()) {
JetWhenCondition[] conditions = whenEntry.getConditions();
DataFlowInfo newDataFlowInfo;
WritableScope scopeToExtend;
if (conditions.length == 1) {
// A single condition may bind pattern variables into the entry's scope.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
newDataFlowInfo = context.dataFlowInfo;
JetWhenCondition condition = conditions[0];
if (condition != null) {
newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
}
}
else {
// Multiple comma-separated conditions: data flow facts are OR-ed across the
// conditions, then AND-ed with the incoming info; no variables are bound.
scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
newDataFlowInfo = null;
for (JetWhenCondition condition : conditions) {
DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
if (newDataFlowInfo == null) {
newDataFlowInfo = dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
}
}
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
else {
newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
}
}
JetExpression bodyExpression = whenEntry.getExpression();
if (bodyExpression != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
if (type != null) {
expressionTypes.add(type);
}
}
}
if (!expressionTypes.isEmpty()) {
return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
}
else if (expression.getEntries().isEmpty()) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
}
return null;
}
/**
 * Checks a single 'when' condition against the subject, returning the data
 * flow info that holds inside the corresponding entry body. Pattern conditions
 * may bind variables into scopeToExtend.
 *
 * @param subjectVariables variables the subject is known to be bound to, used
 *                         for instanceof-style data flow refinement
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitWhenConditionCall(JetWhenConditionCall condition) {
// Call-style condition applied to the subject; its result must be boolean.
JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
if (callSuffixExpression != null) {
// JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
}
}
@Override
public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
// 'in range' / '!in range' condition on the subject.
JetExpression rangeExpression = condition.getRangeExpression();
if (rangeExpression != null) {
assert subjectExpression != null;
checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
}
}
@Override
public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
// 'is Pattern' condition: delegate to pattern matching, which also
// refines the data flow info for the entry body.
JetPattern pattern = condition.getPattern();
if (pattern != null) {
newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
}
});
return newDataFlowInfo[0];
}
/**
 * Type-checks a pattern against the subject type, extending scopeToExtend with
 * any variables bound by binding patterns, and returns the refined data flow
 * info for the success branch of the match.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
pattern.accept(new JetVisitorVoid() {
@Override
public void visitTypePattern(JetTypePattern typePattern) {
// 'is Type': check compatibility and record an instanceof fact for the subject.
JetTypeReference typeReference = typePattern.getTypeReference();
if (typeReference != null) {
JetType type = context.typeResolver.resolveType(context.scope, typeReference);
checkTypeCompatibility(type, subjectType, typePattern);
result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
}
}
@Override
public void visitTuplePattern(JetTuplePattern pattern) {
// Tuple pattern: arity must match the subject's tuple type; entries are
// then checked component-wise against the tuple's type arguments.
List<JetTuplePatternEntry> entries = pattern.getEntries();
TypeConstructor typeConstructor = subjectType.getConstructor();
if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
|| typeConstructor.getParameters().size() != entries.size()) {
context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
}
else {
for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
JetTuplePatternEntry entry = entries.get(i);
JetType type = subjectType.getArguments().get(i).getType();
// TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
ASTNode nameLabelNode = entry.getNameLabelNode();
if (nameLabelNode != null) {
context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
}
JetPattern entryPattern = entry.getPattern();
if (entryPattern != null) {
result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
}
}
}
}
@Override
public void visitDecomposerPattern(JetDecomposerPattern pattern) {
// Decomposer pattern: apply the decomposer to the subject, then match the
// argument list against the decomposer's return type.
JetExpression decomposerExpression = pattern.getDecomposerExpression();
if (decomposerExpression != null) {
JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
}
}
@Override
public void visitWildcardPattern(JetWildcardPattern pattern) {
// Nothing
}
@Override
public void visitExpressionPattern(JetExpressionPattern pattern) {
// Expression pattern: only type compatibility with the subject is checked here.
JetExpression expression = pattern.getExpression();
if (expression != null) {
JetType type = getType(expression, context.replaceScope(scopeToExtend));
checkTypeCompatibility(type, subjectType, pattern);
}
}
@Override
public void visitBindingPattern(JetBindingPattern pattern) {
// Binding pattern: declare a new variable bound to the subject, with either
// the subject's type or an explicitly annotated (wider) type.
JetProperty variableDeclaration = pattern.getVariableDeclaration();
JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
scopeToExtend.addVariableDescriptor(variableDescriptor);
if (propertyTypeRef != null) {
if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
}
}
JetWhenCondition condition = pattern.getCondition();
if (condition != null) {
// The nested condition may refer to the newly bound variable as well.
int oldLength = subjectVariables.length;
VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
newSubjectVariables[oldLength] = variableDescriptor;
result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
}
}
private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
// TODO : Take auto casts into account?
if (type == null) {
return;
}
// Two types are compatible iff their intersection is non-empty.
if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
}
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
}
});
return result[0];
}
/**
 * Infers the type of a try/catch/finally expression.
 * Without a finally block, the result is the common supertype of the try body
 * and all catch bodies; with a finally block, of the try body and the finally
 * block only (catch bodies are still type-checked for diagnostics).
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
JetExpression tryBlock = expression.getTryBlock();
List<JetCatchClause> catchClauses = expression.getCatchClauses();
JetFinallySection finallyBlock = expression.getFinallyBlock();
List<JetType> types = new ArrayList<JetType>();
for (JetCatchClause catchClause : catchClauses) {
JetParameter catchParameter = catchClause.getCatchParameter();
JetExpression catchBody = catchClause.getCatchBody();
if (catchParameter != null) {
// The catch parameter is visible only inside its own catch body's scope.
VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
if (catchBody != null) {
WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
catchScope.addVariableDescriptor(variableDescriptor);
JetType type = getType(catchBody, context.replaceScope(catchScope));
if (type != null) {
types.add(type);
}
}
}
}
if (finallyBlock != null) {
types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
JetType type = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
}
JetType type = getType(tryBlock, context.replaceScope(context.scope));
if (type != null) {
types.add(type);
}
if (types.isEmpty()) {
return null;
}
else {
return semanticServices.getTypeChecker().commonSupertype(types);
}
}
/**
 * Infers the type of an if-expression.
 * Data flow info extracted from the condition is propagated into each branch;
 * when one branch has type Nothing (it always jumps), the other branch's info
 * survives past the whole expression via resultDataFlowInfo.
 * An if-expression with a missing branch has type Unit.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
// Condition facts for the true case may also bind pattern variables into thenScope.
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// Then-branch always jumps: the negated condition holds after the 'if'.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// Else-branch always jumps: the condition holds after the 'if'.
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
// Both branches yield a value: the if's type is their common supertype.
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
// Mines data-flow facts (nullability / smart casts) out of a boolean binary
// expression appearing in a condition assumed to evaluate to 'conditionValue'.
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
WritableScope actualScopeToExtend;
// '&&' guarantees its operands' facts only when the whole condition is
// true; '||' only when it is false. In the other case no variables bound
// by sub-patterns may leak into the surrounding scope.
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
// Under the assumed truth value: '&&'-true and '||'-false compose facts
// conjunctively, the opposite cases disjunctively.
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
// (In)equality comparison: derive nullability facts for a simple-name operand.
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that 'left' is the simple-name side of the comparison;
// bail out if neither side is a simple name.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
// Comparing against a value of non-nullable type: equality being true
// implies the variable is non-null, hence the inverted flag.
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
// Symmetric case: the right operand is itself a variable compared against
// a non-nullable left operand.
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
/**
 * Records a nullability fact for {@code variableDescriptor} derived from a
 * comparison token. Equality tokens assert the opposite sense of inequality
 * tokens; unrelated tokens leave the result untouched.
 */
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
    boolean equalityToken = operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ;
    boolean inequalityToken = operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ;
    if (equalityToken) {
        result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
    }
    else if (inequalityToken) {
        result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
    }
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
    // Only logical negation affects the extracted facts: '!' flips the truth
    // value the sub-condition is analyzed under.
    if (expression.getOperationSign().getReferencedNameElementType() != JetTokens.EXCL) return;
    JetExpression negated = expression.getBaseExpression();
    if (negated == null) return;
    result[0] = extractDataFlowInfoFromCondition(negated, !conditionValue, scopeToExtend, context);
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
    // Parentheses are transparent for data-flow purposes: delegate to the
    // inner expression, if the parser produced one.
    JetExpression inner = expression.getExpression();
    if (inner == null) return;
    inner.accept(this);
}
});
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
/**
 * Type-checks a loop/branch condition in the given scope and reports an error
 * when its type is known and is not Boolean. A missing condition (parser
 * recovery) is accepted silently.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(condition, context.replaceScope(scope));
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
// A while-loop is a statement: its type is Unit regardless of the body.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression body = expression.getBody();
if (body != null) {
// The body is checked under the facts implied by the condition being true
// (smart casts, pattern-bound variables).
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, body, conditionInfo, context);
}
if (!flowInformationProvider.isBreakable(expression)) {
// resultScope = newWritableScopeImpl();
// No 'break' can exit this loop, so the code after it only runs once the
// condition is false; propagate the negated condition's facts.
// NOTE(review): 'condition' may be null here and is passed straight to
// extractDataFlowInfoFromCondition — verify that method tolerates null.
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
// do..while is a statement of type Unit. Unlike 'while', variables declared
// in the body remain visible to the condition, so the body is typed in a
// writable scope that is then reused as the condition's scope.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
// A parameterless function literal used as the body is treated as a
// plain block (recorded via BindingContext.BLOCK), not a closure value.
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// resultScope = newWritableScopeImpl();
// Without a 'break', the loop exits only when the condition is false.
// NOTE(review): 'condition' may be null here — confirm the callee accepts it.
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope parented on {@code scope} and owned by the
 * same containing declaration, reporting redeclarations to the trace's handler.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
// 'for (param in range) body' — the loop itself has type Unit.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
// Element type yielded by the range per the iterator()/hasNext/next() convention.
JetType expectedParameterType = null;
if (loopRangeType != null) {
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
// Explicitly typed parameter: its declared type must accept every
// element the range yields.
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// Untyped parameter: infer its type from the range, falling back to an
// error type so resolution can still proceed.
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Checks the for-loop convention on a loop-range type: it must expose a no-arg
 * iterator(), whose result must expose hasNext (as a function or a property)
 * and next(). Resolved members are recorded in the trace for later phases;
 * diagnostics are reported on the loop-range expression.
 *
 * @return the element type, i.e. the return type of iterator().next(),
 *         or null if the convention is not satisfied
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
ASTNode reportErrorsOn = loopRange.getNode();
OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
if (iteratorResolutionResult.isSuccess()) {
FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
JetType iteratorType = iteratorFunction.getReturnType();
// hasNext may be provided as a function or a property — but not both.
FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
boolean hasNextFunctionSupported = hasNextFunction != null;
VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
boolean hasNextPropertySupported = hasNextProperty != null;
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
else {
context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
}
OverloadResolutionResult nextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "next");
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
return nextFunction.getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
/**
 * Looks up a 'hasNext' member property on the iterator type. When the property
 * exists but is unreadable or not Boolean, an error is reported on the loop
 * range (the descriptor is still returned in that case).
 *
 * @return the property descriptor, or null when no such member property exists
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
// TODO :extension properties
if (hasNextProperty == null) {
return null;
} else {
// A null out-type means the property cannot be read.
JetType hasNextReturnType = hasNextProperty.getOutType();
if (hasNextReturnType == null) {
// TODO : accessibility
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
}
else if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextProperty;
}
/**
 * ASTNode-based variant of the for-loop convention check using exact-signature
 * resolution. Unlike the JetExpression overload above, it does not record the
 * resolved functions in the trace.
 * NOTE(review): this overload largely duplicates the loop-range version, and
 * the boolean-returning checkHasNextPropertySupport(ASTNode, ...) it calls is
 * not visible in this part of the file — verify that overload exists.
 *
 * @return the return type of iterator().next(), or null when the convention fails
 */
@Nullable
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
if (iteratorResolutionResult.isSuccess()) {
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
// hasNext may be provided as a function or a property — but not both.
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
return nextResolutionResult.getDescriptor().getReturnType();
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
<<<<<<< MINE
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return false;
} else {
JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return true;
}
=======
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult hasNextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return null;
} else {
assert hasNextResolutionResult.isSuccess();
JetType hasNextReturnType = hasNextResolutionResult.getFunctionDescriptor().getReturnType();
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextResolutionResult.getFunctionDescriptor();
}
>>>>>>> YOURS
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    // Hash-qualified access is not implemented: report on the operation token
    // and yield no type.
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
// Types 'receiver.selector' / 'receiver?.selector' / 'receiver?selector'.
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
// First resolve the selector against the declared receiver type, on a
// temporary trace so a failed attempt leaves no diagnostics behind.
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
// '?.': the result is nullable unless the selector yields Unit.
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Clean resolution failed: retry against each data-flow (smart-cast) type
// of the receiver variable, most recently established first.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard this attempt's diagnostics and start over on a fresh trace.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Nothing worked: surface the diagnostics of the clean attempt.
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
// 'receiver?selector': the selector must be Boolean and the result is the
// receiver's type made nullable.
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
// Null-safety check of the access, pointing at the resolved callee.
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function a selector expression ultimately calls, for null-safety
 * reporting. Descends through call, array-access, binary and qualified
 * expressions down to the underlying reference.
 *
 * @return the resolved function, or an error function when none is found
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
// The reference target was recorded during earlier resolution.
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// Fall back to a zero-argument error function so callers never see null.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
/**
 * Resolves the selector of a qualified expression against a receiver type:
 * a call expression, a simple name (property, then namespace/class object),
 * or a nested qualified expression.
 *
 * @return the selector's type, or null when resolution fails
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
if (selectorExpression instanceof JetCallExpression) {
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// Try a property first; commit its trace only on success.
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Otherwise look the name up as a namespace or class object, in the
// receiver's member scope when a receiver type is known.
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
// Nested qualification: resolve the inner receiver first (with no expected
// type), then recurse on the inner selector.
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    // No receiver: resolve the call in the current scope, then validate the
    // resolved type against the expected one.
    JetType resolvedType = context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
// 'a is Pattern' is always Boolean. As a side effect, the data-flow info and
// the variables bound by the pattern are recorded so that enclosing
// conditions can pick them up (see extractDataFlowInfoFromCondition).
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
JetPattern pattern = expression.getPattern();
if (pattern != null && knownType != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
}
return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
// Types unary operators via their convention functions; label prefixes are
// passed through to the labeled expression.
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
// '++'/'--': a Unit-returning operator is accepted as statement-like;
// otherwise the result must be assignable back into the receiver, and the
// reassignment is recorded for later phases.
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
result = JetStandardClasses.getUnitType();
}
else {
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
/**
 * Infers the type of a binary expression, dispatching on the operation token:
 * named infix calls, convention operators, assignments, comparisons, equality,
 * 'in'/'!in', '&amp;&amp;'/'||' and the elvis operator '?:'.
 * Fix: corrected the grammar of the elvis warning message
 * ("is always returns" -&gt; "always returns").
 *
 * @return the inferred type, or null when the operation cannot be resolved
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Operands are checked without the caller's expected type; only the final
    // result (and elvis' right operand) is validated against it.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Infix function call: 'a foo b'.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Convention operator (e.g. '+' resolves through its named function).
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' desugar to compareTo, which must return Int;
        // the comparison expression itself is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // '==' / '!=' require an equals(Any?) : Boolean on the left operand.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, "equals",
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            // Warn when the operand types can never be equal.
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Equality is Boolean even when resolution above failed.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // NOTE(review): this assignment is dead — the method returns null on
            // the next line, bypassing checkType. It was likely meant to fall
            // through with an error type; behavior preserved as-is.
            result = ErrorUtils.createErrorType("No right argument"); // TODO
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // For '&&' the right operand sees facts (and variables) established by
        // the left one, so it reuses the left scope.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        // The right operand alone is checked against the caller's expected type.
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks 'a in b' / 'a !in b': resolves the convention function
 * contains(a) on the right operand's type and verifies it returns Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String conventionName = "contains";
    JetType rangeType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor containsFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            conventionName, rangeType, context.expectedType);
    JetType containsReturnType = containsFunction == null ? null : containsFunction.getReturnType();
    ensureBooleanResult(operationSign, conventionName, containsReturnType, context);
}
/**
 * For an equality-style binary expression, verifies that the static types of
 * the two operands have a non-empty intersection; otherwise the comparison can
 * never hold, and an error is reported on the whole expression.
 * Does nothing when either operand's type cannot be computed.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetExpression lhs = expression.getLeft();
    JetExpression rhs = expression.getRight();
    JetType lhsType = getType(lhs, context.replaceScope(context.scope));
    if (lhsType == null || rhs == null) {
        return;
    }
    JetType rhsType = getType(rhs, context.replaceScope(context.scope));
    if (rhsType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(lhsType, rhsType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        JetSimpleNameExpression operationSign = expression.getOperationReference();
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + lhsType + " and " + rhsType);
    }
}
/**
 * In expression position, compound assignments ('+=' etc.) are rejected;
 * the writable-scope subclass overrides this to handle statement position.
 */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * In expression position, plain assignments ('=') are rejected;
 * the writable-scope subclass overrides this to handle statement position.
 */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
return assignmentIsNotAnExpressionError(expression, context);
}
/** Reports that assignments are statements, not expressions; always yields no type (null). */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
return null;
}
/** Convenience overload: uses the quoted operation name as the error-message subject. */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Verifies that {@code resultType} is Boolean, reporting an error naming
 * {@code subjectName} otherwise. A null result type is treated as "nothing to
 * check" and passes. Returns true when no error was reported.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    if (resultType == null) {
        // Type could not be computed: don't pile a second error on top.
        return true;
    }
    // TODO : Relax?
    if (isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True if the given type is convertible to the standard library's Boolean type. */
private boolean isBoolean(@NotNull JetType type) {
return semanticServices.getTypeChecker().isConvertibleTo(type, semanticServices.getStandardLibrary().getBooleanType());
}
/**
 * Types 'arr[indices]' in read position by resolving it as a call of the
 * 'get' convention function on the array expression's type. Returns the
 * resolved function's return type (checked against the caller's expected
 * type), or null when either the receiver or the call cannot be resolved.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Subexpressions are typed without the caller's expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType arrayType = getType(expression.getArrayExpression(), context.replaceScope(context.scope));
    if (arrayType == null) {
        return null;
    }
    FunctionDescriptor getFunction = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression, expression.getIndexExpressions()),
            expression,
            "get",
            arrayType,
            context.expectedType);
    if (getFunction == null) {
        return null;
    }
    return context.services.checkType(getFunction.getReturnType(), expression, contextWithExpectedType);
}
/**
 * Resolves a binary expression 'a OP b' as the conventional call 'a.name(b)':
 * the left operand's type is computed first and used as the receiver. Returns
 * the resolved function's return type, or null when resolution fails.
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
JetType leftType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
CallMaker.makeCall(binaryExpression),
binaryExpression.getOperationReference(),
name,
leftType,
context.expectedType);
if (functionDescriptor != null) {
// if (leftType.isNullable()) {
// // TODO : better error message for '1 + nullableVar' case
// context.trace.getErrorHandler().genericError(operationSign.getNode(),
// "Infix call corresponds to a dot-qualified call '" +
// left.getText() + "." + name + "(" + right.getText() + ")'" +
// " which is not allowed on a nullable receiver '" + right.getText() + "'." +
// " Use '?.'-qualified call instead");
// }
return functionDescriptor.getReturnType();
}
return null;
}
/** Declarations are statements; in expression position they are reported as errors. */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
return null;
}
/**
 * 'namespace' is rejected here; the namespaces-aware subclass overrides this
 * for positions where namespace references are legal.
 */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
return null;
}
/**
 * Types a string template. Every entry is visited: interpolated expressions
 * are type-checked, literal text and valid escapes are accumulated into
 * {@code builder}. If no entry disqualifies the template, the concatenation is
 * recorded as a compile-time String constant. The result type is always String.
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
final StringBuilder builder = new StringBuilder();
// value[0] == OUT_OF_RANGE sentinel means "not a compile-time constant".
final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
for (JetStringTemplateEntry entry : expression.getEntries()) {
entry.accept(new JetVisitorVoid() {
@Override
public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
JetExpression entryExpression = entry.getExpression();
if (entryExpression != null) {
getType(entryExpression, context.replaceScope(context.scope));
}
// An interpolated expression makes the whole template non-constant.
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
@Override
public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
builder.append(entry.getText());
}
@Override
public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
// TODO : Check escape
String text = entry.getText();
assert text.length() == 2 && text.charAt(0) == '\\';
char escaped = text.charAt(1);
Character character = CompileTimeConstantResolver.translateEscape(escaped);
if (character == null) {
context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
}
else {
builder.append(character);
}
}
});
}
// Identity comparison against the sentinel: value[0] still null means "constant".
if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
}
return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/** Fallback for PSI elements with no dedicated visit method: reports and yields no type. */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
/**
 * Variant of the type inferrer for positions where namespace names are legal
 * as (pseudo-)expressions, e.g. qualifiers: 'namespace' and plain namespace
 * references resolve to namespace types instead of being errors.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
@Override
public boolean isNamespacePosition() {
return true;
}
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
// Unlike the base visitor, 'namespace' is accepted and gets the root namespace type.
return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
}
@Override
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
// Extra lookup step: try resolving the name as a namespace; result[0] is an out-parameter.
result[0] = lookupNamespaceType(expression, referencedName, context);
return result[0] != null;
}
}
/**
 * Type inferrer for statement (block) position: declarations encountered while
 * visiting are registered in the given writable scope so later statements can
 * see them, and assignments are handled as statements instead of being
 * reported as errors (as the base visitor does).
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
private final WritableScope scope;
public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
this.scope = scope;
}
// public TypeInferrerVisitorWithWritableScope(@NotNull BindingTrace trace, @NotNull JetScope scope) {
// super(trace);
// this.scope = newWritableScopeImpl(scope, trace).setDebugName("Block scope");
// }
@Override
public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
// Analyze the object declaration, then expose it in the block scope as a
// property so subsequent statements can reference it by name.
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
if (classDescriptor != null) {
PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
scope.addVariableDescriptor(propertyDescriptor);
}
return null; // declarations are statements: no value
}
@Override
public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
// Local 'val'/'var': receivers and custom accessors are only legal on members.
JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
if (receiverTypeRef != null) {
context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
}
JetPropertyAccessor getter = property.getGetter();
if (getter != null) {
context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
}
JetPropertyAccessor setter = property.getSetter();
if (setter != null) {
context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
}
VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
JetExpression initializer = property.getInitializer();
if (property.getPropertyTypeRef() != null && initializer != null) {
// The declared type is used as the expected type of the initializer;
// presumably the mismatch is reported inside getType (see the removed
// explicit check below) — TODO confirm.
JetType outType = propertyDescriptor.getOutType();
JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
// if (outType != null &&
// initializerType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
// context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
// }
}
// Make the variable visible to subsequent statements in this block.
scope.addVariableDescriptor(propertyDescriptor);
return null;
}
@Override
public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
// Local function: resolve its descriptor, register it in the block scope,
// then check its body against its return type.
FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
scope.addFunctionDescriptor(functionDescriptor);
context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
return null;
}
@Override
public JetType visitClass(JetClass klass, TypeInferenceContext context) {
return super.visitClass(klass, context); // TODO
}
@Override
public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
return super.visitTypedef(typedef, context); // TODO
}
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
// Declaration kinds without a dedicated handler are unsupported in a block.
return visitJetElement(dcl, context);
}
@Override
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
// 'a += b': first try the 'plusAssign'-style function on a temporary trace;
// if that does not resolve, fall back to the plain counterpart ('plus') and
// record a variable reassignment. The temporary trace is committed only
// when the assign-operator variant is the one that resolved.
IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
String name = assignmentOperationNames.get(operationType);
TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
if (assignmentOperationType == null) {
String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
if (typeForBinaryCall != null) {
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
}
else {
temporaryBindingTrace.commit();
}
return null; // assignments are statements: no value
}
@Override
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
JetExpression left = expression.getLeft();
JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
JetExpression right = expression.getRight();
if (deparenthesized instanceof JetArrayAccessExpression) {
// 'a[i] = x' desugars to a call of the 'set' convention function.
JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
}
JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
if (right != null) {
// LHS type becomes the expected type of the RHS; presumably mismatches
// are reported inside getType (see removed explicit check) — TODO confirm.
JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
// if (rightType != null &&
// leftType != null &&
// !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
// context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
// }
}
return null;
}
private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
// Resolves 'arr[indices] = rhs' as a call to 'set' on the array expression.
JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
if (receiverType == null) return null;
//
Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
// // TODO : nasty hack: effort is duplicated
// context.services.callResolver.resolveCallWithGivenName(
// scope,
// call,
// arrayAccessExpression,
// "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
scope,
call,
arrayAccessExpression,
"set", receiverType, NO_EXPECTED_TYPE);
if (functionDescriptor == null) return null;
// The operation sign ('=') is bound to the chosen 'set' function.
context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
}
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
return null;
}
}
}
Unstructured
package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
/**
* @author abreslav
*/
public class JetTypeInferrer {
/**
 * Sentinel "type" for contexts where the expected type must never be consulted
 * (e.g. 'return' inside an expression-bodied function). Every method throws so
 * accidental use fails fast; compared by identity, never inspected.
 */
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
/**
 * Sentinel "type" meaning "no expected-type constraint". Compared by identity
 * (e.g. {@code expectedType != NO_EXPECTED_TYPE}); every method throws so it
 * is never mistaken for a real type.
 */
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Unary operator tokens -> names of the convention functions they desugar to.
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Binary operator tokens -> convention function names ('a * b' -> 'a.times(b)').
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Token groups handled by dedicated code paths rather than plain convention calls.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Compound-assignment tokens -> convention function names ('a += b' -> 'a.plusAssign(b)').
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Compound-assignment token -> its plain binary counterpart ('+=' -> '+'), used
// as a fallback when no 'xxxAssign' function resolves.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern caches; presumably filled during pattern-matching analysis
// elsewhere in this class — not used in this excerpt, TODO confirm.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
/** Creates an inferrer bound to the given flow-information provider and semantic services. */
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
/** Entry point: returns a {@link Services} facade recording results into the given trace. */
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
/** Binds all inference machinery (visitors, constant and call resolvers) to one trace. */
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
/** Creates a statement-position visitor that registers declarations in the given scope. */
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Like {@link #getType}, but never returns null: when no type can be inferred,
 * an error type mentioning the expression's text is substituted.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null
           ? inferred
           : ErrorUtils.createErrorType("Type for " + expression.getText());
}
/**
 * Types an expression in expression position (returns are FORBIDDEN here);
 * returns null when no type can be computed.
 */
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
/** Like getType, but namespace references are allowed (qualifier position), with no expected type. */
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
/** Exposes the call resolver bound to this Services instance's trace. */
public CallResolver getCallResolver() {
return callResolver;
}
/**
 * Diagnoses '.' vs '?.' usage on a call: an error when '.' is used on a
 * nullable receiver the callee cannot accept, and a warning/error when '?.'
 * is unnecessary (non-null receiver) or meaningless (namespace receiver).
 * Does nothing when the receiver type or callee is unknown.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
if (receiverType != null && callee != null) {
boolean namespaceType = receiverType instanceof NamespaceType;
JetType calleeReceiverType = callee.getReceiverType();
// Namespaces are never treated as nullable for this check.
boolean nullableReceiver = !namespaceType && receiverType.isNullable();
// A callee whose declared receiver is nullable may be dot-called on a nullable value.
boolean calleeForbidsNullableReceiver = calleeReceiverType == null || !calleeReceiverType.isNullable();
IElementType operationSign = operationTokenNode.getElementType();
if (nullableReceiver && calleeForbidsNullableReceiver && operationSign == JetTokens.DOT) {
trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
}
else if ((!nullableReceiver || !calleeForbidsNullableReceiver) && operationSign == JetTokens.SAFE_ACCESS) {
if (namespaceType) {
trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
}
else {
trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
}
}
}
}
/** Convenience overload: checks a function's body with empty initial data-flow info. */
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks a function's body against its declared return type. When the function
 * has an expression body and no declared return type, the return type is to be
 * inferred, so no expected type is imposed on the body.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
JetType expectedReturnType = functionDescriptor.getReturnType();
if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
expectedReturnType = NO_EXPECTED_TYPE;
}
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
// Map<JetElement, JetType> typeMap = collectReturnedExpressionsWithTypes(outerScope, function, functionDescriptor, expectedReturnType);
// if (typeMap.isEmpty()) {
// return; // The function returns Nothing
// }
// for (Map.Entry<JetElement, JetType> entry : typeMap.entrySet()) {
// JetType actualType = entry.castValue();
// JetElement element = entry.getKey();
// JetTypeChecker typeChecker = semanticServices.getTypeChecker();
// if (!typeChecker.isSubtypeOf(actualType, expectedReturnType)) {
// if (typeChecker.isConvertibleBySpecialConversion(actualType, expectedReturnType)) {
// if (expectedReturnType.getConstructor().equals(JetStandardClasses.getUnitType().getConstructor())
// && element.getParent() instanceof JetReturnExpression) {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type Unit");
// }
// }
// else {
// if (element == function) {
// JetExpression bodyExpression = function.getBodyExpression();
// assert bodyExpression != null;
// context.trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// else if (element instanceof JetExpression) {
// JetExpression expression = (JetExpression) element;
// context.trace.getErrorHandler().typeMismatch(expression, expectedReturnType, actualType);
// }
// else {
// context.trace.getErrorHandler().genericError(element.getNode(), "This function must return a value of type " + expectedReturnType);
// }
// }
// }
// }
}
/** Checks a function body directly against a known expected return type, with no coercion. */
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
/**
 * Core return-type check: types the function body, reports unreachable code,
 * then validates each returned expression. Block bodies type statements with
 * no expected type and check 'return's against the expected RETURN type;
 * expression bodies check the body itself against the expected type and
 * forbid 'return' (via the FORBIDDEN sentinel).
 */
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
// Function literals may implicitly coerce their last expression to Unit.
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A non-'return' trailing expression in a block body is only acceptable
// when it has type Nothing (e.g. a throw); otherwise 'return' is required.
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
/**
 * Types a block expression. An empty block has type Unit; otherwise the
 * statements are typed in a fresh writable scope nested in the outer scope,
 * and the last statement's type is the block's type.
 */
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of the types of all
 * returned expressions; a function that returns nowhere gets type Nothing.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Collection<JetType> returnedTypes =
            collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor).values();
    if (returnedTypes.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(returnedTypes);
}
/**
 * Types the function body, then maps every returned expression to its cached
 * type; elements that implicitly return Unit map to the Unit type. Expressions
 * whose type could not be computed are omitted. Used for return-type inference.
 */
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// Mark returned expressions as non-statements (their values are used) —
// presumably how STATEMENT=false is interpreted downstream; TODO confirm.
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
/**
 * Types the statements of a block in order against a writable scope and
 * returns the last statement's type (null when it has none). The last
 * statement, when an expected type exists and coercion-to-Unit is requested,
 * is first tried against the expected type on a temporary trace; on a type
 * mismatch it is retried with no expected type, and only the better-matching
 * attempt's bindings are committed. Data-flow info computed by one statement
 * is propagated to the following statements.
 */
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
// Last statement with an expected type: its type is the block's type.
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Expecting Unit failed: retry without an expected type, and keep
// whichever attempt produced no mismatch (or the first if both failed).
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
// Intermediate statement (or no expected type): typed without constraints.
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Thread the statement's resulting data-flow info into subsequent statements.
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
/**
 * Wraps {@code trace} so that a typeMismatch error reported against exactly
 * {@code expressionToWatch} additionally sets {@code mismatchFound[0]}.
 * The composite handler still forwards to the original error handler, so
 * diagnostics are not suppressed — callers inspect the flag to decide whether
 * to retry type checking under a different expected type.
 */
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
// Identity comparison is intentional: only this exact PSI node counts.
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
// TODO: revisit data-flow-based enrichment
/**
 * Refines {@code initialType} using data-flow facts: when the expression is a
 * simple variable reference (possibly behind "e : T"), the data-flow "out" type
 * of that variable is returned instead.
 */
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression != null) {
        VariableDescriptor variable = getVariableDescriptorFromSimpleName(expression, context);
        if (variable != null) {
            return context.dataFlowInfo.getOutType(variable);
        }
    }
    // Not a tracked variable: keep the statically inferred type.
    return initialType;
}
@Nullable
/**
 * Reports a type mismatch if {@code expressionType} is known and fails to be a
 * subtype of the context's expected type. Always returns {@code expressionType}
 * unchanged, so callers can use this as a pass-through check.
 */
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    JetType expected = context.expectedType;
    // Nothing to verify when no type was inferred or no expectation is imposed.
    if (expressionType == null || expected == null || expected == NO_EXPECTED_TYPE) {
        return expressionType;
    }
    boolean conforms = semanticServices.getTypeChecker().isSubtypeOf(expressionType, expected);
    if (!conforms) {
        context.trace.getErrorHandler().typeMismatch(expression, expected, expressionType);
    }
    return expressionType;
}
@Nullable
/**
 * Like checkType, but before reporting a mismatch it consults data-flow
 * information: if the expression is a simple variable reference whose possible
 * (auto-cast) types include a subtype of the expected type, the check succeeds
 * and the EXPECTED type is returned (recording the auto-cast). Otherwise a
 * mismatch is reported and the original type is returned.
 */
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
// Fast path: no type, no expectation, or already conformant.
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established facts are tried first (hence the reversal).
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType : possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the data-flow "out" type (e.g. null-checked refinement).
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// appropriateTypeFound implies variableDescriptor != null here.
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
/**
 * Records an automatic cast of {@code expression} to {@code type}, or reports
 * an error when the variable is mutable — a 'var' may be reassigned between
 * the check that justified the cast and the use, so the cast is unsound.
 */
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
    }
    else {
        trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
    }
}
@NotNull
/**
 * Applies checkEnrichedType element-wise to a tuple's component types against
 * the expected component projections. Returns the input list unchanged (same
 * reference) when the three lists do not line up one-to-one; callers rely on
 * that reference identity to detect "no enrichment happened".
 */
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    if (count == 0 || argumentTypes.size() != count || expectedArgumentTypes.size() != count) {
        return argumentTypes;
    }
    List<JetType> checkedTypes = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        TypeInferenceContext componentContext = context.replaceExpectedType(expectedArgumentTypes.get(i).getType());
        checkedTypes.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), componentContext));
    }
    return checkedTypes;
}
@Nullable
/**
 * Resolves {@code receiverExpression} to the variable it names, if any:
 * looks through "e : T" annotations (COLON) recursively and then reads the
 * recorded reference target of a simple-name expression.
 */
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typedExpression = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        // "e : T" does not change which variable is referenced — unwrap the LHS.
        if (typedExpression.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            return getVariableDescriptorFromSimpleName(typedExpression.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
    return (target instanceof VariableDescriptor) ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Whether the last expression of a block may be coerced to Unit when the
// expected type is Unit (used for function bodies and function literals).
private enum CoercionStrategy {
NO_COERCION,
COERCION_TO_UNIT
}
@NotNull
// Sole factory for TypeInferenceContext (the constructor is deprecated to
// funnel all creation through here).
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
public final DataFlowInfo dataFlowInfo;
public final JetType expectedType;
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
<<<<<<< MINE
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
if (newExpectedType == expectedType && newTrace == trace) return this;
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
=======
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
>>>>>>> YOURS
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow facts produced by the most recently typed statement; cleared by
// resetResult() between statements of a block.
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
@Nullable
/**
 * Main entry point for typing an expression. Serves cached results for
 * already-PROCESSED expressions, dispatches to the visitor, forces deferred
 * types, records the resulting type plus unreachability info, and recovers
 * from recursive lazy-value computation. Always marks the expression PROCESSED
 * before returning.
 */
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
// Cache hit: expression was fully typed earlier.
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// A non-nullable Nothing-typed expression never completes normally,
// so everything it dominates is dead code.
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
/**
 * Convenience wrapper: types {@code expression} in a different scope and with
 * updated data-flow info, keeping the context's trace and expected types.
 */
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
    TypeInferenceContext rebasedContext =
            newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType);
    return getType(expression, rebasedContext);
}
/** Clears per-statement state so this visitor can be reused for the next statement. */
public void resetResult() {
    // Only the data-flow result is currently tracked between statements.
    resultDataFlowInfo = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Reports "unreachable code" for everything dominated by {@code expression}
 * (which has type Nothing and never terminates normally). Diagnostics are
 * issued only on root expressions so nested dead code is not double-reported.
 */
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
        "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
/**
 * Types a simple name: field references ($foo, FIELD_IDENTIFIER tokens) are
 * resolved against the scope's backing-field namespace; every other name is
 * delegated to getSelectorReturnType with a null receiver.
 *
 * NOTE(review): a ~35-line block of commented-out legacy resolution code was
 * removed here — it duplicated the live logic in lookupNamespaceOrClassObject
 * and furtherNameLookup below and only obscured the method.
 */
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
    }
    // Unresolved field reference: no type.
    return null;
}
/**
 * Resolves a simple name as a classifier's class object or, failing that,
 * defers to furtherNameLookup (namespace resolution). Returns an error type
 * when the classifier exists but has no usable class object, and null when
 * nothing matched at all.
 */
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
if (classifier != null) {
JetType classObjectType = classifier.getClassObjectType();
JetType result = null;
// A class object is usable as a value only in namespace position or when
// the classifier explicitly allows it.
if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
result = classObjectType;
}
else {
context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
}
// The reference target is recorded even on the error path.
context.trace.record(REFERENCE_TARGET, expression, classifier);
if (result == null) {
return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
}
return context.services.checkEnrichedType(result, expression, context);
}
else {
JetType[] result = new JetType[1];
if (furtherNameLookup(expression, referencedName, result, context)) {
return context.services.checkEnrichedType(result[0], expression, context);
}
}
return null;
}
// Overridden by subclass visitors that type expressions in namespace position
// (e.g. receivers of qualified names); the base visitor never is.
public boolean isNamespacePosition() {
return false;
}
/**
 * Fallback lookup after variable/classifier resolution failed. In the base
 * visitor a name resolving to a namespace is an error (a bare namespace is not
 * a value); returns true when the lookup was handled (even erroneously),
 * false to let the caller report an unresolved reference.
 */
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
@Nullable
/**
 * Resolves {@code referencedName} as a namespace in the current scope,
 * recording the reference target on success; null when no such namespace.
 */
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    NamespaceDescriptor namespace = context.scope.getNamespace(referencedName);
    if (namespace != null) {
        // Record the resolution before exposing the namespace's type.
        context.trace.record(REFERENCE_TARGET, expression, namespace);
        return namespace.getNamespaceType();
    }
    return null;
}
@Override
/**
 * Types an object literal by running the top-down analyzer on its declaration.
 * The type is captured via a trace record-handler: when the analyzer records
 * the descriptor for this object declaration, a DeferredType wrapping its
 * default type is stored — deferred because the descriptor may still be under
 * construction (recursive definitions).
 */
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
final JetType[] result = new JetType[1];
BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
@Override
public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
// Only react to the record for THIS object literal's declaration.
if (declaration == expression.getObjectDeclaration()) {
JetType defaultType = new DeferredType(new LazyValue<JetType>() {
@Override
protected JetType compute() {
return ((ClassDescriptor) descriptor).getDefaultType();
}
});
result[0] = defaultType;
// Pre-populate the cache so recursive references see the deferred type
// (see the manual-cache note in getType()).
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
context.trace.record(BindingContext.PROCESSED, expression);
}
}
}
};
BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
// Watch every declaration-to-descriptor slice the analyzer may write to.
for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
//noinspection unchecked
traceAdapter.addHandler(slice, handler);
}
TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
return context.services.checkType(result[0], expression, context);
}
@Override
/**
 * Types a function literal { (params) => body }. Builds a FunctionDescriptor,
 * resolving each parameter type either from its explicit annotation or from
 * the expected function type (when one is imposed by the context), infers the
 * return type from the annotation or the body, and yields the corresponding
 * function type.
 */
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
final JetType receiverType;
if (receiverTypeRef != null) {
receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
} else {
receiverType = context.scope.getThisType();
}
FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
List<JetType> parameterTypes = new ArrayList<JetType>();
List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
List<JetParameter> parameters = functionLiteral.getValueParameters();
JetType expectedType = context.expectedType;
// When the context expects a function type, its parameter/receiver/return
// types seed inference for unannotated parts of the literal.
List<ValueParameterDescriptor> valueParameters = null;
boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
if (functionTypeExpected) {
valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
}
for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
JetParameter parameter = parameters.get(i);
JetTypeReference typeReference = parameter.getTypeReference();
JetType type;
if (typeReference != null) {
// Explicit annotation wins.
type = context.typeResolver.resolveType(context.scope, typeReference);
}
else {
if (valueParameters != null) {
// Borrow the type from the expected function type's parameter.
type = valueParameters.get(i).getOutType();
}
else {
context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
type = ErrorUtils.createErrorType("Cannot be inferred");
}
}
ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
parameterTypes.add(valueParameterDescriptor.getOutType());
valueParameterDescriptors.add(valueParameterDescriptor);
}
JetType effectiveReceiverType;
if (receiverTypeRef == null) {
if (functionTypeExpected) {
effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
}
else {
effectiveReceiverType = null;
}
}
else {
effectiveReceiverType = receiverType;
}
functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
JetType returnType = NO_EXPECTED_TYPE;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
if (returnTypeRef != null) {
// Annotated return type: check the body against it.
returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
}
else {
// No annotation: infer from the body, expecting the context's return type
// (when a function type is expected) so coercion-to-Unit can apply.
if (functionTypeExpected) {
returnType = JetStandardClasses.getReturnType(expectedType);
}
returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
}
JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
functionDescriptor.setReturnType(safeReturnType);
if (functionTypeExpected) {
// If Unit is expected as the return type, report the literal AS returning
// Unit regardless of the body's actual type (coercion to Unit).
JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
if (JetStandardClasses.isUnit(expectedReturnType)) {
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
}
}
return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
/**
 * Parentheses are transparent: the inner expression is typed in the same
 * scope and its type is checked against the expectation attached to the
 * parenthesized expression itself.
 */
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
@Override
/**
 * Types a literal constant by dispatching on the node's element type to the
 * compile-time constant resolver (which may adapt the value to the expected
 * type, e.g. integer literals). On a resolution error the diagnostic is
 * reported and the literal kind's default type is returned; on success the
 * compile-time value is recorded on the trace.
 */
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
ASTNode node = expression.getNode();
IElementType elementType = node.getElementType();
String text = node.getText();
JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
CompileTimeConstantResolver compileTimeConstantResolver = context.services.compileTimeConstantResolver;
CompileTimeConstant<?> value;
if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
value = compileTimeConstantResolver.getIntegerValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
value = compileTimeConstantResolver.getFloatValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
value = compileTimeConstantResolver.getBooleanValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
value = compileTimeConstantResolver.getCharValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
value = compileTimeConstantResolver.getRawStringValue(text, context.expectedType);
}
else if (elementType == JetNodeTypes.NULL) {
value = compileTimeConstantResolver.getNullValue(context.expectedType);
}
else {
throw new IllegalArgumentException("Unsupported constant: " + expression);
}
if (value instanceof ErrorValue) {
ErrorValue errorValue = (ErrorValue) value;
context.trace.getErrorHandler().genericError(node, errorValue.getMessage());
// Fall back to the literal kind's default type so typing can continue.
return getDefaultType(elementType);
}
else {
context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, value);
return context.services.checkType(value.getType(standardLibrary), expression, context);
}
}
@NotNull
/**
 * Default type per literal kind, used as a fallback when compile-time
 * constant resolution fails for a literal of that kind.
 */
private JetType getDefaultType(IElementType constantType) {
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return semanticServices.getStandardLibrary().getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return semanticServices.getStandardLibrary().getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return semanticServices.getStandardLibrary().getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return semanticServices.getStandardLibrary().getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return semanticServices.getStandardLibrary().getStringType();
    }
    if (constantType == JetNodeTypes.NULL) {
        // 'null' is the sole value of Nothing?.
        return JetStandardClasses.getNullableNothingType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
@Override
/**
 * Types a 'throw'. The thrown expression is typed for its trace side effects
 * (its value is not otherwise used here); the throw itself has type Nothing
 * because it never completes normally.
 */
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    JetExpression thrownExpression = expression.getThrownExpression();
    if (thrownExpression != null) {
        // TODO : check that it inherits Throwable
        getType(thrownExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
/**
 * Types a 'return'. Reports an error where return is forbidden, checks a
 * returned value against the enclosing function's expected return type, and
 * flags a bare 'return' in a non-Unit function. The return expression itself
 * has type Nothing since it never completes normally.
 *
 * Fix: removed the local 'returnedType' (initialized to Unit but never read —
 * a dead store).
 */
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Check the returned value against the function's declared return type.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else {
        // Bare 'return' only type-checks when the function returns Unit.
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
// 'break' never completes normally, hence type Nothing.
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
// 'continue' never completes normally, hence type Nothing.
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * True for expressions whose type may flex to fit a target type: integer and
 * float literals (e.g. an integer literal can be a Byte, Int or Long).
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) {
        return false;
    }
    IElementType elementType = expression.getNode().getElementType();
    return elementType == JetNodeTypes.INTEGER_CONSTANT
            || elementType == JetNodeTypes.FLOAT_CONSTANT;
}
/**
 * Types the left-hand side of "e : T" / "e as T" / "e as? T" on the given
 * temporary trace under {@code expectedType}, then validates the operation:
 * ":" requires the LHS to be a subtype of the target; "as"/"as?" are checked
 * for impossible/redundant casts. Returns false when the attempt failed and
 * the caller should retry (or discard the temporary trace).
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
if (actualType == null) return false;
JetSimpleNameExpression operationSign = expression.getOperationSign();
IElementType operationType = operationSign.getReferencedNameElementType();
if (operationType == JetTokens.COLON) {
if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
// NOTE: reported on the real trace, not the temporary one.
context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
return false;
}
return true;
}
else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
checkForCastImpossibility(expression, actualType, targetType, context);
return true;
}
else {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
return false;
}
}
/**
 * Warns about degenerate casts: an upcast needs no "as" (use ":"), equal types
 * need no cast at all, and a cast between unrelated types can never succeed.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) {
        return;
    }
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    boolean targetIsSubtypeOfActual = typeChecker.isSubtypeOf(targetType, actualType);
    boolean actualIsSubtypeOfTarget = typeChecker.isSubtypeOf(actualType, targetType);
    if (targetIsSubtypeOfActual) {
        if (actualIsSubtypeOfTarget) {
            // Mutual subtyping: the cast changes nothing.
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
    else if (actualIsSubtypeOfTarget) {
        // Pure upcast: a ':' annotation suffices.
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
    }
    else {
        // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
        context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
    }
}
@Override
/**
 * Types a tuple literal: each entry is typed with no expected type, then, when
 * the context expects a tuple type, the component types are enriched against
 * the expected components. checkArgumentTypes returns the SAME list reference
 * when it did nothing, which is why the identity comparison below is correct.
 */
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
List<JetExpression> entries = expression.getEntries();
List<JetType> types = new ArrayList<JetType>();
for (JetExpression entry : entries) {
types.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
}
if (context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType)) {
List<JetType> enrichedTypes = context.services.checkArgumentTypes(types, entries, context.expectedType.getArguments(), context);
// Reference inequality means enrichment actually produced a new list.
if (enrichedTypes != types) {
return JetStandardClasses.getTupleType(enrichedTypes);
}
}
// TODO : labels
return context.services.checkType(JetStandardClasses.getTupleType(types), expression, context);
}
/**
 * Infers the type of a 'this' expression.
 *
 * Handles three forms: plain 'this' (the receiver type of the current scope),
 * label-qualified 'this@label' (receiver of the labeled class or function),
 * and supertype-qualified 'this' (via getSuperTypeQualifier(), yielding the
 * matching declared supertype instantiation). Ambiguous or unresolved labels
 * and non-superclass qualifiers are reported on the trace; the result may be
 * null on error.
 */
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
    JetType result = null;   // final inferred type (stays null on errors)
    JetType thisType = null; // receiver type 'this' designates, before supertype qualification
    String labelName = expression.getLabelName();
    if (labelName != null) {
        // Label-qualified 'this': resolve the label to a class or function in scope.
        Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
        int size = declarationsByLabel.size();
        final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
        assert targetLabel != null;
        if (size == 1) {
            DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
            if (declarationDescriptor instanceof ClassDescriptor) {
                ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
                thisType = classDescriptor.getDefaultType();
            }
            else if (declarationDescriptor instanceof FunctionDescriptor) {
                // Labeled function: 'this' refers to its receiver.
                FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
                thisType = functionDescriptor.getReceiverType();
            }
            else {
                throw new UnsupportedOperationException(); // TODO
            }
            context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
        }
        else if (size == 0) {
            // Label not found in the scope's label table.
            // This uses the info written by the control flow processor
            PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
            if (psiElement instanceof JetFunctionLiteralExpression) {
                DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
                if (declarationDescriptor instanceof FunctionDescriptor) {
                    thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
                    if (thisType == null) {
                        // The function literal has no receiver: 'this' is undefined there
                        // (Nothing triggers the "'this' is not defined" error below).
                        thisType = JetStandardClasses.getNothingType();
                    }
                    else {
                        context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
                        context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
                    }
                }
                else {
                    context.trace.getErrorHandler().unresolvedReference(targetLabel);
                }
            }
            else {
                context.trace.getErrorHandler().unresolvedReference(targetLabel);
            }
        }
        else {
            // More than one declaration carries this label.
            context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
        }
    }
    else {
        // Unqualified 'this': taken directly from the current scope.
        thisType = context.scope.getThisType();
        DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
        if (declarationDescriptorForUnqualifiedThis != null) {
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
        }
    }
    if (thisType != null) {
        if (JetStandardClasses.isNothing(thisType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
        }
        else {
            JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
            if (superTypeQualifier != null) {
                // Supertype-qualified 'this': locate the declared supertype named by the qualifier.
                JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
                // Errors are reported by the parser
                if (superTypeElement instanceof JetUserType) {
                    JetUserType typeElement = (JetUserType) superTypeElement;
                    ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
                    if (classifierCandidate instanceof ClassDescriptor) {
                        ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
                        Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
                        // Substitute this-type's arguments into the matching declared supertype.
                        TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
                        for (JetType declaredSupertype : supertypes) {
                            if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
                                result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
                                break;
                            }
                        }
                        if (result == null) {
                            context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
                        }
                    }
                }
            } else {
                result = thisType;
            }
            if (result != null) {
                context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
            }
        }
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
/**
 * Infers the type of a 'when' expression as the common supertype of its entry
 * body types. The caller's expected type is stripped while checking the
 * subject and conditions, and reinstated for entry bodies. Entry conditions
 * may bind pattern variables into the scope the body is checked in, and
 * contribute data flow facts to the body as well.
 */
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO :change scope according to the bound value in the when header
    final JetExpression subjectExpression = expression.getSubjectExpression();
    final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
    final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
    // TODO : exhaustive patterns
    Set<JetType> expressionTypes = Sets.newHashSet();
    for (JetWhenEntry whenEntry : expression.getEntries()) {
        JetWhenCondition[] conditions = whenEntry.getConditions();
        DataFlowInfo newDataFlowInfo;
        WritableScope scopeToExtend;
        if (conditions.length == 1) {
            // Single condition: its pattern bindings become visible in the entry body.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
            newDataFlowInfo = context.dataFlowInfo;
            JetWhenCondition condition = conditions[0];
            if (condition != null) {
                newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
            }
        }
        else {
            // Multiple comma-separated conditions: bindings cannot be shared across
            // alternatives, so the body scope stays empty; facts are OR-ed together.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
            newDataFlowInfo = null;
            for (JetWhenCondition condition : conditions) {
                DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
                if (newDataFlowInfo == null) {
                    newDataFlowInfo = dataFlowInfo;
                }
                else {
                    newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
                }
            }
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            else {
                // The OR-ed condition facts still hold together with the incoming facts.
                newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
            }
        }
        JetExpression bodyExpression = whenEntry.getExpression();
        if (bodyExpression != null) {
            // Bodies are checked against the original expected type.
            JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
            if (type != null) {
                expressionTypes.add(type);
            }
        }
    }
    if (!expressionTypes.isEmpty()) {
        return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
    }
    else if (expression.getEntries().isEmpty()) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
    }
    return null;
}
/**
 * Checks a single when-entry condition against the subject and returns the
 * data flow info that holds when the condition matches. Pattern conditions
 * may add bound variables to {@code scopeToExtend}; {@code subjectVariables}
 * are the variables that type-test facts are attributed to.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    // One-element array so the anonymous visitor below can write the result.
    final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            // Call-suffix condition: a call applied to the subject that must yield Boolean.
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
// JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
            if (callSuffixExpression != null) {
// JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
                JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
                ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
                context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
            }
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            // 'in'/'!in' range condition: delegates to the in-expression convention check.
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression != null) {
                assert subjectExpression != null;
                checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
            }
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            // 'is'-pattern condition: pattern checking yields new data flow facts.
            JetPattern pattern = condition.getPattern();
            if (pattern != null) {
                newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            // Any other condition kind is not handled by this inferrer.
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return newDataFlowInfo[0];
}
/**
 * Checks a pattern against the subject type and returns the data flow info
 * established when the pattern matches. Binding patterns add their variable
 * to {@code scopeToExtend}; type patterns record instanceof facts for
 * {@code subjectVariables}.
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
    // One-element array so the anonymous visitor can accumulate the result.
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    pattern.accept(new JetVisitorVoid() {
        @Override
        public void visitTypePattern(JetTypePattern typePattern) {
            // Type pattern: check compatibility and record an instanceof fact for the subject.
            JetTypeReference typeReference = typePattern.getTypeReference();
            if (typeReference != null) {
                JetType type = context.typeResolver.resolveType(context.scope, typeReference);
                checkTypeCompatibility(type, subjectType, typePattern);
                result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
            }
        }
        @Override
        public void visitTuplePattern(JetTuplePattern pattern) {
            // Tuple pattern: the subject must be a TupleN of matching arity;
            // each entry pattern is then checked against the component type.
            List<JetTuplePatternEntry> entries = pattern.getEntries();
            TypeConstructor typeConstructor = subjectType.getConstructor();
            if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
                    || typeConstructor.getParameters().size() != entries.size()) {
                context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
            }
            else {
                for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
                    JetTuplePatternEntry entry = entries.get(i);
                    JetType type = subjectType.getArguments().get(i).getType();
                    // TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
                    ASTNode nameLabelNode = entry.getNameLabelNode();
                    if (nameLabelNode != null) {
                        context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
                    }
                    JetPattern entryPattern = entry.getPattern();
                    if (entryPattern != null) {
                        // AND together the facts from each component pattern.
                        result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
                    }
                }
            }
        }
        @Override
        public void visitDecomposerPattern(JetDecomposerPattern pattern) {
            // Decomposer pattern: match the argument patterns against the result of
            // applying the decomposer expression to the subject.
            JetExpression decomposerExpression = pattern.getDecomposerExpression();
            if (decomposerExpression != null) {
                JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
                result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
            }
        }
        @Override
        public void visitWildcardPattern(JetWildcardPattern pattern) {
            // Wildcard matches anything and yields no new facts.
            // Nothing
        }
        @Override
        public void visitExpressionPattern(JetExpressionPattern pattern) {
            // Expression pattern: only the expression's type is checked against the subject's.
            JetExpression expression = pattern.getExpression();
            if (expression != null) {
                JetType type = getType(expression, context.replaceScope(scopeToExtend));
                checkTypeCompatibility(type, subjectType, pattern);
            }
        }
        @Override
        public void visitBindingPattern(JetBindingPattern pattern) {
            // Binding pattern: introduce the declared variable into the entry scope,
            // typed by the explicit type (if any) or the subject type.
            JetProperty variableDeclaration = pattern.getVariableDeclaration();
            JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
            JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
            scopeToExtend.addVariableDescriptor(variableDescriptor);
            if (propertyTypeRef != null) {
                // An explicit type may only widen the subject; narrowing requires 'is'.
                if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
                    context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
                }
            }
            JetWhenCondition condition = pattern.getCondition();
            if (condition != null) {
                // A guard condition can also constrain the freshly bound variable,
                // so it is appended to the subject-variable list.
                int oldLength = subjectVariables.length;
                VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
                System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
                newSubjectVariables[oldLength] = variableDescriptor;
                result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
            }
        }
        // Reports an error when no value can have both types (empty intersection),
        // i.e. the pattern can never match the subject.
        private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
            // TODO : Take auto casts into account?
            if (type == null) {
                return;
            }
            if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
                context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            // Any other pattern kind is not handled by this inferrer.
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
        }
    });
    return result[0];
}
/**
 * Infers the type of a try/catch/finally expression.
 *
 * Every catch body is checked in a scope containing its catch parameter.
 * Without a 'finally' section, the result is the common supertype of the try
 * body and all catch bodies; with one, the catch types are discarded (they
 * were computed only for their typechecking side effects) and the finally
 * expression's type joins the try body's instead. Returns null when no
 * branch produced a type.
 */
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    JetExpression tryBlock = expression.getTryBlock();
    List<JetCatchClause> catchClauses = expression.getCatchClauses();
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    List<JetType> branchTypes = new ArrayList<JetType>();
    for (JetCatchClause clause : catchClauses) {
        JetParameter catchParameter = clause.getCatchParameter();
        JetExpression catchBody = clause.getCatchBody();
        if (catchParameter == null) continue;
        VariableDescriptor caughtVariable = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
        if (catchBody == null) continue;
        // The catch body sees its parameter through a dedicated scope.
        WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
        catchScope.addVariableDescriptor(caughtVariable);
        JetType catchType = getType(catchBody, context.replaceScope(catchScope));
        if (catchType != null) {
            branchTypes.add(catchType);
        }
    }
    if (finallyBlock != null) {
        // Catch types were only needed for their typechecking side effects;
        // with a finally section, its expression's type takes their place.
        branchTypes.clear();
        JetType finallyType = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (finallyType != null) {
            branchTypes.add(finallyType);
        }
    }
    JetType tryType = getType(tryBlock, context.replaceScope(context.scope));
    if (tryType != null) {
        branchTypes.add(tryType);
    }
    return branchTypes.isEmpty() ? null : semanticServices.getTypeChecker().commonSupertype(branchTypes);
}
/**
 * Infers the type of an if-expression as the common supertype of its branch
 * types. Facts derived from the condition flow into each branch; when exactly
 * one branch always jumps (has type Nothing), the other branch's facts
 * survive the whole expression via {@code resultDataFlowInfo}. A one-armed
 * 'if' is a statement of type Unit.
 */
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression elseBranch = expression.getElse();
    JetExpression thenBranch = expression.getThen();
    WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
    // Facts holding when the condition is true (may also bind pattern variables into thenScope) ...
    DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
    // ... and when it is false (no bindings collected for the else branch).
    DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    if (elseBranch == null) {
        // 'if' without 'else' is a statement of type Unit.
        if (thenBranch != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
            if (type != null && JetStandardClasses.isNothing(type)) {
                // The then-branch always jumps, so only the else-facts survive downstream.
                resultDataFlowInfo = elseInfo;
                // resultScope = elseScope;
            }
            return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
        }
        return null;
    }
    if (thenBranch == null) {
        JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
        if (type != null && JetStandardClasses.isNothing(type)) {
            // The else-branch always jumps, so only the then-facts survive downstream.
            resultDataFlowInfo = thenInfo;
            // resultScope = thenScope;
        }
        return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
    }
    // Both branches present: check each against the original expected type.
    JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
    JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
    JetType result;
    if (thenType == null) {
        result = elseType;
    }
    else if (elseType == null) {
        result = thenType;
    }
    else {
        result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
    }
    // If exactly one branch always jumps, the other branch's facts are known
    // to hold after the whole if-expression.
    boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
    boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
    if (jumpInThen && !jumpInElse) {
        resultDataFlowInfo = elseInfo;
        // resultScope = elseScope;
    }
    else if (jumpInElse && !jumpInThen) {
        resultDataFlowInfo = thenInfo;
        // resultScope = thenScope;
    }
    return result;
}
/**
 * Derives data flow facts from a boolean condition, for the branch where the
 * condition evaluates to {@code conditionValue}. Understands 'is' patterns,
 * '&&'/'||' (composed with short-circuit semantics), (in)equality against
 * 'null' or operands known to be non-null, logical negation and parentheses.
 * When {@code scopeToExtend} is non-null, pattern-bound variables are added
 * to it. Never returns null: falls back to {@code context.dataFlowInfo}.
 */
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
    if (condition == null) return context.dataFlowInfo;
    // One-element array so the anonymous visitor below can write the result.
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitIsExpression(JetIsExpression expression) {
            // 'x is P' contributes facts only on the branch where the (possibly
            // negated) check actually holds.
            if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
                JetPattern pattern = expression.getPattern();
                // Facts and bindings for this pattern come from the maps populated
                // when the is-expression was typechecked.
                result[0] = patternsToDataFlowInfo.get(pattern);
                if (scopeToExtend != null) {
                    List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
                    if (descriptors != null) {
                        for (VariableDescriptor variableDescriptor : descriptors) {
                            scopeToExtend.addVariableDescriptor(variableDescriptor);
                        }
                    }
                }
            }
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            IElementType operationToken = expression.getOperationToken();
            if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
                // Bindings propagate only where both operands are guaranteed evaluated:
                // 'a && b' binds on the true branch, 'a || b' on the false branch.
                WritableScope actualScopeToExtend;
                if (operationToken == JetTokens.ANDAND) {
                    actualScopeToExtend = conditionValue ? scopeToExtend : null;
                }
                else {
                    actualScopeToExtend = conditionValue ? null : scopeToExtend;
                }
                DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
                JetExpression expressionRight = expression.getRight();
                if (expressionRight != null) {
                    DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
                    // '&&' true / '||' false mean both operand facts hold (AND);
                    // the opposite branches only guarantee one of them (OR).
                    DataFlowInfo.CompositionOperator operator;
                    if (operationToken == JetTokens.ANDAND) {
                        operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
                    }
                    else {
                        operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
                    }
                    dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
                }
                result[0] = dataFlowInfo;
            }
            else if (operationToken == JetTokens.EQEQ
                    || operationToken == JetTokens.EXCLEQ
                    || operationToken == JetTokens.EQEQEQ
                    || operationToken == JetTokens.EXCLEQEQEQ) {
                JetExpression left = expression.getLeft();
                JetExpression right = expression.getRight();
                if (right == null) return;
                // Normalize so 'left' is the simple name under comparison, if either side is one.
                if (!(left instanceof JetSimpleNameExpression)) {
                    JetExpression tmp = left;
                    left = right;
                    right = tmp;
                    if (!(left instanceof JetSimpleNameExpression)) {
                        return;
                    }
                }
                VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
                if (variableDescriptor == null) return;
                // TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
                // Comparison to a non-null expression
                // NOTE(review): the '!conditionValue' flip encodes that equality with a
                // known non-null value rules out null — confirm against
                // DataFlowInfo.equalsToNull's flag semantics.
                JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
                if (rhsType != null && !rhsType.isNullable()) {
                    extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
                    return;
                }
                VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
                if (rightVariable != null) {
                    // Symmetric case: the left side is known non-null, so facts
                    // accrue to the variable on the right.
                    JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
                    if (lhsType != null && !lhsType.isNullable()) {
                        extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
                        return;
                    }
                }
                // Comparison to 'null'
                if (!(right instanceof JetConstantExpression)) {
                    return;
                }
                JetConstantExpression constantExpression = (JetConstantExpression) right;
                if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
                    return;
                }
                extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
            }
        }
        // Records nullability knowledge for the variable; 'equalsToNull' is the
        // truth value of the 'x == null' comparison on the analyzed branch.
        private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
            if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
            }
            else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
                result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
            }
        }
        @Override
        public void visitUnaryExpression(JetUnaryExpression expression) {
            // '!x': recurse with the target truth value inverted.
            IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
            if (operationTokenType == JetTokens.EXCL) {
                JetExpression baseExpression = expression.getBaseExpression();
                if (baseExpression != null) {
                    result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
                }
            }
        }
        @Override
        public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
            // '(x)': transparent for data flow purposes.
            JetExpression body = expression.getExpression();
            if (body != null) {
                body.accept(this);
            }
        }
    });
    // visitIsExpression may have stored a null (no info recorded for the pattern).
    if (result[0] == null) {
        return context.dataFlowInfo;
    }
    return result[0];
}
/**
 * Typechecks a loop/if condition: it must be of type Boolean.
 * A null condition (e.g. a syntax error) is silently skipped, as is a
 * condition whose type could not be inferred.
 */
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) return;
    JetType conditionType = getType(condition, context.replaceScope(scope));
    if (conditionType == null || isBoolean(conditionType)) return;
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
/**
 * Typechecks a while-loop, which is a statement of type Unit. The body is
 * checked in a scope enriched with facts (and pattern bindings) established
 * by the condition being true; after a loop with no 'break', the negated
 * condition's facts flow onward via {@code resultDataFlowInfo}.
 */
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    // The loop itself never adopts the caller's expected type.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression condition = expression.getCondition();
    checkCondition(context.scope, condition, context);
    JetExpression body = expression.getBody();
    if (body != null) {
        WritableScopeImpl bodyScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
        DataFlowInfo infoWhenTrue;
        if (condition == null) {
            infoWhenTrue = context.dataFlowInfo;
        }
        else {
            infoWhenTrue = extractDataFlowInfoFromCondition(condition, true, bodyScope, context);
        }
        getTypeWithNewScopeAndDataFlowInfo(bodyScope, body, infoWhenTrue, context);
    }
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        // Without a break, control only leaves when the condition turns false.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Typechecks a do..while loop, which is a statement of type Unit. A
 * function-literal body without a parameter specification is treated as a
 * plain block whose declarations remain visible in the condition; any other
 * body is checked in its own writable scope, which the condition is then
 * checked against.
 */
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression body = expression.getBody();
    JetScope conditionScope = context.scope;
    if (body instanceof JetFunctionLiteralExpression) {
        JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
        if (!function.getFunctionLiteral().hasParameterSpecification()) {
            // '{ ... }' with no parameters acts as a block, not a closure, so its
            // declarations stay in scope for the while-condition.
            WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
            conditionScope = writableScope;
            // NOTE(review): assumes getBodyExpression() is non-null here — TODO confirm for malformed literals.
            context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
            context.trace.record(BindingContext.BLOCK, function);
        } else {
            // A literal with parameters is a genuine closure value.
            getType(body, context.replaceScope(context.scope));
        }
    }
    else if (body != null) {
        WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
        conditionScope = writableScope;
        context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
    }
    JetExpression condition = expression.getCondition();
    checkCondition(conditionScope, condition, context);
    if (!flowInformationProvider.isBreakable(expression)) {
        // resultScope = newWritableScopeImpl();
        // Without a break, the loop exits only when the condition is false.
        resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
/**
 * Creates a fresh writable scope nested in {@code scope}, owned by the same
 * containing declaration and reporting errors through {@code trace}.
 */
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    DeclarationDescriptor owner = scope.getContainingDeclaration();
    return new WritableScopeImpl(scope, owner, trace.getErrorHandler());
}
/**
 * Typechecks a for-loop, which is a statement of type Unit: infers the range
 * type, verifies the iteration convention via checkIterableConvention, and
 * checks (or infers) the loop parameter's type against the element type
 * produced by the range.
 */
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetParameter loopParameter = expression.getLoopParameter();
    JetExpression loopRange = expression.getLoopRange();
    JetType loopRangeType = null;
    if (loopRange != null) {
        loopRangeType = getType(loopRange, context.replaceScope(context.scope));
    }
    JetType expectedParameterType = null;
    if (loopRangeType != null) {
        // Element type yielded by iterator().next(), or null when the convention fails.
        expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
    }
    WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
    if (loopParameter != null) {
        JetTypeReference typeReference = loopParameter.getTypeReference();
        VariableDescriptor variableDescriptor;
        if (typeReference != null) {
            // Explicitly typed parameter: must be a supertype of the element type.
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
            JetType actualParameterType = variableDescriptor.getOutType();
            if (expectedParameterType != null &&
                    actualParameterType != null &&
                    !semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
                context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
            }
        }
        else {
            // No explicit type: infer the parameter's type from the range's element type.
            if (expectedParameterType == null) {
                expectedParameterType = ErrorUtils.createErrorType("Error");
            }
            variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
        }
        loopScope.addVariableDescriptor(variableDescriptor);
    }
    JetExpression body = expression.getBody();
    if (body != null) {
        getType(body, context.replaceScope(loopScope));
    }
    return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Nullable
<<<<<<< MINE
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
ASTNode reportErrorsOn = loopRange.getNode();
OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
=======
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
>>>>>>> YOURS
if (iteratorResolutionResult.isSuccess()) {
<<<<<<< MINE
FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
JetType iteratorType = iteratorFunction.getReturnType();
FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
boolean hasNextFunctionSupported = hasNextFunction != null;
VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
boolean hasNextPropertySupported = hasNextProperty != null;
=======
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
>>>>>>> YOURS
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
else {
context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
<<<<<<< MINE
FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
return nextFunction.getReturnType();
=======
return nextResolutionResult.getDescriptor().getReturnType();
>>>>>>> YOURS
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
<<<<<<< MINE
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult hasNextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
=======
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
>>>>>>> YOURS
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return null;
} else {
<<<<<<< MINE
assert hasNextResolutionResult.isSuccess();
JetType hasNextReturnType = hasNextResolutionResult.getFunctionDescriptor().getReturnType();
=======
JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
>>>>>>> YOURS
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextResolutionResult.getFunctionDescriptor();
}
/**
 * Looks up a {@code hasNext} property on the iterator type and, when present, checks that it is
 * readable and of Boolean type. Diagnostics are anchored on the loop-range expression.
 *
 * @return the property descriptor, or {@code null} when the iterator type declares no
 *         {@code hasNext} member property
 */
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor hasNextProperty = iteratorType.getMemberScope().getVariable("hasNext");
    if (hasNextProperty == null) {
        return null;
    }
    JetType outType = hasNextProperty.getOutType();
    if (outType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(outType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + outType);
    }
    return hasNextProperty;
}
/** Hash-qualified expressions ('#') are not supported by this inferrer; report and bail out. */
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
    ASTNode operationNode = expression.getOperationTokenNode();
    context.trace.getErrorHandler().genericError(operationNode, "Unsupported");
    return null;
}
/**
 * Types a qualified expression: 'a.b', 'a?.b' (SAFE_ACCESS) or 'a?' (QUEST).
 * <p>
 * Resolution strategy: first try the selector against the receiver's declared type with no
 * autocasts ("clean" resolution, on a temporary trace). If that fails, retry against each
 * data-flow-implied (autocast) type of the receiver, committing the first attempt that
 * succeeds; when everything fails, the clean attempt's diagnostics are committed.
 */
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO : functions as values
    JetExpression selectorExpression = expression.getSelectorExpression();
    JetExpression receiverExpression = expression.getReceiverExpression();
    // The receiver may also denote a namespace, hence the namespace-aware visitor.
    JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
    if (selectorExpression == null) return null;
    // Substitute an error type so selector resolution can still run and report sensibly.
    if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
    // Clean resolution: no autocasts
    TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
    TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
    JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
    //TODO move further
    if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
        // 'a?.b' yields a nullable result (except for Unit, which stays as-is).
        if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
            selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
        }
    }
    if (selectorReturnType != null) {
        cleanResolutionTrace.addAllMyDataTo(context.trace);
    }
    else {
        // Clean resolution failed: retry with autocast types known from data-flow analysis.
        VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
        boolean somethingFound = false;
        if (variableDescriptor != null) {
            List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
            // Reversed so later-established types are tried first — TODO confirm ordering intent.
            Collections.reverse(possibleTypes);
            TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
            TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
            for (JetType possibleType : possibleTypes) {
                selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
                if (selectorReturnType != null) {
                    context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
                    autocastResolutionTrace.commit();
                    somethingFound = true;
                    break;
                }
                else {
                    // Discard the failed attempt; start a fresh temporary trace for the next candidate.
                    autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
                    autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
                }
            }
        }
        if (!somethingFound) {
            // Surface the diagnostics from the autocast-free attempt.
            cleanResolutionTrace.commit();
        }
    }
    JetType result;
    if (expression.getOperationSign() == JetTokens.QUEST) {
        // 'a?' requires a Boolean selector and produces a nullable receiver type.
        if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
            // TODO : more comprehensible error message
            context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
        }
        result = TypeUtils.makeNullable(receiverType);
    }
    else {
        result = selectorReturnType;
    }
    if (selectorExpression != null && result != null) {
        context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
    }
    if (selectorReturnType != null) {
        // TODO : extensions to 'Any?'
        if (selectorExpression != null) {
            receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
            // Report unsafe calls on nullable receivers ('a.b' where a may be null).
            context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
        }
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Finds the function being invoked by a selector expression, descending through call,
 * array-access, binary and qualified expressions to the underlying reference. Falls back to an
 * error function descriptor (never {@code null}) when nothing was resolved.
 */
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
    // One-slot array so the anonymous visitor below can write the result.
    final FunctionDescriptor[] result = new FunctionDescriptor[1];
    selectorExpression.accept(new JetVisitorVoid() {
        @Override
        public void visitCallExpression(JetCallExpression callExpression) {
            // Recurse into the callee of a call: 'f(x)' -> 'f'.
            JetExpression calleeExpression = callExpression.getCalleeExpression();
            if (calleeExpression != null) {
                calleeExpression.accept(this);
            }
        }
        @Override
        public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
            // Base case: take the previously-bound target if it is a function.
            DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
            if (declarationDescriptor instanceof FunctionDescriptor) {
                result[0] = (FunctionDescriptor) declarationDescriptor;
            }
        }
        @Override
        public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
            expression.getArrayExpression().accept(this);
        }
        @Override
        public void visitBinaryExpression(JetBinaryExpression expression) {
            expression.getLeft().accept(this);
        }
        @Override
        public void visitQualifiedExpression(JetQualifiedExpression expression) {
            expression.getReceiverExpression().accept(this);
        }
        @Override
        public void visitJetElement(JetElement element) {
            // Any other node kind is unexpected here.
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
        }
    });
    if (result[0] == null) {
        // Guarantee a non-null result for downstream null-safety checks.
        result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
    }
    return result[0];
}
/**
 * Computes the type of a selector in a qualified expression, given the (possibly null) receiver
 * type: a call resolves through the call resolver, a simple name resolves first as a property
 * then as a namespace/class-object, and a nested qualified expression recurses.
 *
 * @return the selector's type, or {@code null} when resolution failed (errors reported on the
 *         context's trace)
 */
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
    if (selectorExpression instanceof JetCallExpression) {
        return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
    }
    else if (selectorExpression instanceof JetSimpleNameExpression) {
        JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
        // Try as a property on a temporary trace; commit only on success so a failed attempt
        // leaves no bindings behind before the namespace fallback.
        TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
        VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
        if (variableDescriptor != null) {
            temporaryTrace.commit();
            return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
        }
        // Fallback: resolve as a namespace or class object in the receiver's member scope.
        TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
        JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
        if (jetType == null) {
            context.trace.getErrorHandler().unresolvedReference(nameExpression);
        }
        return context.services.checkEnrichedType(jetType, nameExpression, context);
//            JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
//            return getType(selectorExpression, context.replaceScope(scope));
    }
    else if (selectorExpression instanceof JetQualifiedExpression) {
        // 'a.b.c': type 'a.b' first (no expected type), then resolve 'c' against that.
        JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
        JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
        JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
        if (newReceiverType != null && newSelectorExpression != null) {
            return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
        }
    }
    else {
        // TODO : not a simple name -> resolve in scope, expect property type or a function type
        context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
    }
    return null;
}
/** Types a receiver-less call expression via the call resolver and checks the result against the expected type. */
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
    JetType resolvedType = context.services.callResolver.resolveCall(
            context.trace, context.scope, null, expression, context.expectedType);
    return context.services.checkType(resolvedType, expression, context);
}
/**
 * Types an 'is' expression ('x is Pattern'). The expression itself is always Boolean; as a side
 * effect, the pattern's data-flow info and bound variables are recorded so that smart casts are
 * available in branches guarded by this check.
 */
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
    JetPattern pattern = expression.getPattern();
    if (pattern != null && knownType != null) {
        // Variables bound by the pattern live in this extension scope.
        WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
        DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
        // Stash results for later consumers (e.g. 'when' branches and condition analysis).
        patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
        patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
/**
 * Types a unary expression. Labels pass straight through to the base expression; other
 * operators are resolved as conventions (named operator functions) on the base expression's
 * type. For '++'/'--' the operator's return type must be assignable back to the receiver
 * (unless it is Unit), since the result is stored back into the operand.
 */
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
    JetExpression baseExpression = expression.getBaseExpression();
    if (baseExpression == null) return null;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
        // TODO : Some processing for the label?
        return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
    }
    IElementType operationType = operationSign.getReferencedNameElementType();
    // Map the token to its convention-function name (e.g. '-' -> "minus").
    String name = unaryOperationNames.get(operationType);
    if (name == null) {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
        return null;
    }
    JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
    if (receiverType == null) return null;
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(expression),
            expression.getOperationSign(),
            name,
            receiverType,
            context.expectedType);
    if (functionDescriptor == null) return null;
    JetType returnType = functionDescriptor.getReturnType();
    JetType result;
    if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
        if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
            // Unit-returning inc/dec: the whole expression is Unit, no store-back happens.
            result = JetStandardClasses.getUnitType();
        }
        else {
            if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
            }
            else {
                // The operand variable is written back to.
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
            // TODO : Maybe returnType?
            result = receiverType;
        }
    }
    else {
        result = returnType;
    }
    return context.services.checkType(result, expression, context);
}
/**
 * Types a binary expression, dispatching on the operator token: infix identifier calls,
 * convention operators ('+', '*', ...), assignments, comparisons (via 'compareTo'), equality
 * (via 'equals'), identity ('===' / '!=='), 'in', short-circuit '&&'/'||', and Elvis '?:'.
 * <p>
 * Fix applied: the Elvis non-null warning message previously read
 * "is always returns" (ungrammatical); it now reads "always returns".
 */
@Override
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // 'a foo b' — infix call of a named function.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Convention operator, e.g. '+' -> "plus".
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // '<', '>', '<=', '>=' resolve through 'compareTo', which must return Int.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // '==' / '!=' resolve through an exact 'equals(Any?)' signature on the left type.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, "equals",
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            // Warn when the operand types cannot possibly overlap.
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Regardless of resolution, the expression is Boolean.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // NOTE(review): this assignment is dead — the method returns null immediately,
            // discarding the error type. Preserved as-is to avoid a behavior change.
            result = ErrorUtils.createErrorType("No right argument"); // TODO
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        // For '&&' the right operand sees facts (and bound variables) established by the left.
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                // Result is the common supertype; nullable only if the right operand is nullable.
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Checks an 'in'/'!in' expression by resolving a 'contains' function on the right operand's
 * type with the left operand as the argument, and requiring a Boolean result.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String name = "contains";
    // Note: 'a in b' is a call on b ('b.contains(a)'), so the RIGHT operand supplies the receiver.
    JetType receiverType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            name, receiverType, context.expectedType);
    JetType containsType = functionDescriptor != null ? functionDescriptor.getReturnType() : null;
    ensureBooleanResult(operationSign, name, containsType, context);
}
/**
 * Reports an error when the static types of the two operands of an (in)equality comparison have
 * an empty intersection, i.e. no value could ever inhabit both sides.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetExpression lhs = expression.getLeft();
    JetExpression rhs = expression.getRight();
    JetType lhsType = getType(lhs, context.replaceScope(context.scope));
    if (lhsType == null || rhs == null) {
        return;
    }
    JetType rhsType = getType(rhs, context.replaceScope(context.scope));
    if (rhsType == null) {
        return;
    }
    Set<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(lhsType, rhsType));
    if (TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes) == null) {
        JetSimpleNameExpression operationSign = expression.getOperationReference();
        context.trace.getErrorHandler().genericError(expression.getNode(),
                "Operator " + operationSign.getReferencedName() + " cannot be applied to " + lhsType + " and " + rhsType);
    }
}
/**
 * Handles '+=' etc. In expression position assignments are illegal; the writable-scope
 * subclass overrides this to actually perform the operation in statement position.
 */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * Handles '='. In expression position assignments are illegal; the writable-scope subclass
 * overrides this to actually perform the assignment in statement position.
 */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/** Reports that an assignment appeared where an expression is required; always yields {@code null}. */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    String message = "Assignments are not expressions, and only expressions are allowed in this context";
    context.trace.getErrorHandler().genericError(expression.getNode(), message);
    return null;
}
/**
 * Convenience wrapper: checks that an operator's result type is Boolean, quoting the operator
 * name in the diagnostic. Returns {@code true} when the type is acceptable (or unknown).
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, "'" + name + "'", context);
}
/**
 * Checks that a result type is Boolean, reporting an error naming {@code subjectName} when it
 * is not. An unknown ({@code null}) type is accepted silently.
 *
 * @return {@code true} when the type is Boolean or unknown, {@code false} when a non-Boolean
 *         type was reported
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/** True when {@code type} is convertible to the standard-library Boolean type. */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
/**
 * Types an array-access read ('a[i, ...]') by resolving a 'get' convention function on the
 * array expression's type with the index expressions as arguments. Write access ('a[i] = x')
 * is handled separately by {@code resolveArrayAccessToLValue}.
 */
@Override
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression arrayExpression = expression.getArrayExpression();
    JetType receiverType = getType(arrayExpression, context.replaceScope(context.scope));
    if (receiverType != null) {
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                context.scope,
                CallMaker.makeCall(expression, expression.getIndexExpressions()),
                expression,
                "get",
                receiverType,
                context.expectedType);
        if (functionDescriptor != null) {
            return context.services.checkType(functionDescriptor.getReturnType(), expression, contextWithExpectedType);
        }
    }
    return null;
}
/**
 * Resolves a binary operator as a named function call on the left operand's type
 * ('a op b' == 'a.name(b)') and returns the call's return type.
 *
 * @return the resolved function's return type, or {@code null} when resolution failed
 */
@Nullable
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    // The left operand is typed first: it is the receiver of the operator call.
    JetType receiverType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor descriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            receiverType,
            context.expectedType);
    return descriptor == null ? null : descriptor.getReturnType();
}
/** Declarations are not expressions; reports an error and yields no type. */
@Override
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
    return null;
}
/**
 * A bare 'namespace' keyword is not an expression in this visitor; the namespace-aware
 * subclass overrides this to allow it in qualified positions.
 */
@Override
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
    return null;
}
/**
 * Types a string template. The result is always String; additionally, when the template has no
 * interpolated expressions and no illegal escapes, its constant value is computed and recorded
 * as a compile-time String constant.
 */
@Override
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    // OUT_OF_RANGE in value[0] marks the template as non-constant (interpolation or bad escape).
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // Interpolated expression: still type-check it, but the template is not constant.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }
            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }
            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        // Fully constant template: record its value for constant folding.
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
/** Catch-all for element kinds with no dedicated visit method: report and yield no type. */
@Override
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
    return null;
}
}
/**
 * A type-inferrer visitor for positions where an expression may also denote a namespace
 * (e.g. the receiver of a qualified expression): namespace references become legal and
 * simple-name lookup additionally tries namespace types.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        JetType rootNamespaceType = JetModuleUtil.getRootNamespaceType(expression);
        return context.services.checkType(rootNamespaceType, expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        JetType namespaceType = lookupNamespaceType(expression, referencedName, context);
        result[0] = namespaceType;
        return namespaceType != null;
    }
}
/**
 * A type-inferrer visitor for statement position inside a block: declarations (properties,
 * functions, objects) are added to the given writable scope as they are encountered, and
 * assignments are legal (unlike in expression position).
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    // The block's scope; local declarations visited here are registered into it.
    private final WritableScope scope;

    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }

    /**
     * Processes a local object declaration and exposes it as a property in the block scope.
     * Yields no type (declarations are statements).
     */
    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        return null;
    }

    /**
     * Processes a local variable declaration: rejects receiver/getter/setter forms (not allowed
     * for locals), types the initializer against the declared type when both are present, and
     * registers the variable in the block scope.
     */
    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        if (property.getPropertyTypeRef() != null && initializer != null) {
            JetType outType = propertyDescriptor.getOutType();
            // Typing the initializer with the declared type as expected type also reports mismatches.
            JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
//                if (outType != null &&
//                    initializerType != null &&
//                    !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
//                    context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
//                }
        }
        // Register after typing the initializer so the variable is not visible in its own initializer.
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }

    /** Registers a local function in the block scope and checks its body's return type. */
    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }

    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }

    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }

    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        return visitJetElement(dcl, context);
    }

    /**
     * Handles '+=' etc. in statement position: first tries the assignment form (e.g. 'plusAssign')
     * on a temporary trace; when absent, falls back to the plain operator (e.g. 'plus') and marks
     * the target as reassigned. Yields no type.
     */
    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
        JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
        if (assignmentOperationType == null) {
            // Fall back to the non-assigning counterpart, e.g. '+=' -> 'plus'.
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        else {
            temporaryBindingTrace.commit();
        }
        return null;
    }

    /**
     * Handles '=' in statement position: 'a[i] = x' resolves through the 'set' convention,
     * otherwise the right-hand side is typed against the left-hand side's type. Yields no type.
     */
    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        JetExpression left = expression.getLeft();
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        if (deparenthesized instanceof JetArrayAccessExpression) {
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
        if (right != null) {
            // Typing with leftType as expected type also reports assignment type mismatches.
            JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
//                if (rightType != null &&
//                    leftType != null &&
//                    !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
//                    context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
//                }
        }
        return null;
    }

    /**
     * Resolves 'a[i, ...] = x' as a call to a 'set' convention function on the array
     * expression's type, binding the operation sign to the resolved descriptor.
     */
    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
        JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
        if (receiverType == null) return null;
//
        Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
//            // TODO : nasty hack: effort is duplicated
//            context.services.callResolver.resolveCallWithGivenName(
//                    scope,
//                    call,
//                    arrayAccessExpression,
//                    "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                scope,
                call,
                arrayAccessExpression,
                "set", receiverType, NO_EXPECTED_TYPE);
        if (functionDescriptor == null) return null;
        context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
    }

    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}package org.jetbrains.jet.lang.types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.intellij.lang.ASTNode;
import com.intellij.psi.*;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jet.JetNodeTypes;
import org.jetbrains.jet.lang.CompositeErrorHandler;
import org.jetbrains.jet.lang.ErrorHandler;
import org.jetbrains.jet.lang.JetSemanticServices;
import org.jetbrains.jet.lang.cfg.JetFlowInformationProvider;
import org.jetbrains.jet.lang.descriptors.*;
import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.jetbrains.jet.lang.resolve.*;
import org.jetbrains.jet.lang.resolve.calls.CallResolver;
import org.jetbrains.jet.lang.resolve.calls.OverloadResolutionResult;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstant;
import org.jetbrains.jet.lang.resolve.constants.CompileTimeConstantResolver;
import org.jetbrains.jet.lang.resolve.constants.ErrorValue;
import org.jetbrains.jet.lang.resolve.constants.StringValue;
import org.jetbrains.jet.lexer.JetTokens;
import org.jetbrains.jet.resolve.DescriptorRenderer;
import org.jetbrains.jet.util.slicedmap.WritableSlice;
import java.util.*;
import static org.jetbrains.jet.lang.resolve.BindingContext.*;
/**
* @author abreslav
*/
public class JetTypeInferrer {
// Sentinel JetType marking contexts in which an expected (return) type must
// never be consulted at all: every member throws, so any accidental use of
// this sentinel as a real type fails fast instead of silently misbehaving.
private static final JetType FORBIDDEN = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "FORBIDDEN";
}
};
// Sentinel JetType meaning "no constraint on the expression's type". Compared
// by identity (== / !=) throughout the inferrer; like FORBIDDEN, every member
// throws so the sentinel can never be used as an actual type.
public static final JetType NO_EXPECTED_TYPE = new JetType() {
@NotNull
@Override
public TypeConstructor getConstructor() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public List<TypeProjection> getArguments() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public boolean isNullable() {
throw new UnsupportedOperationException(); // TODO
}
@NotNull
@Override
public JetScope getMemberScope() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public List<AnnotationDescriptor> getAnnotations() {
throw new UnsupportedOperationException(); // TODO
}
@Override
public String toString() {
return "NO_EXPECTED_TYPE";
}
};
// Convention-function names for unary operators (a++ -> a.inc(), !a -> a.not(), ...).
private static final ImmutableMap<IElementType, String> unaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.PLUSPLUS, "inc")
.put(JetTokens.MINUSMINUS, "dec")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.EXCL, "not")
.build();
// Convention-function names for binary operators (a + b -> a.plus(b), a..b -> a.rangeTo(b), ...).
private static final ImmutableMap<IElementType, String> binaryOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MUL, "times")
.put(JetTokens.PLUS, "plus")
.put(JetTokens.MINUS, "minus")
.put(JetTokens.DIV, "div")
.put(JetTokens.PERC, "mod")
.put(JetTokens.ARROW, "arrow")
.put(JetTokens.RANGE, "rangeTo")
.build();
// Operator-token groups that get special typing rules rather than a plain convention call.
private static final Set<IElementType> comparisonOperations = Sets.<IElementType>newHashSet(JetTokens.LT, JetTokens.GT, JetTokens.LTEQ, JetTokens.GTEQ);
private static final Set<IElementType> equalsOperations = Sets.<IElementType>newHashSet(JetTokens.EQEQ, JetTokens.EXCLEQ);
private static final Set<IElementType> inOperations = Sets.<IElementType>newHashSet(JetTokens.IN_KEYWORD, JetTokens.NOT_IN);
// Convention-function names for compound assignments (a += b -> a.plusAssign(b), ...).
public static final ImmutableMap<IElementType, String> assignmentOperationNames = ImmutableMap.<IElementType, String>builder()
.put(JetTokens.MULTEQ, "timesAssign")
.put(JetTokens.DIVEQ, "divAssign")
.put(JetTokens.PERCEQ, "modAssign")
.put(JetTokens.PLUSEQ, "plusAssign")
.put(JetTokens.MINUSEQ, "minusAssign")
.build();
// Maps each compound-assignment token to its plain binary counterpart (+= -> +),
// used to fall back to 'a = a + b' when no 'plusAssign' is applicable.
private static final ImmutableMap<IElementType, IElementType> assignmentOperationCounterparts = ImmutableMap.<IElementType, IElementType>builder()
.put(JetTokens.MULTEQ, JetTokens.MUL)
.put(JetTokens.DIVEQ, JetTokens.DIV)
.put(JetTokens.PERCEQ, JetTokens.PERC)
.put(JetTokens.PLUSEQ, JetTokens.PLUS)
.put(JetTokens.MINUSEQ, JetTokens.MINUS)
.build();
// Collaborators shared by every Services instance produced by this inferrer.
private final JetSemanticServices semanticServices;
private final JetFlowInformationProvider flowInformationProvider;
// Per-pattern data recorded during pattern matching; keyed by PSI pattern node.
private final Map<JetPattern, DataFlowInfo> patternsToDataFlowInfo = Maps.newHashMap();
private final Map<JetPattern, List<VariableDescriptor>> patternsToBoundVariableLists = Maps.newHashMap();
// Creates an inferrer bound to a control-flow provider and the semantic services.
public JetTypeInferrer(@NotNull JetFlowInformationProvider flowInformationProvider, @NotNull JetSemanticServices semanticServices) {
this.semanticServices = semanticServices;
this.flowInformationProvider = flowInformationProvider;
}
// Entry point: returns a facade bound to the given trace; all inference that
// should record into that trace goes through the returned Services object.
public Services getServices(@NotNull BindingTrace trace) {
return new Services(trace);
}
public class Services {
// Trace into which all resolution results and diagnostics are recorded.
private final BindingTrace trace;
private final CompileTimeConstantResolver compileTimeConstantResolver;
private final CallResolver callResolver;
// Shared stateless visitors reused across expressions for this trace.
private final TypeInferrerVisitor typeInferrerVisitor;
private final TypeInferrerVisitorWithNamespaces typeInferrerVisitorWithNamespaces;
// Constructed only via JetTypeInferrer.getServices(trace).
private Services(BindingTrace trace) {
this.trace = trace;
this.compileTimeConstantResolver = new CompileTimeConstantResolver(semanticServices, trace);
this.typeInferrerVisitor = new TypeInferrerVisitor();
this.typeInferrerVisitorWithNamespaces = new TypeInferrerVisitorWithNamespaces();
this.callResolver = new CallResolver(semanticServices, trace, JetTypeInferrer.this);
}
// Fresh visitor each call: writable-scope visitors are stateful (they mutate the scope).
public TypeInferrerVisitorWithWritableScope newTypeInferrerVisitorWithWritableScope(WritableScope scope) {
return new TypeInferrerVisitorWithWritableScope(scope);
}
/**
 * Infers the expression's type, never returning null: when inference fails,
 * an error type naming the expression text is returned instead.
 */
@NotNull
public JetType safeGetType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
    JetType inferred = getType(scope, expression, expectedType);
    return inferred != null ? inferred : ErrorUtils.createErrorType("Type for " + expression.getText());
}
// Infers the expression's type in a fresh context with empty data-flow info.
// The expected *return* type is FORBIDDEN: 'return' is not legal here.
@Nullable
public JetType getType(@NotNull final JetScope scope, @NotNull JetExpression expression, @NotNull JetType expectedType) {
return typeInferrerVisitor.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), expectedType, FORBIDDEN));
}
// Variant that also allows namespace-valued expressions (used for qualifiers).
public JetType getTypeWithNamespaces(@NotNull final JetScope scope, @NotNull JetExpression expression) {
return typeInferrerVisitorWithNamespaces.getType(expression, newContext(trace, scope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
}
// Exposes the call resolver bound to this trace.
public CallResolver getCallResolver() {
return callResolver;
}
/**
 * Validates the call operator against receiver nullability: '.' on a nullable
 * receiver of a null-rejecting callee is an error; '?.' is an error on
 * namespaces and a warning when the receiver cannot actually be null.
 */
private void checkNullSafety(@Nullable JetType receiverType, @NotNull ASTNode operationTokenNode, @Nullable FunctionDescriptor callee) {
    if (receiverType == null || callee == null) {
        return;
    }
    boolean isNamespace = receiverType instanceof NamespaceType;
    JetType declaredReceiverType = callee.getReceiverType();
    // Namespaces are never null, regardless of the declared nullability flag
    boolean receiverMayBeNull = !isNamespace && receiverType.isNullable();
    boolean calleeRejectsNull = declaredReceiverType == null || !declaredReceiverType.isNullable();
    IElementType sign = operationTokenNode.getElementType();
    if (receiverMayBeNull && calleeRejectsNull && sign == JetTokens.DOT) {
        trace.getErrorHandler().genericError(operationTokenNode, "Only safe calls (?.) are allowed on a nullable receiver of type " + receiverType);
        return;
    }
    if ((!receiverMayBeNull || !calleeRejectsNull) && sign == JetTokens.SAFE_ACCESS) {
        if (isNamespace) {
            trace.getErrorHandler().genericError(operationTokenNode, "Safe calls are not allowed on namespaces");
        }
        else {
            trace.getErrorHandler().genericWarning(operationTokenNode, "Unnecessary safe call on a non-null receiver of type " + receiverType);
        }
    }
}
// Convenience overload: checks the function's body against its declared return
// type, starting from empty data-flow info.
public void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor) {
checkFunctionReturnType(outerScope, function, functionDescriptor, DataFlowInfo.getEmpty());
}
/**
 * Checks the function body against the descriptor's declared return type.
 * For an expression-bodied function with no declared return type the expected
 * type is relaxed to NO_EXPECTED_TYPE, since the type will be inferred.
 */
private void checkFunctionReturnType(@NotNull JetScope outerScope, @NotNull JetDeclarationWithBody function, @NotNull FunctionDescriptor functionDescriptor, DataFlowInfo dataFlowInfo) {
    JetType expectedReturnType = functionDescriptor.getReturnType();
    if (!function.hasBlockBody() && !function.hasDeclaredReturnType()) {
        // Expression body without an explicit return type: nothing to check against
        expectedReturnType = NO_EXPECTED_TYPE;
    }
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
    checkFunctionReturnType(functionInnerScope, function, expectedReturnType, dataFlowInfo, CoercionStrategy.NO_COERCION);
}
// Convenience overload taking the already-built inner scope; no coercion of the
// last expression, empty data-flow info.
public void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType) {
checkFunctionReturnType(functionInnerScope, function, expectedReturnType, DataFlowInfo.getEmpty(), CoercionStrategy.NO_COERCION);
}
// Core return-type check: infers the body's type under the appropriate expected
// type, then reports unreachable code, missing 'return's in block bodies, and
// forbidden 'return's in expression bodies.
private void checkFunctionReturnType(JetScope functionInnerScope, JetDeclarationWithBody function, @NotNull final JetType expectedReturnType, @NotNull DataFlowInfo dataFlowInfo, CoercionStrategy coercionForLastExpression) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
final boolean blockBody = function.hasBlockBody();
// Block bodies: constrain 'return' expressions (expectedReturnType), leave the
// block itself unconstrained. Expression bodies: constrain the body expression
// itself and forbid 'return' (FORBIDDEN sentinel).
final TypeInferenceContext context =
blockBody
? newContext(trace, functionInnerScope, dataFlowInfo, NO_EXPECTED_TYPE, expectedReturnType)
: newContext(trace, functionInnerScope, dataFlowInfo, expectedReturnType, FORBIDDEN);
if (function instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression functionLiteralExpression = (JetFunctionLiteralExpression) function;
getBlockReturnedType(functionInnerScope, functionLiteralExpression.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context);
}
else {
typeInferrerVisitor.getType(bodyExpression, context);
}
List<JetElement> unreachableElements = Lists.newArrayList();
flowInformationProvider.collectUnreachableExpressions(function.asElement(), unreachableElements);
// This is needed in order to highlight only '1 < 2' and not '1', '<' and '2' as well
final Set<JetElement> rootUnreachableElements = JetPsiUtil.findRootExpressions(unreachableElements);
// TODO : (return 1) || (return 2) -- only || and right of it is unreachable
// TODO : try {return 1} finally {return 2}. Currently 'return 1' is reported as unreachable,
// though it'd better be reported more specifically
for (JetElement element : rootUnreachableElements) {
trace.getErrorHandler().genericError(element.getNode(), "Unreachable code");
}
List<JetExpression> returnedExpressions = Lists.newArrayList();
flowInformationProvider.collectReturnExpressions(function.asElement(), returnedExpressions);
boolean nothingReturned = returnedExpressions.isEmpty();
returnedExpressions.remove(function); // This will be the only "expression" if the body is empty
// A non-Unit function whose body completes without returning anything is an error
if (expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(expectedReturnType) && returnedExpressions.isEmpty() && !nothingReturned) {
trace.getErrorHandler().genericError(bodyExpression.getNode(), "This function must return a value of type " + expectedReturnType);
}
for (JetExpression returnedExpression : returnedExpressions) {
returnedExpression.accept(new JetVisitorVoid() {
@Override
public void visitReturnExpression(JetReturnExpression expression) {
if (!blockBody) {
trace.getErrorHandler().genericError(expression.getNode(), "Returns are not allowed for functions with expression body. Use block body in '{...}'");
}
}
@Override
public void visitExpression(JetExpression expression) {
// A value-less trailing expression in a non-Unit block body needs an explicit
// 'return' unless it has type Nothing or is already unreachable
if (blockBody && !JetStandardClasses.isUnit(expectedReturnType) && !rootUnreachableElements.contains(expression)) {
//TODO move to pseudocode
JetType type = typeInferrerVisitor.getType(expression, context.replaceExpectedType(NO_EXPECTED_TYPE));
if (type == null || !JetStandardClasses.isNothing(type)) {
trace.getErrorHandler().genericError(expression.getNode(), "A 'return' expression required in a function with a block body ('{...}')");
}
}
}
});
}
}
// Computes the type a block evaluates to. An empty block is Unit (checked
// against the expected type); otherwise statements are typed in a fresh
// writable scope so local declarations become visible to later statements.
@Nullable
private JetType getBlockReturnedType(@NotNull JetScope outerScope, @NotNull JetBlockExpression expression, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
List<JetElement> block = expression.getStatements();
if (block.isEmpty()) {
return checkType(JetStandardClasses.getUnitType(), expression, context);
}
DeclarationDescriptor containingDescriptor = outerScope.getContainingDeclaration();
WritableScope scope = new WritableScopeImpl(outerScope, containingDescriptor, context.trace.getErrorHandler()).setDebugName("getBlockReturnedType");
return getBlockReturnedTypeWithWritableScope(scope, block, coercionStrategyForLastExpression, context);
}
/**
 * Infers a function's return type as the common supertype of everything it
 * returns; a function that returns nothing at all has type Nothing.
 */
@NotNull
public JetType inferFunctionReturnType(@NotNull JetScope outerScope, JetDeclarationWithBody function, FunctionDescriptor functionDescriptor) {
    Map<JetElement, JetType> returnedTypes = collectReturnedExpressionsWithTypes(trace, outerScope, function, functionDescriptor);
    Collection<JetType> types = returnedTypes.values();
    if (types.isEmpty()) {
        return JetStandardClasses.getNothingType();
    }
    return semanticServices.getTypeChecker().commonSupertype(types);
}
// Types the function body, then collects every expression whose value the
// function may return, mapped to its (cached) type; elements that return Unit
// are mapped to the Unit type.
private Map<JetElement, JetType> collectReturnedExpressionsWithTypes(
@NotNull BindingTrace trace,
JetScope outerScope,
JetDeclarationWithBody function,
FunctionDescriptor functionDescriptor) {
JetExpression bodyExpression = function.getBodyExpression();
assert bodyExpression != null;
JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(outerScope, functionDescriptor, trace);
// Side effect: records EXPRESSION_TYPE for sub-expressions, read back below
typeInferrerVisitor.getType(bodyExpression, newContext(trace, functionInnerScope, DataFlowInfo.getEmpty(), NO_EXPECTED_TYPE, FORBIDDEN));
Collection<JetExpression> returnedExpressions = new ArrayList<JetExpression>();
Collection<JetElement> elementsReturningUnit = new ArrayList<JetElement>();
flowInformationProvider.collectReturnedInformation(function.asElement(), returnedExpressions, elementsReturningUnit);
Map<JetElement,JetType> typeMap = new HashMap<JetElement, JetType>();
for (JetExpression returnedExpression : returnedExpressions) {
JetType cachedType = trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, returnedExpression);
// Returned expressions are used for their value, so they are not statements
trace.record(STATEMENT, returnedExpression, false);
if (cachedType != null) {
typeMap.put(returnedExpression, cachedType);
}
}
for (JetElement jetElement : elementsReturningUnit) {
typeMap.put(jetElement, JetStandardClasses.getUnitType());
}
return typeMap;
}
// Types each statement of a block in order, threading data-flow info forward.
// Only the LAST statement sees the block's expected type; when coercion to Unit
// is requested, typing is retried without the expected type via temporary
// traces so that only the better of the two diagnostics sets is committed.
private JetType getBlockReturnedTypeWithWritableScope(@NotNull WritableScope scope, @NotNull List<? extends JetElement> block, @NotNull CoercionStrategy coercionStrategyForLastExpression, TypeInferenceContext context) {
if (block.isEmpty()) {
return JetStandardClasses.getUnitType();
}
TypeInferrerVisitorWithWritableScope blockLevelVisitor = newTypeInferrerVisitorWithWritableScope(scope);
TypeInferenceContext newContext = newContext(trace, scope, context.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
JetType result = null;
for (Iterator<? extends JetElement> iterator = block.iterator(); iterator.hasNext(); ) {
final JetElement statement = iterator.next();
trace.record(STATEMENT, statement);
final JetExpression statementExpression = (JetExpression) statement;
//TODO constructor assert context.expectedType != FORBIDDEN : ""
if (!iterator.hasNext() && context.expectedType != NO_EXPECTED_TYPE) {
if (coercionStrategyForLastExpression == CoercionStrategy.COERCION_TO_UNIT && JetStandardClasses.isUnit(context.expectedType)) {
// This implements coercion to Unit
// First attempt: type the last statement against Unit, intercepting any
// type-mismatch on it into mismatch[0] instead of committing immediately
TemporaryBindingTrace temporaryTraceExpectingUnit = TemporaryBindingTrace.create(trace);
final boolean[] mismatch = new boolean[1];
BindingTraceAdapter errorInterceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceExpectingUnit, statementExpression, mismatch);
newContext = newContext(errorInterceptingTrace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
// Second attempt: retry with no expected type; commit whichever trace
// produced no mismatch (preferring the unconstrained one)
TemporaryBindingTrace temporaryTraceNoExpectedType = TemporaryBindingTrace.create(trace);
mismatch[0] = false;
BindingTraceAdapter interceptingTrace = makeTraceInterceptingTypeMismatch(temporaryTraceNoExpectedType, statementExpression, mismatch);
newContext = newContext(interceptingTrace, scope, newContext.dataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
if (mismatch[0]) {
temporaryTraceExpectingUnit.commit();
}
else {
temporaryTraceNoExpectedType.commit();
}
}
else {
temporaryTraceExpectingUnit.commit();
}
}
else {
newContext = newContext(trace, scope, newContext.dataFlowInfo, context.expectedType, context.expectedReturnType);
result = blockLevelVisitor.getType(statementExpression, newContext);
}
}
else {
result = blockLevelVisitor.getType(statementExpression, newContext);
}
// Thread data-flow facts (e.g. null checks) from this statement into the next
DataFlowInfo newDataFlowInfo = blockLevelVisitor.getResultingDataFlowInfo();
if (newDataFlowInfo == null) {
newDataFlowInfo = context.dataFlowInfo;
}
if (newDataFlowInfo != context.dataFlowInfo) {
newContext = newContext(trace, scope, newDataFlowInfo, NO_EXPECTED_TYPE, context.expectedReturnType);
}
blockLevelVisitor.resetResult(); // TODO : maybe it's better to recreate the visitors with the same scope?
}
return result;
}
// Wraps a trace so that a typeMismatch reported on exactly expressionToWatch
// sets mismatchFound[0] (in addition to normal reporting); used by the
// coercion-to-Unit retry logic in getBlockReturnedTypeWithWritableScope.
private BindingTraceAdapter makeTraceInterceptingTypeMismatch(final BindingTrace trace, final JetExpression expressionToWatch, final boolean[] mismatchFound) {
return new BindingTraceAdapter(trace) {
@NotNull
@Override
public ErrorHandler getErrorHandler() {
return new CompositeErrorHandler(super.getErrorHandler(), new ErrorHandler() {
@Override
public void typeMismatch(@NotNull JetExpression expression, @NotNull JetType expectedType, @NotNull JetType actualType) {
if (expression == expressionToWatch) {
mismatchFound[0] = true;
}
}
});
}
};
}
//TODO
// Refines a type using data-flow facts: when the expression is a simple
// variable reference, its data-flow "out" type replaces the initial type.
private JetType enrichOutType(JetExpression expression, JetType initialType, @NotNull TypeInferenceContext context) {
    if (expression == null) {
        return initialType;
    }
    VariableDescriptor descriptor = getVariableDescriptorFromSimpleName(expression, context);
    return descriptor == null ? initialType : context.dataFlowInfo.getOutType(descriptor);
}
// Reports a type mismatch when the expression's type is not a subtype of a
// real (non-sentinel) expected type; always passes the inferred type through.
@Nullable
private JetType checkType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
    JetType expected = context.expectedType;
    boolean checkable = expressionType != null && expected != null && expected != NO_EXPECTED_TYPE;
    if (checkable && !semanticServices.getTypeChecker().isSubtypeOf(expressionType, expected)) {
        context.trace.getErrorHandler().typeMismatch(expression, expected, expressionType);
    }
    return expressionType;
}
// Like checkType, but before reporting a mismatch tries to satisfy the
// expected type via data-flow facts about the referenced variable (possible
// auto-cast types, or the not-null "out" type). On success, records the
// auto-cast and returns the expected type instead of the declared one.
@Nullable
private JetType checkEnrichedType(@Nullable JetType expressionType, @NotNull JetExpression expression, @NotNull TypeInferenceContext context) {
if (expressionType == null || context.expectedType == null || context.expectedType == NO_EXPECTED_TYPE ||
semanticServices.getTypeChecker().isSubtypeOf(expressionType, context.expectedType)) {
return expressionType;
}
VariableDescriptor variableDescriptor = getVariableDescriptorFromSimpleName(expression, context);
boolean appropriateTypeFound = false;
if (variableDescriptor != null) {
// Most recently established facts first
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
for (JetType possibleType : possibleTypes) {
if (semanticServices.getTypeChecker().isSubtypeOf(possibleType, context.expectedType)) {
appropriateTypeFound = true;
break;
}
}
if (!appropriateTypeFound) {
// Fall back to the not-null refinement of the variable's type
JetType notnullType = context.dataFlowInfo.getOutType(variableDescriptor);
if (notnullType != null && semanticServices.getTypeChecker().isSubtypeOf(notnullType, context.expectedType)) {
appropriateTypeFound = true;
}
}
}
if (!appropriateTypeFound) {
context.trace.getErrorHandler().typeMismatch(expression, context.expectedType, expressionType);
return expressionType;
}
// variableDescriptor is non-null here: appropriateTypeFound can only be set inside the block above
checkAutoCast(expression, context.expectedType, variableDescriptor, context.trace);
return context.expectedType;
}
// Records an automatic cast for a val; auto-casting a var is an error because
// its value may change between the check and the use.
private void checkAutoCast(JetExpression expression, JetType type, VariableDescriptor variableDescriptor, BindingTrace trace) {
    if (!variableDescriptor.isVar()) {
        trace.record(BindingContext.AUTOCAST, expression, type);
        return;
    }
    trace.getErrorHandler().genericError(expression.getNode(), "Automatic cast to " + type + " is impossible, because variable " + variableDescriptor.getName() + " is mutable");
}
/**
 * Checks each argument type against the corresponding expected parameter type
 * (with data-flow enrichment). If the three lists do not line up, or there are
 * no arguments, the input types are returned unchanged.
 */
@NotNull
private List<JetType> checkArgumentTypes(@NotNull List<JetType> argumentTypes, @NotNull List<JetExpression> arguments, @NotNull List<TypeProjection> expectedArgumentTypes, @NotNull TypeInferenceContext context) {
    int count = arguments.size();
    boolean listsAligned = count != 0 && argumentTypes.size() == count && expectedArgumentTypes.size() == count;
    if (!listsAligned) {
        return argumentTypes;
    }
    List<JetType> checked = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        JetType expected = expectedArgumentTypes.get(i).getType();
        checked.add(checkEnrichedType(argumentTypes.get(i), arguments.get(i), context.replaceExpectedType(expected)));
    }
    return checked;
}
/**
 * Resolves the expression to the variable it references, if any: looks through
 * a type ascription (x : T) and then inspects a simple name's resolved target.
 * Returns null when the expression is not a plain variable reference.
 */
@Nullable
private VariableDescriptor getVariableDescriptorFromSimpleName(@NotNull JetExpression receiverExpression, @NotNull TypeInferenceContext context) {
    if (receiverExpression instanceof JetBinaryExpressionWithTypeRHS) {
        JetBinaryExpressionWithTypeRHS typeRHS = (JetBinaryExpressionWithTypeRHS) receiverExpression;
        if (typeRHS.getOperationSign().getReferencedNameElementType() == JetTokens.COLON) {
            // Ascription does not change identity; recurse on the underlying expression
            return getVariableDescriptorFromSimpleName(typeRHS.getLeft(), context);
        }
    }
    if (!(receiverExpression instanceof JetSimpleNameExpression)) {
        return null;
    }
    JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) receiverExpression;
    DeclarationDescriptor target = context.trace.getBindingContext().get(REFERENCE_TARGET, nameExpression);
    return target instanceof VariableDescriptor ? (VariableDescriptor) target : null;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Whether the last expression of a block may be coerced to Unit when the
// expected type is Unit (used for function-literal bodies).
private enum CoercionStrategy {
NO_COERCION,
COERCION_TO_UNIT
}
// Sole factory for TypeInferenceContext (its constructor is @Deprecated).
@NotNull
private TypeInferenceContext newContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
return new TypeInferenceContext(trace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
private class TypeInferenceContext {
public final BindingTrace trace;
public final TypeResolver typeResolver;
public final ClassDescriptorResolver classDescriptorResolver;
public final JetScope scope;
public final Services services;
public final DataFlowInfo dataFlowInfo;
public final JetType expectedType;
public final JetType expectedReturnType;
@Deprecated // Only factory methods
private TypeInferenceContext(
@NotNull BindingTrace trace,
@NotNull JetScope scope,
@NotNull DataFlowInfo dataFlowInfo,
@NotNull JetType expectedType,
@NotNull JetType expectedReturnType) {
this.trace = trace;
this.typeResolver = new TypeResolver(semanticServices, trace, true);
this.classDescriptorResolver = semanticServices.getClassDescriptorResolver(trace);
this.scope = scope;
this.services = getServices(trace);
this.dataFlowInfo = dataFlowInfo;
this.expectedType = expectedType;
this.expectedReturnType = expectedReturnType;
}
public TypeInferenceContext replaceDataFlowInfo(DataFlowInfo newDataFlowInfo) {
return newContext(trace, scope, newDataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedType(@Nullable JetType newExpectedType) {
if (newExpectedType == null) return replaceExpectedType(NO_EXPECTED_TYPE);
if (expectedType == newExpectedType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedReturnType(@Nullable JetType newExpectedReturnType) {
if (newExpectedReturnType == null) return replaceExpectedReturnType(NO_EXPECTED_TYPE);
if (expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, expectedType, newExpectedReturnType);
}
public TypeInferenceContext replaceBindingTrace(@NotNull BindingTrace newTrace) {
if (newTrace == trace) return this;
<<<<<<< MINE
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, expectedType, expectedReturnType);
}
public TypeInferenceContext replaceExpectedTypeAndTrace(@NotNull JetType newExpectedType, @NotNull BindingTrace newTrace) {
if (newExpectedType == expectedType && newTrace == trace) return this;
return new TypeInferenceContext(newTrace, scope, preferBlock, dataFlowInfo, newExpectedType, expectedReturnType);
=======
return newContext(newTrace, scope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceScope(@NotNull JetScope newScope) {
if (newScope == scope) return this;
return newContext(trace, newScope, dataFlowInfo, expectedType, expectedReturnType);
}
@NotNull
public TypeInferenceContext replaceExpectedTypes(@NotNull JetType newExpectedType, @NotNull JetType newExpectedReturnType) {
if (expectedType == newExpectedType && expectedReturnType == newExpectedReturnType) return this;
return newContext(trace, scope, dataFlowInfo, newExpectedType, newExpectedReturnType);
>>>>>>> YOURS
}
}
private class TypeInferrerVisitor extends JetVisitor<JetType, TypeInferenceContext> {
// Data-flow facts established by the most recently typed expression, if any;
// consumed by getBlockReturnedTypeWithWritableScope and cleared by resetResult().
protected DataFlowInfo resultDataFlowInfo;
@Nullable
public DataFlowInfo getResultingDataFlowInfo() {
return resultDataFlowInfo;
}
// Central typing entry point: returns the cached type for already-PROCESSED
// expressions, otherwise visits the expression, unwraps deferred types, caches
// the result, marks dominated code unreachable for non-null Nothing types, and
// records the resolution scope. Guards against recursive lazy-value loops.
@Nullable
public final JetType getType(@NotNull JetExpression expression, TypeInferenceContext context) {
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
JetType result;
try {
result = expression.visit(this, context);
// Some recursive definitions (object expressions) must put their types in the cache manually:
if (context.trace.get(BindingContext.PROCESSED, expression)) {
return context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, expression);
}
if (result instanceof DeferredType) {
result = ((DeferredType) result).getActualType();
}
if (result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, expression, result);
// An expression of non-null Nothing never completes normally
if (JetStandardClasses.isNothing(result) && !result.isNullable()) {
markDominatedExpressionsAsUnreachable(expression, context);
}
}
}
catch (ReenteringLazyValueComputationException e) {
context.trace.getErrorHandler().genericError(expression.getNode(), "Type checking has run into a recursive problem"); // TODO : message
result = null;
}
if (!context.trace.get(BindingContext.PROCESSED, expression)) {
context.trace.record(BindingContext.RESOLUTION_SCOPE, expression, context.scope);
}
context.trace.record(BindingContext.PROCESSED, expression);
return result;
}
// Types the expression in a derived context with the given scope and data-flow
// info, keeping the current expected types.
private JetType getTypeWithNewScopeAndDataFlowInfo(@NotNull JetScope scope, @NotNull JetExpression expression, @NotNull DataFlowInfo newDataFlowInfo, @NotNull TypeInferenceContext context) {
return getType(expression, newContext(context.trace, scope, newDataFlowInfo, context.expectedType, context.expectedReturnType));
}
// Clears per-statement state between statements of a block.
public void resetResult() {
// result = null;
resultDataFlowInfo = null;
// resultScope = null;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reports "unreachable" on code dominated by an expression of type Nothing;
// only root expressions are reported so sub-expressions are not flagged twice.
private void markDominatedExpressionsAsUnreachable(JetExpression expression, TypeInferenceContext context) {
    List<JetElement> dominatedElements = new ArrayList<JetElement>();
    flowInformationProvider.collectDominatedExpressions(expression, dominatedElements);
    for (JetElement root : JetPsiUtil.findRootExpressions(dominatedElements)) {
        context.trace.getErrorHandler().genericError(root.getNode(),
                "This code is unreachable, because '" + expression.getText() + "' never terminates normally");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Infers the type of a simple name reference. Backing-field references
 * ($field, FIELD_IDENTIFIER tokens) are resolved against the scope's
 * properties; every other name is delegated to getSelectorReturnType.
 * Returns null when the reference cannot be resolved.
 */
@Override
public JetType visitSimpleNameExpression(JetSimpleNameExpression expression, TypeInferenceContext context) {
    // TODO : other members
    // TODO : type substitutions???
    String referencedName = expression.getReferencedName();
    if (expression.getReferencedNameElementType() == JetTokens.FIELD_IDENTIFIER
            && referencedName != null) {
        PropertyDescriptor property = context.scope.getPropertyByFieldReference(referencedName);
        if (property == null) {
            context.trace.getErrorHandler().unresolvedReference(expression);
        }
        else {
            context.trace.record(REFERENCE_TARGET, expression, property);
            return context.services.checkEnrichedType(property.getOutType(), expression, context);
        }
    }
    else {
        return getSelectorReturnType(null, expression, context); // TODO : Extensions to this
    }
    return null;
}
private JetType lookupNamespaceOrClassObject(JetSimpleNameExpression expression, String referencedName, TypeInferenceContext context) {
    // Try to resolve the name as a classifier with a class object; if no classifier
    // is in scope, fall back to the extended name lookup (namespaces etc.).
    ClassifierDescriptor classifier = context.scope.getClassifier(referencedName);
    if (classifier == null) {
        JetType[] lookupResult = new JetType[1];
        if (furtherNameLookup(expression, referencedName, lookupResult, context)) {
            return context.services.checkEnrichedType(lookupResult[0], expression, context);
        }
        return null;
    }
    JetType classObjectType = classifier.getClassObjectType();
    JetType result = null;
    if (classObjectType != null && (isNamespacePosition() || classifier.isClassObjectAValue())) {
        result = classObjectType;
    }
    else {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Classifier " + classifier.getName() + " does not have a class object");
    }
    // The reference target is recorded even when no class object is available.
    context.trace.record(REFERENCE_TARGET, expression, classifier);
    if (result == null) {
        return ErrorUtils.createErrorType("No class object in " + expression.getReferencedName());
    }
    return context.services.checkEnrichedType(result, expression, context);
}
/**
 * Whether this visitor is inferring an expression in "namespace position"
 * (used when deciding if a classifier's class object may be referenced).
 * Always false here; presumably overridden by a subclass — confirm at call sites.
 */
public boolean isNamespacePosition() {
    return false;
}
protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
    // A bare namespace name is not a value: the lookup "succeeds" (returns true)
    // but an error is reported and no type is stored into result.
    if (lookupNamespaceType(expression, referencedName, context) == null) {
        return false;
    }
    context.trace.getErrorHandler().genericError(expression.getNode(), "Expression expected, but a namespace name found");
    return true;
}
@Nullable
protected NamespaceType lookupNamespaceType(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, TypeInferenceContext context) {
    // Resolve the name as a namespace; records the reference target on success.
    NamespaceDescriptor namespaceDescriptor = context.scope.getNamespace(referencedName);
    if (namespaceDescriptor != null) {
        context.trace.record(REFERENCE_TARGET, expression, namespaceDescriptor);
        return namespaceDescriptor.getNamespaceType();
    }
    return null;
}
@Override
public JetType visitObjectLiteralExpression(final JetObjectLiteralExpression expression, final TypeInferenceContext context) {
    // The type of an object literal is the default type of its anonymous class.
    // That class's descriptor is produced by the top-down analyzer, so we intercept
    // the trace record binding the declaration to its descriptor via a handler.
    final JetType[] result = new JetType[1];
    BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor> handler = new BindingTraceAdapter.RecordHandler<PsiElement, DeclarationDescriptor>() {
        @Override
        public void handleRecord(WritableSlice<PsiElement, DeclarationDescriptor> slice, PsiElement declaration, final DeclarationDescriptor descriptor) {
            if (declaration == expression.getObjectDeclaration()) {
                // Defer computing the default type: the descriptor may not be
                // fully initialized at the moment this record fires.
                JetType defaultType = new DeferredType(new LazyValue<JetType>() {
                    @Override
                    protected JetType compute() {
                        return ((ClassDescriptor) descriptor).getDefaultType();
                    }
                });
                result[0] = defaultType;
                // Record the expression type only once per expression.
                if (!context.trace.get(BindingContext.PROCESSED, expression)) {
                    context.trace.record(BindingContext.EXPRESSION_TYPE, expression, defaultType);
                    context.trace.record(BindingContext.PROCESSED, expression);
                }
            }
        }
    };
    BindingTraceAdapter traceAdapter = new BindingTraceAdapter(context.trace);
    // Subscribe the handler to every declaration-to-descriptor slice.
    for (WritableSlice slice : BindingContext.DECLARATIONS_TO_DESCRIPTORS) {
        //noinspection unchecked
        traceAdapter.addHandler(slice, handler);
    }
    // Analyzing the object declaration triggers the handler above as a side effect.
    TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, traceAdapter);
    topDownAnalyzer.processObject(context.scope, context.scope.getContainingDeclaration(), expression.getObjectDeclaration());
    return context.services.checkType(result[0], expression, context);
}
@Override
public JetType visitFunctionLiteralExpression(JetFunctionLiteralExpression expression, TypeInferenceContext context) {
    // Infers the type of a function literal. When a function type is expected,
    // unannotated parameter types and the return type are taken from the expectation.
    JetFunctionLiteral functionLiteral = expression.getFunctionLiteral();
    JetTypeReference receiverTypeRef = functionLiteral.getReceiverTypeRef();
    final JetType receiverType;
    if (receiverTypeRef != null) {
        receiverType = context.typeResolver.resolveType(context.scope, receiverTypeRef);
    } else {
        // No explicit receiver: inherit the enclosing scope's 'this' type.
        receiverType = context.scope.getThisType();
    }
    FunctionDescriptorImpl functionDescriptor = new FunctionDescriptorImpl(
            context.scope.getContainingDeclaration(), Collections.<AnnotationDescriptor>emptyList(), "<anonymous>");
    List<JetType> parameterTypes = new ArrayList<JetType>();
    List<ValueParameterDescriptor> valueParameterDescriptors = Lists.newArrayList();
    List<JetParameter> parameters = functionLiteral.getValueParameters();
    JetType expectedType = context.expectedType;
    List<ValueParameterDescriptor> valueParameters = null;
    boolean functionTypeExpected = expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isFunctionType(expectedType);
    if (functionTypeExpected) {
        // Parameter descriptors implied by the expected function type, used as a
        // fallback for parameters without an explicit type annotation.
        valueParameters = JetStandardClasses.getValueParameters(functionDescriptor, expectedType);
    }
    for (int i = 0, parametersSize = parameters.size(); i < parametersSize; i++) {
        JetParameter parameter = parameters.get(i);
        JetTypeReference typeReference = parameter.getTypeReference();
        JetType type;
        if (typeReference != null) {
            type = context.typeResolver.resolveType(context.scope, typeReference);
        }
        else {
            if (valueParameters != null) {
                type = valueParameters.get(i).getOutType();
            }
            else {
                // No annotation and no expectation: the parameter type cannot be inferred.
                context.trace.getErrorHandler().genericError(parameter.getNode(), "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation");
                type = ErrorUtils.createErrorType("Cannot be inferred");
            }
        }
        ValueParameterDescriptor valueParameterDescriptor = context.classDescriptorResolver.resolveValueParameterDescriptor(functionDescriptor, parameter, i, type);
        parameterTypes.add(valueParameterDescriptor.getOutType());
        valueParameterDescriptors.add(valueParameterDescriptor);
    }
    // Receiver: an explicit annotation wins; otherwise taken from the expected
    // function type (if any), else the literal is an ordinary (non-extension) function.
    JetType effectiveReceiverType;
    if (receiverTypeRef == null) {
        if (functionTypeExpected) {
            effectiveReceiverType = JetStandardClasses.getReceiverType(expectedType);
        }
        else {
            effectiveReceiverType = null;
        }
    }
    else {
        effectiveReceiverType = receiverType;
    }
    functionDescriptor.initialize(effectiveReceiverType, Collections.<TypeParameterDescriptor>emptyList(), valueParameterDescriptors, null);
    context.trace.record(BindingContext.FUNCTION, expression, functionDescriptor);
    JetType returnType = NO_EXPECTED_TYPE;
    JetScope functionInnerScope = FunctionDescriptorUtil.getFunctionInnerScope(context.scope, functionDescriptor, context.trace);
    JetTypeReference returnTypeRef = functionLiteral.getReturnTypeRef();
    if (returnTypeRef != null) {
        // Declared return type: resolve it and check the body against it.
        returnType = context.typeResolver.resolveType(context.scope, returnTypeRef);
        context.services.checkFunctionReturnType(functionInnerScope, expression, returnType, context.dataFlowInfo, CoercionStrategy.COERCION_TO_UNIT);
    }
    else {
        // Otherwise infer from the body, seeding the expectation from the
        // expected function type when available.
        if (functionTypeExpected) {
            returnType = JetStandardClasses.getReturnType(expectedType);
        }
        returnType = context.services.getBlockReturnedType(functionInnerScope, functionLiteral.getBodyExpression(), CoercionStrategy.COERCION_TO_UNIT, context.replaceExpectedType(returnType));
    }
    JetType safeReturnType = returnType == null ? ErrorUtils.createErrorType("<return type>") : returnType;
    functionDescriptor.setReturnType(safeReturnType);
    // When Unit is expected as the return type, coerce the literal's type to return Unit.
    if (functionTypeExpected) {
        JetType expectedReturnType = JetStandardClasses.getReturnType(expectedType);
        if (JetStandardClasses.isUnit(expectedReturnType)) {
            return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, expectedReturnType), expression, context);
        }
    }
    return context.services.checkType(JetStandardClasses.getFunctionType(Collections.<AnnotationDescriptor>emptyList(), effectiveReceiverType, parameterTypes, safeReturnType), expression, context);
}
@Override
public JetType visitParenthesizedExpression(JetParenthesizedExpression expression, TypeInferenceContext context) {
    // A parenthesized expression simply has the type of its inner expression.
    JetType innerType = getType(expression.getExpression(), context.replaceScope(context.scope));
    return context.services.checkType(innerType, expression, context);
}
@Override
public JetType visitConstantExpression(JetConstantExpression expression, TypeInferenceContext context) {
    // Resolve the literal into a compile-time constant, letting the expected type
    // influence the resolution (e.g. which numeric type an integer literal gets).
    ASTNode node = expression.getNode();
    IElementType elementType = node.getElementType();
    String literalText = node.getText();
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    CompileTimeConstantResolver resolver = context.services.compileTimeConstantResolver;
    CompileTimeConstant<?> constant;
    if (elementType == JetNodeTypes.INTEGER_CONSTANT) {
        constant = resolver.getIntegerValue(literalText, context.expectedType);
    }
    else if (elementType == JetNodeTypes.FLOAT_CONSTANT) {
        constant = resolver.getFloatValue(literalText, context.expectedType);
    }
    else if (elementType == JetNodeTypes.BOOLEAN_CONSTANT) {
        constant = resolver.getBooleanValue(literalText, context.expectedType);
    }
    else if (elementType == JetNodeTypes.CHARACTER_CONSTANT) {
        constant = resolver.getCharValue(literalText, context.expectedType);
    }
    else if (elementType == JetNodeTypes.RAW_STRING_CONSTANT) {
        constant = resolver.getRawStringValue(literalText, context.expectedType);
    }
    else if (elementType == JetNodeTypes.NULL) {
        constant = resolver.getNullValue(context.expectedType);
    }
    else {
        throw new IllegalArgumentException("Unsupported constant: " + expression);
    }
    if (!(constant instanceof ErrorValue)) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, constant);
        return context.services.checkType(constant.getType(standardLibrary), expression, context);
    }
    // Resolution failed: report the error and fall back to the literal's default type.
    context.trace.getErrorHandler().genericError(node, ((ErrorValue) constant).getMessage());
    return getDefaultType(elementType);
}
/**
 * Fallback type for a literal whose compile-time resolution failed:
 * the standard-library type conventionally associated with the node kind.
 */
@NotNull
private JetType getDefaultType(IElementType constantType) {
    JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
    if (constantType == JetNodeTypes.INTEGER_CONSTANT) {
        return standardLibrary.getIntType();
    }
    if (constantType == JetNodeTypes.FLOAT_CONSTANT) {
        return standardLibrary.getDoubleType();
    }
    if (constantType == JetNodeTypes.BOOLEAN_CONSTANT) {
        return standardLibrary.getBooleanType();
    }
    if (constantType == JetNodeTypes.CHARACTER_CONSTANT) {
        return standardLibrary.getCharType();
    }
    if (constantType == JetNodeTypes.RAW_STRING_CONSTANT) {
        return standardLibrary.getStringType();
    }
    if (constantType == JetNodeTypes.NULL) {
        return JetStandardClasses.getNullableNothingType();
    }
    throw new IllegalArgumentException("Unsupported constant type: " + constantType);
}
@Override
public JetType visitThrowExpression(JetThrowExpression expression, TypeInferenceContext context) {
    // Infer the thrown expression (for its trace side effects); 'throw' itself
    // never completes normally, so its type is Nothing.
    JetExpression thrown = expression.getThrownExpression();
    if (thrown != null) {
        getType(thrown, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
        // TODO : check that it inherits Throwable
    }
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitReturnExpression(JetReturnExpression expression, TypeInferenceContext context) {
    // 'return' is not permitted in every context (expectedReturnType == FORBIDDEN).
    if (context.expectedReturnType == FORBIDDEN) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "'return' is not allowed here");
        return null;
    }
    JetExpression returnedExpression = expression.getReturnedExpression();
    if (returnedExpression != null) {
        // Check the returned value against the function's expected return type.
        getType(returnedExpression, context.replaceExpectedType(context.expectedReturnType).replaceScope(context.scope));
    }
    else {
        // A bare 'return' only type-checks when the function returns Unit.
        if (context.expectedReturnType != NO_EXPECTED_TYPE && !JetStandardClasses.isUnit(context.expectedReturnType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "This function must return a value of type " + context.expectedReturnType);
        }
    }
    // The 'return' expression itself never completes normally: its type is Nothing.
    return context.services.checkType(JetStandardClasses.getNothingType(), expression, context);
}
@Override
public JetType visitBreakExpression(JetBreakExpression expression, TypeInferenceContext context) {
    // 'break' never yields a value; its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitContinueExpression(JetContinueExpression expression, TypeInferenceContext context) {
    // 'continue' never yields a value; its type is Nothing.
    JetType nothingType = JetStandardClasses.getNothingType();
    return context.services.checkType(nothingType, expression, context);
}
@Override
public JetType visitBinaryWithTypeRHSExpression(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context) {
<<<<<<< MINE
=======
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
JetType actualType = getType(expression.getLeft(), context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
>>>>>>> YOURS
JetTypeReference right = expression.getRight();
JetType result = null;
if (right != null) {
JetType targetType = context.typeResolver.resolveType(context.scope, right);
if (isTypeFlexible(expression.getLeft())) {
TemporaryBindingTrace temporaryTraceWithExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
boolean success = checkBinaryWithTypeRHS(expression, context, targetType, targetType, temporaryTraceWithExpectedType);
if (success) {
temporaryTraceWithExpectedType.addAllMyDataTo(context.trace);
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
}
else {
TemporaryBindingTrace temporaryTraceWithoutExpectedType = new TemporaryBindingTrace(context.trace.getBindingContext());
checkBinaryWithTypeRHS(expression, context, targetType, NO_EXPECTED_TYPE, temporaryTraceWithoutExpectedType);
temporaryTraceWithoutExpectedType.addAllMyDataTo(context.trace);
}
IElementType operationType = expression.getOperationSign().getReferencedNameElementType();
result = operationType == JetTokens.AS_SAFE ? TypeUtils.makeNullable(targetType) : targetType;
}
else {
getType(context.scope, expression.getLeft(), false, context.replaceExpectedType(NO_EXPECTED_TYPE));
}
return context.services.checkType(result, expression, context);
}
/**
 * A "flexible" expression is an untyped numeric literal whose type may adapt
 * to the expected type (integer and float constants only).
 */
private boolean isTypeFlexible(@Nullable JetExpression expression) {
    if (expression == null) {
        return false;
    }
    TokenSet flexibleLiterals = TokenSet.create(
            JetNodeTypes.INTEGER_CONSTANT,
            JetNodeTypes.FLOAT_CONSTANT);
    return flexibleLiterals.contains(expression.getNode().getElementType());
}
/**
 * Checks the LHS of 'e : T' / 'e as T' / 'e as? T' against the target type,
 * recording into the given temporary trace so a failed attempt can be discarded.
 * Returns true when the check succeeded (and the temporary trace may be committed).
 */
private boolean checkBinaryWithTypeRHS(JetBinaryExpressionWithTypeRHS expression, TypeInferenceContext context, @NotNull JetType targetType, @NotNull JetType expectedType, TemporaryBindingTrace temporaryTrace) {
    TypeInferenceContext newContext = context.replaceExpectedTypeAndTrace(expectedType, temporaryTrace);
    JetType actualType = getType(context.scope, expression.getLeft(), false, newContext);
    if (actualType == null) return false;
    JetSimpleNameExpression operationSign = expression.getOperationSign();
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.COLON) {
        // 'e : T' is an up-check: the actual type must be a subtype of T.
        if (targetType != NO_EXPECTED_TYPE && !semanticServices.getTypeChecker().isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().typeMismatch(expression.getLeft(), targetType, actualType);
            return false;
        }
        return true;
    }
    else if (operationType == JetTokens.AS_KEYWORD || operationType == JetTokens.AS_SAFE) {
        // Casts are always "successful" type-wise; impossibility is only warned about.
        checkForCastImpossibility(expression, actualType, targetType, context);
        return true;
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unsupported binary operation");
        return false;
    }
}
/**
 * Warns on casts that are useless or can never succeed:
 * - target <: actual, actual <: target (same type): "No cast needed"
 * - actual <: target only (upcast): "No cast needed, use ':' instead"
 * - neither direction holds: "This cast can never succeed"
 * A genuine downcast (target <: actual only) produces no warning.
 */
private void checkForCastImpossibility(JetBinaryExpressionWithTypeRHS expression, JetType actualType, JetType targetType, TypeInferenceContext context) {
    if (actualType == null || targetType == NO_EXPECTED_TYPE) return;
    JetTypeChecker typeChecker = semanticServices.getTypeChecker();
    if (!typeChecker.isSubtypeOf(targetType, actualType)) {
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed, use ':' instead");
        }
        else {
            // See JET-58 Make 'as never succeeds' a warning, or even never check for Java (external) types
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "This cast can never succeed");
        }
    }
    else {
        if (typeChecker.isSubtypeOf(actualType, targetType)) {
            context.trace.getErrorHandler().genericWarning(expression.getOperationSign().getNode(), "No cast needed");
        }
    }
}
@Override
public JetType visitTupleExpression(JetTupleExpression expression, TypeInferenceContext context) {
    // Infer each entry independently, then assemble the TupleN type.
    List<JetExpression> entries = expression.getEntries();
    List<JetType> entryTypes = new ArrayList<JetType>();
    for (JetExpression entry : entries) {
        entryTypes.add(context.services.safeGetType(context.scope, entry, NO_EXPECTED_TYPE)); // TODO
    }
    boolean tupleExpected = context.expectedType != NO_EXPECTED_TYPE && JetStandardClasses.isTupleType(context.expectedType);
    if (tupleExpected) {
        // When a tuple type is expected, entry types may be enriched against it.
        List<JetType> enrichedTypes = context.services.checkArgumentTypes(entryTypes, entries, context.expectedType.getArguments(), context);
        if (enrichedTypes != entryTypes) {
            return JetStandardClasses.getTupleType(enrichedTypes);
        }
    }
    // TODO : labels
    return context.services.checkType(JetStandardClasses.getTupleType(entryTypes), expression, context);
}
@Override
public JetType visitThisExpression(JetThisExpression expression, TypeInferenceContext context) {
    // Resolves 'this', 'this@label' and 'this<SuperType>'.
    JetType result = null;
    JetType thisType = null;
    String labelName = expression.getLabelName();
    if (labelName != null) {
        // Labeled 'this': find the declaration the label refers to.
        Collection<DeclarationDescriptor> declarationsByLabel = context.scope.getDeclarationsByLabel(labelName);
        int size = declarationsByLabel.size();
        final JetSimpleNameExpression targetLabel = expression.getTargetLabel();
        assert targetLabel != null;
        if (size == 1) {
            DeclarationDescriptor declarationDescriptor = declarationsByLabel.iterator().next();
            if (declarationDescriptor instanceof ClassDescriptor) {
                ClassDescriptor classDescriptor = (ClassDescriptor) declarationDescriptor;
                thisType = classDescriptor.getDefaultType();
            }
            else if (declarationDescriptor instanceof FunctionDescriptor) {
                // 'this' of a function is its (extension) receiver.
                FunctionDescriptor functionDescriptor = (FunctionDescriptor) declarationDescriptor;
                thisType = functionDescriptor.getReceiverType();
            }
            else {
                throw new UnsupportedOperationException(); // TODO
            }
            context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
        }
        else if (size == 0) {
            // No declaration by label in scope:
            // This uses the info written by the control flow processor
            PsiElement psiElement = BindingContextUtils.resolveToDeclarationPsiElement(context.trace.getBindingContext(), targetLabel);
            if (psiElement instanceof JetFunctionLiteralExpression) {
                DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(BindingContext.DECLARATION_TO_DESCRIPTOR, psiElement);
                if (declarationDescriptor instanceof FunctionDescriptor) {
                    thisType = ((FunctionDescriptor) declarationDescriptor).getReceiverType();
                    if (thisType == null) {
                        // The labeled literal has no receiver: Nothing triggers the
                        // "'this' is not defined" error below.
                        thisType = JetStandardClasses.getNothingType();
                    }
                    else {
                        context.trace.record(REFERENCE_TARGET, targetLabel, declarationDescriptor);
                        context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptor);
                    }
                }
                else {
                    context.trace.getErrorHandler().unresolvedReference(targetLabel);
                }
            }
            else {
                context.trace.getErrorHandler().unresolvedReference(targetLabel);
            }
        }
        else {
            context.trace.getErrorHandler().genericError(targetLabel.getNode(), "Ambiguous label");
        }
    }
    else {
        // Unlabeled 'this': the enclosing scope's 'this' type.
        thisType = context.scope.getThisType();
        DeclarationDescriptor declarationDescriptorForUnqualifiedThis = context.scope.getDeclarationDescriptorForUnqualifiedThis();
        if (declarationDescriptorForUnqualifiedThis != null) {
            context.trace.record(REFERENCE_TARGET, expression.getThisReference(), declarationDescriptorForUnqualifiedThis);
        }
    }
    if (thisType != null) {
        if (JetStandardClasses.isNothing(thisType)) {
            context.trace.getErrorHandler().genericError(expression.getNode(), "'this' is not defined in this context");
        }
        else {
            JetTypeReference superTypeQualifier = expression.getSuperTypeQualifier();
            if (superTypeQualifier != null) {
                // 'this<SuperType>': resolve the qualifier among declared supertypes,
                // substituting type arguments of the current type.
                JetTypeElement superTypeElement = superTypeQualifier.getTypeElement();
                // Errors are reported by the parser
                if (superTypeElement instanceof JetUserType) {
                    JetUserType typeElement = (JetUserType) superTypeElement;
                    ClassifierDescriptor classifierCandidate = context.typeResolver.resolveClass(context.scope, typeElement);
                    if (classifierCandidate instanceof ClassDescriptor) {
                        ClassDescriptor superclass = (ClassDescriptor) classifierCandidate;
                        Collection<? extends JetType> supertypes = thisType.getConstructor().getSupertypes();
                        TypeSubstitutor substitutor = TypeSubstitutor.create(thisType);
                        for (JetType declaredSupertype : supertypes) {
                            if (declaredSupertype.getConstructor().equals(superclass.getTypeConstructor())) {
                                result = substitutor.safeSubstitute(declaredSupertype, Variance.INVARIANT);
                                break;
                            }
                        }
                        if (result == null) {
                            context.trace.getErrorHandler().genericError(superTypeElement.getNode(), "Not a superclass");
                        }
                    }
                }
            } else {
                result = thisType;
            }
            if (result != null) {
                context.trace.record(BindingContext.EXPRESSION_TYPE, expression.getThisReference(), result);
            }
        }
    }
    return context.services.checkType(result, expression, context);
}
@Override
public JetType visitBlockExpression(JetBlockExpression expression, TypeInferenceContext context) {
<<<<<<< MINE
return getBlockReturnedType(context.scope, expression, context);
=======
return context.services.checkType(context.services.getBlockReturnedType(context.scope, expression, CoercionStrategy.NO_COERCION, context), expression, context);
>>>>>>> YOURS
}
@Override
public JetType visitWhenExpression(final JetWhenExpression expression, TypeInferenceContext contextWithExpectedType) {
    // The subject and conditions are checked without the outer expectation;
    // only the entry bodies see the expected type (via contextWithExpectedType).
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    // TODO :change scope according to the bound value in the when header
    final JetExpression subjectExpression = expression.getSubjectExpression();
    final JetType subjectType = subjectExpression != null ? context.services.safeGetType(context.scope, subjectExpression, NO_EXPECTED_TYPE) : ErrorUtils.createErrorType("Unknown type");
    final VariableDescriptor variableDescriptor = subjectExpression != null ? context.services.getVariableDescriptorFromSimpleName(subjectExpression, context) : null;
    // TODO : exhaustive patterns
    Set<JetType> expressionTypes = Sets.newHashSet();
    for (JetWhenEntry whenEntry : expression.getEntries()) {
        JetWhenCondition[] conditions = whenEntry.getConditions();
        DataFlowInfo newDataFlowInfo;
        WritableScope scopeToExtend;
        if (conditions.length == 1) {
            // A single condition may introduce bindings (e.g. 'is' patterns) into
            // a scope that extends into the entry body.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in when entry");
            newDataFlowInfo = context.dataFlowInfo;
            JetWhenCondition condition = conditions[0];
            if (condition != null) {
                newDataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, scopeToExtend, context, variableDescriptor);
            }
        }
        else {
            // Multiple comma-separated conditions: their data-flow facts are OR'ed,
            // and bindings are not propagated to the body.
            scopeToExtend = newWritableScopeImpl(context.scope, context.trace); // We don't write to this scope
            newDataFlowInfo = null;
            for (JetWhenCondition condition : conditions) {
                DataFlowInfo dataFlowInfo = checkWhenCondition(subjectExpression, subjectType, condition, newWritableScopeImpl(context.scope, context.trace), context, variableDescriptor);
                if (newDataFlowInfo == null) {
                    newDataFlowInfo = dataFlowInfo;
                }
                else {
                    newDataFlowInfo = newDataFlowInfo.or(dataFlowInfo);
                }
            }
            if (newDataFlowInfo == null) {
                newDataFlowInfo = context.dataFlowInfo;
            }
            else {
                newDataFlowInfo = newDataFlowInfo.and(context.dataFlowInfo);
            }
        }
        JetExpression bodyExpression = whenEntry.getExpression();
        if (bodyExpression != null) {
            JetType type = getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, bodyExpression, newDataFlowInfo, contextWithExpectedType);
            if (type != null) {
                expressionTypes.add(type);
            }
        }
    }
    // The 'when' type is the common supertype of all entry body types.
    if (!expressionTypes.isEmpty()) {
        return semanticServices.getTypeChecker().commonSupertype(expressionTypes);
    }
    else if (expression.getEntries().isEmpty()) {
        context.trace.getErrorHandler().genericError(expression.getNode(), "Entries required for when-expression"); // TODO : Scope, and maybe this should not an error
    }
    return null;
}
/**
 * Type-checks one 'when' condition against the subject, extending the given
 * scope with pattern bindings where applicable, and returns the data-flow
 * info that holds when the condition matches.
 */
private DataFlowInfo checkWhenCondition(@Nullable final JetExpression subjectExpression, final JetType subjectType, JetWhenCondition condition, final WritableScope scopeToExtend, final TypeInferenceContext context, final VariableDescriptor... subjectVariables) {
    // Single-element array: lets the anonymous visitor write the result.
    final DataFlowInfo[] newDataFlowInfo = new DataFlowInfo[]{context.dataFlowInfo};
    condition.accept(new JetVisitorVoid() {
        @Override
        public void visitWhenConditionCall(JetWhenConditionCall condition) {
            // Condition of the form '.foo(...)': the call is resolved against the
            // subject as receiver and must produce a Boolean.
            JetExpression callSuffixExpression = condition.getCallSuffixExpression();
            // JetScope compositeScope = new ScopeWithReceiver(context.scope, subjectType, semanticServices.getTypeChecker());
            if (callSuffixExpression != null) {
                // JetType selectorReturnType = getType(compositeScope, callSuffixExpression, false, context);
                JetType selectorReturnType = getSelectorReturnType(subjectType, callSuffixExpression, context);//getType(compositeScope, callSuffixExpression, false, context);
                ensureBooleanResultWithCustomSubject(callSuffixExpression, selectorReturnType, "This expression", context);
                context.services.checkNullSafety(subjectType, condition.getOperationTokenNode(), getCalleeFunctionDescriptor(callSuffixExpression, context));
            }
        }
        @Override
        public void visitWhenConditionInRange(JetWhenConditionInRange condition) {
            // Condition of the form 'in range' / '!in range'.
            JetExpression rangeExpression = condition.getRangeExpression();
            if (rangeExpression != null) {
                assert subjectExpression != null;
                checkInExpression(condition.getOperationReference(), subjectExpression, rangeExpression, context);
            }
        }
        @Override
        public void visitWhenConditionIsPattern(JetWhenConditionIsPattern condition) {
            // Condition of the form 'is Pattern': may refine data-flow info.
            JetPattern pattern = condition.getPattern();
            if (pattern != null) {
                newDataFlowInfo[0] = checkPatternType(pattern, subjectType, scopeToExtend, context, subjectVariables);
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer] : " + element);
        }
    });
    return newDataFlowInfo[0];
}
/**
 * Type-checks a pattern against the subject type, adding binding-pattern
 * variables to scopeToExtend, and returns the data-flow info implied by a match
 * (e.g. instance-of facts for type patterns).
 */
private DataFlowInfo checkPatternType(@NotNull JetPattern pattern, @NotNull final JetType subjectType, @NotNull final WritableScope scopeToExtend, final TypeInferenceContext context, @NotNull final VariableDescriptor... subjectVariables) {
    // Single-element array: lets the anonymous visitor write the result.
    final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
    pattern.accept(new JetVisitorVoid() {
        @Override
        public void visitTypePattern(JetTypePattern typePattern) {
            // 'is T': a match establishes that the subject is an instance of T.
            JetTypeReference typeReference = typePattern.getTypeReference();
            if (typeReference != null) {
                JetType type = context.typeResolver.resolveType(context.scope, typeReference);
                checkTypeCompatibility(type, subjectType, typePattern);
                result[0] = context.dataFlowInfo.isInstanceOf(subjectVariables, type);
            }
        }
        @Override
        public void visitTuplePattern(JetTuplePattern pattern) {
            // Tuple pattern: arity must match the subject's TupleN type; each entry
            // pattern is checked against the corresponding component type.
            List<JetTuplePatternEntry> entries = pattern.getEntries();
            TypeConstructor typeConstructor = subjectType.getConstructor();
            if (!JetStandardClasses.getTuple(entries.size()).getTypeConstructor().equals(typeConstructor)
                    || typeConstructor.getParameters().size() != entries.size()) {
                context.trace.getErrorHandler().genericError(pattern.getNode(), "Type mismatch: subject is of type " + subjectType + " but the pattern is of type Tuple" + entries.size()); // TODO : message
            }
            else {
                for (int i = 0, entriesSize = entries.size(); i < entriesSize; i++) {
                    JetTuplePatternEntry entry = entries.get(i);
                    JetType type = subjectType.getArguments().get(i).getType();
                    // TODO : is a name always allowed, ie for tuple patterns, not decomposer arg lists?
                    ASTNode nameLabelNode = entry.getNameLabelNode();
                    if (nameLabelNode != null) {
                        context.trace.getErrorHandler().genericError(nameLabelNode, "Unsupported [JetTypeInferrer]");
                    }
                    JetPattern entryPattern = entry.getPattern();
                    if (entryPattern != null) {
                        result[0] = result[0].and(checkPatternType(entryPattern, type, scopeToExtend, context));
                    }
                }
            }
        }
        @Override
        public void visitDecomposerPattern(JetDecomposerPattern pattern) {
            // Decomposer pattern: the decomposer expression is applied to the
            // subject and its result is matched by the argument list.
            JetExpression decomposerExpression = pattern.getDecomposerExpression();
            if (decomposerExpression != null) {
                JetType selectorReturnType = getSelectorReturnType(subjectType, decomposerExpression, context);
                result[0] = checkPatternType(pattern.getArgumentList(), selectorReturnType == null ? ErrorUtils.createErrorType("No type") : selectorReturnType, scopeToExtend, context);
            }
        }
        @Override
        public void visitWildcardPattern(JetWildcardPattern pattern) {
            // Wildcard matches anything; no checks, no new facts.
            // Nothing
        }
        @Override
        public void visitExpressionPattern(JetExpressionPattern pattern) {
            // Constant/expression pattern: its type must be compatible with the subject.
            JetExpression expression = pattern.getExpression();
            if (expression != null) {
                JetType type = getType(expression, context.replaceScope(scopeToExtend));
                checkTypeCompatibility(type, subjectType, pattern);
            }
        }
        @Override
        public void visitBindingPattern(JetBindingPattern pattern) {
            // Binding pattern: introduces a variable (typed by annotation or by the
            // subject) into scopeToExtend, optionally guarded by a nested condition.
            JetProperty variableDeclaration = pattern.getVariableDeclaration();
            JetTypeReference propertyTypeRef = variableDeclaration.getPropertyTypeRef();
            JetType type = propertyTypeRef == null ? subjectType : context.typeResolver.resolveType(context.scope, propertyTypeRef);
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptorWithType(context.scope.getContainingDeclaration(), variableDeclaration, type);
            scopeToExtend.addVariableDescriptor(variableDescriptor);
            if (propertyTypeRef != null) {
                // An annotated binding must widen, not narrow, the subject type.
                if (!semanticServices.getTypeChecker().isSubtypeOf(subjectType, type)) {
                    context.trace.getErrorHandler().genericError(propertyTypeRef.getNode(), type + " must be a supertype of " + subjectType + ". Use 'is' to match against " + type);
                }
            }
            JetWhenCondition condition = pattern.getCondition();
            if (condition != null) {
                // The guard condition sees the bound variable as an extra subject.
                int oldLength = subjectVariables.length;
                VariableDescriptor[] newSubjectVariables = new VariableDescriptor[oldLength + 1];
                System.arraycopy(subjectVariables, 0, newSubjectVariables, 0, oldLength);
                newSubjectVariables[oldLength] = variableDescriptor;
                result[0] = checkWhenCondition(null, subjectType, condition, scopeToExtend, context, newSubjectVariables);
            }
        }
        // Reports an error when the pattern type and subject type have no common values.
        private void checkTypeCompatibility(@Nullable JetType type, @NotNull JetType subjectType, @NotNull JetElement reportErrorOn) {
            // TODO : Take auto casts into account?
            if (type == null) {
                return;
            }
            if (TypeUtils.intersect(semanticServices.getTypeChecker(), Sets.newHashSet(type, subjectType)) == null) {
                context.trace.getErrorHandler().genericError(reportErrorOn.getNode(), "Incompatible types: " + type + " and " + subjectType + " " + ErrorHandler.atLocation(reportErrorOn));
            }
        }
        @Override
        public void visitJetElement(JetElement element) {
            context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [JetTypeInferrer]");
        }
    });
    return result[0];
}
@Override
public JetType visitTryExpression(JetTryExpression expression, TypeInferenceContext context) {
    // The try-expression's type is the common supertype of the try block and all
    // catch bodies — unless a finally block exists, in which case only the finally
    // and try types participate (catch bodies are still type-checked).
    JetExpression tryBlock = expression.getTryBlock();
    List<JetCatchClause> catchClauses = expression.getCatchClauses();
    JetFinallySection finallyBlock = expression.getFinallyBlock();
    List<JetType> types = new ArrayList<JetType>();
    for (JetCatchClause catchClause : catchClauses) {
        JetParameter catchParameter = catchClause.getCatchParameter();
        JetExpression catchBody = catchClause.getCatchBody();
        if (catchParameter != null) {
            VariableDescriptor variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, catchParameter);
            if (catchBody != null) {
                // The catch parameter is visible only inside its own catch body.
                WritableScope catchScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Catch scope");
                catchScope.addVariableDescriptor(variableDescriptor);
                JetType type = getType(catchBody, context.replaceScope(catchScope));
                if (type != null) {
                    types.add(type);
                }
            }
        }
    }
    if (finallyBlock != null) {
        types.clear(); // Do not need the list for the check, but need the code above to typecheck catch bodies
        JetType type = getType(finallyBlock.getFinalExpression(), context.replaceScope(context.scope));
        if (type != null) {
            types.add(type);
        }
    }
    JetType type = getType(tryBlock, context.replaceScope(context.scope));
    if (type != null) {
        types.add(type);
    }
    if (types.isEmpty()) {
        return null;
    }
    else {
        return semanticServices.getTypeChecker().commonSupertype(types);
    }
}
// Infers the type of an if-expression. One-armed ifs have type Unit; a
// two-armed if takes the common supertype of its branches. Dataflow info
// extracted from the condition is threaded into each branch, and when one
// branch has type Nothing (it always jumps), the other branch's dataflow
// info survives the expression via resultDataFlowInfo.
@Override
public JetType visitIfExpression(JetIfExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression elseBranch = expression.getElse();
JetExpression thenBranch = expression.getThen();
WritableScopeImpl thenScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Then scope");
// 'is'-checks in the condition may bind variables visible in the then-branch.
DataFlowInfo thenInfo = extractDataFlowInfoFromCondition(condition, true, thenScope, context);
DataFlowInfo elseInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
if (elseBranch == null) {
if (thenBranch != null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
// The then-branch never falls through, so only elseInfo holds afterwards.
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
return null;
}
if (thenBranch == null) {
JetType type = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, context);
if (type != null && JetStandardClasses.isNothing(type)) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
JetType thenType = getTypeWithNewScopeAndDataFlowInfo(thenScope, thenBranch, thenInfo, contextWithExpectedType);
JetType elseType = getTypeWithNewScopeAndDataFlowInfo(context.scope, elseBranch, elseInfo, contextWithExpectedType);
JetType result;
if (thenType == null) {
result = elseType;
}
else if (elseType == null) {
result = thenType;
}
else {
result = semanticServices.getTypeChecker().commonSupertype(Arrays.asList(thenType, elseType));
}
boolean jumpInThen = thenType != null && JetStandardClasses.isNothing(thenType);
boolean jumpInElse = elseType != null && JetStandardClasses.isNothing(elseType);
if (jumpInThen && !jumpInElse) {
resultDataFlowInfo = elseInfo;
// resultScope = elseScope;
}
else if (jumpInElse && !jumpInThen) {
resultDataFlowInfo = thenInfo;
// resultScope = thenScope;
}
return result;
}
// Derives the DataFlowInfo that holds when 'condition' evaluates to
// 'conditionValue'. Handles 'is' patterns (reusing info cached in
// patternsToDataFlowInfo), && / || with the appropriate AND/OR composition,
// null-comparisons (==, !=, ===, !==), negation (!), and parentheses.
// If scopeToExtend is non-null, variables bound by matching patterns are
// added to it. Falls back to the incoming context.dataFlowInfo.
@NotNull
private DataFlowInfo extractDataFlowInfoFromCondition(@Nullable JetExpression condition, final boolean conditionValue, @Nullable final WritableScope scopeToExtend, final TypeInferenceContext context) {
if (condition == null) return context.dataFlowInfo;
final DataFlowInfo[] result = new DataFlowInfo[] {context.dataFlowInfo};
condition.accept(new JetVisitorVoid() {
@Override
public void visitIsExpression(JetIsExpression expression) {
// Only a positively matching 'is' (or a negated one under a false
// condition) contributes its pattern's dataflow info and bindings.
if (conditionValue && !expression.isNegated() || !conditionValue && expression.isNegated()) {
JetPattern pattern = expression.getPattern();
result[0] = patternsToDataFlowInfo.get(pattern);
if (scopeToExtend != null) {
List<VariableDescriptor> descriptors = patternsToBoundVariableLists.get(pattern);
if (descriptors != null) {
for (VariableDescriptor variableDescriptor : descriptors) {
scopeToExtend.addVariableDescriptor(variableDescriptor);
}
}
}
}
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
IElementType operationToken = expression.getOperationToken();
if (operationToken == JetTokens.ANDAND || operationToken == JetTokens.OROR) {
// Bindings from the operands only survive when the whole expression
// being true (for &&) / false (for ||) forces both operands.
WritableScope actualScopeToExtend;
if (operationToken == JetTokens.ANDAND) {
actualScopeToExtend = conditionValue ? scopeToExtend : null;
}
else {
actualScopeToExtend = conditionValue ? null : scopeToExtend;
}
DataFlowInfo dataFlowInfo = extractDataFlowInfoFromCondition(expression.getLeft(), conditionValue, actualScopeToExtend, context);
JetExpression expressionRight = expression.getRight();
if (expressionRight != null) {
DataFlowInfo rightInfo = extractDataFlowInfoFromCondition(expressionRight, conditionValue, actualScopeToExtend, context);
DataFlowInfo.CompositionOperator operator;
if (operationToken == JetTokens.ANDAND) {
operator = conditionValue ? DataFlowInfo.AND : DataFlowInfo.OR;
}
else {
operator = conditionValue ? DataFlowInfo.OR : DataFlowInfo.AND;
}
dataFlowInfo = operator.compose(dataFlowInfo, rightInfo);
}
result[0] = dataFlowInfo;
}
else if (operationToken == JetTokens.EQEQ
|| operationToken == JetTokens.EXCLEQ
|| operationToken == JetTokens.EQEQEQ
|| operationToken == JetTokens.EXCLEQEQEQ) {
JetExpression left = expression.getLeft();
JetExpression right = expression.getRight();
if (right == null) return;
// Normalize so that 'left' is the simple-name side of the comparison.
if (!(left instanceof JetSimpleNameExpression)) {
JetExpression tmp = left;
left = right;
right = tmp;
if (!(left instanceof JetSimpleNameExpression)) {
return;
}
}
VariableDescriptor variableDescriptor = context.services.getVariableDescriptorFromSimpleName(left, context);
if (variableDescriptor == null) return;
// TODO : validate that DF makes sense for this variable: local, val, internal w/backing field, etc
// Comparison to a non-null expression
JetType rhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, right);
if (rhsType != null && !rhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, variableDescriptor, !conditionValue);
return;
}
VariableDescriptor rightVariable = context.services.getVariableDescriptorFromSimpleName(right, context);
if (rightVariable != null) {
JetType lhsType = context.trace.getBindingContext().get(BindingContext.EXPRESSION_TYPE, left);
if (lhsType != null && !lhsType.isNullable()) {
extendDataFlowWithNullComparison(operationToken, rightVariable, !conditionValue);
return;
}
}
// Comparison to 'null'
if (!(right instanceof JetConstantExpression)) {
return;
}
JetConstantExpression constantExpression = (JetConstantExpression) right;
if (constantExpression.getNode().getElementType() != JetNodeTypes.NULL) {
return;
}
extendDataFlowWithNullComparison(operationToken, variableDescriptor, conditionValue);
}
}
// Records nullability knowledge for 'variableDescriptor' given the comparison
// operator and whether the comparison asserts equality to null.
private void extendDataFlowWithNullComparison(IElementType operationToken, @NotNull VariableDescriptor variableDescriptor, boolean equalsToNull) {
if (operationToken == JetTokens.EQEQ || operationToken == JetTokens.EQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, !equalsToNull);
}
else if (operationToken == JetTokens.EXCLEQ || operationToken == JetTokens.EXCLEQEQEQ) {
result[0] = context.dataFlowInfo.equalsToNull(variableDescriptor, equalsToNull);
}
}
@Override
public void visitUnaryExpression(JetUnaryExpression expression) {
IElementType operationTokenType = expression.getOperationSign().getReferencedNameElementType();
if (operationTokenType == JetTokens.EXCL) {
// Negation flips the assumed condition value.
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression != null) {
result[0] = extractDataFlowInfoFromCondition(baseExpression, !conditionValue, scopeToExtend, context);
}
}
}
@Override
public void visitParenthesizedExpression(JetParenthesizedExpression expression) {
// Parentheses are transparent for dataflow extraction.
JetExpression body = expression.getExpression();
if (body != null) {
body.accept(this);
}
}
});
if (result[0] == null) {
return context.dataFlowInfo;
}
return result[0];
}
// Type-checks a condition expression in the given scope and reports an error
// unless its inferred type is Boolean. Null conditions and unresolvable
// condition types are silently accepted.
private void checkCondition(@NotNull JetScope scope, @Nullable JetExpression condition, TypeInferenceContext context) {
    if (condition == null) {
        return;
    }
    JetType conditionType = getType(condition, context.replaceScope(scope));
    if (conditionType == null || isBoolean(conditionType)) {
        return;
    }
    context.trace.getErrorHandler().genericError(condition.getNode(), "Condition must be of type Boolean, but was of type " + conditionType);
}
// A while-loop expression has type Unit. The condition's dataflow info (for
// the 'true' case) is made available to the body; if the loop has no 'break',
// the 'false'-case info holds after the loop (resultDataFlowInfo).
@Override
public JetType visitWhileExpression(JetWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression condition = expression.getCondition();
checkCondition(context.scope, condition, context);
JetExpression body = expression.getBody();
if (body != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in while's condition");
DataFlowInfo conditionInfo = condition == null ? context.dataFlowInfo : extractDataFlowInfoFromCondition(condition, true, scopeToExtend, context);
getTypeWithNewScopeAndDataFlowInfo(scopeToExtend, body, conditionInfo, context);
}
if (!flowInformationProvider.isBreakable(expression)) {
// Without 'break', exiting the loop implies the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// A do-while expression has type Unit. The body's declarations are kept in a
// writable scope that is then used to type-check the condition, since the
// condition can see variables declared in the body. A parameterless function
// literal body is treated as a plain block (recorded via BindingContext.BLOCK).
@Override
public JetType visitDoWhileExpression(JetDoWhileExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetExpression body = expression.getBody();
JetScope conditionScope = context.scope;
if (body instanceof JetFunctionLiteralExpression) {
JetFunctionLiteralExpression function = (JetFunctionLiteralExpression) body;
if (!function.getFunctionLiteral().hasParameterSpecification()) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, function.getFunctionLiteral().getBodyExpression().getStatements(), CoercionStrategy.NO_COERCION, context);
context.trace.record(BindingContext.BLOCK, function);
} else {
// A literal with declared parameters stays a function value.
getType(body, context.replaceScope(context.scope));
}
}
else if (body != null) {
WritableScope writableScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("do..while body scope");
conditionScope = writableScope;
context.services.getBlockReturnedTypeWithWritableScope(writableScope, Collections.singletonList(body), CoercionStrategy.NO_COERCION, context);
}
JetExpression condition = expression.getCondition();
checkCondition(conditionScope, condition, context);
if (!flowInformationProvider.isBreakable(expression)) {
// Without 'break', exiting the loop implies the condition is false.
// resultScope = newWritableScopeImpl();
resultDataFlowInfo = extractDataFlowInfoFromCondition(condition, false, null, context);
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
// Factory for writable child scopes: the new scope shares the parent's
// containing declaration and the trace's error handler.
protected WritableScopeImpl newWritableScopeImpl(JetScope scope, BindingTrace trace) {
    return new WritableScopeImpl(
            scope,
            scope.getContainingDeclaration(),
            trace.getErrorHandler());
}
// A for-loop expression has type Unit. The element type is derived from the
// range via the iteration convention (checkIterableConvention); an explicitly
// typed loop parameter must be a supertype of that element type. The loop
// parameter is visible to the body through a dedicated writable scope.
@Override
public JetType visitForExpression(JetForExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetParameter loopParameter = expression.getLoopParameter();
JetExpression loopRange = expression.getLoopRange();
JetType loopRangeType = null;
if (loopRange != null) {
loopRangeType = getType(loopRange, context.replaceScope(context.scope));
}
JetType expectedParameterType = null;
if (loopRangeType != null) {
expectedParameterType = checkIterableConvention(loopRangeType, loopRange, context);
}
WritableScope loopScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope with for-loop index");
if (loopParameter != null) {
JetTypeReference typeReference = loopParameter.getTypeReference();
VariableDescriptor variableDescriptor;
if (typeReference != null) {
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), context.scope, loopParameter);
JetType actualParameterType = variableDescriptor.getOutType();
if (expectedParameterType != null &&
actualParameterType != null &&
!semanticServices.getTypeChecker().isSubtypeOf(expectedParameterType, actualParameterType)) {
context.trace.getErrorHandler().genericError(typeReference.getNode(), "The loop iterates over values of type " + expectedParameterType + " but the parameter is declared to be " + actualParameterType);
}
}
else {
// No declared type: infer from the range, falling back to an error
// type so resolution can proceed.
if (expectedParameterType == null) {
expectedParameterType = ErrorUtils.createErrorType("Error");
}
variableDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(context.scope.getContainingDeclaration(), loopParameter, expectedParameterType);
}
loopScope.addVariableDescriptor(variableDescriptor);
}
JetExpression body = expression.getBody();
if (body != null) {
getType(body, context.replaceScope(loopScope));
}
return context.services.checkType(JetStandardClasses.getUnitType(), expression, contextWithExpectedType);
}
@Nullable
<<<<<<< MINE
private JetType checkIterableConvention(@NotNull JetType type, @NotNull JetExpression loopRange, TypeInferenceContext context) {
ASTNode reportErrorsOn = loopRange.getNode();
OverloadResolutionResult iteratorResolutionResult = context.services.resolveNoParametersFunction(type, context.scope, "iterator");
=======
private JetType checkIterableConvention(@NotNull JetType type, @NotNull ASTNode reportErrorsOn, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> iteratorResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, type, "iterator", Collections.<JetType>emptyList());
>>>>>>> YOURS
if (iteratorResolutionResult.isSuccess()) {
<<<<<<< MINE
FunctionDescriptor iteratorFunction = iteratorResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_ITERATOR, loopRange, iteratorFunction);
JetType iteratorType = iteratorFunction.getReturnType();
FunctionDescriptor hasNextFunction = checkHasNextFunctionSupport(loopRange, iteratorType, context);
boolean hasNextFunctionSupported = hasNextFunction != null;
VariableDescriptor hasNextProperty = checkHasNextPropertySupport(loopRange, iteratorType, context);
boolean hasNextPropertySupported = hasNextProperty != null;
=======
JetType iteratorType = iteratorResolutionResult.getDescriptor().getReturnType();
boolean hasNextFunctionSupported = checkHasNextFunctionSupport(reportErrorsOn, iteratorType, context);
boolean hasNextPropertySupported = checkHasNextPropertySupport(reportErrorsOn, iteratorType, context);
>>>>>>> YOURS
if (hasNextFunctionSupported && hasNextPropertySupported && !ErrorUtils.isErrorType(iteratorType)) {
// TODO : overload resolution rules impose priorities here???
context.trace.getErrorHandler().genericError(reportErrorsOn, "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext()' property");
}
else if (!hasNextFunctionSupported && !hasNextPropertySupported) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property");
}
else {
context.trace.record(LOOP_RANGE_HAS_NEXT, loopRange, hasNextFunctionSupported ? hasNextFunction : hasNextProperty);
}
OverloadResolutionResult<FunctionDescriptor> nextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "next", Collections.<JetType>emptyList());
if (nextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Method 'iterator().next()' is ambiguous for this expression");
} else if (nextResolutionResult.isNothing()) {
context.trace.getErrorHandler().genericError(reportErrorsOn, "Loop range must have an 'iterator().next()' method");
} else {
<<<<<<< MINE
FunctionDescriptor nextFunction = nextResolutionResult.getFunctionDescriptor();
context.trace.record(LOOP_RANGE_NEXT, loopRange, nextFunction);
return nextFunction.getReturnType();
=======
return nextResolutionResult.getDescriptor().getReturnType();
>>>>>>> YOURS
}
}
else {
String errorMessage = "For-loop range must have an iterator() method";
if (iteratorResolutionResult.isAmbiguity()) {
errorMessage = "Method 'iterator()' is ambiguous for this expression";
}
context.trace.getErrorHandler().genericError(reportErrorsOn, errorMessage);
}
return null;
}
<<<<<<< MINE
@Nullable
private FunctionDescriptor checkHasNextFunctionSupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult hasNextResolutionResult = context.services.resolveNoParametersFunction(iteratorType, context.scope, "hasNext");
=======
private boolean checkHasNextFunctionSupport(@NotNull ASTNode reportErrorsOn, @NotNull JetType iteratorType, TypeInferenceContext context) {
OverloadResolutionResult<FunctionDescriptor> hasNextResolutionResult = context.services.callResolver.resolveExactSignature(context.scope, iteratorType, "hasNext", Collections.<JetType>emptyList());
>>>>>>> YOURS
if (hasNextResolutionResult.isAmbiguity()) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "Method 'iterator().hasNext()' is ambiguous for this expression");
} else if (hasNextResolutionResult.isNothing()) {
return null;
} else {
<<<<<<< MINE
assert hasNextResolutionResult.isSuccess();
JetType hasNextReturnType = hasNextResolutionResult.getFunctionDescriptor().getReturnType();
=======
JetType hasNextReturnType = hasNextResolutionResult.getDescriptor().getReturnType();
>>>>>>> YOURS
if (!isBoolean(hasNextReturnType)) {
context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext()' method of the loop range must return Boolean, but returns " + hasNextReturnType);
}
}
return hasNextResolutionResult.getFunctionDescriptor();
}
// Looks up a 'hasNext' member property on the iterator type. Returns its
// descriptor (null when absent); reports an error if the property is
// unreadable (no out-type) or not of type Boolean.
@Nullable
private VariableDescriptor checkHasNextPropertySupport(@NotNull JetExpression loopRange, @NotNull JetType iteratorType, TypeInferenceContext context) {
    // TODO :extension properties
    VariableDescriptor property = iteratorType.getMemberScope().getVariable("hasNext");
    if (property == null) {
        return null;
    }
    JetType outType = property.getOutType();
    if (outType == null) {
        // TODO : accessibility
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must be readable");
    }
    else if (!isBoolean(outType)) {
        context.trace.getErrorHandler().genericError(loopRange.getNode(), "The 'iterator().hasNext' property of the loop range must return Boolean, but returns " + outType);
    }
    return property;
}
// Hash-qualified expressions ('#') are not supported by this inferrer;
// always reports an error and yields no type.
@Override
public JetType visitHashQualifiedExpression(JetHashQualifiedExpression expression, TypeInferenceContext context) {
context.trace.getErrorHandler().genericError(expression.getOperationTokenNode(), "Unsupported");
return null;
}
// Infers the type of a qualified expression (a.b, a?.b, a?b). Resolution is
// attempted first on a temporary trace without autocasts; if that fails and
// the receiver is a simple name with known dataflow types, each possible
// (autocast) type is tried in turn on its own temporary trace, committing
// only the successful attempt. Safe access (?.)  makes a non-Unit result
// nullable; the '?' operator requires a Boolean selector and yields the
// nullable receiver type.
@Override
public JetType visitQualifiedExpression(JetQualifiedExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
// TODO : functions as values
JetExpression selectorExpression = expression.getSelectorExpression();
JetExpression receiverExpression = expression.getReceiverExpression();
JetType receiverType = context.services.typeInferrerVisitorWithNamespaces.getType(receiverExpression, context.replaceExpectedTypes(NO_EXPECTED_TYPE, NO_EXPECTED_TYPE));
if (selectorExpression == null) return null;
if (receiverType == null) receiverType = ErrorUtils.createErrorType("Type for " + expression.getText());
// Clean resolution: no autocasts
TemporaryBindingTrace cleanResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext cleanResolutionContext = context.replaceBindingTrace(cleanResolutionTrace);
JetType selectorReturnType = getSelectorReturnType(receiverType, selectorExpression, cleanResolutionContext);
//TODO move further
if (expression.getOperationSign() == JetTokens.SAFE_ACCESS) {
if (selectorReturnType != null && !selectorReturnType.isNullable() && !JetStandardClasses.isUnit(selectorReturnType)) {
selectorReturnType = TypeUtils.makeNullable(selectorReturnType);
}
}
if (selectorReturnType != null) {
// Clean resolution succeeded: keep its bindings.
cleanResolutionTrace.addAllMyDataTo(context.trace);
}
else {
// Clean resolution failed: retry against each dataflow-known possible
// type of the receiver (autocasts), most specific last in the list.
VariableDescriptor variableDescriptor = cleanResolutionContext.services.getVariableDescriptorFromSimpleName(receiverExpression, context);
boolean somethingFound = false;
if (variableDescriptor != null) {
List<JetType> possibleTypes = Lists.newArrayList(context.dataFlowInfo.getPossibleTypes(variableDescriptor));
Collections.reverse(possibleTypes);
TemporaryBindingTrace autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
TypeInferenceContext autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
for (JetType possibleType : possibleTypes) {
selectorReturnType = getSelectorReturnType(possibleType, selectorExpression, autocastResolutionContext);
if (selectorReturnType != null) {
context.services.checkAutoCast(receiverExpression, possibleType, variableDescriptor, autocastResolutionTrace);
autocastResolutionTrace.commit();
somethingFound = true;
break;
}
else {
// Discard this attempt's bindings and start a fresh trace.
autocastResolutionTrace = TemporaryBindingTrace.create(context.trace);
autocastResolutionContext = context.replaceBindingTrace(autocastResolutionTrace);
}
}
}
if (!somethingFound) {
// Commit the clean trace so its diagnostics are reported.
cleanResolutionTrace.commit();
}
}
JetType result;
if (expression.getOperationSign() == JetTokens.QUEST) {
if (selectorReturnType != null && !isBoolean(selectorReturnType) && selectorExpression != null) {
// TODO : more comprehensible error message
context.trace.getErrorHandler().typeMismatch(selectorExpression, semanticServices.getStandardLibrary().getBooleanType(), selectorReturnType);
}
result = TypeUtils.makeNullable(receiverType);
}
else {
result = selectorReturnType;
}
if (selectorExpression != null && result != null) {
context.trace.record(BindingContext.EXPRESSION_TYPE, selectorExpression, result);
}
if (selectorReturnType != null) {
// TODO : extensions to 'Any?'
if (selectorExpression != null) {
receiverType = context.services.enrichOutType(receiverExpression, receiverType, context);
context.services.checkNullSafety(receiverType, expression.getOperationTokenNode(), getCalleeFunctionDescriptor(selectorExpression, context));
}
}
return context.services.checkType(result, expression, contextWithExpectedType);
}
// Extracts the function descriptor ultimately invoked by a selector
// expression, drilling through calls, array accesses, binary and qualified
// expressions to the underlying reference. Falls back to an error function
// descriptor when nothing resolvable is found (never returns null).
@NotNull
private FunctionDescriptor getCalleeFunctionDescriptor(@NotNull JetExpression selectorExpression, final TypeInferenceContext context) {
final FunctionDescriptor[] result = new FunctionDescriptor[1];
selectorExpression.accept(new JetVisitorVoid() {
@Override
public void visitCallExpression(JetCallExpression callExpression) {
// The callee of f(...) may itself be a complex expression; recurse.
JetExpression calleeExpression = callExpression.getCalleeExpression();
if (calleeExpression != null) {
calleeExpression.accept(this);
}
}
@Override
public void visitReferenceExpression(JetReferenceExpression referenceExpression) {
DeclarationDescriptor declarationDescriptor = context.trace.getBindingContext().get(REFERENCE_TARGET, referenceExpression);
if (declarationDescriptor instanceof FunctionDescriptor) {
result[0] = (FunctionDescriptor) declarationDescriptor;
}
}
@Override
public void visitArrayAccessExpression(JetArrayAccessExpression expression) {
expression.getArrayExpression().accept(this);
}
@Override
public void visitBinaryExpression(JetBinaryExpression expression) {
expression.getLeft().accept(this);
}
@Override
public void visitQualifiedExpression(JetQualifiedExpression expression) {
expression.getReceiverExpression().accept(this);
}
@Override
public void visitJetElement(JetElement element) {
context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported [getCalleeFunctionDescriptor]: " + element);
}
});
if (result[0] == null) {
// No resolvable callee: substitute a zero-arg error function.
result[0] = ErrorUtils.createErrorFunction(0, Collections.<JetType>emptyList());
}
return result[0];
}
// Resolves the selector side of a qualified expression against a receiver
// type and returns its type: calls go through the call resolver, simple names
// are tried as properties first and then as namespaces/class objects, and
// nested qualified selectors recurse. Unsupported selector kinds report an
// error and return null.
@Nullable
private JetType getSelectorReturnType(@Nullable JetType receiverType, @NotNull JetExpression selectorExpression, @NotNull TypeInferenceContext context) {
if (selectorExpression instanceof JetCallExpression) {
return context.services.callResolver.resolveCall(context.trace, context.scope, receiverType, (JetCallExpression) selectorExpression, context.expectedType);
}
else if (selectorExpression instanceof JetSimpleNameExpression) {
JetSimpleNameExpression nameExpression = (JetSimpleNameExpression) selectorExpression;
// Try a property first on a temporary trace; only commit on success.
TemporaryBindingTrace temporaryTrace = TemporaryBindingTrace.create(context.trace);
VariableDescriptor variableDescriptor = context.services.callResolver.resolveSimpleProperty(temporaryTrace, context.scope, receiverType, nameExpression, context.expectedType);
if (variableDescriptor != null) {
temporaryTrace.commit();
return context.services.checkEnrichedType(variableDescriptor.getOutType(), nameExpression, context);
}
// Not a property: look for a namespace or class object in the
// receiver's member scope (or the current scope when no receiver type).
TypeInferenceContext newContext = receiverType == null ? context : context.replaceScope(receiverType.getMemberScope());
JetType jetType = lookupNamespaceOrClassObject(nameExpression, nameExpression.getReferencedName(), newContext);
if (jetType == null) {
context.trace.getErrorHandler().unresolvedReference(nameExpression);
}
return context.services.checkEnrichedType(jetType, nameExpression, context);
// JetScope scope = receiverType != null ? receiverType.getMemberScope() : context.scope;
// return getType(selectorExpression, context.replaceScope(scope));
}
else if (selectorExpression instanceof JetQualifiedExpression) {
JetQualifiedExpression qualifiedExpression = (JetQualifiedExpression) selectorExpression;
JetType newReceiverType = getSelectorReturnType(receiverType, qualifiedExpression.getReceiverExpression(), context.replaceExpectedType(NO_EXPECTED_TYPE));
JetExpression newSelectorExpression = qualifiedExpression.getSelectorExpression();
if (newReceiverType != null && newSelectorExpression != null) {
return getSelectorReturnType(newReceiverType, newSelectorExpression, context);
}
}
else {
// TODO : not a simple name -> resolve in scope, expect property type or a function type
context.trace.getErrorHandler().genericError(selectorExpression.getNode(), "Unsupported selector element type: " + selectorExpression);
}
return null;
}
// Resolves a call with no explicit receiver and checks the resulting type
// against the expected type.
@Override
public JetType visitCallExpression(JetCallExpression expression, TypeInferenceContext context) {
JetType expressionType = context.services.callResolver.resolveCall(context.trace, context.scope, null, expression, context.expectedType);
return context.services.checkType(expressionType, expression, context);
}
// An 'is' expression has type Boolean. As a side effect, the dataflow info
// and variables bound by the pattern are cached (patternsToDataFlowInfo /
// patternsToBoundVariableLists) for later use by condition extraction.
@Override
public JetType visitIsExpression(JetIsExpression expression, TypeInferenceContext contextWithExpectedType) {
TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
JetType knownType = getType(expression.getLeftHandSide(), context.replaceScope(context.scope));
JetPattern pattern = expression.getPattern();
if (pattern != null && knownType != null) {
WritableScopeImpl scopeToExtend = newWritableScopeImpl(context.scope, context.trace).setDebugName("Scope extended in 'is'");
DataFlowInfo newDataFlowInfo = checkPatternType(pattern, knownType, scopeToExtend, context, context.services.getVariableDescriptorFromSimpleName(expression.getLeftHandSide(), context));
patternsToDataFlowInfo.put(pattern, newDataFlowInfo);
patternsToBoundVariableLists.put(pattern, scopeToExtend.getDeclaredVariables());
}
return context.services.checkType(semanticServices.getStandardLibrary().getBooleanType(), expression, contextWithExpectedType);
}
// Infers the type of a unary expression by resolving the operation to its
// convention function (unaryOperationNames). Labels pass through to the base
// expression. For ++/-- the convention function must return either Unit or a
// subtype of the receiver type; in the latter case a variable reassignment
// is recorded and the result type is the receiver type.
@Override
public JetType visitUnaryExpression(JetUnaryExpression expression, TypeInferenceContext context) {
JetExpression baseExpression = expression.getBaseExpression();
if (baseExpression == null) return null;
JetSimpleNameExpression operationSign = expression.getOperationSign();
if (JetTokens.LABELS.contains(operationSign.getReferencedNameElementType())) {
// TODO : Some processing for the label?
return context.services.checkType(getType(baseExpression, context.replaceExpectedReturnType(context.expectedType)), expression, context);
}
IElementType operationType = operationSign.getReferencedNameElementType();
String name = unaryOperationNames.get(operationType);
if (name == null) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown unary operation");
return null;
}
JetType receiverType = getType(baseExpression, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(context.scope));
if (receiverType == null) return null;
FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
context.trace,
context.scope,
CallMaker.makeCall(expression),
expression.getOperationSign(),
name,
receiverType,
context.expectedType);
if (functionDescriptor == null) return null;
JetType returnType = functionDescriptor.getReturnType();
JetType result;
if (operationType == JetTokens.PLUSPLUS || operationType == JetTokens.MINUSMINUS) {
if (semanticServices.getTypeChecker().isSubtypeOf(returnType, JetStandardClasses.getUnitType())) {
// Convention function returns Unit: the whole expression is Unit.
result = JetStandardClasses.getUnitType();
}
else {
if (!semanticServices.getTypeChecker().isSubtypeOf(returnType, receiverType)) {
context.trace.getErrorHandler().genericError(operationSign.getNode(), name + " must return " + receiverType + " but returns " + returnType);
}
else {
// x++ / x-- writes back into x.
context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
}
// TODO : Maybe returnType?
result = receiverType;
}
}
else {
result = returnType;
}
return context.services.checkType(result, expression, context);
}
@Override
/**
 * Infers the type of a binary expression by dispatching on the operation token:
 * infix identifier calls, convention operators, assignments, comparisons,
 * equality, identity, 'in', short-circuit boolean operators, and Elvis.
 * Most branches compute into {@code result}, which is checked against the
 * caller's expected type at the end; the Elvis branch may return early.
 */
public JetType visitBinaryExpression(JetBinaryExpression expression, TypeInferenceContext contextWithExpectedType) {
    // Operands are typed without the caller's expected type; it is reapplied at the end.
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetSimpleNameExpression operationSign = expression.getOperationReference();
    JetExpression left = expression.getLeft();
    JetExpression right = expression.getRight();
    JetType result = null;
    IElementType operationType = operationSign.getReferencedNameElementType();
    if (operationType == JetTokens.IDENTIFIER) {
        // Infix call with a plain identifier: `a foo b` resolves `foo` on the left operand.
        String referencedName = operationSign.getReferencedName();
        if (referencedName != null) {
            result = getTypeForBinaryCall(context.scope, referencedName, context, expression);
        }
    }
    else if (binaryOperationNames.containsKey(operationType)) {
        // Convention operator (+, -, *, ...): resolve its mapped function name.
        result = getTypeForBinaryCall(context.scope, binaryOperationNames.get(operationType), context, expression);
    }
    else if (operationType == JetTokens.EQ) {
        result = visitAssignment(expression, context);
    }
    else if (assignmentOperationNames.containsKey(operationType)) {
        result = visitAssignmentOperation(expression, context);
    }
    else if (comparisonOperations.contains(operationType)) {
        // <, >, <=, >= desugar to compareTo, which must return Int; the comparison is Boolean.
        JetType compareToReturnType = getTypeForBinaryCall(context.scope, "compareTo", context, expression);
        if (compareToReturnType != null) {
            TypeConstructor constructor = compareToReturnType.getConstructor();
            JetStandardLibrary standardLibrary = semanticServices.getStandardLibrary();
            TypeConstructor intTypeConstructor = standardLibrary.getInt().getTypeConstructor();
            if (constructor.equals(intTypeConstructor)) {
                result = standardLibrary.getBooleanType();
            } else {
                context.trace.getErrorHandler().genericError(operationSign.getNode(), "compareTo must return Int, but returns " + compareToReturnType);
            }
        }
    }
    else if (equalsOperations.contains(operationType)) {
        // ==/!= require an equals(Any?) member on the left operand's type.
        String name = "equals";
        if (right != null) {
            JetType leftType = getType(left, context.replaceScope(context.scope));
            if (leftType != null) {
                JetType rightType = getType(right, context.replaceScope(context.scope));
                if (rightType != null) {
                    OverloadResolutionResult<FunctionDescriptor> resolutionResult = context.services.callResolver.resolveExactSignature(
                            context.scope, leftType, "equals",
                            Collections.singletonList(JetStandardClasses.getNullableAnyType()));
                    if (resolutionResult.isSuccess()) {
                        FunctionDescriptor equals = resolutionResult.getDescriptor();
                        context.trace.record(REFERENCE_TARGET, operationSign, equals);
                        if (ensureBooleanResult(operationSign, name, equals.getReturnType(), context)) {
                            ensureNonemptyIntersectionOfOperandTypes(expression, context);
                        }
                    }
                    else {
                        if (resolutionResult.isAmbiguity()) {
                            StringBuilder stringBuilder = new StringBuilder();
                            for (FunctionDescriptor functionDescriptor : resolutionResult.getDescriptors()) {
                                stringBuilder.append(DescriptorRenderer.TEXT.render(functionDescriptor)).append(" ");
                            }
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "Ambiguous function: " + stringBuilder);
                        }
                        else {
                            context.trace.getErrorHandler().genericError(operationSign.getNode(), "No method 'equals(Any?) : Boolean' available");
                        }
                    }
                }
            }
        }
        // Equality is Boolean even when resolution failed, to limit error cascades.
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.EQEQEQ || operationType == JetTokens.EXCLEQEQEQ) {
        // Identity comparison: only warn when the operand types cannot intersect.
        ensureNonemptyIntersectionOfOperandTypes(expression, context);
        // TODO : Check comparison pointlessness
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (inOperations.contains(operationType)) {
        if (right == null) {
            // NOTE(review): this assignment is dead -- the method returns null on the next
            // line, bypassing the checkType at the bottom. Presumably the error type should
            // flow into `result` instead of returning; confirm intent before changing.
            result = ErrorUtils.createErrorType("No right argument"); // TODO
            return null;
        }
        checkInExpression(operationSign, left, right, context);
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ANDAND || operationType == JetTokens.OROR) {
        // Short-circuit operators: the right operand is typed under the data-flow info
        // implied by the left operand (e.g. smart casts after `x != null && ...`).
        JetType leftType = getType(left, context.replaceScope(context.scope));
        WritableScopeImpl leftScope = newWritableScopeImpl(context.scope, context.trace).setDebugName("Left scope of && or ||");
        DataFlowInfo flowInfoLeft = extractDataFlowInfoFromCondition(left, operationType == JetTokens.ANDAND, leftScope, context); // TODO: This gets computed twice: here and in extractDataFlowInfoFromCondition() for the whole condition
        WritableScopeImpl rightScope = operationType == JetTokens.ANDAND ? leftScope : newWritableScopeImpl(context.scope, context.trace).setDebugName("Right scope of && or ||");
        JetType rightType = right == null ? null : getType(right, context.replaceDataFlowInfo(flowInfoLeft).replaceScope(rightScope));
        if (leftType != null && !isBoolean(leftType)) {
            context.trace.getErrorHandler().typeMismatch(left, semanticServices.getStandardLibrary().getBooleanType(), leftType);
        }
        if (rightType != null && !isBoolean(rightType)) {
            context.trace.getErrorHandler().typeMismatch(right, semanticServices.getStandardLibrary().getBooleanType(), rightType);
        }
        result = semanticServices.getStandardLibrary().getBooleanType();
    }
    else if (operationType == JetTokens.ELVIS) {
        // Elvis: the right operand keeps the caller's expected type; the result is the
        // common supertype, nullable only if the right operand is nullable.
        JetType leftType = getType(left, context.replaceScope(context.scope));
        JetType rightType = right == null ? null : getType(right, contextWithExpectedType.replaceScope(context.scope));
        if (leftType != null) {
            if (!leftType.isNullable()) {
                // NOTE(review): diagnostic message has a grammar error ("is always returns");
                // left unchanged here because it is runtime-visible text.
                context.trace.getErrorHandler().genericWarning(left.getNode(), "Elvis operator (?:) is always returns the left operand of non-nullable type " + leftType);
            }
            if (rightType != null) {
                context.services.checkType(TypeUtils.makeNullableAsSpecified(leftType, rightType.isNullable()), left, contextWithExpectedType);
                return TypeUtils.makeNullableAsSpecified(semanticServices.getTypeChecker().commonSupertype(leftType, rightType), rightType.isNullable());
            }
        }
    }
    else {
        context.trace.getErrorHandler().genericError(operationSign.getNode(), "Unknown operation");
    }
    return context.services.checkType(result, expression, contextWithExpectedType);
}
/**
 * Type-checks an 'in'/'!in' expression: resolves a {@code contains} function on
 * the RIGHT operand (the container), passing the left operand as the single
 * argument, and verifies the resolved function returns Boolean.
 */
private void checkInExpression(JetSimpleNameExpression operationSign, JetExpression left, JetExpression right, TypeInferenceContext context) {
    String name = "contains";
    // safeGetType: the receiver (container) type; assumed non-null downstream.
    JetType receiverType = context.services.safeGetType(context.scope, right, NO_EXPECTED_TYPE);
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            context.scope,
            CallMaker.makeCall(operationSign, Collections.singletonList(left)),
            operationSign,
            name, receiverType, context.expectedType);
    JetType containsType = functionDescriptor != null ? functionDescriptor.getReturnType() : null;
    ensureBooleanResult(operationSign, name, containsType, context);
}
/**
 * Reports an error when the static types of the two operands of a comparison
 * have an empty intersection, i.e. the expression can never hold for any value.
 * Silently does nothing when either operand's type cannot be determined.
 */
private void ensureNonemptyIntersectionOfOperandTypes(JetBinaryExpression expression, TypeInferenceContext context) {
    // TODO : duplicated effort for == and !=
    JetType leftType = getType(expression.getLeft(), context.replaceScope(context.scope));
    JetExpression right = expression.getRight();
    if (leftType == null || right == null) {
        return;
    }
    JetType rightType = getType(right, context.replaceScope(context.scope));
    if (rightType == null) {
        return;
    }
    HashSet<JetType> operandTypes = new HashSet<JetType>(Arrays.asList(leftType, rightType));
    JetType intersection = TypeUtils.intersect(semanticServices.getTypeChecker(), operandTypes);
    if (intersection == null) {
        JetSimpleNameExpression operationSign = expression.getOperationReference();
        context.trace.getErrorHandler().genericError(expression.getNode(), "Operator " + operationSign.getReferencedName() + " cannot be applied to " + leftType + " and " + rightType);
    }
}
/**
 * Handles augmented assignments (+=, -=, ...). In this base visitor any
 * assignment is an error; the writable-scope subclass overrides this.
 */
protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * Handles plain assignment (=). In this base visitor any assignment is an
 * error; the writable-scope subclass overrides this.
 */
protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
    return assignmentIsNotAnExpressionError(expression, context);
}
/**
 * Reports that an assignment appeared in an expression position and returns
 * null (no type).
 */
private JetType assignmentIsNotAnExpressionError(JetBinaryExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "Assignments are not expressions, and only expressions are allowed in this context");
    return null;
}
/**
 * Verifies that {@code resultType} is Boolean, reporting an error that names
 * the offending function in quotes. Returns false only when the type is known
 * and is not Boolean.
 */
private boolean ensureBooleanResult(JetExpression operationSign, String name, JetType resultType, TypeInferenceContext context) {
    String quotedSubject = "'" + name + "'";
    return ensureBooleanResultWithCustomSubject(operationSign, resultType, quotedSubject, context);
}
/**
 * Verifies that {@code resultType} is Boolean. An unknown (null) type is
 * tolerated and counts as success; only a known non-Boolean type produces an
 * error naming {@code subjectName}.
 */
private boolean ensureBooleanResultWithCustomSubject(JetExpression operationSign, JetType resultType, String subjectName, TypeInferenceContext context) {
    // TODO : Relax?
    if (resultType == null || isBoolean(resultType)) {
        return true;
    }
    context.trace.getErrorHandler().genericError(operationSign.getNode(), subjectName + " must return Boolean but returns " + resultType);
    return false;
}
/**
 * Returns true when {@code type} is convertible to the standard Boolean type.
 */
private boolean isBoolean(@NotNull JetType type) {
    JetType booleanType = semanticServices.getStandardLibrary().getBooleanType();
    return semanticServices.getTypeChecker().isConvertibleTo(type, booleanType);
}
@Override
/**
 * Infers the type of an array-access expression {@code a[i, ...]} in a read
 * position by resolving a {@code get} function on the array expression's type
 * with the index expressions as arguments. Returns null on any failure.
 */
public JetType visitArrayAccessExpression(JetArrayAccessExpression expression, TypeInferenceContext contextWithExpectedType) {
    TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    JetExpression arrayExpression = expression.getArrayExpression();
    JetType receiverType = getType(arrayExpression, context.replaceScope(context.scope));
    if (receiverType != null) {
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                context.scope,
                CallMaker.makeCall(expression, expression.getIndexExpressions()),
                expression,
                "get",
                receiverType,
                context.expectedType);
        if (functionDescriptor != null) {
            return context.services.checkType(functionDescriptor.getReturnType(), expression, contextWithExpectedType);
        }
    }
    return null;
}
@Nullable
/**
 * Resolves a binary expression as a convention call {@code left.name(right)}:
 * types the left operand (the receiver) and resolves {@code name} on it with
 * the right operand as argument. Returns the resolved function's return type,
 * or null when the call cannot be resolved.
 */
protected JetType getTypeForBinaryCall(JetScope scope, String name, TypeInferenceContext context, JetBinaryExpression binaryExpression) {
    JetType leftType = getType(binaryExpression.getLeft(), context.replaceScope(scope));
    FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
            context.trace,
            scope,
            CallMaker.makeCall(binaryExpression),
            binaryExpression.getOperationReference(),
            name,
            leftType,
            context.expectedType);
    if (functionDescriptor != null) {
        // if (leftType.isNullable()) {
        // // TODO : better error message for '1 + nullableVar' case
        // context.trace.getErrorHandler().genericError(operationSign.getNode(),
        // "Infix call corresponds to a dot-qualified call '" +
        // left.getText() + "." + name + "(" + right.getText() + ")'" +
        // " which is not allowed on a nullable receiver '" + right.getText() + "'." +
        // " Use '?.'-qualified call instead");
        // }
        return functionDescriptor.getReturnType();
    }
    return null;
}
@Override
/**
 * Declarations are illegal in a pure expression position; report and yield no type.
 */
public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(dcl.getNode(), "Declarations are not allowed in this position");
    return null;
}
@Override
/**
 * A bare 'namespace' reference is not a value in this visitor; the
 * namespace-aware subclass overrides this to give it a type.
 */
public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(expression.getNode(), "'namespace' is not an expression");
    return null;
}
@Override
/**
 * Infers the type of a string template. Always yields the standard String
 * type; additionally, when every entry is a literal or a valid escape (no
 * embedded expressions, no bad escapes) the concatenated value is recorded as
 * a compile-time String constant. {@code value[0]} is used as a single-slot
 * flag: OUT_OF_RANGE marks the template as non-constant.
 */
public JetType visitStringTemplateExpression(JetStringTemplateExpression expression, TypeInferenceContext contextWithExpectedType) {
    final TypeInferenceContext context = contextWithExpectedType.replaceExpectedType(NO_EXPECTED_TYPE);
    final StringBuilder builder = new StringBuilder();
    final CompileTimeConstant<?>[] value = new CompileTimeConstant<?>[1];
    for (JetStringTemplateEntry entry : expression.getEntries()) {
        entry.accept(new JetVisitorVoid() {
            @Override
            public void visitStringTemplateEntryWithExpression(JetStringTemplateEntryWithExpression entry) {
                // An interpolated ${...}: still type-check it, but the template
                // can no longer be a compile-time constant.
                JetExpression entryExpression = entry.getExpression();
                if (entryExpression != null) {
                    getType(entryExpression, context.replaceScope(context.scope));
                }
                value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
            }

            @Override
            public void visitLiteralStringTemplateEntry(JetLiteralStringTemplateEntry entry) {
                builder.append(entry.getText());
            }

            @Override
            public void visitEscapeStringTemplateEntry(JetEscapeStringTemplateEntry entry) {
                // TODO : Check escape
                // Escape entries are two characters: a backslash and the escaped char.
                String text = entry.getText();
                assert text.length() == 2 && text.charAt(0) == '\\';
                char escaped = text.charAt(1);
                Character character = CompileTimeConstantResolver.translateEscape(escaped);
                if (character == null) {
                    context.trace.getErrorHandler().genericError(entry.getNode(), "Illegal escape sequence");
                    value[0] = CompileTimeConstantResolver.OUT_OF_RANGE;
                }
                else {
                    builder.append(character);
                }
            }
        });
    }
    // Fully constant template: record the concatenated value for constant folding.
    if (value[0] != CompileTimeConstantResolver.OUT_OF_RANGE) {
        context.trace.record(BindingContext.COMPILE_TIME_VALUE, expression, new StringValue(builder.toString()));
    }
    return context.services.checkType(semanticServices.getStandardLibrary().getStringType(), expression, contextWithExpectedType);
}
@Override
/**
 * Catch-all for PSI elements this inferrer does not handle: report with the
 * element's class name for diagnosability and yield no type.
 */
public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
    context.trace.getErrorHandler().genericError(element.getNode(), "[JetTypeInferrer] Unsupported element: " + element + " " + element.getClass().getCanonicalName());
    return null;
}
}
/**
 * A type-inferrer visitor that additionally allows namespace references to
 * appear as "expressions" (e.g. on the left of a qualified name), giving them
 * namespace types instead of reporting errors.
 */
private class TypeInferrerVisitorWithNamespaces extends TypeInferrerVisitor {
    @Override
    public boolean isNamespacePosition() {
        return true;
    }

    @Override
    public JetType visitRootNamespaceExpression(JetRootNamespaceExpression expression, TypeInferenceContext context) {
        // Unlike the base visitor, a root-namespace reference has a type here.
        return context.services.checkType(JetModuleUtil.getRootNamespaceType(expression), expression, context);
    }

    @Override
    protected boolean furtherNameLookup(@NotNull JetSimpleNameExpression expression, @NotNull String referencedName, @NotNull JetType[] result, TypeInferenceContext context) {
        // Fallback lookup: try to resolve the name as a namespace; result[0] is the
        // out-parameter, and the return value signals whether the lookup succeeded.
        result[0] = lookupNamespaceType(expression, referencedName, context);
        return result[0] != null;
    }
}
/**
 * A type-inferrer visitor for statement/block positions: local declarations
 * (properties, functions, objects) are legal here and are added to the given
 * writable scope as they are encountered, and assignments are handled as
 * statements rather than reported as errors.
 */
private class TypeInferrerVisitorWithWritableScope extends TypeInferrerVisitor {
    // The scope that receives descriptors for local declarations.
    private final WritableScope scope;

    public TypeInferrerVisitorWithWritableScope(@NotNull WritableScope scope) {
        this.scope = scope;
    }

    @Override
    public JetType visitObjectDeclaration(JetObjectDeclaration declaration, TypeInferenceContext context) {
        // Run the top-down analyzer over the local object, then expose it as a
        // value (property) in the enclosing scope. Declarations yield no type.
        TopDownAnalyzer topDownAnalyzer = new TopDownAnalyzer(semanticServices, context.trace);
        topDownAnalyzer.processObject(scope, scope.getContainingDeclaration(), declaration);
        ClassDescriptor classDescriptor = context.trace.getBindingContext().get(BindingContext.CLASS, declaration);
        if (classDescriptor != null) {
            PropertyDescriptor propertyDescriptor = context.classDescriptorResolver.resolveObjectDeclarationAsPropertyDescriptor(scope.getContainingDeclaration(), declaration, classDescriptor);
            scope.addVariableDescriptor(propertyDescriptor);
        }
        return null;
    }

    @Override
    public JetType visitProperty(JetProperty property, TypeInferenceContext context) {
        // Local variables may not have receivers or accessors.
        JetTypeReference receiverTypeRef = property.getReceiverTypeRef();
        if (receiverTypeRef != null) {
            context.trace.getErrorHandler().genericError(receiverTypeRef.getNode(), "Local receiver-properties are not allowed");
        }
        JetPropertyAccessor getter = property.getGetter();
        if (getter != null) {
            context.trace.getErrorHandler().genericError(getter.getNode(), "Local variables are not allowed to have getters");
        }
        JetPropertyAccessor setter = property.getSetter();
        if (setter != null) {
            context.trace.getErrorHandler().genericError(setter.getNode(), "Local variables are not allowed to have setters");
        }
        VariableDescriptor propertyDescriptor = context.classDescriptorResolver.resolveLocalVariableDescriptor(scope.getContainingDeclaration(), scope, property);
        JetExpression initializer = property.getInitializer();
        // NOTE(review): the initializer is only type-checked here when an explicit
        // type annotation is present; presumably the no-annotation case is typed
        // inside resolveLocalVariableDescriptor -- confirm before relying on it.
        if (property.getPropertyTypeRef() != null && initializer != null) {
            JetType outType = propertyDescriptor.getOutType();
            JetType initializerType = getType(initializer, context.replaceExpectedType(outType).replaceScope(scope));
            // if (outType != null &&
            // initializerType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(initializerType, outType)) {
            // context.trace.getErrorHandler().typeMismatch(initializer, outType, initializerType);
            // }
        }
        scope.addVariableDescriptor(propertyDescriptor);
        return null;
    }

    @Override
    public JetType visitNamedFunction(JetNamedFunction function, TypeInferenceContext context) {
        // Local function: resolve its descriptor, add it to the scope, and check its body.
        FunctionDescriptorImpl functionDescriptor = context.classDescriptorResolver.resolveFunctionDescriptor(scope.getContainingDeclaration(), scope, function);
        scope.addFunctionDescriptor(functionDescriptor);
        context.services.checkFunctionReturnType(context.scope, function, functionDescriptor, context.dataFlowInfo);
        return null;
    }

    @Override
    public JetType visitClass(JetClass klass, TypeInferenceContext context) {
        return super.visitClass(klass, context); // TODO
    }

    @Override
    public JetType visitTypedef(JetTypedef typedef, TypeInferenceContext context) {
        return super.visitTypedef(typedef, context); // TODO
    }

    @Override
    public JetType visitDeclaration(JetDeclaration dcl, TypeInferenceContext context) {
        return visitJetElement(dcl, context);
    }

    @Override
    protected JetType visitAssignmentOperation(JetBinaryExpression expression, TypeInferenceContext context) {
        // Augmented assignment (a += b): first try the dedicated operator function
        // (e.g. plusAssign) on a throwaway trace; if that fails, fall back to the
        // plain binary counterpart (plus) and record a reassignment of `a`.
        IElementType operationType = expression.getOperationReference().getReferencedNameElementType();
        String name = assignmentOperationNames.get(operationType);
        TemporaryBindingTrace temporaryBindingTrace = TemporaryBindingTrace.create(context.trace);
        JetType assignmentOperationType = getTypeForBinaryCall(scope, name, context.replaceBindingTrace(temporaryBindingTrace), expression);
        if (assignmentOperationType == null) {
            String counterpartName = binaryOperationNames.get(assignmentOperationCounterparts.get(operationType));
            JetType typeForBinaryCall = getTypeForBinaryCall(scope, counterpartName, context, expression);
            if (typeForBinaryCall != null) {
                context.trace.record(BindingContext.VARIABLE_REASSIGNMENT, expression);
            }
        }
        else {
            // The operator-function resolution succeeded: keep its bindings.
            temporaryBindingTrace.commit();
        }
        // Assignments are statements: they produce no type.
        return null;
    }

    @Override
    protected JetType visitAssignment(JetBinaryExpression expression, TypeInferenceContext context) {
        JetExpression left = expression.getLeft();
        JetExpression deparenthesized = JetPsiUtil.deparenthesize(left);
        JetExpression right = expression.getRight();
        // `a[i] = x` is sugar for a `set` call, handled separately.
        if (deparenthesized instanceof JetArrayAccessExpression) {
            JetArrayAccessExpression arrayAccessExpression = (JetArrayAccessExpression) deparenthesized;
            return resolveArrayAccessToLValue(arrayAccessExpression, right, expression.getOperationReference(), context);
        }
        JetType leftType = getType(left, context.replaceExpectedType(NO_EXPECTED_TYPE).replaceScope(scope));
        if (right != null) {
            // The RHS is checked against the LHS type via the expected-type mechanism.
            JetType rightType = getType(right, context.replaceExpectedType(leftType).replaceScope(scope));
            // if (rightType != null &&
            // leftType != null &&
            // !semanticServices.getTypeChecker().isConvertibleTo(rightType, leftType)) {
            // context.trace.getErrorHandler().typeMismatch(right, leftType, rightType);
            // }
        }
        return null;
    }

    /**
     * Resolves {@code a[indices] = rhs} as a call to a {@code set} function on
     * the array expression's type, recording the resolved function as the
     * target of the assignment's operation sign.
     */
    private JetType resolveArrayAccessToLValue(JetArrayAccessExpression arrayAccessExpression, JetExpression rightHandSide, JetSimpleNameExpression operationSign, TypeInferenceContext context) {
        JetType receiverType = getType(arrayAccessExpression.getArrayExpression(), context.replaceScope(scope));
        if (receiverType == null) return null;
        //
        Call call = CallMaker.makeCall(arrayAccessExpression, rightHandSide);
        // // TODO : nasty hack: effort is duplicated
        // context.services.callResolver.resolveCallWithGivenName(
        // scope,
        // call,
        // arrayAccessExpression,
        // "set", arrayAccessExpression.getArrayExpression(), NO_EXPECTED_TYPE);
        FunctionDescriptor functionDescriptor = context.services.callResolver.resolveCallWithGivenName(
                context.trace,
                scope,
                call,
                arrayAccessExpression,
                "set", receiverType, NO_EXPECTED_TYPE);
        if (functionDescriptor == null) return null;
        context.trace.record(REFERENCE_TARGET, operationSign, functionDescriptor);
        return context.services.checkType(functionDescriptor.getReturnType(), arrayAccessExpression, context);
    }

    @Override
    public JetType visitJetElement(JetElement element, TypeInferenceContext context) {
        context.trace.getErrorHandler().genericError(element.getNode(), "Unsupported element in a block: " + element + " " + element.getClass().getCanonicalName());
        return null;
    }
}
}
Diff Result
No diff
Case 27 - java_lucenesolr.rev_2ede7_249fd..ExactPhraseScorer.java
Base
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.*;
/**
 * Scorer for phrases whose terms must occur at exactly the configured relative
 * positions. {@code phraseFreq()} counts, for the current document, how many
 * positions are shared by ALL terms (after offset adjustment); the linked list
 * of PhrasePositions is kept ordered by position via the inherited pq.
 */
final class ExactPhraseScorer extends PhraseScorer {

    ExactPhraseScorer(Weight weight, DocsAndPositionsEnum[] postings, int[] offsets,
                      Similarity similarity, byte[] norms) {
        super(weight, postings, offsets, similarity, norms);
    }

    @Override
    protected final float phraseFreq() throws IOException {
        // sort list with pq
        pq.clear();
        for (PhrasePositions pp = first; pp != null; pp = pp.next) {
            pp.firstPosition();
            pq.add(pp); // build pq from list
        }
        pqToList(); // rebuild list from pq
        // for counting how many times the exact phrase is found in current document,
        // just count how many times all PhrasePosition's have exactly the same position.
        int freq = 0;
        do { // find position w/ all terms
            while (first.position < last.position) { // scan forward in first
                do {
                    // Exhausted the first term's positions: no further matches possible.
                    if (!first.nextPosition())
                        return freq;
                } while (first.position < last.position);
                // first caught up with last; rotate it to the tail and re-scan.
                firstToLast();
            }
            freq++; // all equal: a match
        } while (last.nextPosition());
        return freq;
    }
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.*;
/**
 * Scorer for phrases whose terms must occur at exactly the configured relative
 * positions. {@code phraseFreq()} counts, for the current document, how many
 * positions are shared by ALL terms (after offset adjustment); the linked list
 * of PhrasePositions is kept ordered by position via the inherited pq.
 */
final class ExactPhraseScorer extends PhraseScorer {

    ExactPhraseScorer(Weight weight, DocsAndPositionsEnum[] postings, int[] offsets,
                      Similarity similarity, byte[] norms) {
        super(weight, postings, offsets, similarity, norms);
    }

    @Override
    protected final float phraseFreq() throws IOException {
        // sort list with pq
        pq.clear();
        for (PhrasePositions pp = first; pp != null; pp = pp.next) {
            pp.firstPosition();
            pq.add(pp); // build pq from list
        }
        pqToList(); // rebuild list from pq
        // for counting how many times the exact phrase is found in current document,
        // just count how many times all PhrasePosition's have exactly the same position.
        int freq = 0;
        do { // find position w/ all terms
            while (first.position < last.position) { // scan forward in first
                do {
                    // Exhausted the first term's positions: no further matches possible.
                    if (!first.nextPosition())
                        return freq;
                } while (first.position < last.position);
                // first caught up with last; rotate it to the tail and re-scan.
                firstToLast();
            }
            freq++; // all equal: a match
        } while (last.nextPosition());
        return freq;
    }
}
Left
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
/**
 * Rewritten exact-phrase scorer: instead of a priority queue over term
 * positions, it processes each document's positions in fixed-size chunks and
 * counts, per offset-adjusted position, how many phrase terms occurred there.
 * A position where all terms were seen is a phrase match. Terms are assumed
 * ordered rarest-first (term 0 drives document iteration).
 */
final class ExactPhraseScorer extends Scorer {
    private final byte[] norms;
    private final float value;

    // Precomputed tf(freq)*weight for small frequencies to avoid per-doc tf() calls.
    private static final int SCORE_CACHE_SIZE = 32;
    private final float[] scoreCache = new float[SCORE_CACHE_SIZE];

    private final int endMinus1;

    // Positions are processed CHUNK at a time so counts/gens stay small and cache-friendly.
    private final static int CHUNK = 4096;

    // Generation stamp: gens[i] == gen means counts[i] is valid for the current chunk,
    // so the arrays never need clearing between chunks (except on wraparound).
    private int gen;
    private final int[] counts = new int[CHUNK];
    private final int[] gens = new int[CHUNK];

    // Set when some term has no documents at all; scorer then matches nothing.
    boolean noDocs;

    /** Per-term iteration state: positions enum, phrase-offset, and chunk cursors. */
    private final static class ChunkState {
        final DocsAndPositionsEnum posEnum;
        // Negated phrase position, so pos values of different terms align when they
        // form a phrase occurrence.
        final int offset;
        final boolean useAdvance;
        int posUpto;
        int posLimit;
        int pos;
        int lastPos;

        public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
            this.posEnum = posEnum;
            this.offset = offset;
            this.useAdvance = useAdvance;
        }
    }

    private final ChunkState[] chunkStates;

    private int docID = -1;
    private int freq;

    ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
                      Similarity similarity, byte[] norms) throws IOException {
        super(similarity, weight);
        this.norms = norms;
        this.value = weight.getValue();

        chunkStates = new ChunkState[postings.length];

        endMinus1 = postings.length-1;

        for(int i=0;i<postings.length;i++) {

            // Coarse optimization: advance(target) is fairly
            // costly, so, if the relative freq of the 2nd
            // rarest term is not that much (> 1/5th) rarer than
            // the first term, then we just use .nextDoc() when
            // ANDing. This buys ~15% gain for phrases where
            // freq of rarest 2 terms is close:
            final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
            chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
            // Prime every non-driving enum with its first doc; an empty enum means
            // the phrase can never match.
            if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
                noDocs = true;
                return;
            }
        }

        for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
            scoreCache[i] = getSimilarity().tf((float) i) * value;
        }
    }

    @Override
    public int nextDoc() throws IOException {
        while(true) {

            // first (rarest) term
            final int doc = chunkStates[0].posEnum.nextDoc();
            if (doc == DocsEnum.NO_MORE_DOCS) {
                docID = doc;
                return doc;
            }

            // not-first terms: AND them onto the candidate doc.
            int i = 1;
            while(i < chunkStates.length) {
                final ChunkState cs = chunkStates[i];
                int doc2 = cs.posEnum.docID();
                if (cs.useAdvance) {
                    if (doc2 < doc) {
                        doc2 = cs.posEnum.advance(doc);
                    }
                } else {
                    int iter = 0;
                    while(doc2 < doc) {
                        // safety net -- fallback to .advance if we've
                        // done too many .nextDocs
                        if (++iter == 50) {
                            doc2 = cs.posEnum.advance(doc);
                            break;
                        } else {
                            doc2 = cs.posEnum.nextDoc();
                        }
                    }
                }
                if (doc2 > doc) {
                    // This term skipped past the candidate: restart with a new candidate.
                    break;
                }
                i++;
            }

            if (i == chunkStates.length) {
                // this doc has all the terms -- now test whether
                // phrase occurs
                docID = doc;

                freq = phraseFreq();
                if (freq != 0) {
                    return docID;
                }
            }
        }
    }

    @Override
    public int advance(int target) throws IOException {

        // first term
        int doc = chunkStates[0].posEnum.advance(target);
        if (doc == DocsEnum.NO_MORE_DOCS) {
            docID = DocsEnum.NO_MORE_DOCS;
            return doc;
        }

        while(true) {

            // not-first terms
            int i = 1;
            while(i < chunkStates.length) {
                int doc2 = chunkStates[i].posEnum.docID();
                if (doc2 < doc) {
                    doc2 = chunkStates[i].posEnum.advance(doc);
                }
                if (doc2 > doc) {
                    break;
                }
                i++;
            }

            if (i == chunkStates.length) {
                // this doc has all the terms -- now test whether
                // phrase occurs
                docID = doc;
                freq = phraseFreq();
                if (freq != 0) {
                    return docID;
                }
            }

            doc = chunkStates[0].posEnum.nextDoc();
            if (doc == DocsEnum.NO_MORE_DOCS) {
                docID = doc;
                return doc;
            }
        }
    }

    @Override
    public String toString() {
        return "ExactPhraseScorer(" + weight + ")";
    }

    @Override
    public float freq() {
        return freq;
    }

    @Override
    public int docID() {
        return docID;
    }

    @Override
    public float score() throws IOException {
        final float raw; // raw score
        if (freq < SCORE_CACHE_SIZE) {
            raw = scoreCache[freq];
        } else {
            raw = getSimilarity().tf((float) freq) * value;
        }
        return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
    }

    /**
     * Counts phrase occurrences in the current document. For each CHUNK-sized
     * window of (offset-adjusted) positions: term 0 seeds counts[pos]=1, each
     * middle term t increments counts[pos] only when it already equals t, and
     * the last term increments freq when counts[pos] has reached endMinus1 --
     * i.e. every earlier term occurred at the same adjusted position.
     */
    private int phraseFreq() throws IOException {

        freq = 0;

        // init chunks
        for(int i=0;i<chunkStates.length;i++) {
            final ChunkState cs = chunkStates[i];
            cs.posLimit = cs.posEnum.freq();
            cs.pos = cs.offset + cs.posEnum.nextPosition();
            cs.posUpto = 1;
            // lastPos dedupes: negative offsets can make several raw positions map
            // to the same adjusted position; only the first is counted.
            cs.lastPos = -1;
        }

        int chunkStart = 0;
        int chunkEnd = CHUNK;

        // process chunk by chunk
        boolean end = false;

        // TODO: we could fold in chunkStart into offset and
        // save one subtract per pos incr

        while(!end) {

            gen++;

            if (gen == 0) {
                // wraparound
                Arrays.fill(gens, 0);
                gen++;
            }

            // first term
            {
                final ChunkState cs = chunkStates[0];
                while(cs.pos < chunkEnd) {
                    if (cs.pos > cs.lastPos) {
                        cs.lastPos = cs.pos;
                        final int posIndex = cs.pos - chunkStart;
                        counts[posIndex] = 1;
                        assert gens[posIndex] != gen;
                        gens[posIndex] = gen;
                    }

                    if (cs.posUpto == cs.posLimit) {
                        end = true;
                        break;
                    }
                    cs.posUpto++;
                    cs.pos = cs.offset + cs.posEnum.nextPosition();
                }
            }

            // middle terms
            boolean any = true;
            for(int t=1;t<endMinus1;t++) {
                final ChunkState cs = chunkStates[t];
                any = false;
                while(cs.pos < chunkEnd) {
                    if (cs.pos > cs.lastPos) {
                        cs.lastPos = cs.pos;
                        final int posIndex = cs.pos - chunkStart;
                        if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
                            // viable
                            counts[posIndex]++;
                            any = true;
                        }
                    }

                    if (cs.posUpto == cs.posLimit) {
                        end = true;
                        break;
                    }
                    cs.posUpto++;
                    cs.pos = cs.offset + cs.posEnum.nextPosition();
                }

                if (!any) {
                    break;
                }
            }

            if (!any) {
                // petered out for this chunk
                chunkStart += CHUNK;
                chunkEnd += CHUNK;
                continue;
            }

            // last term
            {
                final ChunkState cs = chunkStates[endMinus1];
                while(cs.pos < chunkEnd) {
                    if (cs.pos > cs.lastPos) {
                        cs.lastPos = cs.pos;
                        final int posIndex = cs.pos - chunkStart;
                        if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
                            freq++;
                        }
                    }

                    if (cs.posUpto == cs.posLimit) {
                        end = true;
                        break;
                    }
                    cs.posUpto++;
                    cs.pos = cs.offset + cs.posEnum.nextPosition();
                }
            }

            chunkStart += CHUNK;
            chunkEnd += CHUNK;
        }

        return freq;
    }
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity, weight);
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
@Override
public float freq() {
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
Right
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
// used by MultiPhraseQuery
float currentFreq() {
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
// used by MultiPhraseQuery
float currentFreq() {
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
MergeMethods
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
<<<<<<< MINE
super(similarity, weight);
=======
super(similarity);
this.weight = weight;
>>>>>>> YOURS
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
return freq;
}
=======
float currentFreq() {
return freq;
}
>>>>>>> YOURS
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
<<<<<<< MINE
super(similarity, weight);
=======
super(similarity);
this.weight = weight;
>>>>>>> YOURS
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
return freq;
}
=======
float currentFreq() {
return freq;
}
>>>>>>> YOURS
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
KeepBothMethods
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
/**
 * Builds per-term chunk state and pre-seeds the tf score cache.
 * Conflict resolved to the super(similarity) + explicit assignment form:
 * this class declares its own final 'weight' field, which would remain
 * unassigned (a compile error) under the super(similarity, weight) branch.
 */
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
// Pre-compute tf(i)*value for small freqs so score() avoids recomputation.
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
@Override
public float freq() {
return freq;
}
// used by MultiPhraseQuery
float currentFreq() {
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
/**
 * Builds per-term chunk state and pre-seeds the tf score cache.
 * Conflict resolved to the super(similarity) + explicit assignment form:
 * this class declares its own final 'weight' field, which would remain
 * unassigned (a compile error) under the super(similarity, weight) branch.
 */
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
// Pre-compute tf(i)*value for small freqs so score() avoids recomputation.
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
@Override
public float freq() {
return freq;
}
// used by MultiPhraseQuery
float currentFreq() {
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
Safe
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
/**
 * Builds per-term chunk state and pre-seeds the tf score cache.
 * Conflict resolved to the super(similarity) + explicit assignment form:
 * this class declares its own final 'weight' field, which would remain
 * unassigned (a compile error) under the super(similarity, weight) branch.
 */
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
// Pre-compute tf(i)*value for small freqs so score() avoids recomputation.
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
return freq;
}
=======
float currentFreq() {
return freq;
}
>>>>>>> YOURS
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
private final Weight weight;
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
/**
 * Builds per-term chunk state and pre-seeds the tf score cache.
 * Conflict resolved to the super(similarity) + explicit assignment form:
 * this class declares its own final 'weight' field, which would remain
 * unassigned (a compile error) under the super(similarity, weight) branch.
 */
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
// Pre-compute tf(i)*value for small freqs so score() avoids recomputation.
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
return freq;
}
=======
float currentFreq() {
return freq;
}
>>>>>>> YOURS
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
Unstructured
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
<<<<<<< MINE
=======
private final Weight weight;
>>>>>>> YOURS
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
/**
 * Builds per-term chunk state and pre-seeds the tf score cache.
 * Conflict resolved to the super(similarity) + explicit assignment form,
 * matching the kept 'weight' field in this copy of the class; the
 * super(similarity, weight) branch assumes the base class stores it.
 */
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
super(similarity);
this.weight = weight;
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
// Pre-compute tf(i)*value for small freqs so score() avoids recomputation.
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
=======
// used by MultiPhraseQuery
float currentFreq() {
>>>>>>> YOURS
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.BooleanClause.Occur;
final class ExactPhraseScorer extends Scorer {
<<<<<<< MINE
=======
private final Weight weight;
>>>>>>> YOURS
private final byte[] norms;
private final float value;
private static final int SCORE_CACHE_SIZE = 32;
private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
private final int endMinus1;
private final static int CHUNK = 4096;
private int gen;
private final int[] counts = new int[CHUNK];
private final int[] gens = new int[CHUNK];
boolean noDocs;
private final static class ChunkState {
final DocsAndPositionsEnum posEnum;
final int offset;
final boolean useAdvance;
int posUpto;
int posLimit;
int pos;
int lastPos;
public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
this.posEnum = posEnum;
this.offset = offset;
this.useAdvance = useAdvance;
}
}
private final ChunkState[] chunkStates;
private int docID = -1;
private int freq;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
Similarity similarity, byte[] norms) throws IOException {
<<<<<<< MINE
super(similarity, weight);
=======
super(similarity);
this.weight = weight;
>>>>>>> YOURS
this.norms = norms;
this.value = weight.getValue();
chunkStates = new ChunkState[postings.length];
endMinus1 = postings.length-1;
for(int i=0;i<postings.length;i++) {
// Coarse optimization: advance(target) is fairly
// costly, so, if the relative freq of the 2nd
// rarest term is not that much (> 1/5th) rarer than
// the first term, then we just use .nextDoc() when
// ANDing. This buys ~15% gain for phrases where
// freq of rarest 2 terms is close:
final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
noDocs = true;
return;
}
}
for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
scoreCache[i] = getSimilarity().tf((float) i) * value;
}
}
@Override
public int nextDoc() throws IOException {
while(true) {
// first (rarest) term
final int doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
// not-first terms
int i = 1;
while(i < chunkStates.length) {
final ChunkState cs = chunkStates[i];
int doc2 = cs.posEnum.docID();
if (cs.useAdvance) {
if (doc2 < doc) {
doc2 = cs.posEnum.advance(doc);
}
} else {
int iter = 0;
while(doc2 < doc) {
// safety net -- fallback to .advance if we've
// done too many .nextDocs
if (++iter == 50) {
doc2 = cs.posEnum.advance(doc);
break;
} else {
doc2 = cs.posEnum.nextDoc();
}
}
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
}
}
@Override
public int advance(int target) throws IOException {
// first term
int doc = chunkStates[0].posEnum.advance(target);
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = DocsEnum.NO_MORE_DOCS;
return doc;
}
while(true) {
// not-first terms
int i = 1;
while(i < chunkStates.length) {
int doc2 = chunkStates[i].posEnum.docID();
if (doc2 < doc) {
doc2 = chunkStates[i].posEnum.advance(doc);
}
if (doc2 > doc) {
break;
}
i++;
}
if (i == chunkStates.length) {
// this doc has all the terms -- now test whether
// phrase occurs
docID = doc;
freq = phraseFreq();
if (freq != 0) {
return docID;
}
}
doc = chunkStates[0].posEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) {
docID = doc;
return doc;
}
}
}
@Override
public String toString() {
return "ExactPhraseScorer(" + weight + ")";
}
<<<<<<< MINE
@Override
public float freq() {
=======
// used by MultiPhraseQuery
float currentFreq() {
>>>>>>> YOURS
return freq;
}
@Override
public int docID() {
return docID;
}
@Override
public float score() throws IOException {
final float raw; // raw score
if (freq < SCORE_CACHE_SIZE) {
raw = scoreCache[freq];
} else {
raw = getSimilarity().tf((float) freq) * value;
}
return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[docID]); // normalize
}
private int phraseFreq() throws IOException {
freq = 0;
// init chunks
for(int i=0;i<chunkStates.length;i++) {
final ChunkState cs = chunkStates[i];
cs.posLimit = cs.posEnum.freq();
cs.pos = cs.offset + cs.posEnum.nextPosition();
cs.posUpto = 1;
cs.lastPos = -1;
}
int chunkStart = 0;
int chunkEnd = CHUNK;
// process chunk by chunk
boolean end = false;
// TODO: we could fold in chunkStart into offset and
// save one subtract per pos incr
while(!end) {
gen++;
if (gen == 0) {
// wraparound
Arrays.fill(gens, 0);
gen++;
}
// first term
{
final ChunkState cs = chunkStates[0];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
counts[posIndex] = 1;
assert gens[posIndex] != gen;
gens[posIndex] = gen;
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
// middle terms
boolean any = true;
for(int t=1;t<endMinus1;t++) {
final ChunkState cs = chunkStates[t];
any = false;
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == t) {
// viable
counts[posIndex]++;
any = true;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
if (!any) {
break;
}
}
if (!any) {
// petered out for this chunk
chunkStart += CHUNK;
chunkEnd += CHUNK;
continue;
}
// last term
{
final ChunkState cs = chunkStates[endMinus1];
while(cs.pos < chunkEnd) {
if (cs.pos > cs.lastPos) {
cs.lastPos = cs.pos;
final int posIndex = cs.pos - chunkStart;
if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
freq++;
}
}
if (cs.posUpto == cs.posLimit) {
end = true;
break;
}
cs.posUpto++;
cs.pos = cs.offset + cs.posEnum.nextPosition();
}
}
chunkStart += CHUNK;
chunkEnd += CHUNK;
}
return freq;
}
}
Diff Result
No diff
Case 28 - java_lucenesolr.rev_2ede7_249fd..TermInfosReader.java
Base
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final int termOrd;
public TermInfoAndOrd(TermInfo ti, int termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
private Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
return this.term.equals(other);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
final void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
final long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private final int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
int delta = term.compareTo(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private final void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd);
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term) throws IOException {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareTo(enumerator.prev())> 0)
|| term.compareTo(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareTo(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private final boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
final long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareTo(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareTo(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final int termOrd;
public TermInfoAndOrd(TermInfo ti, int termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
private Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
return this.term.equals(other);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
final void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
final long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private final int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
int delta = term.compareTo(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private final void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd);
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term) throws IOException {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareTo(enumerator.prev())> 0)
|| term.compareTo(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareTo(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private final boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
final long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareTo(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareTo(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
Left
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position in the
* set.
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
Right
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position in the
* set.
* @deprecated This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final int termOrd;
public TermInfoAndOrd(TermInfo ti, int termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
private Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
return this.term.equals(other);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd);
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term) throws IOException {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position in the
* set.
* @deprecated This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final int termOrd;
public TermInfoAndOrd(TermInfo ti, int termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
private Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
return this.term.equals(other);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
/**
 * Opens the terms dictionary of a segment and (optionally) loads its
 * in-memory terms index.
 *
 * @param dir            directory holding the segment files
 * @param seg            segment name
 * @param fis            field infos used to decode terms
 * @param readBufferSize buffer size for the underlying IndexInputs
 * @param indexDivisor   load only every indexDivisor'th index entry, or -1
 *                       to skip loading the terms index entirely
 * @throws IllegalArgumentException if indexDivisor is 0 or negative and not -1
 */
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
// Open the main terms dictionary (.tis); this enum is cloned per thread.
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
// Keep every indexDivisor'th entry; the inner loop skips the rest.
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
/** Returns the skip interval recorded in the terms dictionary header. */
public int getSkipInterval() {
return origEnum.skipInterval;
}
/** Returns the maximum number of skip-list levels recorded in the dictionary header. */
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
/**
 * Releases the dictionary enum and all per-thread enum clones.
 * Safe to call when construction failed part-way (origEnum may be null).
 */
void close() throws IOException {
  // Robustness fix: previously an exception from origEnum.close() skipped
  // threadResources.close(), leaking every per-thread enum clone. The
  // finally block guarantees both resources are released.
  try {
    if (origEnum != null) {
      origEnum.close();
    }
  } finally {
    threadResources.close();
  }
}
/** Returns the number of term/value pairs in the set (from the dictionary header). */
long size() {
return size;
}
/**
 * Returns this thread's resources, lazily cloning the original enum on the
 * first call from a given thread.
 */
private ThreadResources getThreadResources() {
  final ThreadResources cached = threadResources.get();
  if (cached != null) {
    return cached;
  }
  // First use on this thread: clone the dictionary enum and remember it.
  final ThreadResources fresh = new ThreadResources();
  fresh.termEnum = terms();
  threadResources.set(fresh);
  return fresh;
}
/**
 * Returns the offset of the greatest index entry which is less than or equal
 * to term, or -1 when term sorts before every indexed term.
 */
private int getIndexOffset(Term term) {
  // Binary search over the in-memory index terms.
  int low = 0;
  int high = indexTerms.length - 1;
  while (low <= high) {
    final int mid = (low + high) >>> 1;
    assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
    final int cmp = term.compareToUTF16(indexTerms[mid]);
    if (cmp == 0) {
      return mid;
    } else if (cmp < 0) {
      high = mid - 1;
    } else {
      low = mid + 1;
    }
  }
  // Not found: high now points at the greatest smaller entry (-1 if none).
  return high;
}
/**
 * Positions enumerator at index entry indexOffset using the pre-loaded index
 * arrays. The ord passed to seek() is one less than the indexed term's ord so
 * the following next()/scanTo lands exactly on the indexed term.
 */
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. May be satisfied from the cache without seeking. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/**
 * Returns the TermInfo for a Term in the set, or null. When mustSeekEnum is
 * true the thread-local enum is always positioned at the term, even on a
 * cache hit.
 */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
  if (size == 0) {
    return null;
  }
  ensureIndexIsRead();
  final TermInfoAndOrd cachedEntry = termsCache.get(new CloneableTerm(term));
  final ThreadResources resources = getThreadResources();
  // A cache hit is enough unless the caller also needs the enum positioned.
  return (cachedEntry != null && !mustSeekEnum)
      ? cachedEntry
      : seekEnum(resources.termEnum, term, cachedEntry);
}
/**
 * Seeks enumerator to term, consulting the shared terms cache for a starting
 * ord. Returns the term's TermInfo, or null if the term is not present.
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term) throws IOException {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)));
}
/**
 * Seeks enumerator to term and returns its TermInfo, or null if the term is
 * not in the dictionary. Tries a forward scan from the enum's current
 * position first (fast path for in-order access); otherwise seeks via the
 * cached ord (tiOrd) or a binary search of the terms index.
 *
 * @param tiOrd cached entry for term, or null; used both to skip the binary
 *              search and (in asserts) to cross-check the scan result
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
// Index block the enum currently sits in; the target must lie before the
// next indexed term for the no-seek scan to be valid.
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
// Scanned past the insertion point without a match: term is absent.
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// Called only from asserts: structural equality of two TermInfos.
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
  // skipOffset is only meaningful once docFreq >= skipInterval, so it is
  // compared conditionally.
  final boolean skipOffsetOk =
      ti1.docFreq < enumerator.skipInterval || ti1.skipOffset == ti2.skipOffset;
  return ti1.docFreq == ti2.docFreq
      && ti1.freqPointer == ti2.freqPointer
      && ti1.proxPointer == ti2.proxPointer
      && skipOffsetOk;
}
/**
 * Guards methods that need the in-memory terms index; throws if this reader
 * was created with indexDivisor == -1 (index not loaded).
 */
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1 if it is not present. */
long getPosition(Term term) throws IOException {
  if (size == 0) {
    return -1;
  }
  ensureIndexIsRead();
  int indexOffset = getIndexOffset(term);
  SegmentTermEnum enumerator = getThreadResources().termEnum;
  seekEnum(enumerator, indexOffset);
  // Scan forward until we reach or pass the requested term.
  while (term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {
  }
  // BUG FIX: when term sorts after every term in the dictionary the loop
  // exhausts the enum and term() becomes null, so the unconditional compare
  // below used to throw NullPointerException. Guard against null first.
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    return enumerator.position;
  }
  return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set, as a fresh clone of the original enum. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
// Position the thread-local enum at (or after) term, then hand the caller
// an independent clone so later get()/seek calls don't disturb it.
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
MergeMethods
package org.apache.lucene.index.codecs.preflex;
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private static final int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private static final class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor) throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION), readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION), readBufferSize), fieldInfos, true);
try {
// otherwise read index
int indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++) if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
// binary search indexTerms[]
int lo = 0;
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset], ((long) indexOffset * totalIndexInterval) - 1, indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0)
return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
/**
 * Caches the term the enumerator is currently positioned on.
 * NOTE(review): reads enumerator.termInfo (the enum's internal, reused
 * instance) rather than termInfo(); this relies on TermInfoAndOrd's
 * super(ti) copy-constructor taking a snapshot — confirm TermInfo's copy
 * constructor copies every field.
 */
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()), new TermInfoAndOrd(enumerator.termInfo, enumerator.position));
}
/**
 * Seeks enumerator to term. When useCache is true the shared terms cache
 * supplies (and may later receive) the term's cached entry; when false the
 * cache lookup is bypassed entirely.
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
/**
 * Seeks enumerator to term and returns its TermInfo, or null if the term is
 * not in the dictionary. Tries a forward scan from the enum's current
 * position first (fast path for in-order access); otherwise seeks via the
 * cached ord (tiOrd) or a binary search of the terms index.
 *
 * @param tiOrd    cached entry for term, or null
 * @param useCache whether a successful lookup may be written to the cache
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
  if (size == 0) {
    return null;
  }
  // optimize sequential access: first try scanning cached enum w/o seeking
  if (enumerator.term() != null // term is at or past current
      && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0)
          || term.compareToUTF16(enumerator.term()) >= 0)) {
    int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
    if (indexTerms.length == enumOffset // but before end of block
        || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
      // no need to seek
      final TermInfo ti;
      int numScans = enumerator.scanTo(term);
      if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
        ti = enumerator.termInfo();
        if (numScans > 1) {
          // Only cache when the scan skipped more than one entry, so ordered
          // iteration (range/wildcard queries) does not wipe out the cache.
          if (tiOrd == null) {
            // BUG FIX: the useCache parameter was declared but ignored here,
            // and the long ord was truncated with an (int) cast even though
            // termOrd is a long. Honor the flag and store the full ord.
            if (useCache) {
              termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
            }
          } else {
            assert sameTermInfo(ti, tiOrd, enumerator);
            assert (int) enumerator.position == tiOrd.termOrd;
          }
        }
      } else {
        ti = null;
      }
      return ti;
    }
  }
  // random-access: must seek
  final int indexPos;
  if (tiOrd != null) {
    // BUG FIX: termOrd is a long, so this assignment did not compile without
    // an explicit narrowing cast ("possible lossy conversion from long to
    // int"). The quotient always fits in an int because index offsets do.
    indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
  } else {
    // Must do binary search:
    indexPos = getIndexOffset(term);
  }
  seekEnum(enumerator, indexPos);
  enumerator.scanTo(term);
  final TermInfo ti;
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    ti = enumerator.termInfo();
    if (tiOrd == null) {
      if (useCache) {
        termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
      }
    } else {
      assert sameTermInfo(ti, tiOrd, enumerator);
      assert (int) enumerator.position == tiOrd.termOrd;
    }
  } else {
    ti = null;
  }
  return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval && ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1 if it is not present. */
long getPosition(Term term) throws IOException {
  if (size == 0) {
    return -1;
  }
  ensureIndexIsRead();
  int indexOffset = getIndexOffset(term);
  SegmentTermEnum enumerator = getThreadResources().termEnum;
  seekEnum(enumerator, indexOffset);
  // Scan forward until we reach or pass the requested term.
  while (term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {
  }
  // BUG FIX: when term sorts after every term in the dictionary the loop
  // exhausts the enum and term() becomes null, so the unconditional compare
  // below used to throw NullPointerException. Guard against null first.
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    return enumerator.position;
  }
  return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum) origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum) getThreadResources().termEnum.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private static final int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private static final class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor) throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION), readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION), readBufferSize), fieldInfos, true);
try {
// otherwise read index
int indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++) if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
// binary search indexTerms[]
int lo = 0;
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset], ((long) indexOffset * totalIndexInterval) - 1, indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0)
return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()), new TermInfoAndOrd(enumerator.termInfo, enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
/**
 * Seeks enumerator to term and returns its TermInfo, or null if the term is
 * not in the dictionary. Tries a forward scan from the enum's current
 * position first (fast path for in-order access); otherwise seeks via the
 * cached ord (tiOrd) or a binary search of the terms index.
 *
 * @param tiOrd    cached entry for term, or null
 * @param useCache whether a successful lookup may be written to the cache
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
  if (size == 0) {
    return null;
  }
  // optimize sequential access: first try scanning cached enum w/o seeking
  if (enumerator.term() != null // term is at or past current
      && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0)
          || term.compareToUTF16(enumerator.term()) >= 0)) {
    int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
    if (indexTerms.length == enumOffset // but before end of block
        || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
      // no need to seek
      final TermInfo ti;
      int numScans = enumerator.scanTo(term);
      if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
        ti = enumerator.termInfo();
        if (numScans > 1) {
          // Only cache when the scan skipped more than one entry, so ordered
          // iteration (range/wildcard queries) does not wipe out the cache.
          if (tiOrd == null) {
            // BUG FIX: the useCache parameter was declared but ignored here,
            // and the long ord was truncated with an (int) cast even though
            // termOrd is a long. Honor the flag and store the full ord.
            if (useCache) {
              termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
            }
          } else {
            assert sameTermInfo(ti, tiOrd, enumerator);
            assert (int) enumerator.position == tiOrd.termOrd;
          }
        }
      } else {
        ti = null;
      }
      return ti;
    }
  }
  // random-access: must seek
  final int indexPos;
  if (tiOrd != null) {
    // BUG FIX: termOrd is a long, so this assignment did not compile without
    // an explicit narrowing cast ("possible lossy conversion from long to
    // int"). The quotient always fits in an int because index offsets do.
    indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
  } else {
    // Must do binary search:
    indexPos = getIndexOffset(term);
  }
  seekEnum(enumerator, indexPos);
  enumerator.scanTo(term);
  final TermInfo ti;
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    ti = enumerator.termInfo();
    if (tiOrd == null) {
      if (useCache) {
        termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
      }
    } else {
      assert sameTermInfo(ti, tiOrd, enumerator);
      assert (int) enumerator.position == tiOrd.termOrd;
    }
  } else {
    ti = null;
  }
  return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval && ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0)
return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while (term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {
}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum) origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum) getThreadResources().termEnum.clone();
}
}
KeepBothMethods
package org.apache.lucene.index.codecs.preflex;
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private static final int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private static final class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor) throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION), readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION), readBufferSize), fieldInfos, true);
try {
// otherwise read index
int indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++) if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
// binary search indexTerms[]
int lo = 0;
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset], ((long) indexOffset * totalIndexInterval) - 1, indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0)
return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()), new TermInfoAndOrd(enumerator.termInfo, enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
/**
 * Positions {@code enumerator} on {@code term} (or the first term past it)
 * and returns the term's TermInfo, or null if the term is not present.
 * Tries a cheap forward scan within the current index block before falling
 * back to an index seek.
 *
 * @param enumerator per-thread enum to (re)position
 * @param term       target term
 * @param tiOrd      previously cached info/ordinal for {@code term}, or null
 * @param useCache   whether to record a found term in the terms cache
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (// term is at or past current
enumerator.term() != null && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0) || term.compareToUTF16(enumerator.term()) >= 0)) {
// Index entry immediately following the enum's current position.
int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
if (// but before end of block
indexTerms.length == enumOffset || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek: linear scan inside the current index block
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// Only cache after skipping more than one entry, so ordered iteration
// over many terms (range/wildcard queries) does not wipe out the cache
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
// Cached ordinal gives the index block directly, skipping binary search.
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
// Enum stopped past the target: term does not exist in this segment.
ti = null;
}
return ti;
}
/**
 * Positions {@code enumerator} on {@code term} and returns its TermInfo, or
 * null if the term is absent. Behaves like the four-argument overload with
 * caching enabled: results found after a multi-entry scan are recorded in
 * the terms cache.
 *
 * @param enumerator per-thread enum to (re)position
 * @param term       target term
 * @param tiOrd      previously cached info/ordinal for {@code term}, or null
 * @return the term's TermInfo, or null when the set is empty or term absent
 * @throws IOException on underlying directory read failure
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
  if (size == 0) {
    return null;
  }
  // optimize sequential access: first try scanning cached enum w/o seeking
  if (enumerator.term() != null // term is at or past current
      && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0)
          || term.compareToUTF16(enumerator.term()) >= 0)) {
    int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
    if (indexTerms.length == enumOffset // but before end of block
        || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
      // no need to seek: linear scan inside the current index block
      final TermInfo ti;
      int numScans = enumerator.scanTo(term);
      if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
        ti = enumerator.termInfo();
        if (numScans > 1) {
          // Only cache after skipping more than one entry, so ordered
          // iteration over many terms does not wipe out the cache.
          if (tiOrd == null) {
            termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
          } else {
            assert sameTermInfo(ti, tiOrd, enumerator);
            assert (int) enumerator.position == tiOrd.termOrd;
          }
        }
      } else {
        ti = null;
      }
      return ti;
    }
  }
  // random-access: must seek
  final int indexPos;
  if (tiOrd != null) {
    // FIX: termOrd is a long, so the quotient is a long; the original
    // assigned it straight to an int, which is a compile error (possible
    // lossy conversion). Narrow explicitly, matching the 4-arg overload.
    indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
  } else {
    // Must do binary search:
    indexPos = getIndexOffset(term);
  }
  seekEnum(enumerator, indexPos);
  enumerator.scanTo(term);
  final TermInfo ti;
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    ti = enumerator.termInfo();
    if (tiOrd == null) {
      termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
    } else {
      assert sameTermInfo(ti, tiOrd, enumerator);
      assert (int) enumerator.position == tiOrd.termOrd;
    }
  } else {
    ti = null;
  }
  return ti;
}
// Consistency check between a freshly read TermInfo and a cached one;
// called only from asserts.
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
    // skipOffset is only valid when docFreq >= skipInterval, so only
    // compare it in that case.
    final boolean skipOffsetRelevant = ti1.docFreq >= enumerator.skipInterval;
    return ti1.docFreq == ti2.docFreq
        && ti1.freqPointer == ti2.freqPointer
        && ti1.proxPointer == ti2.proxPointer
        && (!skipOffsetRelevant || ti1.skipOffset == ti2.skipOffset);
}
// Guards index-dependent operations; indexTerms is null when the reader
// was opened with indexDivisor == -1 (terms index deliberately not loaded).
private void ensureIndexIsRead() {
    if (indexTerms != null) {
        return;
    }
    throw new IllegalStateException("terms index was not loaded when this reader was created");
}
/** Returns the position (ordinal) of a Term in the set, or -1 if absent. */
long getPosition(Term term) throws IOException {
if (size == 0)
return -1;
ensureIndexIsRead();
// Seek to the greatest index entry <= term, then scan forward linearly.
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
// NOTE(review): if next() exhausts the enum, enumerator.term() may be null
// on the following compare, which would NPE — confirm SegmentTermEnum's
// post-exhaustion contract before relying on this for out-of-range terms.
while (term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {
}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
    // Clone so callers get an independent cursor over the dictionary.
    final SegmentTermEnum fresh = (SegmentTermEnum) origEnum.clone();
    return fresh;
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
    // get(term, true) forces the per-thread enum onto the term's position
    // (mustSeekEnum), so the clone below starts at the right place.
    get(term, true);
    final SegmentTermEnum positioned = getThreadResources().termEnum;
    return (SegmentTermEnum) positioned.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private static final int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private static final class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor) throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION), readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION), readBufferSize), fieldInfos, true);
try {
// otherwise read index
int indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++) if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
// binary search indexTerms[]
int lo = 0;
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset], ((long) indexOffset * totalIndexInterval) - 1, indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0)
return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()), new TermInfoAndOrd(enumerator.termInfo, enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (// term is at or past current
enumerator.term() != null && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0) || term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
if (// but before end of block
indexTerms.length == enumOffset || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
/**
 * Positions {@code enumerator} on {@code term} and returns its TermInfo, or
 * null if the term is absent. Behaves like the four-argument overload with
 * caching enabled: results found after a multi-entry scan are recorded in
 * the terms cache.
 *
 * @param enumerator per-thread enum to (re)position
 * @param term       target term
 * @param tiOrd      previously cached info/ordinal for {@code term}, or null
 * @return the term's TermInfo, or null when the set is empty or term absent
 * @throws IOException on underlying directory read failure
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
  if (size == 0) {
    return null;
  }
  // optimize sequential access: first try scanning cached enum w/o seeking
  if (enumerator.term() != null // term is at or past current
      && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev()) > 0)
          || term.compareToUTF16(enumerator.term()) >= 0)) {
    int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
    if (indexTerms.length == enumOffset // but before end of block
        || term.compareToUTF16(indexTerms[enumOffset]) < 0) {
      // no need to seek: linear scan inside the current index block
      final TermInfo ti;
      int numScans = enumerator.scanTo(term);
      if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
        ti = enumerator.termInfo();
        if (numScans > 1) {
          // Only cache after skipping more than one entry, so ordered
          // iteration over many terms does not wipe out the cache.
          if (tiOrd == null) {
            termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
          } else {
            assert sameTermInfo(ti, tiOrd, enumerator);
            assert (int) enumerator.position == tiOrd.termOrd;
          }
        }
      } else {
        ti = null;
      }
      return ti;
    }
  }
  // random-access: must seek
  final int indexPos;
  if (tiOrd != null) {
    // FIX: termOrd is a long, so the quotient is a long; the original
    // assigned it straight to an int, which is a compile error (possible
    // lossy conversion). Narrow explicitly, matching the 4-arg overload.
    indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
  } else {
    // Must do binary search:
    indexPos = getIndexOffset(term);
  }
  seekEnum(enumerator, indexPos);
  enumerator.scanTo(term);
  final TermInfo ti;
  if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
    ti = enumerator.termInfo();
    if (tiOrd == null) {
      termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
    } else {
      assert sameTermInfo(ti, tiOrd, enumerator);
      assert (int) enumerator.position == tiOrd.termOrd;
    }
  } else {
    ti = null;
  }
  return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval && ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0)
return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while (term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {
}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum) origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum) getThreadResources().termEnum.clone();
}
}
// "Safe" (resolved) variant of TermInfosReader follows — duplicate copy of the class above.
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
<<<<<<< MINE
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
=======
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
>>>>>>> YOURS
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i = 0; indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
// Lazily creates and caches, per calling thread, a private clone of the
// dictionary enum so each thread has its own cursor position.
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/**
 * Returns the offset of the greatest index entry which is less than or equal
 * to {@code term}, or -1 if {@code term} sorts before every index entry.
 */
private int getIndexOffset(Term term) {
// Plain binary search over indexTerms[]. On a miss the loop terminates with
// 'high' one step below 'low', i.e. pointing at the greatest entry < term.
int low = 0;
int high = indexTerms.length - 1;
while (low <= high) {
final int mid = (low + high) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
final int cmp = term.compareToUTF16(indexTerms[mid]);
if (cmp == 0) {
return mid;
} else if (cmp < 0) {
high = mid - 1;
} else {
low = mid + 1;
}
}
return high;
}
// Positions enumerator at index entry indexOffset. The second argument is the
// position just before the indexed term (indexOffset * totalIndexInterval - 1),
// so the next advance lands on the indexed term itself.
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/**
 * Returns the TermInfo for a Term in the set, or null.
 *
 * @param mustSeekEnum when true, always position the per-thread enum at the
 *        term even on a cache hit (used by {@code terms(Term)} so the returned
 *        clone starts at the right place)
 */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
// Cache hit and no positioning required: answer without touching the enum.
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
// Caches the enumerator's current term -> (TermInfo, ord) pair so a later
// get() for the same term can be answered without a seek/scan.
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
/**
 * Seeks {@code enumerator} to {@code term}, consulting the terms cache first
 * when {@code useCache} is true, and returns the matching TermInfo or null.
 */
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
final TermInfoAndOrd cached = useCache ? termsCache.get(new CloneableTerm(term)) : null;
return seekEnum(enumerator, term, cached, useCache);
}
<<<<<<< MINE
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = tiOrd.termOrd / totalIndexInterval;
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo();
if (tiOrd == null) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, (int) enumerator.position));
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
=======
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
>>>>>>> YOURS
// Called only from asserts: structural equality of two TermInfos.
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
// skipOffset is only valid when docFreq >= skipInterval, so it is compared
// only in that case; everything else must always match.
return ti1.docFreq == ti2.docFreq
&& ti1.freqPointer == ti2.freqPointer
&& ti1.proxPointer == ti2.proxPointer
&& (ti1.docFreq < enumerator.skipInterval || ti1.skipOffset == ti2.skipOffset);
}
// Guards accessors that need the in-memory terms index; indexTerms is null
// when the reader was opened with indexDivisor == -1 (index not loaded).
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
// Binary-search the index for the nearest preceding entry, seek there, then
// scan forward linearly until we reach or pass term.
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
// Clone so the caller gets an independent cursor over the dictionary.
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
// get(term, true) positions the per-thread enum at the term; clone it so the
// caller's iteration does not disturb the shared thread-local cursor.
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
Unstructured
package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
<<<<<<< MINE
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
=======
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
>>>>>>> YOURS
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
<<<<<<< MINE
ti = enumerator.termInfo;
=======
ti = enumerator.termInfo();
>>>>>>> YOURS
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
<<<<<<< MINE
ti = enumerator.termInfo;
=======
ti = enumerator.termInfo();
>>>>>>> YOURS
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}package org.apache.lucene.index.codecs.preflex;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set
* @deprecated (4.0) This class has been replaced by
* FormatPostingsTermsDictReader, except for reading old segments.
* @lucene.experimental
*/
@Deprecated
public final class TermInfosReader {
private final Directory directory;
private final String segment;
private final FieldInfos fieldInfos;
private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
private final SegmentTermEnum origEnum;
private final long size;
private final Term[] indexTerms;
private final TermInfo[] indexInfos;
private final long[] indexPointers;
private final int totalIndexInterval;
private final static int DEFAULT_CACHE_SIZE = 1024;
// Just adds term's ord to TermInfo
private final static class TermInfoAndOrd extends TermInfo {
final long termOrd;
public TermInfoAndOrd(TermInfo ti, long termOrd) {
super(ti);
this.termOrd = termOrd;
}
}
private static class CloneableTerm extends DoubleBarrelLRUCache.CloneableKey {
Term term;
public CloneableTerm(Term t) {
this.term = t;
}
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
public int hashCode() {
return term.hashCode();
}
public Object clone() {
return new CloneableTerm(term);
}
}
private final DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm,TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
/**
* Per-thread resources managed by ThreadLocal
*/
private static final class ThreadResources {
SegmentTermEnum termEnum;
}
TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
throws CorruptIndexException, IOException {
boolean success = false;
if (indexDivisor < 1 && indexDivisor != -1) {
throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
}
try {
directory = dir;
segment = seg;
fieldInfos = fis;
origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
int indexSize = 1+((int)indexEnum.size-1)/indexDivisor; // otherwise read index
indexTerms = new Term[indexSize];
indexInfos = new TermInfo[indexSize];
indexPointers = new long[indexSize];
for (int i=0;indexEnum.next(); i++) {
indexTerms[i] = indexEnum.term();
assert indexTerms[i] != null;
assert indexTerms[i].text() != null;
assert indexTerms[i].field() != null;
indexInfos[i] = indexEnum.termInfo();
indexPointers[i] = indexEnum.indexPointer;
for (int j = 1; j < indexDivisor; j++)
if (!indexEnum.next())
break;
}
} finally {
indexEnum.close();
}
} else {
// Do not load terms index:
totalIndexInterval = -1;
indexTerms = null;
indexInfos = null;
indexPointers = null;
}
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
public int getSkipInterval() {
return origEnum.skipInterval;
}
public int getMaxSkipLevels() {
return origEnum.maxSkipLevels;
}
void close() throws IOException {
if (origEnum != null)
origEnum.close();
threadResources.close();
}
/** Returns the number of term/value pairs in the set. */
long size() {
return size;
}
private ThreadResources getThreadResources() {
ThreadResources resources = threadResources.get();
if (resources == null) {
resources = new ThreadResources();
resources.termEnum = terms();
threadResources.set(resources);
}
return resources;
}
/** Returns the offset of the greatest index entry which is less than or equal to term.*/
private int getIndexOffset(Term term) {
int lo = 0; // binary search indexTerms[]
int hi = indexTerms.length - 1;
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
int delta = term.compareToUTF16(indexTerms[mid]);
if (delta < 0)
hi = mid - 1;
else if (delta > 0)
lo = mid + 1;
else
return mid;
}
return hi;
}
private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
enumerator.seek(indexPointers[indexOffset],
((long) indexOffset * totalIndexInterval) - 1,
indexTerms[indexOffset], indexInfos[indexOffset]);
}
/** Returns the TermInfo for a Term in the set, or null. */
TermInfo get(Term term) throws IOException {
return get(term, false);
}
/** Returns the TermInfo for a Term in the set, or null. */
private TermInfo get(Term term, boolean mustSeekEnum) throws IOException {
if (size == 0) return null;
ensureIndexIsRead();
TermInfoAndOrd tiOrd = termsCache.get(new CloneableTerm(term));
ThreadResources resources = getThreadResources();
if (!mustSeekEnum && tiOrd != null) {
return tiOrd;
}
return seekEnum(resources.termEnum, term, tiOrd, true);
}
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
<<<<<<< MINE
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
=======
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
>>>>>>> YOURS
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
<<<<<<< MINE
ti = enumerator.termInfo;
=======
ti = enumerator.termInfo();
>>>>>>> YOURS
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
<<<<<<< MINE
ti = enumerator.termInfo;
=======
ti = enumerator.termInfo();
>>>>>>> YOURS
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
// called only from asserts
private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
if (ti1.docFreq != ti2.docFreq) {
return false;
}
if (ti1.freqPointer != ti2.freqPointer) {
return false;
}
if (ti1.proxPointer != ti2.proxPointer) {
return false;
}
// skipOffset is only valid when docFreq >= skipInterval:
if (ti1.docFreq >= enumerator.skipInterval &&
ti1.skipOffset != ti2.skipOffset) {
return false;
}
return true;
}
private void ensureIndexIsRead() {
if (indexTerms == null) {
throw new IllegalStateException("terms index was not loaded when this reader was created");
}
}
/** Returns the position of a Term in the set or -1. */
long getPosition(Term term) throws IOException {
if (size == 0) return -1;
ensureIndexIsRead();
int indexOffset = getIndexOffset(term);
SegmentTermEnum enumerator = getThreadResources().termEnum;
seekEnum(enumerator, indexOffset);
while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {}
if (term.compareToUTF16(enumerator.term()) == 0)
return enumerator.position;
else
return -1;
}
/** Returns an enumeration of all the Terms and TermInfos in the set. */
public SegmentTermEnum terms() {
return (SegmentTermEnum)origEnum.clone();
}
/** Returns an enumeration of terms starting at or after the named term. */
public SegmentTermEnum terms(Term term) throws IOException {
get(term, true);
return (SegmentTermEnum)getThreadResources().termEnum.clone();
}
}
Diff Result
No diff
Case 29 - java_lucenesolr.rev_2ede7_249fd..TermsHashPerField.java
Base
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
final class TermsHashPerField extends InvertedDocConsumerPerField {
final TermsHashConsumerPerField consumer;
final TermsHashPerField nextPerField;
final TermsHashPerThread perThread;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
boolean postingsCompacted;
int numPostings;
private int postingsHashSize = 4;
private int postingsHashHalfSize = postingsHashSize/2;
private int postingsHashMask = postingsHashSize-1;
private int[] postingsHash;
ParallelPostingsArray postingsArray;
private final BytesRef utf8;
private Comparator<BytesRef> termComp;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = perThread.utf8;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
private void initPostingsArray() {
postingsArray = consumer.createPostingsArray(2);
bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
}
// sugar: just forwards to DW
private void bytesUsed(long size) {
if (perThread.termsHash.trackAllocations) {
perThread.termsHash.docWriter.bytesUsed(size);
}
}
/**
 * Called after a flush: shrinks the termID hash back to its minimum size and
 * fully releases the postings array.
 *
 * NOTE(review): {@code targetSize} is currently ignored — the hash is always
 * shrunk to 4 slots; presumably intentional, but confirm before relying on it.
 */
void shrinkHash(int targetSize) {
assert postingsCompacted || numPostings == 0;
final int newSize = 4;
if (newSize != postingsHash.length) {
final long previousSize = postingsHash.length;
postingsHash = new int[newSize];
// Negative delta when shrinking: RAM is being given back.
bytesUsed((newSize-previousSize)*RamUsageEstimator.NUM_BYTES_INT);
Arrays.fill(postingsHash, -1);
postingsHashSize = newSize;
postingsHashHalfSize = newSize/2;
postingsHashMask = newSize-1;
}
// Fully free the postings array on each flush:
if (postingsArray != null) {
bytesUsed(-postingsArray.bytesPerPosting() * postingsArray.size);
postingsArray = null;
}
}
/**
 * Clears all terms (recursively down the chain) so the next document starts
 * empty. Compacts first so only the live prefix of postingsHash needs clearing.
 */
public void reset() {
if (!postingsCompacted)
compactPostings();
assert numPostings <= postingsHash.length;
if (numPostings > 0) {
// After compaction, every live entry sits in [0, numPostings).
Arrays.fill(postingsHash, 0, numPostings, -1);
numPostings = 0;
}
postingsCompacted = false;
if (nextPerField != null)
nextPerField.reset();
}
// Discards all buffered postings on error and propagates down the chain.
// synchronized: abort may be invoked from a thread other than the indexing one.
@Override
synchronized public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
// Grows the per-term parallel arrays and charges only the size delta to RAM accounting.
private final void growParallelPostingsArray() {
int oldSize = postingsArray.size;
this.postingsArray = this.postingsArray.grow();
bytesUsed(postingsArray.bytesPerPosting() * (postingsArray.size - oldSize));
}
/**
 * Positions {@code reader} at the start of the given stream's byte slices
 * for {@code termID}, ending at the stream's current write position
 * (stored in the int pool).
 */
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
// intStart is a global offset; split into pool-buffer index + offset within it.
final int[] ints = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriter.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
// Packs all live termIDs into the prefix [0, numPostings) of postingsHash,
// leaving -1 in the vacated slots, and marks the table compacted.
private synchronized void compactPostings() {
    int write = 0;
    for (int read = 0; read < postingsHashSize; read++) {
        final int termID = postingsHash[read];
        if (termID == -1) {
            continue;
        }
        // write <= read always holds, so this never clobbers an unread slot.
        if (write != read) {
            postingsHash[write] = termID;
            postingsHash[read] = -1;
        }
        write++;
    }
    assert write == numPostings;
    postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
// Returns postingsHash whose first numPostings slots are termIDs ordered by
// their term bytes per termComp; remaining slots are -1.
public int[] sortPostings(Comparator<BytesRef> termComp) {
this.termComp = termComp;
compactPostings();
quickSort(postingsHash, 0, numPostings-1);
return postingsHash;
}
// Median-of-three quicksort of termIDs[lo..hi] (inclusive), ordering by
// comparePostings. Statement order here is delicate (pivot placement and the
// asymmetric recursion bounds); kept byte-identical.
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
// Two elements: one compare-and-swap suffices.
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
// Median-of-three: order lo, mid, hi so termIDs[mid] is their median.
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
// Partition around the median value; keys <= partition end up on the left.
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares term text for two Posting instance and
 * returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
// Point the two shared scratch BytesRefs at each term's stored bytes, then
// delegate to the comparator installed by sortPostings().
termBytePool.setBytesRef(perThread.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(perThread.tr2, postingsArray.textStarts[term2]);
return termComp.compare(perThread.tr1, perThread.tr2);
}
/** Returns true if the stored bytes for {@code termID} exactly match the
 * current token's bytes held in {@code utf8}. */
private boolean postingEquals(final int termID) {
    final int textStart = postingsArray.textStarts[termID];
    final byte[] block = termBytePool.buffers[textStart >> DocumentsWriter.BYTE_BLOCK_SHIFT];
    assert block != null;
    int pos = textStart & DocumentsWriter.BYTE_BLOCK_MASK;
    // Decode the 1- or 2-byte length prefix written when the term was interned.
    final int storedLen;
    if ((block[pos] & 0x80) == 0) {
        storedLen = block[pos];
        pos += 1;
    } else {
        storedLen = (block[pos] & 0x7f) + ((block[pos + 1] & 0xff) << 7);
        pos += 2;
    }
    if (storedLen != utf8.length) {
        return false;
    }
    // Lengths match: compare the payload byte-for-byte.
    final byte[] tokenBytes = utf8.bytes;
    for (int i = 0; i < storedLen; i++) {
        if (block[pos + i] != tokenBytes[i]) {
            return false;
        }
    }
    return true;
}
// Whether the consumer / the next TermsHash in the chain asked to be fed
// this document's fields (set in start(Fieldable[], int)).
private boolean doCall;
private boolean doNextCall;
// Per-field-instance setup: re-fetches the term-to-bytes attribute from the
// current attribute source and notifies the consumer chain.
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
// Per-document setup. Returns true if this field (or any later TermsHash in
// the chain) wants to see the document's tokens.
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
// postingsArray is freed at flush (see shrinkHash); lazily re-allocate here.
if (postingsArray == null) {
initPostingsArray();
}
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int code = textStart;
int hashPos = code & postingsHashMask;
assert !postingsCompacted;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && postingsArray.textStarts[termID] != textStart) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && postingsArray.textStarts[termID] != textStart);
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID >= 0;
postingsArray.textStarts[termID] = textStart;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
// Keep load factor <= 50%.
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
// One first-level byte slice per stream; the int slot tracks each
// stream's current write address.
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Term already seen in this segment: restore its stream write positions.
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
// Interns the current token's bytes into the byte pool (length-prefixed),
// looks it up / inserts it in the termID hash, initializes or restores the
// per-stream byte slices, and notifies the consumer. Over-long terms are
// skipped (robustness) rather than thrown.
@Override
void add() throws IOException {
assert !postingsCompacted;
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriter.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
// Record a short prefix of the oversized term for diagnostics.
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
bytePool.nextBuffer();
}
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID != -1;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
final byte[] text = bytePool.buffer;
final int textUpto = bytePool.byteUpto;
postingsArray.textStarts[termID] = textUpto + bytePool.byteOffset;
// We first encode the length, followed by the UTF8
// bytes. Length is encoded as vInt, but will consume
// 1 or 2 bytes at most (we reject too-long terms,
// above).
// encode length @ start of bytes
if (utf8.length < 128) {
// 1 byte to store length
text[textUpto] = (byte) utf8.length;
bytePool.byteUpto += utf8.length + 1;
System.arraycopy(utf8.bytes, 0, text, textUpto+1, utf8.length);
} else {
// 2 byte to store length
text[textUpto] = (byte) (0x80 | (utf8.length & 0x7f));
text[textUpto+1] = (byte) ((utf8.length>>7) & 0xff);
bytePool.byteUpto += utf8.length + 2;
System.arraycopy(utf8.bytes, 0, text, textUpto+2, utf8.length);
}
if (numPostings == postingsHashHalfSize) {
// Keep load factor <= 50%; also report the extra RAM taken by the rehash.
rehashPostings(2*postingsHashSize);
bytesUsed(2*numPostings * RamUsageEstimator.NUM_BYTES_INT);
}
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
// One first-level byte slice per stream; the int slot tracks each
// stream's current write address.
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Term already seen in this segment: restore its stream write positions.
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Feed the interned pool offset to the next TermsHash in the chain.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Cached location (int-pool buffer + offset) of the current term's per-stream
// byte addresses; set by add()/add(int) and consumed by writeByte.
int[] intUptos;
int intUptoStart;
// Appends one byte to the given stream, rolling over to a freshly allocated
// (larger) slice when the current slice's end sentinel (non-zero byte) is hit.
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> DocumentsWriter.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & DocumentsWriter.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
// Appends len bytes from b[offset..offset+len) to the given stream.
// TODO: optimize
public void writeBytes(int stream, byte[] b, int offset, int len) {
    // Delegate byte-by-byte; writeByte transparently handles slice rollover.
    for (int i = 0; i < len; i++) {
        writeByte(stream, b[offset + i]);
    }
}
// Writes i to the stream as a standard vInt: low 7 bits per byte, LSB-first,
// high bit set on every byte except the last.
void writeVInt(int stream, int i) {
    assert stream < streamCount;
    for (; (i & ~0x7F) != 0; i >>>= 7) {
        writeByte(stream, (byte) ((i & 0x7f) | 0x80));
    }
    writeByte(stream, (byte) i);
}
// End-of-document hook: flushes the consumer, then the rest of the chain.
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (perThread.primary) {
// Primary TermsHash: recompute the hash from the stored,
// length-prefixed term bytes in the byte pool.
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriter.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriter.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
// Same polynomial (code*31 + byte) hash used when the term was added.
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary TermsHash hashes by pool offset, not by content.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
// Collision in the new table: probe with the same increment scheme add() uses.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
/**
 * Per-field worker in the TermsHash indexing chain: interns each token's
 * bytes into a shared byte pool (length-prefixed), maintains an open-addressed
 * hash from term bytes to termID, and feeds new/repeated terms to its
 * consumer. Instances are chained: a secondary TermsHashPerField hashes by
 * pool offset instead of re-hashing term content.
 *
 * NOTE(review): this chunk of the file appears to be a concatenated diff dump
 * ("Base"/"Left" markers); this class text occurs more than once — confirm
 * which copy is authoritative before editing further.
 */
final class TermsHashPerField extends InvertedDocConsumerPerField {
final TermsHashConsumerPerField consumer;
final TermsHashPerField nextPerField;
final TermsHashPerThread perThread;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
// Number of parallel byte streams per term (decided by the consumer).
final int streamCount;
// Ints consumed per term in intPool: 2 slots per stream.
final int numPostingInt;
final FieldInfo fieldInfo;
// True once live termIDs have been packed to the front of postingsHash.
boolean postingsCompacted;
int numPostings;
// Open-addressed hash of termIDs; size is always a power of two; -1 = empty.
private int postingsHashSize = 4;
private int postingsHashHalfSize = postingsHashSize/2;
private int postingsHashMask = postingsHashSize-1;
private int[] postingsHash;
// Per-term parallel arrays (textStarts/intStarts/byteStarts), indexed by termID.
ParallelPostingsArray postingsArray;
// Scratch holder for the current token's UTF-8 bytes (shared with perThread).
private final BytesRef utf8;
private Comparator<BytesRef> termComp;
// Wires this per-field into the chain; NOTE: consumer must be assigned before
// initPostingsArray(), which calls into it.
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = perThread.utf8;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// Allocates a minimal (2-entry) postings array; grown on demand.
private void initPostingsArray() {
postingsArray = consumer.createPostingsArray(2);
bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
}
// sugar: just forwards to DW
// Reports a RAM delta (may be negative) when allocation tracking is enabled.
private void bytesUsed(long size) {
if (perThread.termsHash.trackAllocations) {
perThread.termsHash.docWriter.bytesUsed(size);
}
}
// Post-flush: shrinks the hash to its minimum size and frees the postings
// array. NOTE(review): targetSize is ignored — always shrinks to 4 slots.
void shrinkHash(int targetSize) {
assert postingsCompacted || numPostings == 0;
final int newSize = 4;
if (newSize != postingsHash.length) {
final long previousSize = postingsHash.length;
postingsHash = new int[newSize];
bytesUsed((newSize-previousSize)*RamUsageEstimator.NUM_BYTES_INT);
Arrays.fill(postingsHash, -1);
postingsHashSize = newSize;
postingsHashHalfSize = newSize/2;
postingsHashMask = newSize-1;
}
// Fully free the postings array on each flush:
if (postingsArray != null) {
bytesUsed(-postingsArray.bytesPerPosting() * postingsArray.size);
postingsArray = null;
}
}
// Clears all terms (recursively down the chain); compacts first so only the
// live prefix of postingsHash needs clearing.
public void reset() {
if (!postingsCompacted)
compactPostings();
assert numPostings <= postingsHash.length;
if (numPostings > 0) {
Arrays.fill(postingsHash, 0, numPostings, -1);
numPostings = 0;
}
postingsCompacted = false;
if (nextPerField != null)
nextPerField.reset();
}
// Discards all buffered postings on error; synchronized because abort may be
// invoked from a thread other than the indexing one.
@Override
synchronized public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
// Grows the per-term parallel arrays, charging only the size delta.
private final void growParallelPostingsArray() {
int oldSize = postingsArray.size;
this.postingsArray = this.postingsArray.grow();
bytesUsed(postingsArray.bytesPerPosting() * (postingsArray.size - oldSize));
}
// Positions reader over the given stream's byte slices for termID.
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriter.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
// Packs all live termIDs into the prefix [0, numPostings) of postingsHash.
private synchronized void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
this.termComp = termComp;
compactPostings();
quickSort(postingsHash, 0, numPostings-1);
return postingsHash;
}
// Median-of-three quicksort of termIDs[lo..hi], ordering by comparePostings.
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares term text for two Posting instance and
 * returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(perThread.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(perThread.tr2, postingsArray.textStarts[term2]);
return termComp.compare(perThread.tr1, perThread.tr2);
}
/** Test whether the text for current RawPostingList p equals
 * current tokenText in utf8. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriter.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriter.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
// Whether the consumer / next chain link asked to see this document's fields.
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
// Per-document setup; postingsArray is lazily re-allocated after a flush.
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
if (postingsArray == null) {
initPostingsArray();
}
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int code = textStart;
int hashPos = code & postingsHashMask;
assert !postingsCompacted;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && postingsArray.textStarts[termID] != textStart) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && postingsArray.textStarts[termID] != textStart);
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID >= 0;
postingsArray.textStarts[termID] = textStart;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
// Interns the token bytes, inserts/looks up in the hash, and notifies the
// consumer; over-long terms are skipped rather than thrown.
@Override
void add() throws IOException {
assert !postingsCompacted;
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriter.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
bytePool.nextBuffer();
}
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID != -1;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
final byte[] text = bytePool.buffer;
final int textUpto = bytePool.byteUpto;
postingsArray.textStarts[termID] = textUpto + bytePool.byteOffset;
// We first encode the length, followed by the UTF8
// bytes. Length is encoded as vInt, but will consume
// 1 or 2 bytes at most (we reject too-long terms,
// above).
// encode length @ start of bytes
if (utf8.length < 128) {
// 1 byte to store length
text[textUpto] = (byte) utf8.length;
bytePool.byteUpto += utf8.length + 1;
System.arraycopy(utf8.bytes, 0, text, textUpto+1, utf8.length);
} else {
// 2 byte to store length
text[textUpto] = (byte) (0x80 | (utf8.length & 0x7f));
text[textUpto+1] = (byte) ((utf8.length>>7) & 0xff);
bytePool.byteUpto += utf8.length + 2;
System.arraycopy(utf8.bytes, 0, text, textUpto+2, utf8.length);
}
if (numPostings == postingsHashHalfSize) {
rehashPostings(2*postingsHashSize);
bytesUsed(2*numPostings * RamUsageEstimator.NUM_BYTES_INT);
}
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Cached location of the current term's per-stream write addresses.
int[] intUptos;
int intUptoStart;
// Appends one byte to the given stream, rolling over to a new slice at the
// end-of-slice sentinel (non-zero byte).
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> DocumentsWriter.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & DocumentsWriter.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
// Standard vInt: low 7 bits per byte, high bit marks continuation.
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (perThread.primary) {
// Primary: recompute the hash from the stored, length-prefixed bytes.
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriter.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriter.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary: hash by pool offset, not by content.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
Left
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHashPerField nextPerField;
final TermsHashPerThread perThread;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
bytesUsed = perThread.termsHash.trackAllocations?perThread.termsHash.docWriter.bytesUsed:new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
termBytesRef = perThread.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
@Override
synchronized public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriter.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
private boolean doCall;
private boolean doNextCall;
// Called per field instance: fetch the term-to-bytes attribute from the
// shared AttributeSource, then notify this consumer and the chain.
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
// Called once per document for all instances of this field; reinits the
// hash and returns true if any consumer in the chain wants the tokens.
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
// addByPoolOffset returns a new termID >= 0, or -(id)-1 if already seen.
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
// Reserve streamCount ints as write-heads, one per stream.
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
// One first-level byte slice per stream; each write-head starts there.
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Seen before: decode the real id and restore its write-heads.
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
// Not enough room in current block
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
// Record a short prefix of the oversized term for the error
// message, restoring the ref's length afterwards.
final int saved = termBytesRef.length;
try {
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
// Reserve streamCount write-head ints and a first-level byte slice
// per stream (mirrors the secondary add(int) path above).
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Negative return encodes an already-seen term as -(id)-1.
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Forward the interned textStart to the secondary TermsHash, if any.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Scratch state set by add()/add(int): the int buffer and base offset of
// the current term's per-stream write-heads, consumed by writeByte.
int[] intUptos;
int intUptoStart;
/**
 * Appends one byte to the given stream of the current term. A non-zero
 * byte at the write position marks the end of the current slice, in
 * which case a larger slice is allocated and the write-head updated.
 */
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
// Advance this stream's write-head.
(intUptos[intUptoStart+stream])++;
}
/** Appends {@code len} bytes from {@code b} (starting at {@code offset}) to the given stream. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
int pos = offset;
int remaining = len;
while (remaining > 0) {
writeByte(stream, b[pos]);
pos++;
remaining--;
}
}
/**
 * Writes {@code i} to the stream as a variable-length int: 7 bits per
 * byte, low-order group first, high bit set on continuation bytes.
 */
void writeVInt(int stream, int i) {
assert stream < streamCount;
int v = i;
for (; (v & ~0x7F) != 0; v >>>= 7) {
writeByte(stream, (byte) (0x80 | (v & 0x7f)));
}
writeByte(stream, (byte) v);
}
// Called after the last token of a document's field: flush the consumer,
// then the chained field's consumer.
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/**
 * BytesStartArray backed by the owning field's ParallelPostingsArray.
 * Lazily creates/grows/frees the postings array on behalf of the
 * BytesRefHash, keeping the shared bytesUsed counter in sync.
 */
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
// Creates the postings array on first use; returns its textStarts.
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
// Grows the postings array, charging only the size delta.
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
// Frees the postings array entirely and credits its bytes back.
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
/**
 * Per-field term collector for the inverted index chain. Interns each
 * token's bytes into a shared BytesRefHash (backed by the per-thread
 * byte pool) and notifies the consumer of new/repeated terms; a second
 * TermsHashPerField may be chained for secondary consumers.
 * (This copy is a duplicate of the methods documented earlier in the file.)
 */
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHashPerField nextPerField;
final TermsHashPerThread perThread;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
// Wires this field to the per-thread pools and chains nextPerField.
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
// Shared counter only when tracking allocations; else a private no-op one.
bytesUsed = perThread.termsHash.trackAllocations?perThread.termsHash.docWriter.bytesUsed:new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
termBytesRef = perThread.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// NOTE(review): targetSize is ignored; the hash is cleared, pools kept.
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
// Clears this field's hash and resets the chain.
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
@Override
synchronized public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
// Positions reader over the byte slices of one stream for termID.
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriter.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
// Whether this consumer / the chained consumer want the tokens.
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
// Returns new id >= 0, or -(id)-1 for an already-seen term.
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
// Not enough room in current block
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Forward the interned textStart to the secondary TermsHash, if any.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Write-head state for the current term, consumed by writeByte.
int[] intUptos;
int intUptoStart;
// Appends one byte; a non-zero byte at the position marks end-of-slice,
// triggering allocation of the next (larger) slice.
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
// Variable-length int: 7 bits per byte, low group first, high bit = more.
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
// BytesStartArray backed by the field's ParallelPostingsArray; keeps the
// shared bytesUsed counter in sync on init/grow/clear.
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
}
Right
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
/**
 * Per-field term collector ("Right" variant): instead of delegating to
 * BytesRefHash, this version maintains its own open-addressed int hash
 * (postingsHash) mapping hashed term bytes to termIDs, with inline
 * compaction, quicksort, and rehashing.
 */
final class TermsHashPerField extends InvertedDocConsumerPerField {
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
// Open-addressed hash: postingsHash[slot] is a termID or -1 for empty.
// Grown at 50% load (postingsHashHalfSize); mask = size-1 (power of two).
boolean postingsCompacted;
int numPostings;
private int postingsHashSize = 4;
private int postingsHashHalfSize = postingsHashSize/2;
private int postingsHashMask = postingsHashSize-1;
private int[] postingsHash;
ParallelPostingsArray postingsArray;
private final BytesRef utf8;
private Comparator<BytesRef> termComp;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
this.termsHash = termsHash;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = termsHash.utf8;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// Creates the initial (size-2) postings array and charges its bytes.
private void initPostingsArray() {
postingsArray = consumer.createPostingsArray(2);
bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
}
// sugar: just forwards to DW
private void bytesUsed(long size) {
if (termsHash.trackAllocations) {
termsHash.docWriter.bytesUsed(size);
}
}
// Shrinks the hash back to its minimum size and frees the postings
// array entirely. NOTE(review): targetSize is ignored (newSize fixed at 4).
void shrinkHash(int targetSize) {
assert postingsCompacted || numPostings == 0;
final int newSize = 4;
if (newSize != postingsHash.length) {
final long previousSize = postingsHash.length;
postingsHash = new int[newSize];
bytesUsed((newSize-previousSize)*RamUsageEstimator.NUM_BYTES_INT);
Arrays.fill(postingsHash, -1);
postingsHashSize = newSize;
postingsHashHalfSize = newSize/2;
postingsHashMask = newSize-1;
}
// Fully free the postings array on each flush:
if (postingsArray != null) {
bytesUsed(-postingsArray.bytesPerPosting() * postingsArray.size);
postingsArray = null;
}
}
// Empties the hash (compacting first so only the first numPostings
// slots need clearing) and resets the chain.
public void reset() {
if (!postingsCompacted)
compactPostings();
assert numPostings <= postingsHash.length;
if (numPostings > 0) {
Arrays.fill(postingsHash, 0, numPostings, -1);
numPostings = 0;
}
postingsCompacted = false;
if (nextPerField != null)
nextPerField.reset();
}
@Override
public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
// Grows the postings array, charging only the size delta.
private final void growParallelPostingsArray() {
int oldSize = postingsArray.size;
this.postingsArray = this.postingsArray.grow();
bytesUsed(postingsArray.bytesPerPosting() * (postingsArray.size - oldSize));
}
// Positions reader over the byte slices of one stream for termID.
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
// Moves all live termIDs to the front of postingsHash, preserving order
// of slots; afterwards slots [0, numPostings) are the live entries.
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
this.termComp = termComp;
compactPostings();
quickSort(postingsHash, 0, numPostings-1);
return postingsHash;
}
// In-place quicksort of termIDs by comparePostings (median-of-three pivot).
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares term text for two Posting instance and
 * returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
// Resolve both terms' bytes from the shared pool into scratch refs.
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
 * current tokenText in utf8. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
// Term bytes are prefixed by a 1- or 2-byte vInt length (see add()).
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
// Whether this consumer / the chained consumer want the tokens.
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
// Recreate the postings array if shrinkHash freed it after a flush.
if (postingsArray == null) {
initPostingsArray();
}
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int code = textStart;
int hashPos = code & postingsHashMask;
assert !postingsCompacted;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && postingsArray.textStarts[termID] != textStart) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && postingsArray.textStarts[termID] != textStart);
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID >= 0;
postingsArray.textStarts[termID] = textStart;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
assert !postingsCompacted;
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
bytePool.nextBuffer();
}
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID != -1;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
final byte[] text = bytePool.buffer;
final int textUpto = bytePool.byteUpto;
postingsArray.textStarts[termID] = textUpto + bytePool.byteOffset;
// We first encode the length, followed by the UTF8
// bytes. Length is encoded as vInt, but will consume
// 1 or 2 bytes at most (we reject too-long terms,
// above).
// encode length @ start of bytes
if (utf8.length < 128) {
// 1 byte to store length
text[textUpto] = (byte) utf8.length;
bytePool.byteUpto += utf8.length + 1;
System.arraycopy(utf8.bytes, 0, text, textUpto+1, utf8.length);
} else {
// 2 byte to store length
text[textUpto] = (byte) (0x80 | (utf8.length & 0x7f));
text[textUpto+1] = (byte) ((utf8.length>>7) & 0xff);
bytePool.byteUpto += utf8.length + 2;
System.arraycopy(utf8.bytes, 0, text, textUpto+2, utf8.length);
}
if (numPostings == postingsHashHalfSize) {
rehashPostings(2*postingsHashSize);
bytesUsed(2*numPostings * RamUsageEstimator.NUM_BYTES_INT);
}
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Forward the interned textStart to the secondary TermsHash, if any.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Write-head state for the current term, consumed by writeByte.
int[] intUptos;
int intUptoStart;
// Appends one byte; a non-zero byte at the position marks end-of-slice,
// triggering allocation of the next (larger) slice.
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
// Variable-length int: 7 bits per byte, low group first, high bit = more.
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
// Primary hash: recompute the code from the stored term bytes
// (length-prefixed, 31-based rolling hash).
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary hash keys directly on the interned textStart.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
final class TermsHashPerField extends InvertedDocConsumerPerField {
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
boolean postingsCompacted;
int numPostings;
private int postingsHashSize = 4;
private int postingsHashHalfSize = postingsHashSize/2;
private int postingsHashMask = postingsHashSize-1;
private int[] postingsHash;
ParallelPostingsArray postingsArray;
private final BytesRef utf8;
private Comparator<BytesRef> termComp;
/**
 * Creates the per-field inverter state, sharing the pools owned by
 * {@code termsHash}, registering this field with the consumer chain and,
 * when {@code nextTermsHash} is non-null, creating the downstream
 * per-field for the secondary TermsHash.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
this.termsHash = termsHash;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
// Charge the initial hash allocation against the RAM accounting.
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = termsHash.utf8;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// Allocates the parallel postings array (initial capacity 2 terms) and
// charges its size against the RAM accounting.
private void initPostingsArray() {
postingsArray = consumer.createPostingsArray(2);
bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
}
// Sugar: forwards RAM-accounting deltas to the DocumentsWriter, but only
// when this TermsHash is the one tracking allocations.
private void bytesUsed(long size) {
if (!termsHash.trackAllocations) {
return;
}
termsHash.docWriter.bytesUsed(size);
}
/**
 * Called after a flush: shrinks the postings hash back to its minimum
 * size and fully frees the postings array, returning both allocations
 * to the RAM accounting.
 * NOTE: {@code targetSize} is currently ignored — the hash is always
 * reset to 4 slots.
 */
void shrinkHash(int targetSize) {
assert postingsCompacted || numPostings == 0;
final int newSize = 4;
if (newSize != postingsHash.length) {
final long previousSize = postingsHash.length;
postingsHash = new int[newSize];
// Negative delta when shrinking: gives RAM back to the accounting.
bytesUsed((newSize-previousSize)*RamUsageEstimator.NUM_BYTES_INT);
Arrays.fill(postingsHash, -1);
postingsHashSize = newSize;
postingsHashHalfSize = newSize/2;
postingsHashMask = newSize-1;
}
// Fully free the postings array on each flush:
if (postingsArray != null) {
bytesUsed(-postingsArray.bytesPerPosting() * postingsArray.size);
postingsArray = null;
}
}
/** Discards all buffered postings for the next run and cascades the
 * reset down the TermsHash chain. */
public void reset() {
// Pack live termIDs into the leading slots first, so only that
// prefix of the table needs to be wiped.
if (!postingsCompacted) {
compactPostings();
}
assert numPostings <= postingsHash.length;
if (numPostings > 0) {
Arrays.fill(postingsHash, 0, numPostings, -1);
numPostings = 0;
}
postingsCompacted = false;
if (nextPerField != null) {
nextPerField.reset();
}
}
@Override
public void abort() {
// Aborting a document is just a reset, propagated down the chain.
reset();
if (nextPerField != null) {
nextPerField.abort();
}
}
// Grows the parallel postings array and charges only the added bytes
// against the RAM accounting.
private final void growParallelPostingsArray() {
final int sizeBefore = postingsArray.size;
this.postingsArray = this.postingsArray.grow();
final long addedBytes = postingsArray.bytesPerPosting() * (postingsArray.size - sizeBefore);
bytesUsed(addedBytes);
}
/** Positions {@code reader} over the byte slices written for the given
 * {@code termID} and {@code stream}. */
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
// intStart packs (buffer index << SHIFT) | offset; unpack both parts.
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
// Moves every live termID to the leading numPostings slots of the hash,
// leaving the remainder of the table filled with -1.
private void compactPostings() {
int writePos = 0;
for (int readPos = 0; readPos < postingsHashSize; readPos++) {
final int termID = postingsHash[readPos];
if (termID == -1) {
continue;
}
if (writePos < readPos) {
postingsHash[writePos] = termID;
postingsHash[readPos] = -1;
}
writePos++;
}
assert writePos == numPostings;
postingsCompacted = true;
}
/** Collapses the hash table and sorts the leading {@code numPostings}
 * termIDs in-place by term bytes; returns the compacted hash array. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
this.termComp = termComp;
compactPostings();
quickSort(postingsHash, 0, numPostings-1);
return postingsHash;
}
/** In-place quicksort of {@code termIDs[lo..hi]} ordered by
 * {@link #comparePostings}; uses median-of-three pivot selection. */
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
// Exactly two elements: swap directly if out of order.
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
// Median-of-three: order lo, mid, hi so termIDs[mid] is the median.
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
// Partition: elements <= pivot stay left, > pivot go right.
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
// Recurse on both halves around the split point.
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares the term bytes of two postings; returns a negative value if
 * term1 &lt; term2, positive if term1 &gt; term2, else 0.  Uses the
 * shared scratch refs {@code tr1}/{@code tr2} held on termsHash. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
// Point the scratch BytesRefs at each term's bytes in the pool.
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Returns true if the term bytes stored for {@code termID} equal the
 * current token's bytes held in {@code utf8}. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
// Decode the stored length prefix (1 or 2 bytes).
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
// Lengths must match before comparing the bytes themselves.
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
// Refresh the term attribute for this field instance, then notify
// the consumer and the downstream per-field that it is starting.
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
// The postings array is freed on flush; lazily re-create it here.
if (postingsArray == null) {
initPostingsArray();
}
doNextCall = nextPerField != null && nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int code = textStart;
int hashPos = code & postingsHashMask;
assert !postingsCompacted;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && postingsArray.textStarts[termID] != textStart) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && postingsArray.textStarts[termID] != textStart);
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID >= 0;
postingsArray.textStarts[termID] = textStart;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
// Grow when half full to keep probe chains short.
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices: make sure both pools have room for this
// term's per-stream addresses and first-level byte slices.
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Existing term: decode its per-stream int addresses.
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
assert !postingsCompacted;
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
// Term bytes are stored length-prefixed; the prefix is at most 2 bytes.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
// Keep a short prefix of the first skipped term for diagnostics.
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
bytePool.nextBuffer();
}
// New posting
termID = numPostings++;
if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
assert termID != -1;
assert postingsHash[hashPos] == -1;
postingsHash[hashPos] = termID;
final byte[] text = bytePool.buffer;
final int textUpto = bytePool.byteUpto;
postingsArray.textStarts[termID] = textUpto + bytePool.byteOffset;
// We first encode the length, followed by the UTF8
// bytes. Length is encoded as vInt, but will consume
// 1 or 2 bytes at most (we reject too-long terms,
// above).
// encode length @ start of bytes
if (utf8.length < 128) {
// 1 byte to store length
text[textUpto] = (byte) utf8.length;
bytePool.byteUpto += utf8.length + 1;
System.arraycopy(utf8.bytes, 0, text, textUpto+1, utf8.length);
} else {
// 2 byte to store length
text[textUpto] = (byte) (0x80 | (utf8.length & 0x7f));
text[textUpto+1] = (byte) ((utf8.length>>7) & 0xff);
bytePool.byteUpto += utf8.length + 2;
System.arraycopy(utf8.bytes, 0, text, textUpto+2, utf8.length);
}
// Grow when half full to keep probe chains short.
if (numPostings == postingsHashHalfSize) {
rehashPostings(2*postingsHashSize);
bytesUsed(2*numPostings * RamUsageEstimator.NUM_BYTES_INT);
}
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Existing term: decode its per-stream int addresses.
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Forward the interned pool address to the secondary TermsHash.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
/** Appends one byte to the given stream's current byte slice, chaining
 * to a new (larger) slice when the current one is exhausted. */
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
// A non-zero byte here is the slice's end sentinel.
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
/** Writes {@code len} bytes of {@code b}, starting at {@code offset},
 * to the given stream one byte at a time. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
int remaining = len;
int cursor = offset;
while (remaining-- > 0) {
writeByte(stream, b[cursor++]);
}
}
/** Writes {@code i} to the stream as a vInt: 7 bits per byte,
 * LSB-first, high bit set on every byte except the last. */
void writeVInt(int stream, int i) {
assert stream < streamCount;
int remaining = i;
while ((remaining & ~0x7F) != 0) {
writeByte(stream, (byte) ((remaining & 0x7f) | 0x80));
remaining >>>= 7;
}
writeByte(stream, (byte) remaining);
}
@Override
void finish() throws IOException {
// Let the consumer finalize this field, then cascade down the chain.
consumer.finish();
if (nextPerField != null) {
nextPerField.finish();
}
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied).  Resizes the open-addressed
 * table to {@code newSize} (a power of two) and re-inserts every live
 * termID. */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
// -1 marks an empty slot in the old table.
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
// Primary hash: recompute the hash from the length-prefixed term
// bytes stored in the byte pool.
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
// length is 2 bytes
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
// Same 31-based rolling hash used on insert.
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary hash: the interned pool address is the key.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
// Collision: odd probe increment visits every power-of-two slot.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
// ===== MergeMethods: the copy of TermsHashPerField below still contains unresolved merge-conflict markers (<<<<<<< MINE / >>>>>>> YOURS) =====
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Creates the per-field inverter state for the BytesRefHash-based
 * TermsHash and wires this field into the consumer chain.
 *
 * FIX: the previous body referenced undeclared locals {@code perThread}
 * / {@code nextPerThread} and assigned a nonexistent field
 * {@code this.perThread}, so it could not compile; those references are
 * mapped onto the actual parameters ({@code termsHash} /
 * {@code nextTermsHash}), matching the earlier revision of this class.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
this.termsHash = termsHash;
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
// Share the writer-wide counter when tracking allocations; otherwise
// use a private counter so accounting updates go nowhere.
bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed : new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
// NOTE(review): assumes TermsHash exposes termBytesRef — confirm.
termBytesRef = termsHash.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// sugar: just forwards to DW
// NOTE(review): likely stale — this class now accounts RAM through the
// AtomicLong 'bytesUsed' field (see constructor, which reads
// docWriter.bytesUsed as a field), while this method invokes
// docWriter.bytesUsed(size) as a method; confirm which contract
// DocumentsWriter actually exposes in this branch.
private void bytesUsed(long size) {
if (termsHash.trackAllocations) {
termsHash.docWriter.bytesUsed(size);
}
}
/** Called after a flush.  NOTE: {@code targetSize} is ignored here. */
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
/** Drops all buffered terms (keeping pooled storage) and cascades the
 * reset down the TermsHash chain. */
public void reset() {
bytesHash.clear(false);
if (nextPerField != null) {
nextPerField.reset();
}
}
@Override
public void abort() {
// Aborting is just a reset, propagated down the chain.
reset();
if (nextPerField != null) {
nextPerField.abort();
}
}
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
// NOTE(review): stale leftover from the manual-hash implementation —
// postingsHashSize / postingsHash / numPostings / postingsCompacted are
// not declared in this BytesRefHash-based class, so this method cannot
// compile here; it appears safe to delete once confirmed unused.
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place; fully delegated to
 * {@code BytesRefHash.sort}. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
/** Compares term text for two Posting instance and
 * returns -1 if p1 < p2; 1 if p1 > p2; else 0.
 * NOTE(review): stale leftover — {@code termComp} (and likely
 * {@code termsHash.tr1}/{@code tr2}) are not declared in this
 * BytesRefHash-based class; sorting now goes through
 * {@code bytesHash.sort}, so this method appears safe to delete. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
 * current tokenText in utf8.
 * NOTE(review): stale leftover — the {@code utf8} field does not exist
 * in this BytesRefHash-based class (term equality is handled inside
 * BytesRefHash); this method appears safe to delete. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/** Bridges BytesRefHash's start-offset storage onto the per-field
 * ParallelPostingsArray, so hash growth and the postings array stay in
 * sync and are charged to the same RAM counter. */
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
// Lazily (re-)creates the postings array; returns its textStarts as
// the backing int[] for BytesRefHash.
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
// Grows the postings array and charges only the added bytes.
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
// Frees the postings array entirely, returning its RAM to the counter.
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied).
 * NOTE(review): stale leftover from the manual-hash implementation —
 * postingsHash / postingsHashSize / postingsHashMask /
 * postingsHashHalfSize are not declared in this BytesRefHash-based
 * class (rehashing is handled inside BytesRefHash); this method appears
 * safe to delete once confirmed unused. */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Creates the per-field inverter state for the BytesRefHash-based
 * TermsHash and wires this field into the consumer chain.
 *
 * FIX: the previous body referenced undeclared locals {@code perThread}
 * / {@code nextPerThread} and assigned a nonexistent field
 * {@code this.perThread}, so it could not compile; those references are
 * mapped onto the actual parameters ({@code termsHash} /
 * {@code nextTermsHash}), matching the earlier revision of this class.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
this.termsHash = termsHash;
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
// Share the writer-wide counter when tracking allocations; otherwise
// use a private counter so accounting updates go nowhere.
bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed : new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
// NOTE(review): assumes TermsHash exposes termBytesRef — confirm.
termBytesRef = termsHash.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// sugar: just forwards to DW
/** Forwards an allocation delta to the DocumentsWriter's memory counter. */
private void bytesUsed(long size) {
  // No-op unless allocation tracking was requested on the owning TermsHash.
  if (!termsHash.trackAllocations) {
    return;
  }
  termsHash.docWriter.bytesUsed(size);
}
/**
 * Called at flush time to reclaim term-hash memory.
 * NOTE(review): {@code targetSize} is unused since the cutover to
 * BytesRefHash; clearing drops all entries regardless of the target.
 */
void shrinkHash(int targetSize) {
  // Fully free the bytesHash on each flush but keep the pool untouched
  // bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
  bytesHash.clear(false);
}
/** Clears this field's term hash, then recursively resets the rest of the chain. */
public void reset() {
  bytesHash.clear(false);
  final TermsHashPerField next = nextPerField;
  if (next != null) {
    next.reset();
  }
}
/** Aborts indexing for this field: discards buffered state down the whole chain. */
@Override
public void abort() {
  reset();
  if (nextPerField == null) {
    return;
  }
  nextPerField.abort();
}
/**
 * Positions {@code reader} over the byte slice holding the postings data
 * written to {@code stream} for {@code termID}.  The int pool stores, per
 * term, one end pointer per stream; the shift/mask arithmetic recovers the
 * (buffer, offset) pair from the flat intStart address.
 */
public void initReader(ByteSliceReader reader, int termID, int stream) {
  assert stream < streamCount;
  int intStart = postingsArray.intStarts[termID];
  // Decompose the flat address into buffer index and in-buffer offset.
  final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
  final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
  reader.init(bytePool,
              postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
              ints[upto+stream]);
}
/**
 * NOTE(review): dead code left over from the pre-BytesRefHash
 * implementation.  It references {@code postingsHash},
 * {@code postingsHashSize}, {@code numPostings} and
 * {@code postingsCompacted}, none of which are declared in this class
 * anymore -- this method cannot compile as-is and should be deleted once
 * the merge is settled.
 */
private void compactPostings() {
  int upto = 0;
  // Slide live entries (!= -1) to the front of the table, preserving order.
  for(int i=0;i<postingsHashSize;i++) {
    if (postingsHash[i] != -1) {
      if (upto < i) {
        postingsHash[upto] = postingsHash[i];
        postingsHash[i] = -1;
      }
      upto++;
    }
  }
  assert upto == numPostings;
  postingsCompacted = true;
}
/** Collapse the hash table &amp; sort in-place; returns the sorted term IDs.
 * Delegates entirely to {@code BytesRefHash#sort}. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
  return bytesHash.sort(termComp);
}
/** Compares term text for two Posting instance and
 * returns -1 if p1 < p2; 1 if p1 > p2; else 0.
 * NOTE(review): uses the shared scratch refs {@code termsHash.tr1}/{@code tr2},
 * so this is not safe for concurrent callers -- presumably single-threaded
 * per field; verify against callers. */
int comparePostings(int term1, int term2) {
  if (term1 == term2) {
    // Our quicksort does this, eg during partition
    return 0;
  }
  // Point the shared scratch BytesRefs at each term's interned text.
  termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
  termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
  return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
 * current tokenText in utf8.
 * NOTE(review): dead leftover from the pre-BytesRefHash code -- it
 * references a field {@code utf8} that is no longer declared in this
 * class, so it cannot compile as-is; delete once the merge is settled. */
private boolean postingEquals(final int termID) {
  final int textStart = postingsArray.textStarts[termID];
  final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
  assert text != null;
  int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
  final int len;
  if ((text[pos] & 0x80) == 0) {
    // length is 1 byte
    len = text[pos];
    pos += 1;
  } else {
    // length is 2 bytes
    len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
    pos += 2;
  }
  // Lengths match; compare byte-by-byte against the interned text.
  if (len == utf8.length) {
    final byte[] utf8Bytes = utf8.bytes;
    for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
      if (utf8Bytes[tokenPos] != text[pos]) {
        return false;
      }
    }
    return true;
  } else {
    return false;
  }
}
private boolean doCall;
private boolean doNextCall;
/** Per-field setup for one document field: caches the term attribute and notifies the chain. */
@Override
void start(Fieldable f) {
  termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
  consumer.start(f);
  if (nextPerField == null) {
    return;
  }
  nextPerField.start(f);
}
/** Reinitializes the hash for a new document; returns true if this field or any downstream field wants inversion. */
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
  final boolean wanted = consumer.start(fields, count);
  doCall = wanted;
  bytesHash.reinit();
  final TermsHashPerField next = nextPerField;
  if (next != null) {
    doNextCall = next.start(fields, count);
  }
  return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
/** Writes {@code len} bytes from {@code b[offset..offset+len)} to the given stream, one byte at a time. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
  // TODO: optimize
  for (int i = 0; i < len; i++) {
    writeByte(stream, b[offset + i]);
  }
}
/** Writes {@code i} to the stream in VInt encoding: 7 payload bits per byte, high bit set on continuation bytes. */
void writeVInt(int stream, int i) {
  assert stream < streamCount;
  for (; (i & ~0x7F) != 0; i >>>= 7) {
    writeByte(stream, (byte) ((i & 0x7F) | 0x80));
  }
  writeByte(stream, (byte) i);
}
/** Signals end-of-document to the consumer, then to the rest of the chain. */
@Override
void finish() throws IOException {
  consumer.finish();
  final TermsHashPerField next = nextPerField;
  if (next != null) {
    next.finish();
  }
}
/**
 * BytesStartArray backed by the owning field's ParallelPostingsArray.
 * Lazily creates the postings array, grows it on demand, and keeps the
 * shared bytesUsed counter in sync with every (de)allocation.
 */
private static final class PostingsBytesStartArray extends BytesStartArray {
  private final TermsHashPerField perField;
  private final AtomicLong bytesUsed;

  private PostingsBytesStartArray(TermsHashPerField perField, AtomicLong bytesUsed) {
    this.perField = perField;
    this.bytesUsed = bytesUsed;
  }

  @Override
  public int[] init() {
    // Create the postings array on first use and account for its memory.
    if (perField.postingsArray == null) {
      final ParallelPostingsArray created = perField.consumer.createPostingsArray(2);
      perField.postingsArray = created;
      bytesUsed.addAndGet(created.size * created.bytesPerPosting());
    }
    return perField.postingsArray.textStarts;
  }

  @Override
  public int[] grow() {
    final int oldSize = perField.postingsArray.size;
    final ParallelPostingsArray grown = perField.postingsArray.grow();
    perField.postingsArray = grown;
    // Charge only the delta between the new and old sizes.
    bytesUsed.addAndGet(grown.bytesPerPosting() * (grown.size - oldSize));
    return grown.textStarts;
  }

  @Override
  public int[] clear() {
    // Release the postings array and credit its memory back.
    final ParallelPostingsArray released = perField.postingsArray;
    if (released != null) {
      bytesUsed.addAndGet(-released.size * released.bytesPerPosting());
      perField.postingsArray = null;
    }
    return null;
  }

  @Override
  public AtomicLong bytesUsed() {
    return bytesUsed;
  }
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied).
 * NOTE(review): dead code from the pre-BytesRefHash implementation -- it
 * references {@code postingsHash}, {@code postingsHashSize},
 * {@code postingsHashMask} and {@code postingsHashHalfSize}, none of which
 * are declared in this class anymore, and {@code termsHash.primary} is not
 * established by the visible code.  Cannot compile as-is; delete once the
 * merge is settled. */
void rehashPostings(final int newSize) {
  final int newMask = newSize-1;
  int[] newHash = new int[newSize];
  Arrays.fill(newHash, -1);
  for(int i=0;i<postingsHashSize;i++) {
    int termID = postingsHash[i];
    if (termID != -1) {
      int code;
      if (termsHash.primary) {
        // Primary hash: recompute the code from the interned term bytes.
        final int textStart = postingsArray.textStarts[termID];
        final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
        final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
        code = 0;
        final int len;
        int pos;
        if ((text[start] & 0x80) == 0) {
          // length is 1 byte
          len = text[start];
          pos = start+1;
        } else {
          len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
          pos = start+2;
        }
        final int endPos = pos+len;
        // Classic 31-multiplier rolling hash over the term bytes.
        while(pos < endPos) {
          code = (code*31) + text[pos++];
        }
      } else {
        // Secondary hash: the pool offset itself is the code.
        code = postingsArray.textStarts[termID];
      }
      int hashPos = code & newMask;
      assert hashPos >= 0;
      if (newHash[hashPos] != -1) {
        // Collision: quadratic-style probing with an odd increment.
        final int inc = ((code>>8)+code)|1;
        do {
          code += inc;
          hashPos = code & newMask;
        } while (newHash[hashPos] != -1);
      }
      newHash[hashPos] = termID;
    }
  }
  postingsHashMask = newMask;
  postingsHash = newHash;
  postingsHashSize = newSize;
  postingsHashHalfSize = newSize >> 1;
}
}
// ===== KeepBothMethods: second merge variant of TermsHashPerField.java follows (dataset separator, not Java source) =====
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Per-thread-style constructor kept by the "KeepBothMethods" merge.
 * NOTE(review): assigns {@code this.perThread}, but no {@code perThread}
 * field is declared in this class, and the final {@code termsHash} field
 * is never assigned here -- this overload cannot compile as-is.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
  this.perThread = perThread;
  intPool = perThread.intPool;
  bytePool = perThread.bytePool;
  termBytePool = perThread.termBytePool;
  docState = perThread.docState;
  // Shared counter when tracking allocations; private counter otherwise.
  bytesUsed = perThread.termsHash.trackAllocations?perThread.termsHash.docWriter.bytesUsed:new AtomicLong();
  fieldState = docInverterPerField.fieldState;
  this.consumer = perThread.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2*streamCount;
  termBytesRef = perThread.termBytesRef;
  this.fieldInfo = fieldInfo;
  if (nextPerThread != null)
    nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
/**
 * TermsHash-based constructor kept by the "KeepBothMethods" merge.
 * NOTE(review): pre-BytesRefHash leftover -- it references
 * {@code postingsHash}, {@code postingsHashSize}, {@code utf8},
 * {@code initPostingsArray()} and {@code RamUsageEstimator}, none of which
 * are declared or imported in this file, and it leaves the final fields
 * {@code bytesUsed}, {@code bytesHash} and {@code termBytesRef}
 * unassigned.  Cannot compile as-is; one of the two constructors must be
 * reconciled and the other deleted.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  postingsHash = new int[postingsHashSize];
  Arrays.fill(postingsHash, -1);
  bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
  fieldState = docInverterPerField.fieldState;
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  initPostingsArray();
  streamCount = consumer.getStreamCount();
  numPostingInt = 2*streamCount;
  utf8 = termsHash.utf8;
  this.fieldInfo = fieldInfo;
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
// sugar: just forwards to DW
private void bytesUsed(long size) {
if (termsHash.trackAllocations) {
termsHash.docWriter.bytesUsed(size);
}
}
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
@Override
public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
* current tokenText in utf8. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
/** Called when postings hash is too small (> 50%
* occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
bytesUsed = perThread.termsHash.trackAllocations?perThread.termsHash.docWriter.bytesUsed:new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
termBytesRef = perThread.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
this.termsHash = termsHash;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = termsHash.utf8;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
// sugar: just forwards to DW
private void bytesUsed(long size) {
if (termsHash.trackAllocations) {
termsHash.docWriter.bytesUsed(size);
}
}
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
@Override
public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
* current tokenText in utf8. */
// NOTE(review): legacy method from the open-addressing implementation; `utf8`
// is not declared in the visible field list -- equality is now handled inside
// BytesRefHash.add(). Candidate for removal -- confirm.
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
// The interned term is stored length-prefixed: 1 byte if < 0x80, else a
// 2-byte little-endian VInt-style prefix.
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
// Lengths must match before a byte-wise comparison is worthwhile.
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
// Whether our consumer (doCall) and the chained field's consumer (doNextCall)
// asked to see tokens for the current document; set in start(Fieldable[],int).
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
  // Grab the attribute exposing the term bytes for this field, then notify
  // our consumer and, when present, the chained per-field instance.
  termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
  consumer.start(f);
  if (nextPerField == null) {
    return;
  }
  nextPerField.start(f);
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
  // Ask our consumer whether it wants these tokens, reset the term hash for
  // the new document, then combine with the chained field's answer.
  doCall = consumer.start(fields, count);
  bytesHash.reinit();
  doNextCall = (nextPerField != null) && nextPerField.start(fields, count);
  return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;   // int buffer currently holding the active term's stream write heads
int intUptoStart; // offset of stream 0's head within intUptos
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
/** Appends {@code len} bytes from {@code b} starting at {@code offset} to the stream. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
  // TODO: optimize
  int i = offset;
  final int end = offset + len;
  while (i < end) {
    writeByte(stream, b[i]);
    i++;
  }
}
/** Writes {@code i} as a variable-length int: 7 bits per byte, low-order
 * first, high bit set on every byte except the last. */
void writeVInt(int stream, int i) {
  assert stream < streamCount;
  int value = i;
  while ((value & ~0x7F) != 0) {
    writeByte(stream, (byte) ((value & 0x7f) | 0x80));
    value >>>= 7;
  }
  writeByte(stream, (byte) value);
}
/** Signals end-of-document to our consumer, then cascades down the chain. */
@Override
void finish() throws IOException {
  consumer.finish();
  if (nextPerField == null) {
    return;
  }
  nextPerField.finish();
}
/**
 * BytesStartArray backed by the per-field ParallelPostingsArray; creates,
 * grows and frees the postings array on behalf of the BytesRefHash and
 * accounts its RAM against the shared {@code bytesUsed} counter.
 */
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
// Lazily allocates the postings array (initial capacity 2) and charges its RAM.
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
// Grows the postings array and charges only the delta in RAM.
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
// Frees the postings array and credits its RAM back; returns null so the
// hash drops its reference.
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
/** Called when postings hash is too small (> 50%
* occupied) or too large (< 20% occupied). */
// NOTE(review): legacy open-addressing rehash from the pre-BytesRefHash
// implementation; it references postingsHash/postingsHashSize/postingsHashMask/
// postingsHashHalfSize and termsHash.primary, which are not declared in the
// visible field list -- BytesRefHash manages its own table now. Candidate for
// removal once confirmed unused.
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
// Primary hash stores the term bytes directly: re-derive the hash code
// from the length-prefixed interned bytes.
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
// length is 2 bytes
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary hash keys on the interned address itself.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
// Quadratic-ish probing with an odd increment derived from the code.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
// ===== "Safe" (pre-merge) copy of TermsHashPerField follows; duplicate of the class above =====
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
<<<<<<< MINE
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
intPool = termsHash.intPool;
bytePool = termsHash.bytePool;
termBytePool = termsHash.termBytePool;
docState = termsHash.docState;
this.termsHash = termsHash;
postingsHash = new int[postingsHashSize];
Arrays.fill(postingsHash, -1);
bytesUsed(postingsHashSize * RamUsageEstimator.NUM_BYTES_INT);
fieldState = docInverterPerField.fieldState;
this.consumer = termsHash.consumer.addField(this, fieldInfo);
initPostingsArray();
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
utf8 = termsHash.utf8;
this.fieldInfo = fieldInfo;
if (nextTermsHash != null)
nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
=======
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
bytePool = perThread.bytePool;
termBytePool = perThread.termBytePool;
docState = perThread.docState;
bytesUsed = perThread.termsHash.trackAllocations?perThread.termsHash.docWriter.bytesUsed:new AtomicLong();
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
termBytesRef = perThread.termBytesRef;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
>>>>>>> YOURS
// sugar: just forwards to DW
<<<<<<< MINE
private void bytesUsed(long size) {
if (termsHash.trackAllocations) {
termsHash.docWriter.bytesUsed(size);
}
}
=======
>>>>>>> YOURS
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
@Override
public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
<<<<<<< MINE
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
=======
>>>>>>> YOURS
/** Test whether the text for current RawPostingList p equals
* current tokenText in utf8. */
<<<<<<< MINE
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
=======
>>>>>>> YOURS
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
/** Called when postings hash is too small (> 50%
* occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Merge resolution of the conflicted constructor: the per-thread layer was
 * removed, so all shared state comes from TermsHash (MINE signature) while
 * keeping the YOURS wiring of the BytesRefHash and the final bytesUsed field
 * (MINE never assigned it, which cannot compile).
 * TODO confirm TermsHash exposes docWriter.bytesUsed and termBytesRef as assumed.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  // Charge RAM against the DocumentsWriter only when tracking is enabled.
  bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed : new AtomicLong();
  fieldState = docInverterPerField.fieldState;
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2*streamCount;
  termBytesRef = termsHash.termBytesRef;
  this.fieldInfo = fieldInfo;
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
// Merge resolution: the bytesUsed(long) forwarding helper (MINE side of the
// conflict) was dropped -- RAM accounting flows through the shared AtomicLong
// `bytesUsed` field wired up in the constructor, as on the YOURS side.
/** Frees all term entries after a flush, keeping the pools allocated.
 * NOTE(review): {@code targetSize} is ignored. */
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
/** Clears all per-field term state, cascading down the consumer chain. */
public void reset() {
bytesHash.clear(false);
if (nextPerField != null)
nextPerField.reset();
}
/** Aborts the current document: drop accumulated state, then notify the chain. */
@Override
public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
/** Positions {@code reader} over the given term's slice for {@code stream};
 * the shift/mask split decodes intStarts into (buffer, offset) in the int pool. */
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
/** Packs live termIDs to the front of the open-addressing table.
 * NOTE(review): legacy -- references postingsHash/numPostings/postingsCompacted,
 * not declared on this class; candidate for removal once confirmed unused. */
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
// Returns the termIDs ordered by termComp; delegates to BytesRefHash.
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
// Merge resolution: comparePostings() (MINE side of the conflict) was dropped,
// as on the YOURS side -- it referenced undeclared state (termsHash.tr1/tr2,
// termComp) and term ordering is handled by BytesRefHash.sort (see sortPostings).
// Merge resolution: postingEquals() (MINE side of the conflict) was dropped,
// as on the YOURS side -- it referenced the undeclared `utf8` scratch ref and
// term lookup/equality is handled inside BytesRefHash.add().
// Whether our consumer (doCall) and the chained field's consumer (doNextCall)
// asked to see tokens for the current document; set in start(Fieldable[],int).
private boolean doCall;
private boolean doNextCall;
// Grabs the term-bytes attribute for this field, then notifies our consumer
// and, when present, the chained per-field instance.
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
// Resets the term hash for the new document and returns whether any consumer
// in the chain wants to see this field's tokens.
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
>>>>>>> YOURS
// Not enough room in current block
<<<<<<< MINE
=======
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
>>>>>>> YOURS
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
<<<<<<< MINE
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
=======
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
>>>>>>> YOURS
} finally {
termBytesRef.length = saved;
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
/** Appends {@code len} bytes from {@code b}, starting at {@code offset},
 *  to the given stream one byte at a time. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
  // TODO: optimize
  int cursor = offset;
  int remaining = len;
  while (remaining-- > 0) {
    writeByte(stream, b[cursor++]);
  }
}
/** Writes {@code i} to the given stream in VInt encoding:
 *  7 data bits per byte, high bit set on continuation bytes. */
void writeVInt(int stream, int i) {
  assert stream < streamCount;
  int value = i;
  while ((value & ~0x7F) != 0) {
    writeByte(stream, (byte) ((value & 0x7f) | 0x80));
    value >>>= 7;
  }
  writeByte(stream, (byte) value);
}
/** Signals end of processing to the consumer, then cascades the call
 *  down the rest of the field chain. */
@Override
void finish() throws IOException {
  consumer.finish();
  if (nextPerField != null) {
    nextPerField.finish();
  }
}
/** Adapter that lets BytesRefHash store its per-term text-start offsets
 *  directly in this field's ParallelPostingsArray, charging the array's
 *  footprint to the shared {@code bytesUsed} counter. */
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
// Lazily creates the postings array (initial capacity 2) and charges
// its full footprint to the shared counter.
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
// Grows the postings array, charging only the size delta.
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
// Drops the postings array and refunds its footprint.
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
/** Called when postings hash is too small (> 50%
 * occupied) or too large (< 20% occupied).
 * Rebuilds the open-addressed postings hash at {@code newSize}
 * (assumed a power of two), re-deriving each surviving termID's code.
 * NOTE(review): references postingsHash/postingsHashSize/
 * postingsHashMask/termsHash.primary, none of which the surviving
 * BytesRefHash-based class declares — this method looks like
 * unresolved-merge residue; confirm whether it should still exist. */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
// Primary hash: recompute the code from the term's pooled text,
// which is stored with a 1- or 2-byte length prefix.
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
// Secondary hash: terms are keyed by their interned textStart.
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
// Collision: probe with a per-key odd stride until a free slot.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
Unstructured
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Builds one element of the per-field term-hash chain.
 * Merge conflict resolved: keeps the BytesRefHash-based initialization
 * (MINE) on the TermsHash-based signature (YOURS), which is the one the
 * surviving class fields ({@code termsHash}, {@code bytesHash},
 * {@code termBytesRef}) and the tail of this constructor require.
 * The obsolete initPostingsArray()/bytesUsed(long) helpers from the
 * YOURS side are dropped — PostingsBytesStartArray now does that work.
 * NOTE(review): the sources of trackAllocations/docWriter.bytesUsed/
 * termBytesRef on TermsHash are inferred from the two conflict sides —
 * confirm against the TermsHash class.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  // Charge allocations to the shared counter only when tracking is on.
  bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed : new AtomicLong();
  fieldState = docInverterPerField.fieldState;
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  // Term bytes are interned into the shared term byte pool via the hash;
  // the start array routes offsets into our ParallelPostingsArray.
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2*streamCount;
  termBytesRef = termsHash.termBytesRef;
  this.fieldInfo = fieldInfo;
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
/** Frees all per-term state at flush time while keeping the pools for
 *  reuse on the next segment.
 *  NOTE(review): {@code targetSize} is ignored by this implementation. */
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
/** Discards all buffered terms for this field, then resets the rest of
 *  the chain.  The underlying byte pool is kept (clear(false)). */
public void reset() {
  bytesHash.clear(false);
  if (nextPerField != null) {
    nextPerField.reset();
  }
}
/** Aborts indexing for this field: drops buffered terms via reset(),
 *  then aborts the rest of the chain. */
@Override
public void abort() {
  reset();
  if (nextPerField != null) {
    nextPerField.abort();
  }
}
/** Positions {@code reader} over the byte slice chain for the given
 *  term's stream, from the stream's first slice through its current
 *  write head. */
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
// Unpack the term's int-stream address into block index and offset.
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
<<<<<<< MINE
=======
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
>>>>>>> YOURS
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
<<<<<<< MINE
=======
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
* current tokenText in utf8. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
>>>>>>> YOURS
// Set by start(Fieldable[], int): whether our consumer (doCall) and the
// next field in the chain (doNextCall) want tokens for this document.
private boolean doCall;
private boolean doNextCall;
/** Per-field setup: cache the term-to-bytes attribute for this field's
 *  token stream, then notify the consumer and the rest of the chain. */
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
/** Per-document setup: asks the consumer chain whether it wants this
 *  document's tokens and re-arms the term hash.
 *  @return true if this field or any downstream field wants the tokens */
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
// Not enough room in current block
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
} finally {
termBytesRef.length = saved;
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
>>>>>>> YOURS
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
/** Appends {@code len} bytes from {@code b}, starting at {@code offset},
 *  to the given stream one byte at a time. */
public void writeBytes(int stream, byte[] b, int offset, int len) {
  // TODO: optimize
  int cursor = offset;
  int remaining = len;
  while (remaining-- > 0) {
    writeByte(stream, b[cursor++]);
  }
}
/** Writes {@code i} to the given stream in VInt encoding:
 *  7 data bits per byte, high bit set on continuation bytes. */
void writeVInt(int stream, int i) {
  assert stream < streamCount;
  int value = i;
  while ((value & ~0x7F) != 0) {
    writeByte(stream, (byte) ((value & 0x7f) | 0x80));
    value >>>= 7;
  }
  writeByte(stream, (byte) value);
}
/** Signals end of processing to the consumer, then cascades the call
 *  down the rest of the field chain. */
@Override
void finish() throws IOException {
  consumer.finish();
  if (nextPerField != null) {
    nextPerField.finish();
  }
}
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
<<<<<<< MINE
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
=======
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
>>>>>>> YOURS
}
return perField.postingsArray.textStarts;
}
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
}package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
final class TermsHashPerField extends InvertedDocConsumerPerField {
private static final int HASH_INIT_SIZE = 4;
final TermsHashConsumerPerField consumer;
final TermsHash termsHash;
final TermsHashPerField nextPerField;
final DocumentsWriterPerThread.DocState docState;
final FieldInvertState fieldState;
TermToBytesRefAttribute termAtt;
// Copied from our perThread
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final ByteBlockPool termBytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private final BytesRef termBytesRef;
private final AtomicLong bytesUsed;
/**
 * Builds one element of the per-field term-hash chain.
 * Merge conflict resolved: keeps the BytesRefHash-based initialization
 * (MINE) on the TermsHash-based signature (YOURS), which is the one the
 * surviving class fields ({@code termsHash}, {@code bytesHash},
 * {@code termBytesRef}) and the tail of this constructor require.
 * The obsolete initPostingsArray()/bytesUsed(long) helpers from the
 * YOURS side are dropped — PostingsBytesStartArray now does that work.
 * NOTE(review): the sources of trackAllocations/docWriter.bytesUsed/
 * termBytesRef on TermsHash are inferred from the two conflict sides —
 * confirm against the TermsHash class.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  // Charge allocations to the shared counter only when tracking is on.
  bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed : new AtomicLong();
  fieldState = docInverterPerField.fieldState;
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  // Term bytes are interned into the shared term byte pool via the hash;
  // the start array routes offsets into our ParallelPostingsArray.
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2*streamCount;
  termBytesRef = termsHash.termBytesRef;
  this.fieldInfo = fieldInfo;
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
/** Frees all per-term state at flush time while keeping the pools for
 *  reuse on the next segment.
 *  NOTE(review): {@code targetSize} is ignored by this implementation. */
void shrinkHash(int targetSize) {
// Fully free the bytesHash on each flush but keep the pool untouched
// bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
bytesHash.clear(false);
}
/** Discards all buffered terms for this field, then resets the rest of
 *  the chain.  The underlying byte pool is kept (clear(false)). */
public void reset() {
  bytesHash.clear(false);
  if (nextPerField != null) {
    nextPerField.reset();
  }
}
/** Aborts indexing for this field: drops buffered terms via reset(),
 *  then aborts the rest of the chain. */
@Override
public void abort() {
  reset();
  if (nextPerField != null) {
    nextPerField.abort();
  }
}
/** Positions {@code reader} over the byte slice chain for the given
 *  term's stream, from the stream's first slice through its current
 *  write head. */
public void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
// Unpack the term's int-stream address into block index and offset.
int intStart = postingsArray.intStarts[termID];
final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
<<<<<<< MINE
=======
private void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != -1) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = -1;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
>>>>>>> YOURS
/** Collapse the hash table & sort in-place. */
public int[] sortPostings(Comparator<BytesRef> termComp) {
return bytesHash.sort(termComp);
}
<<<<<<< MINE
=======
void quickSort(int[] termIDs, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
final int tmp = termIDs[lo];
termIDs[lo] = termIDs[hi];
termIDs[hi] = tmp;
}
return;
}
int mid = (lo + hi) >>> 1;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp;
}
if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
int tmp = termIDs[mid];
termIDs[mid] = termIDs[hi];
termIDs[hi] = tmp;
if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
int tmp2 = termIDs[lo];
termIDs[lo] = termIDs[mid];
termIDs[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
int partition = termIDs[mid];
for (; ;) {
while (comparePostings(termIDs[right], partition) > 0)
--right;
while (left < right && comparePostings(termIDs[left], partition) <= 0)
++left;
if (left < right) {
int tmp = termIDs[left];
termIDs[left] = termIDs[right];
termIDs[right] = tmp;
--right;
} else {
break;
}
}
quickSort(termIDs, lo, left);
quickSort(termIDs, left + 1, hi);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(int term1, int term2) {
if (term1 == term2) {
// Our quicksort does this, eg during partition
return 0;
}
termBytePool.setBytesRef(termsHash.tr1, postingsArray.textStarts[term1]);
termBytePool.setBytesRef(termsHash.tr2, postingsArray.textStarts[term2]);
return termComp.compare(termsHash.tr1, termsHash.tr2);
}
/** Test whether the text for current RawPostingList p equals
* current tokenText in utf8. */
private boolean postingEquals(final int termID) {
final int textStart = postingsArray.textStarts[termID];
final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
assert text != null;
int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final int len;
if ((text[pos] & 0x80) == 0) {
// length is 1 byte
len = text[pos];
pos += 1;
} else {
// length is 2 bytes
len = (text[pos]&0x7f) + ((text[pos+1]&0xff)<<7);
pos += 2;
}
if (len == utf8.length) {
final byte[] utf8Bytes = utf8.bytes;
for(int tokenPos=0;tokenPos<utf8.length;pos++,tokenPos++) {
if (utf8Bytes[tokenPos] != text[pos]) {
return false;
}
}
return true;
} else {
return false;
}
}
>>>>>>> YOURS
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
// Per-document setup: ask the consumer whether it wants token calls,
// reset the term hash, and propagate to the chained field. Returns true
// if either this consumer or the next one wants tokens.
// NOTE(review): when nextPerField == null, doNextCall keeps its value
// from a previous call — presumably harmless because nextPerField looks
// fixed per instance, but confirm against the constructor (not in view).
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
bytesHash.reinit();
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
// Insert (or look up) the term by its byte-pool offset; a negative
// return encodes an already-seen term as (-termID)-1.
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) { // New posting
// First time we are seeing this token since we last
// flushed the hash.
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE)
intPool.nextBuffer();
// NOTE(review): unresolved merge conflict below — the two sides read
// the byte-block size constant from different classes. Must be
// resolved before this file can compile.
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
>>>>>>> YOURS
bytePool.nextBuffer();
// Reserve one int per stream and carve an initial byte slice for each;
// each int holds the stream's current write offset into the byte pool.
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Existing term: decode its stream pointer location and notify.
termID = (-termID)-1;
int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text & hash of this term.
// NOTE(review): unresolved merge conflict below. MINE interns via
// bytesHash and catches MaxBytesLengthExceededException for over-long
// terms; YOURS probes an open-addressed postingsHash manually and
// checks the length limit inline. The two sides' braces do not pair up,
// so this method cannot compile until the conflict is resolved.
<<<<<<< MINE
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
}catch (MaxBytesLengthExceededException e) {
// Not enough room in current block
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = termBytesRef.length;
try {
termBytesRef.length = Math.min(30, DocumentsWriter.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = termBytesRef.toString();
} finally {
termBytesRef.length = saved;
=======
int code = termAtt.toBytesRef(utf8);
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
int termID = postingsHash[hashPos];
if (termID != -1 && !postingEquals(termID)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
termID = postingsHash[hashPos];
} while (termID != -1 && !postingEquals(termID));
}
if (termID == -1) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen2 = 2+utf8.length;
if (textLen2 + bytePool.byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE) {
// Not enough room in current block
if (utf8.length > DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null) {
final int saved = utf8.length;
try {
utf8.length = Math.min(30, DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8);
docState.maxTermPrefix = utf8.toString();
} finally {
utf8.length = saved;
}
}
consumer.skippingLongTerm();
return;
>>>>>>> YOURS
}
}
consumer.skippingLongTerm();
return;
}
if (termID >= 0) {// New posting
bytesHash.byteStart(termID);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriterRAMAllocator.INT_BLOCK_SIZE) {
intPool.nextBuffer();
}
// NOTE(review): second unresolved conflict — same size-constant
// disagreement as in add(int textStart); resolve consistently.
<<<<<<< MINE
if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
=======
if (DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
>>>>>>> YOURS
bytePool.nextBuffer();
}
// Reserve one int per stream and carve an initial byte slice for each.
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
postingsArray.byteStarts[termID] = intUptos[intUptoStart];
consumer.newTerm(termID);
} else {
// Existing term: decode its stream pointer location and notify.
termID = (-termID)-1;
final int intStart = postingsArray.intStarts[termID];
intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
consumer.addTerm(termID);
}
// Hand the interned term down the chain so the next TermsHash can hash
// by textStart instead of re-interning the bytes.
if (doNextCall)
nextPerField.add(postingsArray.textStarts[termID]);
}
// Scratch pointers into the int block pool for the term most recently
// passed to add(): intUptos is the active int buffer and intUptoStart
// the base index of this term's per-stream byte-slice write offsets.
int[] intUptos;
int intUptoStart;
// Appends one byte to the given stream's current byte slice, allocating
// the next (larger) slice when the current one's non-zero sentinel end
// byte is reached, and advances the stream's write offset.
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
// NOTE(review): unresolved merge conflict — both sides compute the same
// buffer index but take the shift constant from different classes.
<<<<<<< MINE
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
=======
byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
>>>>>>> YOURS
assert bytes != null;
// NOTE(review): second hunk of the same conflict (the mask constant);
// resolve consistently with the hunk above.
<<<<<<< MINE
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
=======
int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
>>>>>>> YOURS
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
// Appends len bytes from b[offset..offset+len) to the given stream, one
// byte at a time via writeByte (which handles slice rollover).
public void writeBytes(int stream, byte[] b, int offset, int len) {
  // TODO: optimize
  for (int i = 0; i < len; i++) {
    writeByte(stream, b[offset + i]);
  }
}
// Writes i to the given stream as a variable-length int: 7 payload bits
// per byte, high bit set on every byte except the last.
void writeVInt(int stream, int i) {
  assert stream < streamCount;
  for (; (i & ~0x7F) != 0; i >>>= 7) {
    writeByte(stream, (byte) ((i & 0x7f) | 0x80));
  }
  writeByte(stream, (byte) i);
}
// End-of-field hook: flush our consumer, then the chained field's.
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
// Adapter that lets the shared bytesHash store its per-term start
// offsets inside this field's postingsArray, while accounting the
// memory in bytesUsed.
// NOTE(review): contains a large unresolved merge conflict — MINE holds
// the constructor plus init(); YOURS holds an unrelated rehash loop
// from the old hand-rolled hash table. The braces of the two sides do
// not pair up; resolve before compiling.
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final AtomicLong bytesUsed;
<<<<<<< MINE
private PostingsBytesStartArray(
TermsHashPerField perField, AtomicLong bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if(perField.postingsArray == null) {
perField.postingsArray = perField.consumer.createPostingsArray(2);
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
=======
final int newMask = newSize-1;
int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for(int i=0;i<postingsHashSize;i++) {
int termID = postingsHash[i];
if (termID != -1) {
int code;
if (termsHash.primary) {
final int textStart = postingsArray.textStarts[termID];
final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
code = 0;
final int len;
int pos;
if ((text[start] & 0x80) == 0) {
// length is 1 byte
len = text[start];
pos = start+1;
} else {
len = (text[start]&0x7f) + ((text[start+1]&0xff)<<7);
pos = start+2;
}
final int endPos = pos+len;
while(pos < endPos) {
code = (code*31) + text[pos++];
}
} else {
code = postingsArray.textStarts[termID];
}
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != -1) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != -1);
}
newHash[hashPos] = termID;
>>>>>>> YOURS
}
return perField.postingsArray.textStarts;
}
// Grow the postings array and account the extra bytes.
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
bytesUsed
.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
// Drop the postings array and give its bytes back to the accounting.
@Override
public int[] clear() {
if(perField.postingsArray != null) {
bytesUsed.addAndGet(-perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
perField.postingsArray = null;
}
return null;
}
@Override
public AtomicLong bytesUsed() {
return bytesUsed;
}
}
}
Diff Result
No diff
Case 30 - java_lucenesolr.rev_2ede7_249fd..TestIndexWriter.java
Base
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// JUnit 3-style constructor: forwards the test name to LuceneTestCase.
public TestIndexWriter(String name) {
super(name);
}
// Verifies maxDoc/numDocs across add, delete, reopen, optimize, and
// re-create of an index: deletes lower numDocs immediately but maxDoc
// only after optimize expunges them.
public void testDocCount() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily shrink the global default write-lock timeout; restore it
// in finally so other tests are unaffected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// optimize expunged the 40 deletes, so maxDoc drops to numDocs
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
}
// Indexes one trivial document with a single analyzed, unstored
// "content" field containing the token "aaa".
private static void addDoc(IndexWriter writer) throws IOException {
  final Document doc = new Document();
  final Field content = new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED);
  doc.add(content);
  writer.addDocument(doc);
}
// Indexes one document carrying the given index number in both its
// stored "content" text ("aaa <index>") and its "id" field.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  final Document doc = new Document();
  final String idText = "" + index;
  doc.add(new Field("content", "aaa " + idText, Field.Store.YES, Field.Index.ANALYZED));
  doc.add(new Field("id", idText, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(doc);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
// Exercises all three addIndexes variants under simulated disk-full and
// random IOExceptions, verifying the index never corrupts and that the
// operation is transactional (all docs added or none).
public void testAddIndexOnDiskFull() throws IOException
{
int START_COUNT = 57;
int NUM_DIR = 50;
int END_COUNT = START_COUNT + NUM_DIR*25;
// Build up a bunch of dirs that have indexes which we
// will then merge together by calling addIndexes(*):
Directory[] dirs = new Directory[NUM_DIR];
long inputDiskUsage = 0;
for(int i=0;i<NUM_DIR;i++) {
dirs[i] = new RAMDirectory();
IndexWriter writer = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<25;j++) {
addDocWithIndex(writer, 25*i+j);
}
writer.close();
String[] files = dirs[i].listAll();
for(int j=0;j<files.length;j++) {
inputDiskUsage += dirs[i].fileLength(files[j]);
}
}
// Now, build a starting index that has START_COUNT docs. We
// will then try to addIndexesNoOptimize into a copy of this:
RAMDirectory startDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<START_COUNT;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Make sure starting index seems to be working properly:
Term searchTerm = new Term("content", "aaa");
IndexReader reader = IndexReader.open(startDir, true);
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 57, hits.length);
searcher.close();
reader.close();
// Iterate with larger and larger amounts of free
// disk space. With little free disk space,
// addIndexes will certainly run out of space &
// fail. Verify that when this happens, index is
// not corrupt and index in fact has added no
// documents. Then, we increase disk space by 2000
// bytes each iteration. At some point there is
// enough free disk space and addIndexes should
// succeed and index should show all documents were
// added.
// String[] files = startDir.listAll();
long diskUsage = startDir.sizeInBytes();
long startDiskUsage = 0;
String[] files = startDir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += startDir.fileLength(files[i]);
}
// One outer iteration per addIndexes variant under test.
for(int iter=0;iter<3;iter++) {
if (VERBOSE)
System.out.println("TEST: iter=" + iter);
// Start with 100 bytes more than we are currently using:
long diskFree = diskUsage+100;
int method = iter;
boolean success = false;
boolean done = false;
String methodName;
if (0 == method) {
methodName = "addIndexes(Directory[]) + optimize()";
} else if (1 == method) {
methodName = "addIndexes(IndexReader[])";
} else {
methodName = "addIndexes(Directory[])";
}
while(!done) {
// Make a new dir that will enforce disk usage:
MockRAMDirectory dir = new MockRAMDirectory(startDir);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IOException err = null;
MergeScheduler ms = writer.getConfig().getMergeScheduler();
for(int x=0;x<2;x++) {
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
if (0 == x)
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
else
((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
// Two loops: first time, limit disk space &
// throw random IOExceptions; second time, no
// disk space limit:
double rate = 0.05;
double diskRatio = ((double) diskFree)/diskUsage;
long thisDiskFree;
String testName = null;
if (0 == x) {
thisDiskFree = diskFree;
// Taper the random-failure rate off as free space grows.
if (diskRatio >= 2.0) {
rate /= 2;
}
if (diskRatio >= 4.0) {
rate /= 2;
}
if (diskRatio >= 6.0) {
rate = 0.0;
}
if (VERBOSE)
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
} else {
thisDiskFree = 0;
rate = 0.0;
if (VERBOSE)
testName = "disk full test " + methodName + " with unlimited disk space";
}
if (VERBOSE)
System.out.println("\ncycle: " + testName);
dir.setMaxSizeInBytes(thisDiskFree);
dir.setRandomIOExceptionRate(rate, diskFree);
try {
if (0 == method) {
writer.addIndexes(dirs);
writer.optimize();
} else if (1 == method) {
IndexReader readers[] = new IndexReader[dirs.length];
for(int i=0;i<dirs.length;i++) {
readers[i] = IndexReader.open(dirs[i], true);
}
try {
writer.addIndexes(readers);
} finally {
for(int i=0;i<dirs.length;i++) {
readers[i].close();
}
}
} else {
writer.addIndexes(dirs);
}
success = true;
if (VERBOSE) {
System.out.println(" success!");
}
if (0 == x) {
done = true;
}
} catch (IOException e) {
success = false;
err = e;
if (VERBOSE) {
System.out.println(" hit IOException: " + e);
e.printStackTrace(System.out);
}
// An exception on the unlimited-space pass is a real failure.
if (1 == x) {
e.printStackTrace(System.out);
fail(methodName + " hit IOException after disk space was freed up");
}
}
// Make sure all threads from
// ConcurrentMergeScheduler are done
_TestUtil.syncConcurrentMerges(writer);
if (VERBOSE) {
System.out.println(" now test readers");
}
// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs added, and if we
// failed, we see either all docs or no docs added
// (transactional semantics):
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when creating IndexReader: " + e);
}
int result = reader.docFreq(searchTerm);
if (success) {
if (result != START_COUNT) {
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result != START_COUNT && result != END_COUNT) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
}
}
searcher = new IndexSearcher(reader);
try {
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when searching: " + e);
}
int result2 = hits.length;
if (success) {
if (result2 != result) {
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result2 != result) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
}
searcher.close();
reader.close();
if (VERBOSE) {
System.out.println(" count is " + result);
}
if (done || result == END_COUNT) {
break;
}
}
if (VERBOSE) {
System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
}
if (done) {
// Javadocs state that temp free Directory space
// required is at most 2X total input size of
// indices so let's make sure:
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
"starting disk usage = " + startDiskUsage + " bytes; " +
"input index disk usage = " + inputDiskUsage + " bytes",
(dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
}
// Make sure we don't hit disk full during close below:
dir.setMaxSizeInBytes(0);
dir.setRandomIOExceptionRate(0.0, 0);
writer.close();
// Wait for all BG threads to finish else
// dir.close() will throw IOException because
// there are still open files
_TestUtil.syncConcurrentMerges(ms);
dir.close();
// Try again with 5000 more bytes of free space:
diskFree += 5000;
}
}
startDir.close();
}
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Verifies IndexWriter cleans up (no unreferenced files, readable index)
// after disk-full during addDocument. Pass 0 closes the writer, pass 1
// aborts via rollback; the free-space cap grows until adds succeed.
public void testAddDocumentOnDiskFull() throws IOException {
for(int pass=0;pass<2;pass++) {
if (VERBOSE)
System.out.println("TEST: pass=" + pass);
boolean doAbort = pass == 1;
long diskFree = 200;
while(true) {
if (VERBOSE)
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
MergeScheduler ms = writer.getConfig().getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
boolean hitError = false;
try {
for(int i=0;i<200;i++) {
addDoc(writer);
}
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on addDoc");
e.printStackTrace(System.out);
}
hitError = true;
}
if (hitError) {
if (doAbort) {
writer.rollback();
} else {
try {
writer.close();
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on close");
e.printStackTrace(System.out);
}
// close itself hit disk full; lift the cap so it can finish.
dir.setMaxSizeInBytes(0);
writer.close();
}
}
_TestUtil.syncConcurrentMerges(ms);
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
// Make sure reader can open the index:
IndexReader.open(dir, true).close();
dir.close();
// Now try again w/ more space:
diskFree += 500;
} else {
_TestUtil.syncConcurrentMerges(writer);
dir.close();
break;
}
}
}
}
// Runs the index file deleter over the directory and asserts the file
// listing is unchanged, i.e. no unreferenced files were left behind.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  String[] startFiles = dir.listAll();
  SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null, CodecProvider.getDefault());
  String[] endFiles = dir.listAll();
  Arrays.sort(startFiles);
  Arrays.sort(endFiles);
  if (Arrays.equals(startFiles, endFiles)) {
    return;
  }
  fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
}
// Verifies optimize(3) leaves at most 3 segments (or fewer if the index
// already had fewer), across a range of index sizes.
public void testOptimizeMaxNumSegments() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Count segments before the bounded optimize.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
}
// Same bounded-optimize check as above (limit 7), but with a single
// long-lived writer that keeps adding docs between optimize calls.
public void testOptimizeMaxNumSegments2() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Wait for background merges before counting segments.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
sis = new SegmentInfos();
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
// Measures peak directory usage during optimize and asserts it stays
// within 2X the starting index size.
public void testOptimizeTempSpaceUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Baseline: total bytes of the unoptimized index.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
}
dir.resetMaxUsedSizeInBytes();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2*startDiskUsage) + " (= 2X starting usage)",
maxDiskUsage <= 2*startDiskUsage);
dir.close();
}
// Joins the array entries with "\n " separators, for use in failure
// messages. Uses a StringBuilder instead of the original repeated
// String concatenation, which was O(n^2) in the total output length;
// the produced string is identical (empty input yields "").
static String arrayToString(String[] l) {
  StringBuilder sb = new StringBuilder();
  for(int i=0;i<l.length;i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
File indexDir = _TestUtil.getTempDir("lucenetestindexwriter");
try {
Directory dir = FSDirectory.open(indexDir);
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
// the old reader still sees its point-in-time view (one doc)
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
rmDir(indexDir);
}
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// copy all but the final byte
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// write a truncated copy of the segments file...
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// ...and delete the good one, so only the corrupt copy remains.
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: the corrupt index must fail to open
}
if (reader != null) {
reader.close();
}
}
// Verifies that using a writer after close() throws
// AlreadyClosedException rather than silently succeeding.
public void testChangesAfterClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
// close
writer.close();
try {
addDoc(writer);
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException e) {
// expected
}
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
public void testSimulatedCorruptIndex2() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();

  long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);

  // Delete the first compound-segment file we find; the segments file
  // now references data that is gone:
  for (String file : dir.listAll()) {
    if (file.endsWith(".cfs")) {
      dir.deleteFile(file);
      break;
    }
  }

  IndexReader reader = null;
  try {
    reader = IndexReader.open(dir, true);
    fail("reader did not hit IOException on opening a corrupt index");
  } catch (IOException e) {
    // expected: the missing .cfs must surface as an IOException.
    // Fixed: previously caught Exception, which would also have
    // masked unexpected RuntimeExceptions from open().
  }
  if (reader != null) {
    reader.close();
  }
}
/*
 * Simple test for "commit on close": open writer then
 * add a bunch of docs, making sure reader does not see
 * these docs until writer is closed.
 */
public void testCommitOnClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Hold a reader open across the second writer session so we can
// check isCurrent() before and after the writer commits:
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
// A fresh searcher must still see only the original 14 docs,
// because the second writer has not committed yet:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
// close() commits, so the old reader is stale and a new searcher
// sees all 14 + 3*11 = 47 docs:
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
}
/*
 * Simple test for "commit on close": open writer, then
 * add a bunch of docs, making sure reader does not see
 * them until writer has closed. Then instead of
 * closing the writer, call abort and verify reader sees
 * nothing was added. Then verify we can open the index
 * and add docs to it.
 */
public void testCommitOnCloseAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Second session: add 17 docs and delete everything, but roll back
// instead of committing.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
// Uncommitted adds/deletes must be invisible to a fresh searcher:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
// After rollback the index must look exactly as before the session:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Still uncommitted, so only the original 14 docs are visible:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
// 14 original + 12*17 new docs = 218 total after commit:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
 * Verify that a writer with "commit on close" indeed
 * cleans up the temp segments created after opening
 * that are not referenced by the starting segments
 * file. We check this by using MockRAMDirectory to
 * measure max temp disk space used.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Build a small starting index to establish the baseline size:
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
dir.resetMaxUsedSizeInBytes();
long startDiskUsage = dir.getMaxUsedSizeInBytes();
// Second session adds ~50x the docs; SerialMergeScheduler keeps
// merges on this thread so peak usage is deterministic:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10).setMergeScheduler(
new SerialMergeScheduler()));
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 2X disk usage normally we allow 100X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 100X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
midDiskUsage < 100*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
endDiskUsage < 100*startDiskUsage);
}
/*
 * Verify that calling optimize when writer is open for
 * "commit on close" works correctly both for rollback()
 * and close().
 */
public void testCommitOnCloseOptimize() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// 17 docs with maxBufferedDocs=10 guarantees multiple segments:
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point, because the optimize is not yet committed:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// This time optimize and commit via close():
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after closing (committing) the writer:
reader = IndexReader.open(dir, true);
// Reader must now see the committed optimize:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
}
/**
 * An index committed with no documents must open as an empty reader,
 * both after an initial CREATE session and after an APPEND session.
 */
public void testIndexNoDocuments() throws IOException {
  RAMDirectory dir = new RAMDirectory();

  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.commit();
  writer.close();
  assertReaderSeesNoDocs(dir);

  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.commit();
  writer.close();
  assertReaderSeesNoDocs(dir);
}

// Opens a read-only reader and verifies the index is completely empty.
private void assertReaderSeesNoDocs(Directory dir) throws IOException {
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
}
/**
 * Index 100 docs, each with six per-doc-unique field names, and verify
 * every (field, term) pair resolves to exactly one document.
 */
public void testManyFields() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));

  for (int docId = 0; docId < 100; docId++) {
    Document doc = new Document();
    // Fields a/b/c carry a per-doc term; d/e/f all share the term "aaa".
    doc.add(new Field("a" + docId, "aaa" + docId, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("b" + docId, "aaa" + docId, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("c" + docId, "aaa" + docId, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("d" + docId, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("e" + docId, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("f" + docId, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for (int docId = 0; docId < 100; docId++) {
    // Each field name is unique to its doc, so docFreq must be 1.
    assertEquals(1, reader.docFreq(new Term("a" + docId, "aaa" + docId)));
    assertEquals(1, reader.docFreq(new Term("b" + docId, "aaa" + docId)));
    assertEquals(1, reader.docFreq(new Term("c" + docId, "aaa" + docId)));
    assertEquals(1, reader.docFreq(new Term("d" + docId, "aaa")));
    assertEquals(1, reader.docFreq(new Term("e" + docId, "aaa")));
    assertEquals(1, reader.docFreq(new Term("f" + docId, "aaa")));
  }
  reader.close();
  dir.close();
}
/**
 * With a microscopic RAM buffer every added document should trigger a
 * flush, so the file count in the directory must grow after each add.
 */
public void testSmallRAMBuffer() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.000001));

  int fileCountBefore = dir.listAll().length;
  for (int docId = 0; docId < 9; docId++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + docId, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    // Verify that with a tiny RAM buffer we see a new segment
    // after every single doc:
    int fileCountAfter = dir.listAll().length;
    assertTrue(fileCountAfter > fileCountBefore);
    fileCountBefore = fileCountAfter;
  }
  writer.close();
  dir.close();
}
/**
 * Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
 * write session
 *
 * @deprecated after all the setters on IW go away (4.0), this test can be
 *             removed because changing ram buffer settings during a write
 *             session won't be possible.
 */
public void testChangingRAMBuffer() throws IOException {
RAMDirectory dir = new RAMDirectory();
// Start with doc-count flushing only (RAM-based flushing disabled):
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
int lastFlushCount = -1;
// The schedule below toggles the flush trigger between doc count and
// RAM size at j = 10/20/30/40 and checks the flush count reacts:
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10)
// No new files should be created
assertEquals(flushCount, lastFlushCount);
else if (10 == j) {
// Doc-count trigger fires at 10 docs; then switch to tiny RAM trigger:
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 20) {
// Tiny RAM buffer: every add must flush:
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Large RAM buffer: no flushes expected until re-enabled:
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Back to doc-count flushing (every 10 docs):
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/**
 * @deprecated after setters on IW go away, this test can be deleted because
 *             changing those settings on IW won't be possible.
 */
public void testChangingRAMBuffer2() throws IOException {
RAMDirectory dir = new RAMDirectory();
// Same schedule as testChangingRAMBuffer, but driven by buffered
// delete terms instead of added documents:
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
// Seed the index with 51 docs so there is something to delete:
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
int lastFlushCount = -1;
// Toggle the delete-flush trigger at j = 10/20/30/40 and verify the
// flush count reacts as expected in each phase:
for(int j=1;j<52;j++) {
writer.deleteDocuments(new Term("field", "aaa" + j));
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10) {
// No new files should be created
assertEquals(flushCount, lastFlushCount);
} else if (10 == j) {
// Trigger fires at 10 buffered delete terms; switch to flushing
// on every delete:
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 20) {
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Disable both triggers: no flushes expected:
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Back to flushing every 10 delete terms:
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/**
 * Stress the in-RAM posting structures with three very different doc
 * shapes while flushing on a small (0.5 MB) RAM buffer.
 * NOTE(review): statement order matters here — reordering would change
 * the sequence of values drawn from {@code rand}.
 */
public void testDiverseDocs() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
Random rand = newRandom();
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// 3 outer passes x 100 "aaa ..." docs = 300 hits for term "aaa":
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
/**
 * Verify that enabling norms on just one document of a field (all the
 * others omit norms) works both before and after a flush, and that
 * searching still finds every doc.
 */
public void testEnablingNorms() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
// Only doc 8 keeps norms; the rest omit them:
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
// Recreate the index; this time the norms-bearing doc (26) lands
// after flushes have already happened (maxBufferedDocs=10):
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
/**
 * One massive document containing 128K occurrences of the term "a";
 * verifies docFreq and the within-doc frequency survive the
 * tiny-RAM-buffer flushing path.
 */
public void testHighFreqTerm() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));

  // Massive doc that has 128 K a's: 4096 iterations x 4 chunks x 8 a's.
  StringBuilder content = new StringBuilder(1024*1024);
  for (int i = 0; i < 4096; i++) {
    for (int chunk = 0; chunk < 4; chunk++) {
      content.append(" a a a a a a a a");
    }
  }

  Document doc = new Document();
  doc.add(new Field("field", content.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.maxDoc());
  assertEquals(1, reader.numDocs());
  Term term = new Term("field", "a");
  assertEquals(1, reader.docFreq(term));
  TermDocs termDocs = reader.termDocs(term);
  termDocs.next();
  // The single doc must report all 128K occurrences:
  assertEquals(128*1024, termDocs.freq());
  reader.close();
  dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
  // Directory with the shared lockFactory disabled; all makeLock calls
  // go through its own private SingleInstanceLockFactory instead.
  final class MyRAMDirectory extends RAMDirectory {
    private LockFactory myLockFactory;
    MyRAMDirectory() {
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }

  Directory dir = new MyRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();

  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  // Fixed: the searcher was previously leaked, and writer.close() was
  // redundantly invoked a second time here.
  searcher.close();

  // Re-opening in CREATE mode must also work under the private locking:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  dir.close();
}
/**
 * Flushing with merging disallowed must leave one segment per flush:
 * 19 docs with maxBufferedDocs=2 plus a final flush gives 10 segments.
 */
public void testFlushWithNoMerging() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  for (int i = 0; i < 19; i++) {
    writer.addDocument(doc);
  }
  // Flush pending docs, with the first argument (triggerMerge) false —
  // NOTE(review): argument meanings taken from the call site; confirm
  // against IndexWriter.flush's signature in this Lucene version.
  writer.flush(false, true, true);
  writer.close();

  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now have 10 segments.
  // Fixed: this was a bare `assert`, a silent no-op unless the JVM runs
  // with -ea; use a real test assertion instead.
  assertEquals(10, sis.size());
  // Fixed: the directory was previously never closed.
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  // A completely empty document must still be accepted after the flush:
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  // Fixed: reader and dir were previously leaked.
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
public void testBackgroundOptimize() throws IOException {
Directory dir = new MockRAMDirectory();
// Pass 0: nothing added after optimize -> fully optimized index.
// Pass 1: two docs added after optimize -> exactly 2 segments remain.
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
// Huge merge factor so flushing alone never merges segments:
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off optimize without waiting; close() below waits for it:
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
// Optimized segment + the late 2-doc segment = 2 segments:
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
// Best-effort cleanup: delete every file directly inside dir, then the
// directory itself.  Deletion failures are ignored.
private void rmDir(File dir) {
  File[] entries = dir.listFiles();
  if (entries != null) {
    for (File entry : entries) {
      entry.delete();
    }
  }
  dir.delete();
}
/**
 * Test that no NullPointerException will be raised,
 * when adding one document with a single, empty field
 * and term vectors enabled.
 * @throws IOException
 */
public void testBadSegment() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));

  Document document = new Document();
  // Empty field value with term vectors enabled used to trigger an NPE:
  document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(document);

  writer.close();
  dir.close();
}
// LUCENE-1008: mixing fields with and without term vectors across
// segments must not corrupt the index.
public void testNoTermVectorAfterTermVector() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
// NOTE: `document` is reused, so this doc carries BOTH the earlier
// no-TV "x y z" field and the new with-TV "a b c" field:
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1010: merging a segment whose docs lack term vectors with one
// whose docs have them must not corrupt the index.
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
// Merge the with-TV and no-TV segments together:
iw.optimize();
// NOTE: `document` is reused, so this doc carries both the no-TV
// "x y z" field and the new with-TV "a b c" field:
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1036: indexing while the current thread runs at MAX_PRIORITY
// must not break flushing/merging.
public void testMaxThreadPriority() throws IOException {
  int savedPriority = Thread.currentThread().getPriority();
  try {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
    // Aggressive merging so the 4 adds below actually trigger merges:
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter writer = new IndexWriter(dir, conf);

    Document document = new Document();
    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
        Field.TermVector.YES));

    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    for (int i = 0; i < 4; i++) {
      writer.addDocument(document);
    }
    writer.close();
  } finally {
    // Always restore the priority this thread started with:
    Thread.currentThread().setPriority(savedPriority);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
  @Override
  synchronized public void merge(IndexWriter writer)
      throws CorruptIndexException, IOException {
    // Drain and run every pending merge on the calling thread:
    while (true) {
      MergePolicy.OneMerge merge = writer.getNextMerge();
      if (merge == null) {
        break;
      }
      for (int i = 0; i < merge.segments.size(); i++) {
        // Fixed: this was a bare `assert`, silently skipped unless the
        // JVM runs with -ea; use a real test assertion instead.
        assertTrue(merge.segments.info(i).docCount < 20);
      }
      writer.merge(merge);
    }
  }

  @Override
  public void close() {}
}
// LUCENE-1013: maxMergeDocs must be respected — MyMergeScheduler
// asserts that no merged segment ever reaches 20 docs while we add
// enough documents to force many merges.
public void testSetMaxMergeDocs() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
  lmp.setMaxMergeDocs(20);
  lmp.setMergeFactor(2);
  IndexWriter writer = new IndexWriter(dir, conf);

  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  for (int i = 0; i < 177; i++) {
    writer.addDocument(document);
  }
  writer.close();
  // Fixed: the directory was previously never closed.
  dir.close();
}
// LUCENE-1072: an exception thrown from the TokenStream mid-document
// must mark that doc deleted but leave the writer usable.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
// Analyzer whose stream throws IOException on the 6th token:
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
// expected: analysis of the 11-term doc throws on the 6th token
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// All three docs (including the aborted one) contribute to docFreq:
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
TermDocs tdocs = reader.termDocs(t);
int count = 0;
while(tdocs.next()) {
count++;
}
assertEquals(2, count);
// "gg" came after the exception point, so it was never indexed:
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// Failure hook that throws an IOException during a segment flush, but
// only after 30 matching stack probes, so several flushes succeed first.
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
// when false, eval() is a no-op
boolean doFail = false;
// number of times eval() has seen the flush call path
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Inspect the caller's stack: fire only when we are inside
// FreqProxTermsWriter.appendPostings under doFlush:
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
sawAppend = true;
if ("doFlush".equals(trace[i].getMethodName()))
sawFlush = true;
}
// Fail once (doFail is cleared) after the 30th matching probe:
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
// Arm the failure hook: one flush will throw mid-write.
FailOnlyOnFlush failure = new FailOnlyOnFlush();
failure.setDoFail();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
writer.addDocument(doc);
} catch (IOException ioe) {
// only one flush should fail:
assertFalse(hitError);
hitError = true;
}
}
assertTrue(hitError);
writer.close();
// The aborted flush loses exactly the 2 docs buffered for it;
// the other 198 docs must all be present:
IndexReader reader = IndexReader.open(dir, true);
assertEquals(198, reader.docFreq(new Term("content", "aa")));
reader.close();
}
// TokenFilter that throws an IOException on the 5th token of any field
// named "crash"; passes all other fields' tokens through untouched.
private class CrashingFilter extends TokenFilter {
String fieldName;
// tokens emitted so far for the current stream (reset() zeroes it)
int count;
public CrashingFilter(String fieldName, TokenStream input) {
super(input);
this.fieldName = fieldName;
}
@Override
public boolean incrementToken() throws IOException {
// throw once 4 tokens have already been produced
if (this.fieldName.equals("crash") && count++ >= 4)
throw new IOException("I'm experiencing problems");
return input.incrementToken();
}
@Override
public void reset() throws IOException {
super.reset();
count = 0;
}
}
/**
 * A document that throws mid-analysis (via CrashingFilter on its
 * "crash" field) must be marked deleted without corrupting the index.
 * Pass i=0 adds two extra good docs after the crash; pass i=1 does not.
 */
public void testDocumentsWriterExceptions() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// Reusing `doc`: the added "crash" field makes analysis throw.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected: CrashingFilter throws on the 5th "crash" token
}
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
writer.close();
// 2 good docs + crashed doc (+2 more good docs when i==0):
IndexReader reader = IndexReader.open(dir, true);
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// exactly the crashed doc must be marked deleted:
assertEquals(1, numDel);
// Add 17 more docs and optimize; optimize expunges the deletion:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
/**
 * Multi-threaded variant of testDocumentsWriterExceptions: NUM_THREAD
 * threads each run NUM_ITER iterations of (2 good adds, 1 crashing add,
 * and on i==0 two more good adds) against a shared writer; afterwards the
 * index must contain exactly the expected docs with one deletion per
 * crashed document.
 */
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
// Throws after 4 tokens, but only for the field named "crash"
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// This add must fail: the "crash" field trips CrashingFilter
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == finalI) {
// First pass: the writer must keep accepting docs after a failure
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
// Any exception other than the provoked one is a test failure
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
IndexReader reader = IndexReader.open(dir, true);
// Per iteration each thread adds 3 surviving docs (+2 more when i==0)
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// One deleted doc per crashed add: NUM_THREAD * NUM_ITER in total
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// optimize() expunges the deleted docs
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
/**
 * Indexes 20 rounds of documents whose field schema varies per round
 * (different field sets, store flags, and an all-empty round at i==7),
 * interleaved with single-doc deletes and periodic optimize() calls, to
 * ensure merging copes with heterogeneous segments.
 * NOTE(review): dir is never closed at the end of this test — looks like
 * an oversight relative to the sibling tests; confirm.
 */
public void testVariableSchema() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
lmp.setUseCompoundDocStore(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
// Alternate which fields are present and whether they are stored
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per round through a read/write reader
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
// Every 4th round, fully merge the mixed-schema segments
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
lmp2.setUseCompoundDocStore(false);
writer.optimize();
writer.close();
}
}
}
/**
 * Stresses close(false) (abort pending merges, don't wait) while another
 * thread is concurrently adding documents: the adder must terminate via
 * AlreadyClosedException/NPE, the index must stay readable, and the
 * writer must reopen cleanly in APPEND mode.
 */
public void testNoWaitClose() throws Throwable {
RAMDirectory directory = new MockRAMDirectory();
final Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<2;pass++) {
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
.setMaxBufferedDocs(2);
// NOTE(review): pass only takes values 0 and 1 (loop bound is pass<2),
// so this branch is unreachable — the loop bound or this condition
// looks off; confirm the intended number of passes.
if (pass == 2) {
conf.setMergeScheduler(new SerialMergeScheduler());
}
IndexWriter writer = new IndexWriter(directory, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
//System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
for(int iter=0;iter<10;iter++) {
//System.out.println("TEST: iter=" + iter);
for(int j=0;j<199;j++) {
idField.setValue(Integer.toString(iter*201+j));
writer.addDocument(doc);
}
int delID = iter*199;
for(int j=0;j<20;j++) {
writer.deleteDocuments(new Term("id", Integer.toString(delID)));
delID += 5;
}
// Force a bunch of merge threads to kick off so we
// stress out aborting them on close:
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
final IndexWriter finalWriter = writer;
final ArrayList<Throwable> failure = new ArrayList<Throwable>();
Thread t1 = new Thread() {
@Override
public void run() {
// Keep adding docs until the writer is closed under us;
// ACE/NPE are the expected ways to find that out.
boolean done = false;
while(!done) {
for(int i=0;i<100;i++) {
try {
finalWriter.addDocument(doc);
} catch (AlreadyClosedException e) {
done = true;
break;
} catch (NullPointerException e) {
done = true;
break;
} catch (Throwable e) {
// Anything else is a real failure; record it for the main thread
e.printStackTrace(System.out);
failure.add(e);
done = true;
break;
}
}
Thread.yield();
}
}
};
if (failure.size() > 0)
throw failure.get(0);
t1.start();
// Close without waiting for merges while t1 is still adding
writer.close(false);
t1.join();
// Make sure reader can read
IndexReader reader = IndexReader.open(directory, true);
reader.close();
// Reopen
writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
}
writer.close();
}
directory.close();
}
// Used by test cases below
/**
 * Worker thread that repeatedly updateDocument()s against a shared writer
 * for ~200ms. Simulated disk-full / on-purpose IOExceptions set diskFull
 * and are tolerated up to 5 times; when noErrors is true any other
 * exception is recorded in error (checked by the calling test).
 */
private class IndexerThread extends Thread {
boolean diskFull;
Throwable error;
AlreadyClosedException ace;
IndexWriter writer;
boolean noErrors;
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
// Run for roughly 200ms of wall-clock time
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
// Expected, injected failures: disk-full or deliberate mock failure
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
// Give up after 5 consecutive injected failures
if (fullCount++ >= 5)
break;
} else {
// Unexpected IOException: record it only if the test demanded no errors
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Wait until at least one doc has actually been indexed before closing
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Close while the indexer threads are still running
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
int count = 0;
while(tdocs.next()) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
// Cap the directory at its current size so the very next write fails
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Closing while still out of space must also fail, not hang
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// Tiny, slightly growing budget so disk-full hits at varying points
dir.setMaxSizeInBytes(4*1024+20*iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
  private boolean onlyOnce;

  public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /** Fails when abort() or flushDocument() appears on the current stack. */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      final String method = frame.getMethodName();
      if ("abort".equals(method) || "flushDocument".equals(method)) {
        if (onlyOnce) {
          doFail = false;  // disarm after the first injected failure
        }
        //System.out.println(Thread.currentThread().getName() + ": now fail");
        //new Throwable().printStackTrace(System.out);
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException
/**
 * Shared driver: indexes 6 docs cleanly, arms the given failure, verifies
 * that commit() then throws, disarms it, and checks the writer still
 * accepts documents and closes cleanly.
 */
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<6;i++)
writer.addDocument(doc);
// Arm the injected failure before forcing a flush via commit()
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
}
// With the failure disarmed, indexing must work again
failure.clearDoFail();
writer.addDocument(doc);
writer.close(false);
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
/**
 * Shared driver: starts NUM_THREADS IndexerThreads, arms the given
 * failure mid-run, and verifies no thread dies with an unexpected
 * Throwable; afterwards either close(false) succeeds or, if it throws,
 * the writer closes cleanly once the failure is disarmed. When close
 * succeeded, the index must be fully readable.
 */
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Let the threads index a little before arming the failure
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
// close() hit the injected failure; disarm and close for real
failure.clearDoFail();
writer.close(false);
}
if (success) {
// If close succeeded, every live doc must be fully readable
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130: make sure an initial IOException, and then a 2nd
// IOException during rollback(), is OK (the failure stays armed for
// every abort/flush):
public void testIOExceptionDuringAbort() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure an initial IOException, and then a 2nd
// IOException during rollback(), is OK (here the failure fires a single
// time and then clears itself):
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
}
// LUCENE-1130: make sure an initial IOException, and then a 2nd
// IOException during rollback(), with multiple indexing threads, is OK
// (failure stays armed for every abort/flush):
public void testIOExceptionDuringAbortWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure an initial IOException, and then a 2nd
// IOException during rollback(), with multiple indexing threads, is OK
// (here the failure fires once and then clears itself):
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
}
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
  private boolean onlyOnce;

  public FailOnlyInCloseDocStore(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /** Fails whenever closeDocStore() appears anywhere on the current stack. */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("closeDocStore".equals(frame.getMethodName())) {
        if (onlyOnce) {
          doFail = false;  // disarm after the first injected failure
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: test IOException in closeDocStore (failure stays armed
// for every closeDocStore call)
public void testIOExceptionDuringCloseDocStore() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore (failure fires a single
// time, then clears itself)
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
}
// LUCENE-1130: test IOException in closeDocStore, with multiple indexing
// threads (failure stays armed)
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore, with multiple indexing
// threads (failure fires once, then clears itself)
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
}
// Throws IOException during DocumentsWriter.writeSegment
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
  private boolean onlyOnce;

  public FailOnlyInWriteSegment(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /** Fails when DocFieldProcessor.flush() appears on the current stack. */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      final boolean inFlush = "flush".equals(frame.getMethodName())
          && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName());
      if (inFlush) {
        if (onlyOnce) {
          doFail = false;  // disarm after the first injected failure
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: test IOException in writeSegment (failure stays armed for
// every segment write)
public void testIOExceptionDuringWriteSegment() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment (failure fires a single
// time, then clears itself)
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1130: test IOException in writeSegment, with multiple indexing
// threads (failure stays armed)
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment, with multiple indexing
// threads (failure fires once, then clears itself)
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  // Build one field with 10,000 "a" tokens followed by a final "x";
  // with no field-length limit the trailing "x" must be indexed too.
  StringBuilder contents = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    contents.append(" a");
  }
  contents.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The last token survived indexing iff docFreq("x") == 1
  IndexReader reader = IndexReader.open(dir, true);
  Term t = new Term("field", "x");
  assertEquals(1, reader.docFreq(t));
  reader.close();
  dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
/**
 * Writes a valid index, then fabricates a corrupt next-generation
 * segments_N file (copy of the current one with its last byte altered,
 * which breaks the checksum) and verifies IndexReader.open falls back to
 * the previous, valid segments_N instead of failing.
 */
public void testSegmentsChecksumError() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
IndexInput in = dir.openInput(segmentsFileName);
// Create segments_(gen+1) as a corrupted copy: same bytes except the
// last one is incremented, invalidating the checksum
IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
out.copyBytes(in, in.length()-1);
byte b = in.readByte();
out.writeByte((byte) (1+b));
out.close();
in.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("segmentInfos failed to retry fallback to correct segments_N file");
}
reader.close();
}
// LUCENE-1044: test writer.commit() when ac=false
/**
 * Verifies commit() visibility semantics: docs added after the last
 * commit are invisible to readers (including reopen()) until the next
 * explicit commit(), and each commit exposes exactly the docs added so
 * far (23, then 40).
 */
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened before commit must not see the 23 uncommitted docs
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopen() picks up the newly committed docs; the old reader does not
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// reader2 still sees only the first commit
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// Throws IOException during MockRAMDirectory.sync
private static class FailOnlyInSync extends MockRAMDirectory.Failure {
  boolean didFail;

  /** Fails whenever MockRAMDirectory.sync() appears on the current stack. */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      final boolean inMockSync =
          "org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName())
              && "sync".equals(frame.getMethodName());
      if (doFail && inMockSync) {
        didFail = true;  // remember that we actually injected a failure
        throw new IOException("now failing on purpose during sync");
      }
    }
  }
}
// LUCENE-1044: test exception during sync
/**
 * Arms FailOnlyInSync so commits hit IOExceptions while the directory is
 * being sync'd, then verifies the failure actually fired and that, once
 * disarmed, the writer closes cleanly and all 23 docs are present.
 */
public void testExceptionDuringSync() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
failure.setDoFail();
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
for (int i = 0; i < 23; i++) {
addDoc(writer);
// Commit on alternating iterations to provoke syncs
if ((i-1)%2 == 0) {
try {
writer.commit();
} catch (IOException ioe) {
// expected
}
}
}
// Wait for background merges so any sync failures have happened
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(failure.didFail);
failure.clearDoFail();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
dir.close();
}
// LUCENE-1168
/**
 * Mixes docs without term vectors and a doc with term vectors in one
 * index (serial merges, doc-count merge policy), then verifies stored
 * fields and term vectors read back cleanly, including after
 * addIndexes() from a copy of the same directory.
 */
public void testTermVectorCorruption() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
// Two docs without term vectors...
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
// ...then one doc with term vectors, forcing mixed segments to merge
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<reader.numDocs();i++) {
reader.document(i);
reader.getTermFreqVectors(i);
}
reader.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Re-ingest a copy of this index via addIndexes and merge again
Directory[] indexDirs = {new MockRAMDirectory(dir)};
writer.addIndexes(indexDirs);
writer.optimize();
writer.close();
}
dir.close();
}
// LUCENE-1168
/**
 * Like testTermVectorCorruption, but asserts the per-document term
 * vector presence after optimize(): docs 0 and 1 (no vectors) must return
 * null, doc 2 (with vectors) must not — i.e. merging must not smear term
 * vectors across documents.
 */
public void testTermVectorCorruption2() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
// Two docs without term vectors...
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
// ...then one with term vectors
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// NOTE(review): the second iteration appends 3 more docs to the same
// dir; these asserts only inspect docs 0-2 — presumably intended.
assertTrue(reader.getTermFreqVectors(0)==null);
assertTrue(reader.getTermFreqVectors(1)==null);
assertTrue(reader.getTermFreqVectors(2)!=null);
reader.close();
}
dir.close();
}
// LUCENE-1168
/**
 * Writes 10 docs with term vectors using one writer, then 6 more with a
 * fresh writer, optimizes, and verifies term vectors and stored fields
 * for the first 10 docs survive the cross-session merge intact.
 */
public void testTermVectorCorruption3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<10;i++)
writer.addDocument(document);
writer.close();
// Second writer session appends more docs before the final merge
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
for(int i=0;i<6;i++)
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<10;i++) {
reader.getTermFreqVectors(i);
reader.document(i);
}
reader.close();
dir.close();
}
// LUCENE-1084: test user-specified field length
/**
 * With maxFieldLength explicitly set to 100,000 (more than the 10,001
 * tokens in the doc), the final "x" token must be indexed; docFreq("x")
 * proves it was not truncated away.
 */
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
Document doc = new Document();
// 10,000 "a" tokens followed by a single trailing "x"
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
/**
 * Indexes 10 docs, deletes 2 via an IndexReader, then verifies
 * expungeDeletes() rewrites the segments so maxDoc drops from 10 to 8
 * (deleted docs physically removed).
 */
public void testExpungeDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<10;i++)
writer.addDocument(document);
writer.close();
// Delete two docs through a writable reader
IndexReader ir = IndexReader.open(dir, false);
assertEquals(10, ir.maxDoc());
assertEquals(10, ir.numDocs());
ir.deleteDocument(0);
ir.deleteDocument(7);
assertEquals(8, ir.numDocs());
ir.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Before expunge: deletes are logical only, so maxDoc still counts them
assertEquals(8, writer.numDocs());
assertEquals(10, writer.maxDoc());
writer.expungeDeletes();
assertEquals(8, writer.numDocs());
writer.close();
// After expunge: the deleted docs are physically gone
ir = IndexReader.open(dir, true);
assertEquals(8, ir.maxDoc());
assertEquals(8, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
/**
 * Indexes 98 docs across many small segments (mergeFactor 50 while
 * writing), deletes every other doc, then runs expungeDeletes() with
 * mergeFactor 3 so multiple adjacent merges are needed; afterwards
 * maxDoc must equal numDocs (49).
 */
public void testExpungeDeletes2() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
writer.addDocument(document);
writer.close();
// Delete every even-numbered doc
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
ir.deleteDocument(i);
assertEquals(49, ir.numDocs());
ir.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()));
// Small merge factor forces several adjacent merges during expunge
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
assertEquals(49, writer.numDocs());
writer.expungeDeletes();
writer.close();
ir = IndexReader.open(dir, true);
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
/**
 * Same setup as testExpungeDeletes2 but calls expungeDeletes(false)
 * (don't wait for merges); after close() the deletes must still be fully
 * expunged (maxDoc == numDocs == 49).
 */
public void testExpungeDeletes3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
writer.addDocument(document);
writer.close();
// Delete every even-numbered doc
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
ir.deleteDocument(i);
assertEquals(49, ir.numDocs());
ir.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Force many merges to happen
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
writer.expungeDeletes(false);
writer.close();
ir = IndexReader.open(dir, true);
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1179: indexing a field whose name is the empty string must not throw.
public void testEmptyFieldName() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // Release the directory — was leaked before; every other test here closes it.
  dir.close();
}
// LUCENE-1198
private static final class MockIndexWriter extends IndexWriter {
  /** When true, throws from the DocumentsWriter ThreadState init test point. */
  boolean doFail;

  public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    if (doFail && "DocumentsWriter.ThreadState.init start".equals(name)) {
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
public void testExceptionDocumentsWriterInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter writer = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
                    Field.Index.ANALYZED));
  writer.addDocument(doc);
  // Arm the failure point: the next addDocument must throw.
  writer.doFail = true;
  try {
    writer.addDocument(doc);
    fail("did not hit exception");
  } catch (RuntimeException re) {
    // expected
  }
  writer.close();
  // The index must still be consistent after the aborted add.
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter writer = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document goodDoc = new Document();
  goodDoc.add(new Field("field", "a field", Field.Store.YES,
                        Field.Index.ANALYZED));
  writer.addDocument(goodDoc);
  // Analyzer whose token stream blows up mid-stream (see CrashingFilter).
  Analyzer analyzer = new Analyzer() {
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    }
  };
  Document crashDoc = new Document();
  crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
                         Field.Index.ANALYZED));
  try {
    writer.addDocument(crashDoc, analyzer);
    fail("did not hit expected exception");
  } catch (IOException ioe) {
    // expected
  }
  // The writer must still accept documents after the aborted one.
  writer.addDocument(goodDoc);
  writer.close();
  dir.close();
}
private static final class MockIndexWriter2 extends IndexWriter {
  /** When set, testPoint throws at the start of merge initialization. */
  boolean doFail;
  /** Records that the failure point actually fired. */
  boolean failed;

  public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    if (doFail && "startMergeInit".equals(name)) {
      failed = true;
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1210
public void testExceptionOnMergeInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
  ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
  MockIndexWriter2 writer = new MockIndexWriter2(dir, conf);
  writer.doFail = true;
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
                    Field.Index.ANALYZED));
  // Keep adding until a merge is attempted and the armed test point throws.
  for (int i = 0; i < 10; i++) {
    try {
      writer.addDocument(doc);
    } catch (RuntimeException re) {
      break;
    }
  }
  // Wait for background merges so the failure flag is settled.
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  assertTrue(writer.failed);
  writer.close();
  dir.close();
}
private static final class MockIndexWriter3 extends IndexWriter {
  /** Set once doAfterFlush has run. */
  boolean afterWasCalled;
  /** Set once doBeforeFlush has run. */
  boolean beforeWasCalled;

  public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }

  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }
}
// LUCENE-1222
public void testDoBeforeAfterFlush() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter3 writer = new MockIndexWriter3(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
                    Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.commit();
  // Both hooks must fire for the flush triggered by the added document.
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);
  writer.beforeWasCalled = false;
  writer.afterWasCalled = false;
  writer.deleteDocuments(new Term("field", "field"));
  writer.commit();
  // ...and again for a flush carrying only deletes.
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
  dir.close();
}
private static class FailOnlyInCommit extends MockRAMDirectory.Failure {
  boolean fail1, fail2;

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    // Inspect the current stack to decide where we are.
    boolean inCommit = false;
    boolean inDelete = false;
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("org.apache.lucene.index.SegmentInfos".equals(frame.getClassName()) && "prepareCommit".equals(frame.getMethodName())) {
        inCommit = true;
      }
      if ("org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName()) && "deleteFile".equals(frame.getMethodName())) {
        inDelete = true;
      }
    }
    if (!inCommit) {
      return;
    }
    if (inDelete) {
      // Second failure: while the commit cleans up via deleteFile.
      fail2 = true;
      throw new IOException("now fail during delete");
    }
    // First failure: directly inside prepareCommit.
    fail1 = true;
    throw new RuntimeException("now fail first");
  }
}
// LUCENE-1214
public void testExceptionsDuringCommit() throws Throwable {
  MockRAMDirectory dir = new MockRAMDirectory();
  FailOnlyInCommit failure = new FailOnlyInCommit();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
                    Field.Index.ANALYZED));
  writer.addDocument(doc);
  // Arm the failure only after the document is buffered, so the commit
  // performed by close() is what trips it.
  dir.failOn(failure);
  try {
    writer.close();
    fail();
  } catch (RuntimeException re) {
    // Expected
  } catch (IOException ioe) {
    fail("expected only RuntimeException");
  }
  // Both failure points must have fired.
  assertTrue(failure.fail1 && failure.fail2);
  writer.rollback();
  dir.close();
}
// Pairs of (input, expected) strings for testInvalidUTF16: even entries
// contain malformed UTF-16 (unpaired or reversed surrogates) and the
// following odd entry shows the same string with each invalid unit
// replaced by U+FFFD (the Unicode replacement character).
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates (low before high; the inner high+low pair in the
// longer cases is a valid pair and is preserved)
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
public void testInvalidUTF16() throws Throwable {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  // utf8Data holds (input, expected) pairs: index each even slot in its own
  // field, then verify both the indexed term and the stored value match the
  // corresponding odd slot.
  final int count = utf8Data.length / 2;
  for (int i = 0; i < count; i++) {
    doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
  }
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  Document stored = reader.document(0);
  for (int i = 0; i < count; i++) {
    assertEquals("field " + i + " was not indexed correctly", 1, reader.docFreq(new Term("f"+i, utf8Data[2*i+1])));
    assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], stored.getField("f"+i).stringValue());
  }
  reader.close();
  dir.close();
}
// LUCENE-510
// Round-trips code points U+0000..U+10FFFE (the loop bound excludes
// U+10FFFF; surrogates U+D800..U+DFFF are skipped) through UnicodeUtil's
// UTF16toUTF8 and UTF8toUTF16 and checks both directions against the JDK's
// own UTF-8 codec.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Supplementary code point: encode as a high/low surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
// UnicodeUtil's UTF-8 must decode to the same string the JDK produces.
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte comparison against the JDK's UTF-8 encoding.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Shared randomness source for the unicode fuzz tests below; assigned from
// newRandom() at the start of each test that uses it.
Random r;
// Uniform random int in [0, lim).
private int nextInt(int lim) {
return r.nextInt(lim);
}
// Uniform random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 text and writes into
// expected[] the sequence a correct decoder should produce after replacing
// invalid units with U+FFFD. Returns true if an illegal (unpaired) surrogate
// was generated. Note: offset may be decremented by one to avoid splitting a
// surrogate pair left over from a previous fill.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects what kind of char to emit at position i.
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte range below the surrogates
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte range above the surrogates
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (r.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The lone surrogate must decode to the replacement char.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Fuzzes UTF16toUTF8/UTF8toUTF16 with random 20-char strings (which may
// include deliberately-illegal unpaired surrogates from fillUnicode) and
// checks the round trip against expected[] and, for legal input, against
// the JDK encoder.
public void testRandomUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
for(int iter=0;iter<100000*_TestUtil.getRandomMultiplier();iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: encoding must match the JDK byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Decoding must reproduce expected[] (illegal units become U+FFFD there).
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Exercises UTF8toUTF16's incremental decoding: each iteration keeps a
// random prefix of the previous string, re-encodes, finds the first byte
// that changed, and decodes only from that byte onward into the same
// UTF16Result. The incremental result must equal a full decode (utf16a).
public void testIncrementalUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
for(int iter=0;iter<100000*_TestUtil.getRandomMultiplier();iter++) {
final int prefix;
// After an illegal string (or on the first pass) start from scratch.
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: encoding must match the JDK byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte position where this encoding differs from the last.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode: only the suffix starting at bytePrefix is re-decoded.
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// Cross-check against a full, non-incremental decode.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
// The first token's position increment of 0 exercises the negative-position
// case from LUCENE-1255; the index must still be searchable and checkIndex
// must pass.
public void testNegativePositions() throws Throwable {
// Hand-rolled stream emitting "a","b","c" with posIncr 0 on the first token.
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
// The exact phrase "a b c" must match the document.
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// A span query on the first term must also work.
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// The stored position for "a" must have been clamped to 0, not -1.
TermPositions tps = s.getIndexReader().termPositions(new Term("field", "a"));
assertTrue(tps.next());
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
assertTrue(_TestUtil.checkIndex(dir));
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
  writer.commit();
  for (int i = 0; i < 23; i++) {
    addDoc(writer);
  }
  IndexReader r1 = IndexReader.open(dir, true);
  assertEquals(0, r1.numDocs());
  // prepareCommit alone must not make the new docs visible...
  writer.prepareCommit();
  IndexReader r2 = IndexReader.open(dir, true);
  assertEquals(0, r2.numDocs());
  // ...only the commit() that completes it does.
  writer.commit();
  IndexReader r3 = r1.reopen();
  assertEquals(0, r1.numDocs());
  assertEquals(0, r2.numDocs());
  assertEquals(23, r3.numDocs());
  r1.close();
  r2.close();
  for (int i = 0; i < 17; i++) {
    addDoc(writer);
  }
  // An open reader is unaffected by uncommitted adds.
  assertEquals(23, r3.numDocs());
  r3.close();
  IndexReader r4 = IndexReader.open(dir, true);
  assertEquals(23, r4.numDocs());
  r4.close();
  writer.prepareCommit();
  IndexReader r5 = IndexReader.open(dir, true);
  assertEquals(23, r5.numDocs());
  r5.close();
  writer.commit();
  IndexReader r6 = IndexReader.open(dir, true);
  assertEquals(40, r6.numDocs());
  r6.close();
  writer.close();
  dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommitRollback() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
  writer.commit();
  for (int i = 0; i < 23; i++) {
    addDoc(writer);
  }
  IndexReader r1 = IndexReader.open(dir, true);
  assertEquals(0, r1.numDocs());
  writer.prepareCommit();
  IndexReader r2 = IndexReader.open(dir, true);
  assertEquals(0, r2.numDocs());
  // Rolling back a prepared commit must discard all of the pending docs.
  writer.rollback();
  IndexReader r3 = r1.reopen();
  assertEquals(0, r1.numDocs());
  assertEquals(0, r2.numDocs());
  assertEquals(0, r3.numDocs());
  r1.close();
  r2.close();
  // A fresh writer on the rolled-back index starts from the empty commit.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 17; i++) {
    addDoc(writer);
  }
  assertEquals(0, r3.numDocs());
  r3.close();
  IndexReader r4 = IndexReader.open(dir, true);
  assertEquals(0, r4.numDocs());
  r4.close();
  writer.prepareCommit();
  IndexReader r5 = IndexReader.open(dir, true);
  assertEquals(0, r5.numDocs());
  r5.close();
  writer.commit();
  IndexReader r6 = IndexReader.open(dir, true);
  assertEquals(17, r6.numDocs());
  r6.close();
  writer.close();
  dir.close();
}
// LUCENE-1274
public void testPrepareCommitNoChanges() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  // A prepare/commit pair with no pending changes must still leave a
  // readable, empty index behind.
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.prepareCommit();
  writer.commit();
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.numDocs());
  reader.close();
  dir.close();
}
// Harness for the concurrent addIndexes tests: builds a source index (dir)
// with NUM_INIT_DOCS docs, opens writer2 on a destination index (dir2), and
// runs NUM_THREADS worker threads that each repeatedly invoke doBody() with
// private copies of the source. Subclasses define the per-iteration work
// (doBody) and how worker exceptions are classified (handle).
private abstract static class RunAddIndexesThreads {
// Source and destination directories.
Directory dir, dir2;
final static int NUM_INIT_DOCS = 17;
// Writer on the destination index, shared by all worker threads.
IndexWriter writer2;
// Throwables the subclass decided were real failures (guarded by itself).
final List<Throwable> failures = new ArrayList<Throwable>();
// Set (before closing writer2) so handle() can ignore expected exceptions.
volatile boolean didClose;
final IndexReader[] readers;
final int NUM_COPY;
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
public RunAddIndexesThreads(int numCopy) throws Throwable {
NUM_COPY = numCopy;
// Build the source index.
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
// Open the destination writer with an initial empty commit.
dir2 = new MockRAMDirectory();
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.commit();
// Readers over the source, for the addIndexes(IndexReader...) path.
readers = new IndexReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++)
readers[i] = IndexReader.open(dir, true);
}
// Starts NUM_THREADS workers; numIter <= 0 means run until doBody throws.
void launchThreads(final int numIter) {
for(int i=0;i<NUM_THREADS;i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
// Each thread works on its own copies of the source directory.
final Directory[] dirs = new Directory[NUM_COPY];
for(int k=0;k<NUM_COPY;k++)
dirs[k] = new MockRAMDirectory(dir);
int j=0;
while(true) {
// System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
if (numIter > 0 && j == numIter)
break;
doBody(j++, dirs);
}
} catch (Throwable t) {
handle(t);
}
}
};
}
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
}
void joinThreads() throws Exception {
for(int i=0;i<NUM_THREADS;i++)
threads[i].join();
}
// Closes writer2 (possibly while workers are still running).
void close(boolean doWait) throws Throwable {
didClose = true;
writer2.close(doWait);
}
void closeDir() throws Throwable {
for(int i=0;i<NUM_COPY;i++)
readers[i].close();
dir2.close();
}
// One unit of work per iteration, run concurrently on all threads.
abstract void doBody(int j, Directory[] dirs) throws Throwable;
// Classify/record a throwable escaping a worker thread.
abstract void handle(Throwable t);
}
private class CommitAndAddIndexes extends RunAddIndexesThreads {
  public CommitAndAddIndexes(int numCopy) throws Throwable {
    super(numCopy);
  }

  @Override
  void handle(Throwable t) {
    // Any throwable here is a genuine failure; record it for the test body.
    t.printStackTrace(System.out);
    synchronized (failures) {
      failures.add(t);
    }
  }

  @Override
  void doBody(int j, Directory[] dirs) throws Throwable {
    final int op = j % 5;
    switch (op) {
      case 0:
        // addIndexes followed by a full optimize
        writer2.addIndexes(dirs);
        writer2.optimize();
        break;
      case 1:
        writer2.addIndexes(dirs);
        break;
      case 2:
        // the IndexReader-based variant of addIndexes
        writer2.addIndexes(readers);
        break;
      case 3:
        writer2.addIndexes(dirs);
        writer2.maybeMerge();
        break;
      case 4:
        writer2.commit();
    }
  }
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
public void testAddIndexesWithThreads() throws Throwable {
  final int NUM_ITER = 15;
  final int NUM_COPY = 3;
  CommitAndAddIndexes task = new CommitAndAddIndexes(NUM_COPY);
  task.launchThreads(NUM_ITER);
  // Keep the main thread busy adding docs while the workers run.
  for (int i = 0; i < 100; i++) {
    addDoc(task.writer2);
  }
  task.joinThreads();
  // 4 of every 5 iterations add NUM_COPY copies of the NUM_INIT_DOCS source.
  int expectedNumDocs = 100 + NUM_COPY * (4 * NUM_ITER / 5) * RunAddIndexesThreads.NUM_THREADS * RunAddIndexesThreads.NUM_INIT_DOCS;
  assertEquals(expectedNumDocs, task.writer2.numDocs());
  task.close(true);
  assertTrue(task.failures.size() == 0);
  _TestUtil.checkIndex(task.dir2);
  IndexReader reader = IndexReader.open(task.dir2, true);
  assertEquals(expectedNumDocs, reader.numDocs());
  reader.close();
  task.closeDir();
}
private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
  public CommitAndAddIndexes2(int numCopy) throws Throwable {
    super(numCopy);
  }

  @Override
  void handle(Throwable t) {
    // These are expected when the writer is closed underneath a worker.
    if (t instanceof AlreadyClosedException || t instanceof NullPointerException) {
      return;
    }
    t.printStackTrace(System.out);
    synchronized (failures) {
      failures.add(t);
    }
  }
}
// LUCENE-1335: test simultaneous addIndexes & close
public void testAddIndexesWithClose() throws Throwable {
  final int NUM_COPY = 3;
  CommitAndAddIndexes2 task = new CommitAndAddIndexes2(NUM_COPY);
  task.launchThreads(-1);
  // Close w/o first stopping/joining the threads
  task.close(true);
  task.joinThreads();
  _TestUtil.checkIndex(task.dir2);
  task.closeDir();
  assertTrue(task.failures.size() == 0);
}
// Variant used by the close/rollback tests: doBody mixes addIndexes with
// optimize/commit, and handle() ignores the exceptions expected once the
// writer has been closed or rolled back underneath the workers.
private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
public CommitAndAddIndexes3(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.optimize();
// NOTE(review): no break here, so case 3 falls through and also commits.
// The sibling CommitAndAddIndexes.doBody does break after case 3 —
// confirm this fall-through is intentional.
case 4:
writer2.commit();
}
}
@Override
void handle(Throwable t) {
// Only report a throwable as a real failure when it happened before
// didClose was set; afterwards these exception types are expected.
boolean report = true;
if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
report = !didClose;
} else if (t instanceof IOException) {
// An aborted merge may also surface wrapped in an IOException.
Throwable t2 = t.getCause();
if (t2 instanceof MergePolicy.MergeAbortedException) {
report = !didClose;
}
}
if (report) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
}
// LUCENE-1335: test simultaneous addIndexes & close
public void testAddIndexesWithCloseNoWait() throws Throwable {
  final int NUM_COPY = 50;
  CommitAndAddIndexes3 task = new CommitAndAddIndexes3(NUM_COPY);
  task.launchThreads(-1);
  // Let the workers make some progress first.
  Thread.sleep(500);
  // Close w/o first stopping/joining the threads
  task.close(false);
  task.joinThreads();
  _TestUtil.checkIndex(task.dir2);
  task.closeDir();
  assertTrue(task.failures.size() == 0);
}
// LUCENE-1335: test simultaneous addIndexes & close
public void testAddIndexesWithRollback() throws Throwable {
  final int NUM_COPY = 50;
  CommitAndAddIndexes3 task = new CommitAndAddIndexes3(NUM_COPY);
  task.launchThreads(-1);
  // Let the workers make some progress first.
  Thread.sleep(500);
  // Close w/o first stopping/joining the threads
  task.didClose = true;
  task.writer2.rollback();
  task.joinThreads();
  _TestUtil.checkIndex(task.dir2);
  task.closeDir();
  assertTrue(task.failures.size() == 0);
}
// LUCENE-1347
private static final class MockIndexWriter4 extends IndexWriter {
  /** When true, throws at the "rollback before checkpoint" test point. */
  boolean doFail;

  public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    if (doFail && "rollback before checkpoint".equals(name)) {
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1347: a rollback that throws must not wedge the writer — a second
// rollback() must still be able to complete.
public void testRollbackExceptionHang() throws Throwable {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter4 w = new MockIndexWriter4(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(w);
  w.doFail = true;
  try {
    w.rollback();
    fail("did not hit intentional RuntimeException");
  } catch (RuntimeException re) {
    // expected
  }
  // Disarm the failure point; rollback must now succeed instead of hanging.
  w.doFail = false;
  w.rollback();
  // Release the directory — was leaked before; sibling tests all close theirs.
  dir.close();
}
// LUCENE-1219: a binary field stored with an (offset, length) slice must
// round-trip as exactly that slice.
public void testBinaryFieldOffsetLength() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for (int i = 0; i < 50; i++) {
    b[i] = (byte) (i + 77);
  }
  Document doc = new Document();
  // Store bytes [10, 10+17) of b as the binary value.
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  // Before indexing, the field still references the full backing array.
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUG FIX: was assertEquals(17, b.length, 17), which resolves to the
  // double-overload with delta=17 and would pass for any length in [0, 34].
  // Compare the length exactly.
  assertEquals(17, b.length);
  // First byte of the slice is b[10] of the original array: 10 + 77 = 87.
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
public void testCommitUserData() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  for (int j = 0; j < 17; j++) {
    addDoc(writer);
  }
  writer.close();
  assertEquals(0, IndexReader.getCommitUserData(dir).size());
  IndexReader reader = IndexReader.open(dir, true);
  // commit(Map) never called for this index
  assertEquals(0, reader.getCommitUserData().size());
  reader.close();
  // Now commit with user data attached.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  for (int j = 0; j < 17; j++) {
    addDoc(writer);
  }
  Map<String,String> data = new HashMap<String,String>();
  data.put("label", "test1");
  writer.commit(data);
  writer.close();
  assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
  reader = IndexReader.open(dir, true);
  assertEquals("test1", reader.getCommitUserData().get("label"));
  reader.close();
  // An optimize-only session must carry the previous user data forward.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.optimize();
  writer.close();
  assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
  dir.close();
}
public void testOptimizeExceptions() throws IOException {
  // Build a many-segment source index once.
  RAMDirectory startDir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
  ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
  IndexWriter writer = new IndexWriter(startDir, conf);
  for (int i = 0; i < 27; i++) {
    addDoc(writer);
  }
  writer.close();
  // Repeatedly optimize a fresh copy under random IO failures; any
  // IOException surfaced by optimize() must carry a root cause.
  for (int iter = 0; iter < 200; iter++) {
    MockRAMDirectory dir = new MockRAMDirectory(startDir);
    conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
    ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
    writer = new IndexWriter(dir, conf);
    dir.setRandomIOExceptionRate(0.5, 100);
    try {
      writer.optimize();
    } catch (IOException ioe) {
      if (ioe.getCause() == null) {
        fail("optimize threw IOException without root cause");
      }
    }
    writer.close();
    dir.close();
  }
}
// LUCENE-1429
// An OutOfMemoryError thrown while close() is flushing must not wedge the
// writer: the first close() propagates the OOME and a second close() must
// then succeed (it threw IllegalStateException before the fix).
public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
final List<Throwable> thrown = new ArrayList<Throwable>();
final IndexWriter writer = new IndexWriter(new MockRAMDirectory(),
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())) {
@Override
public void message(final String message) {
// Throw a fake OOME exactly once, at the "now flush at close" message;
// the thrown list doubles as the fired-once flag.
if (message.startsWith("now flush at close") && 0 == thrown.size()) {
thrown.add(null);
throw new OutOfMemoryError("fake OOME at " + message);
}
}
};
// need to set an info stream so message is called
writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
try {
writer.close();
fail("OutOfMemoryError expected");
}
catch (final OutOfMemoryError expected) {}
// throws IllegalStateEx w/o bug fix
writer.close();
}
// LUCENE-1442
public void testDoubleOffsetCounting() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  Field f2 = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  // Field instance order within the doc: abcd, abcd, "", abcd.
  doc.add(f);
  doc.add(f);
  doc.add(f2);
  doc.add(f);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  // Token "" occurred once
  assertEquals(1, offsets.length);
  assertEquals(8, offsets[0].getStartOffset());
  assertEquals(8, offsets[0].getEndOffset());
  // Token "abcd" occurred three times
  offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(1);
  assertEquals(3, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(4, offsets[1].getStartOffset());
  assertEquals(8, offsets[1].getEndOffset());
  assertEquals(8, offsets[2].getStartOffset());
  assertEquals(12, offsets[2].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1442
public void testDoubleOffsetCounting2() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  // The same analyzed field instance added twice.
  doc.add(f);
  doc.add(f);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(5, offsets[1].getStartOffset());
  assertEquals(9, offsets[1].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
public void testEndOffsetPositionCharAnalyzer() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  // Note the trailing space in the field value.
  Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  doc.add(f);
  doc.add(f);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(8, offsets[1].getStartOffset());
  assertEquals(12, offsets[1].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  Analyzer analyzer = new MockAnalyzer();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
  Document doc = new Document();
  // Wrap the analyzer output in a CachingTokenFilter and add the same
  // field (same stream instance) twice.
  TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
  Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
  doc.add(f);
  doc.add(f);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(8, offsets[1].getStartOffset());
  assertEquals(12, offsets[1].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
public void testEndOffsetPositionStopFilter() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  // Analyzer with English stopword filtering: "the" is dropped but must
  // still advance the offsets of what follows.
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
  Document doc = new Document();
  Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  doc.add(f);
  doc.add(f);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(9, offsets[1].getStartOffset());
  assertEquals(13, offsets[1].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
public void testEndOffsetPositionStandard() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  // Two field instances; offsets in the second must continue after the
  // first (which ends with "the " — note the trailing space).
  Field f = new Field("field", "abcd the ", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  Field f2 = new Field("field", "crunch man", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  doc.add(f);
  doc.add(f2);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermPositionVector tpv = ((TermPositionVector) reader.getTermFreqVector(0, "field"));
  TermVectorOffsetInfo[] offsets = tpv.getOffsets(0);
  assertEquals(1, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  offsets = tpv.getOffsets(1);
  assertEquals(11, offsets[0].getStartOffset());
  assertEquals(17, offsets[0].getEndOffset());
  offsets = tpv.getOffsets(2);
  assertEquals(18, offsets[0].getStartOffset());
  assertEquals(21, offsets[0].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
public void testEndOffsetPositionStandardEmptyField() throws Exception {
  // The first field instance is empty; the second instance's term offsets
  // must still start at 0 (no phantom gap from the empty value).
  MockRAMDirectory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  Field empty = new Field("field", "", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  Field content = new Field("field", "crunch man", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(empty);
  document.add(content);
  writer.addDocument(document);
  writer.close();
  IndexReader reader = IndexReader.open(directory, true);
  TermPositionVector vector = ((TermPositionVector) reader.getTermFreqVector(0, "field"));
  TermVectorOffsetInfo[] offsets = vector.getOffsets(0);
  assertEquals(1, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(6, offsets[0].getEndOffset());
  offsets = vector.getOffsets(1);
  assertEquals(7, offsets[0].getStartOffset());
  assertEquals(10, offsets[0].getEndOffset());
  reader.close();
  directory.close();
}
// LUCENE-1448
public void testEndOffsetPositionStandardEmptyField2() throws Exception {
  // An empty field instance sits between two non-empty ones; offsets of the
  // trailing instance must account only for the non-empty text before it.
  MockRAMDirectory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  Field leading = new Field("field", "abcd", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(leading);
  document.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  Field trailing = new Field("field", "crunch", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(trailing);
  writer.addDocument(document);
  writer.close();
  IndexReader reader = IndexReader.open(directory, true);
  TermPositionVector vector = ((TermPositionVector) reader.getTermFreqVector(0, "field"));
  TermVectorOffsetInfo[] offsets = vector.getOffsets(0);
  assertEquals(1, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  offsets = vector.getOffsets(1);
  assertEquals(5, offsets[0].getStartOffset());
  assertEquals(11, offsets[0].getEndOffset());
  reader.close();
  directory.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
  File indexDir = new File(TEMP_DIR, "otherfiles");
  Directory dir = FSDirectory.open(indexDir);
  try {
    // Create my own random file:
    IndexOutput randomFile = dir.createOutput("myrandomfile");
    randomFile.writeByte((byte) 42);
    randomFile.close();
    // Open-for-create over the same directory, then close immediately.
    new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    // The foreign file must survive index creation.
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
    _TestUtil.rmDir(indexDir);
  }
}
public void testDeadlock() throws Exception {
  // Build a 2-segment index in "dir", a 1-doc index in "dir2", clone a reader
  // over dir2, and addIndexes both reader copies into the first writer. The
  // result must contain 3 + 2 = 5 documents and must not deadlock.
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.commit();
  // index has 2 segments
  MockRAMDirectory dir2 = new MockRAMDirectory();
  IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer2.addDocument(doc);
  writer2.close();
  IndexReader reader = IndexReader.open(dir2, true);
  IndexReader readerClone = (IndexReader) reader.clone();
  writer.addIndexes(new IndexReader[] {reader, readerClone});
  writer.close();
  IndexReader merged = IndexReader.open(dir, true);
  assertEquals(5, merged.numDocs());
  merged.close();
  reader.close();
  readerClone.close();
  dir2.close();
  dir.close();
}
// Background indexer used by testThreadInterruptDeadlock: repeatedly builds
// an index while the main thread interrupts it. IndexWriter must translate
// interrupts into ThreadInterruptedException and leave the index consistent.
private class IndexerThreadInterrupt extends Thread {
// Set when any unexpected exception or post-run consistency check fails.
volatile boolean failed;
// Set by the main thread to ask this thread to stop.
volatile boolean finish;
// Handshake flag: main thread only interrupts while this is true.
boolean allowInterrupt = false;
@Override
public void run() {
RAMDirectory dir = new RAMDirectory();
IndexWriter w = null;
// true only until the first full, uninterrupted pass completes (see below).
boolean first = true;
while(!finish) {
try {
while(true) {
// Close any writer left over from a previous interrupted pass.
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
//((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
if (!first && !allowInterrupt) {
// tell main thread it can interrupt us any time,
// starting now
allowInterrupt = true;
}
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<100;i++) {
w.addDocument(doc);
w.commit();
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
if (first && !allowInterrupt) {
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
first = false;
}
}
} catch (ThreadInterruptedException re) {
// Expected path: an interrupt arrived mid-indexing.
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
// Make sure IW cleared the interrupted bit
// TODO: remove that false once test is fixed for real
if (false && interrupted()) {
System.out.println("FAILED; InterruptedException hit but thread.interrupted() was true");
e.printStackTrace(System.out);
failed = true;
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
// After all interrupts, the index must still check out clean and be openable.
if (!failed) {
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
}
}
public void testThreadInterruptDeadlock() throws Exception {
  // Fire 100 interrupts at the indexer thread, each only after the thread
  // signals (via allowInterrupt) that it is safe to interrupt.
  IndexerThreadInterrupt t = new IndexerThreadInterrupt();
  t.setDaemon(true);
  t.start();
  // issue 100 interrupts to child thread
  for (int issued = 0; issued < 100; ) {
    Thread.sleep(1);
    if (t.allowInterrupt) {
      issued++;
      t.allowInterrupt = false;
      t.interrupt();
    }
    if (!t.isAlive()) {
      // Thread died early (it records failure itself); stop interrupting.
      break;
    }
  }
  // Ask the thread to finish, wake it one last time, and wait for it.
  t.allowInterrupt = false;
  t.finish = true;
  t.interrupt();
  t.join();
  assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
  // Mixes a binary-stored field that also carries a custom TokenStream with a
  // plain stored/analyzed field, across an in-memory merge and a forced
  // segment merge, then verifies both the stored bytes and the indexed terms.
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // Stores bytes b[10..26] (offset 10, length 17) while indexing the token stream.
  Field f = new Field("binary", b, 10, 17);
  f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
  Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
  f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
  doc.add(f);
  doc.add(f2);
  w.addDocument(doc);
  // add 2 docs to test in-memory merging
  f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  // force segment flush so we can force a segment merge with doc3 later.
  w.commit();
  f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  w.commit();
  w.optimize(); // force segment merge.
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertNotNull(b);
  // BUGFIX: was assertEquals(17, b.length, 17), which resolved to the
  // (double, double, double) overload — a delta comparison accepting any
  // length from 0 to 34. Compare the stored slice length exactly.
  assertEquals(17, b.length);
  // First stored byte is original b[10] = 10 + 77 = 87.
  assertEquals(87, b[0]);
  assertTrue(ir.document(0).getFieldable("binary").isBinary());
  assertTrue(ir.document(1).getFieldable("binary").isBinary());
  assertTrue(ir.document(2).getFieldable("binary").isBinary());
  assertEquals("value", ir.document(0).get("string"));
  assertEquals("value", ir.document(1).get("string"));
  assertEquals("value", ir.document(2).get("string"));
  // test that the terms were indexed.
  assertTrue(ir.termDocs(new Term("binary","doc1field1")).next());
  assertTrue(ir.termDocs(new Term("binary","doc2field1")).next());
  assertTrue(ir.termDocs(new Term("binary","doc3field1")).next());
  assertTrue(ir.termDocs(new Term("string","doc1field2")).next());
  assertTrue(ir.termDocs(new Term("string","doc2field2")).next());
  assertTrue(ir.termDocs(new Term("string","doc3field2")).next());
  ir.close();
  dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
  // Adds three stored-only fields (zzz, aaa, zzz) and verifies the reader
  // returns them in exactly the insertion order, not grouped by name.
  Directory d = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  doc = r.document(0);
  Iterator<Fieldable> it = doc.getFields().iterator();
  // FIX: assertEquals takes (expected, actual); the arguments were reversed,
  // which produced misleading "expected X but was Y" failure messages.
  assertTrue(it.hasNext());
  Field f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("aaa", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("1 2 3", f.stringValue());
  assertFalse(it.hasNext());
  r.close();
  w.close();
  d.close();
}
public void testEmbeddedFFFF() throws Throwable {
  // A term containing U+FFFF must round-trip through indexing intact.
  Directory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(document);
  document = new Document();
  document.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(document);
  IndexReader reader = writer.getReader();
  assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
  reader.close();
  writer.close();
  _TestUtil.checkIndex(directory);
  directory.close();
}
public void testNoDocsIndex() throws Throwable {
  // Index a single empty document (no fields, no compound files) and verify
  // the resulting index passes CheckIndex. Info-stream output is captured
  // into a buffer so it does not pollute test output.
  Directory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  LogMergePolicy mergePolicy = (LogMergePolicy) writer.getConfig().getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  mergePolicy.setUseCompoundDocStore(false);
  ByteArrayOutputStream infoBytes = new ByteArrayOutputStream(1024);
  writer.setInfoStream(new PrintStream(infoBytes));
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(directory);
  directory.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
// Each of NUM_THREADS threads repeatedly adds a uniquely-named doc, commits,
// reopens its private reader, and asserts the just-committed term is
// visible. A missing term would mean commit() returned before the change
// was durable/visible.
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = new MockRAMDirectory();
final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// Term text "<threadId>_<counter>" makes every added doc unique.
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
// The committed term must be visible to the reopened reader.
assertEquals("term=f:" + s, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
// Record failure so the other threads stop, then surface the cause.
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
w.close();
dir.close();
assertFalse(failed.get());
}
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  // The inclusive range [start, end] contains (end - start + 1) values.
  final int span = end - start + 1;
  return start + r.nextInt(span);
}
// Verifies that field "f"'s terms enumerate in UTF16 sort order, that every
// enumerated term is a member of allTerms, and that each seen term can be
// seek'd to afterwards. When isTop is true, the reader covers the whole
// index, so the set of seen terms must equal allTerms exactly.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
// Terms in this test are at most 2 UTF16 code units (a char or a surrogate pair).
char[] last = new char[2];
int lastLength = 0;
Set<String> seenTerms = new HashSet<String>();
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Terms are stored as UTF8 bytes; decode back to UTF16 code units.
UnicodeUtil.UTF8toUTF16(term.bytes, term.offset, term.length, utf16);
assertTrue(utf16.length <= 2);
// Make sure last term comes before current one, in
// UTF16 sort order
int i = 0;
for(i=0;i<lastLength && i<utf16.length;i++) {
assertTrue("UTF16 code unit " + termDesc(new String(utf16.result, 0, utf16.length)) + " incorrectly sorted after code unit " + termDesc(new String(last, 0, lastLength)), last[i] <= utf16.result[i]);
if (last[i] < utf16.result[i]) {
// Strictly greater at this unit: order established, stop comparing.
break;
}
}
// Terms should not have been identical
assertTrue(lastLength != utf16.length || i < lastLength);
final String s = new String(utf16.result, 0, utf16.length);
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
// Remember this term so the next iteration can compare against it.
System.arraycopy(utf16.result, 0, last, 0, utf16.length);
lastLength = utf16.length;
}
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as "U+<hex>" for readable assertion failure messages.
private final String asUnicodeChar(char ch) {
  return "U+" + Integer.toHexString(ch);
}
// Describes a 1- or 2-code-unit term as its code point(s) in "U+hex" form.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in UTF16 sort order by default
public void testTermUTF16SortOrder() throws Throwable {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
Random rnd = newRandom();
final Set<String> allTerms = new HashSet<String>();
// Generate random terms: half are single BMP chars (avoiding the surrogate
// range), half are well-formed surrogate pairs.
for(int i=0;i<200*_TestUtil.getRandomMultiplier();i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
//System.out.println("add " + termDesc(s));
writer.addDocument(d);
// Periodic commits produce multiple segments for the sub-reader checks below.
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
public void testIndexDivisor() throws Exception {
  // Index one doc with 300 distinct terms, open a reader with term index
  // divisor 2, and verify all 300 terms are still enumerable and each maps
  // to exactly the single document.
  Directory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
  // must be > 256
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 300; i++) {
    content.append(' ').append(""+i);
  }
  Document document = new Document();
  document.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(document);
  IndexReader sub = writer.getReader(2).getSequentialSubReaders()[0];
  TermsEnum termsEnum = sub.fields().terms("field").iterator();
  int seen = 0;
  while (termsEnum.next() != null) {
    DocsEnum docs = termsEnum.docs(null, null);
    assertEquals(0, docs.nextDoc());
    assertEquals(DocsEnum.NO_MORE_DOCS, docs.nextDoc());
    seen++;
  }
  assertEquals(300, seen);
  sub.close();
  writer.close();
  directory.close();
}
// Verifies deleteUnusedFiles(): a segment file still held open by a reader
// (_0.cfs) must survive, and is only removed once the reader is closed —
// automatically for an NRT reader (iter 0), or via an explicit
// deleteUnusedFiles() call for a regular reader (iter 1).
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
// The optimized segment must never be deleted — r2 and the writer use it.
assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed" for "Unused"); kept as-is
// because renaming would change the test's externally visible name.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = new MockRAMDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
}
private static class FlushCountingIndexWriter extends IndexWriter {
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
public void doAfterFlush() {
flushCount++;
}
}
public void testIndexingThenDeleting() throws Exception {
  // With a 0.5 MB RAM buffer, both buffered adds and buffered deletes should
  // accumulate for a while before triggering a flush; a flush after too few
  // operations means buffering is broken.
  final Random r = newRandom();
  Directory dir = new MockRAMDirectory();
  FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  //w.setInfoStream(System.out);
  Document doc = new Document();
  doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
  for (int iter = 0; iter < 6*_TestUtil.getRandomMultiplier(); iter++) {
    final boolean doIndexing = r.nextBoolean();
    final int flushCountBefore = w.flushCount;
    int count = 0;
    if (doIndexing) {
      // Add docs until a flush is triggered
      while (w.flushCount == flushCountBefore) {
        w.addDocument(doc);
        count++;
      }
    } else {
      // Delete docs until a flush is triggered
      while (w.flushCount == flushCountBefore) {
        w.deleteDocuments(new Term("foo", ""+count));
        count++;
      }
    }
    assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
  }
  w.close();
  dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory directory = new RAMDirectory();
  IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  try {
    IndexReader.listCommits(directory);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException e) {
    // that's expected !
  }
  // No changes still should generate a commit, because it's a new index.
  iw.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(directory).size());
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  File path = new File(TEMP_DIR, "emptyFSDirNoLock");
  FSDirectory directory = FSDirectory.open(path, NoLockFactory.getNoLockFactory());
  new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
}
public void testEmptyDirRollback() throws Exception {
  // Tests that if IW is created over an empty Directory, some documents are
  // indexed, flushed (but not committed) and then IW rolls back, then no
  // files are left in the Directory.
  Directory directory = new MockRAMDirectory();
  IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2));
  // Creating over empty dir should not create any files.
  assertEquals(0, directory.listAll().length);
  Document d = new Document();
  // create as many files as possible
  d.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  iw.addDocument(d);
  // Adding just one document does not call flush yet.
  assertEquals("only the stored and term vector files should exist in the directory", 5, directory.listAll().length);
  d = new Document();
  d.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  iw.addDocument(d);
  // The second document should cause a flush.
  assertTrue("flush should have occurred and files created", directory.listAll().length > 5);
  // After rollback, IW should remove all files
  iw.rollback();
  assertEquals("no files should exist in the directory after rollback", 0, directory.listAll().length);
  // Since we rolled-back above, that close should be a no-op
  iw.close();
  assertEquals("expected a no-op close after IW.rollback()", 0, directory.listAll().length);
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// JUnit 3-style constructor: forwards the test name to LuceneTestCase.
public TestIndexWriter(String name) {
super(name);
}
// Checks maxDoc()/numDocs() bookkeeping across adds, reader-side deletes,
// optimize, and an open-for-create over an existing index. Also exercises
// the static default write-lock timeout, restoring it in a finally block
// so other tests are unaffected.
public void testDocCount() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// optimize expunges the 40 deleted docs, so maxDoc drops to numDocs.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
}
// Adds one document with a single unstored, analyzed "content" field ("aaa").
private static void addDoc(IndexWriter writer) throws IOException
{
  Document d = new Document();
  d.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Adds one document whose "content" and "id" fields embed the given index,
// so individual documents can be identified in later assertions.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
public void testAddIndexOnDiskFull() throws IOException
{
// Docs already in the target index, and the expected total after all
// NUM_DIR source indexes (25 docs each) are merged in.
int START_COUNT = 57;
int NUM_DIR = 50;
int END_COUNT = START_COUNT + NUM_DIR*25;
// Build up a bunch of dirs that have indexes which we
// will then merge together by calling addIndexes(*):
Directory[] dirs = new Directory[NUM_DIR];
long inputDiskUsage = 0;
for(int i=0;i<NUM_DIR;i++) {
dirs[i] = new RAMDirectory();
IndexWriter writer = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<25;j++) {
addDocWithIndex(writer, 25*i+j);
}
writer.close();
// Sum the on-disk size of all source indexes; used later to bound
// the temporary space addIndexes is allowed to consume.
String[] files = dirs[i].listAll();
for(int j=0;j<files.length;j++) {
inputDiskUsage += dirs[i].fileLength(files[j]);
}
}
// Now, build a starting index that has START_COUNT docs. We
// will then try to addIndexesNoOptimize into a copy of this:
RAMDirectory startDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<START_COUNT;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Make sure starting index seems to be working properly:
Term searchTerm = new Term("content", "aaa");
IndexReader reader = IndexReader.open(startDir, true);
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 57, hits.length);
searcher.close();
reader.close();
// Iterate with larger and larger amounts of free
// disk space. With little free disk space,
// addIndexes will certainly run out of space &
// fail. Verify that when this happens, index is
// not corrupt and index in fact has added no
// documents. Then, we increase disk space by 2000
// bytes each iteration. At some point there is
// enough free disk space and addIndexes should
// succeed and index should show all documents were
// added.
// String[] files = startDir.listAll();
long diskUsage = startDir.sizeInBytes();
long startDiskUsage = 0;
String[] files = startDir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += startDir.fileLength(files[i]);
}
// Each iter exercises one of the three addIndexes variants.
for(int iter=0;iter<3;iter++) {
if (VERBOSE)
System.out.println("TEST: iter=" + iter);
// Start with 100 bytes more than we are currently using:
long diskFree = diskUsage+100;
int method = iter;
boolean success = false;
boolean done = false;
String methodName;
if (0 == method) {
methodName = "addIndexes(Directory[]) + optimize()";
} else if (1 == method) {
methodName = "addIndexes(IndexReader[])";
} else {
methodName = "addIndexes(Directory[])";
}
while(!done) {
// Make a new dir that will enforce disk usage:
MockRAMDirectory dir = new MockRAMDirectory(startDir);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IOException err = null;
MergeScheduler ms = writer.getConfig().getMergeScheduler();
for(int x=0;x<2;x++) {
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
if (0 == x)
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
else
((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
// Two loops: first time, limit disk space &
// throw random IOExceptions; second time, no
// disk space limit:
double rate = 0.05;
double diskRatio = ((double) diskFree)/diskUsage;
long thisDiskFree;
String testName = null;
if (0 == x) {
thisDiskFree = diskFree;
// The more free-space headroom we have, the lower the injected
// IOException rate; at >= 6x headroom injection stops entirely.
if (diskRatio >= 2.0) {
rate /= 2;
}
if (diskRatio >= 4.0) {
rate /= 2;
}
if (diskRatio >= 6.0) {
rate = 0.0;
}
if (VERBOSE)
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
} else {
thisDiskFree = 0;
rate = 0.0;
if (VERBOSE)
testName = "disk full test " + methodName + " with unlimited disk space";
}
if (VERBOSE)
System.out.println("\ncycle: " + testName);
dir.setMaxSizeInBytes(thisDiskFree);
dir.setRandomIOExceptionRate(rate, diskFree);
try {
if (0 == method) {
writer.addIndexes(dirs);
writer.optimize();
} else if (1 == method) {
IndexReader readers[] = new IndexReader[dirs.length];
for(int i=0;i<dirs.length;i++) {
readers[i] = IndexReader.open(dirs[i], true);
}
try {
writer.addIndexes(readers);
} finally {
for(int i=0;i<dirs.length;i++) {
readers[i].close();
}
}
} else {
writer.addIndexes(dirs);
}
success = true;
if (VERBOSE) {
System.out.println(" success!");
}
if (0 == x) {
done = true;
}
} catch (IOException e) {
success = false;
err = e;
if (VERBOSE) {
System.out.println(" hit IOException: " + e);
e.printStackTrace(System.out);
}
// With unlimited disk space (second pass) the operation must not fail.
if (1 == x) {
e.printStackTrace(System.out);
fail(methodName + " hit IOException after disk space was freed up");
}
}
// Make sure all threads from
// ConcurrentMergeScheduler are done
_TestUtil.syncConcurrentMerges(writer);
if (VERBOSE) {
System.out.println(" now test readers");
}
// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs added, and if we
// failed, we see either all docs or no docs added
// (transactional semantics):
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when creating IndexReader: " + e);
}
int result = reader.docFreq(searchTerm);
if (success) {
if (result != START_COUNT) {
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result != START_COUNT && result != END_COUNT) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
}
}
searcher = new IndexSearcher(reader);
try {
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when searching: " + e);
}
// The hit count must agree with docFreq in every outcome:
int result2 = hits.length;
if (success) {
if (result2 != result) {
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result2 != result) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
}
searcher.close();
reader.close();
if (VERBOSE) {
System.out.println(" count is " + result);
}
if (done || result == END_COUNT) {
break;
}
}
if (VERBOSE) {
System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
}
if (done) {
// Javadocs state that temp free Directory space
// required is at most 2X total input size of
// indices so let's make sure:
// NOTE(review): the failure message below says "1X" but the bound
// actually checked is 2X the combined sizes; message looks stale.
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
"starting disk usage = " + startDiskUsage + " bytes; " +
"input index disk usage = " + inputDiskUsage + " bytes",
(dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
}
// Make sure we don't hit disk full during close below:
dir.setMaxSizeInBytes(0);
dir.setRandomIOExceptionRate(0.0, 0);
writer.close();
// Wait for all BG threads to finish else
// dir.close() will throw IOException because
// there are still open files
_TestUtil.syncConcurrentMerges(ms);
dir.close();
// Try again with 5000 more bytes of free space:
diskFree += 5000;
}
}
startDir.close();
}
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
public void testAddDocumentOnDiskFull() throws IOException {
// pass 0: recover via close() after disk-full; pass 1: recover via rollback().
for(int pass=0;pass<2;pass++) {
if (VERBOSE)
System.out.println("TEST: pass=" + pass);
boolean doAbort = pass == 1;
long diskFree = 200;
// Keep retrying with 500 more bytes each cycle until all 200 adds succeed.
while(true) {
if (VERBOSE)
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
MergeScheduler ms = writer.getConfig().getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
boolean hitError = false;
try {
for(int i=0;i<200;i++) {
addDoc(writer);
}
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on addDoc");
e.printStackTrace(System.out);
}
hitError = true;
}
if (hitError) {
if (doAbort) {
writer.rollback();
} else {
try {
writer.close();
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on close");
e.printStackTrace(System.out);
}
// close() itself ran out of space; lift the size limit and
// retry so the writer can commit and release its files.
dir.setMaxSizeInBytes(0);
writer.close();
}
}
_TestUtil.syncConcurrentMerges(ms);
// Whether we closed or aborted, no stray files may remain:
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
// Make sure reader can open the index:
IndexReader.open(dir, true).close();
dir.close();
// Now try again w/ more space:
diskFree += 500;
} else {
// All docs were added without error; done with this pass.
// NOTE(review): the writer is not closed in this success path --
// verify that is intentional.
_TestUtil.syncConcurrentMerges(writer);
dir.close();
break;
}
}
}
}
/**
 * Asserts that constructing an IndexFileDeleter (which removes files not
 * referenced by the current commit as a side effect) leaves the directory's
 * file listing unchanged — i.e. the index holds no unreferenced files.
 */
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
final String[] before = dir.listAll();
final SegmentInfos segmentInfos = new SegmentInfos();
segmentInfos.read(dir);
// Side effect of construction: deletes anything not referenced by the commit.
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), segmentInfos, null, null, CodecProvider.getDefault());
final String[] after = dir.listAll();
Arrays.sort(before);
Arrays.sort(after);
if (!Arrays.equals(before, after)) {
fail(message + ": before delete:\n " + arrayToString(before) + "\n after delete:\n " + arrayToString(after));
}
}
/** optimize(3) must leave at most 3 segments, for a range of starting segment counts. */
public void testOptimizeMaxNumSegments() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
// Vary the doc count so each pass starts from a different segment structure.
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Record the segment count before optimizing.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// Fewer than 3 segments to begin with: the count must be unchanged;
// otherwise optimize must have reduced it to exactly 3.
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
}
/** Repeatedly adds docs then calls optimize(7); at most 7 segments may remain each time. */
public void testOptimizeMaxNumSegments2() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Wait for background merges so the on-disk segment count is stable
// before reading SegmentInfos.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
sis = new SegmentInfos();
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
sis.read(dir);
final int optSegCount = sis.size();
// Fewer than 7 segments to start with: optimize must not change the count.
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
// NOTE(review): writer and dir are not closed here -- verify intentional.
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
public void testOptimizeTempSpaceUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Measure the committed index size before optimizing.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
}
// Reset the high-water mark so it reflects only the optimize below.
dir.resetMaxUsedSizeInBytes();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
// Peak usage may briefly be old index + fully merged copy, i.e. at most
// 2X the starting size (= 1X of *temporary* space beyond the index itself).
assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2*startDiskUsage) + " (= 2X starting usage)",
maxDiskUsage <= 2*startDiskUsage);
dir.close();
}
/**
 * Joins the given file names into one string, one name per line, with each
 * continuation line prefixed by a single space (the format used in
 * assertNoUnreferencedFiles failure messages).
 *
 * @param l file names to format; an empty array yields the empty string
 * @return the newline-separated listing
 */
static String arrayToString(String[] l) {
// StringBuilder avoids the O(n^2) cost of repeated String concatenation.
StringBuilder sb = new StringBuilder();
for(int i=0;i<l.length;i++) {
if (i > 0) {
sb.append("\n ");
}
sb.append(l[i]);
}
return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
File indexDir = _TestUtil.getTempDir("lucenetestindexwriter");
try {
Directory dir = FSDirectory.open(indexDir);
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
// The reader opened earlier still sees its point-in-time snapshot
// (one doc), even though the index was recreated underneath it:
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
// Always remove the temp directory, even on failure.
rmDir(indexDir);
}
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Copy all but the final byte, leaving a truncated segments_(gen+1).
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// Opening must gracefully fall back to the last intact segments file:
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
// A writer must also be able to open (and recreate) the crashed index:
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Write a truncated copy of the segments file (last byte missing)...
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// ...then delete the intact original, so only the corrupt copy remains
// and there is no older commit to fall back to.
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: the only segments file is truncated
}
if (reader != null) {
reader.close();
}
}
/** Using an IndexWriter after close() must throw AlreadyClosedException. */
public void testChangesAfterClose() throws IOException {
final Directory directory = new RAMDirectory();
final IndexWriter writer =
new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// Any further mutation must be rejected now that the writer is closed.
try {
addDoc(writer);
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException expected) {
// expected
}
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
public void testSimulatedCorruptIndex2() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Corrupt the index by deleting the first referenced compound file:
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
if (files[i].endsWith(".cfs")) {
dir.deleteFile(files[i]);
break;
}
}
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: a file referenced by the segments file is missing
}
if (reader != null) {
reader.close();
}
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
public void testCommitOnClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
// Add 33 more docs (3 batches of 11) without closing the writer; searchers
// opened meanwhile must keep seeing only the 14 committed docs.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 + 3*11 = 47 docs become visible only once close() commits.
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
// NOTE(review): 'reader' is never closed in this test -- verify intentional.
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
public void testCommitOnCloseAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Add 17 more docs and delete everything -- but roll back instead of
// closing, so none of these changes may ever become visible.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Still uncommitted, so searchers continue to see only 14 docs:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 12*17 = 218 docs after the final commit-on-close.
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockRAMDirectory to
* measure max temp disk space used.
*/
public void testCommitOnCloseDiskUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Baseline: high-water mark of the 30-doc starting index.
dir.resetMaxUsedSizeInBytes();
long startDiskUsage = dir.getMaxUsedSizeInBytes();
// SerialMergeScheduler keeps merges on this thread so disk accounting
// is deterministic.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10).setMergeScheduler(
new SerialMergeScheduler()));
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 2X disk usage normally we allow 100X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 100X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
midDiskUsage < 100*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
endDiskUsage < 100*startDiskUsage);
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
public void testCommitOnCloseOptimize() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// This time optimize and commit via close(); the optimize must now stick.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
// NOTE(review): the message below says "aborted writer" but this follows
// a normal close(); the message looks stale.
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after the commit-on-close:
reader = IndexReader.open(dir, true);
// Reader should now see index as optimized:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
}
/** An index committed with zero documents must open as empty, in both CREATE and APPEND modes. */
public void testIndexNoDocuments() throws IOException {
final RAMDirectory directory = new RAMDirectory();

// Create a brand-new index and commit it without adding anything.
IndexWriter emptyWriter = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
emptyWriter.commit();
emptyWriter.close();

IndexReader emptyReader = IndexReader.open(directory, true);
assertEquals(0, emptyReader.maxDoc());
assertEquals(0, emptyReader.numDocs());
emptyReader.close();

// Re-open the same index for append; committing again must keep it empty.
emptyWriter = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
emptyWriter.commit();
emptyWriter.close();

emptyReader = IndexReader.open(directory, true);
assertEquals(0, emptyReader.maxDoc());
assertEquals(0, emptyReader.numDocs());
emptyReader.close();
}
/**
 * Indexes 100 documents that each carry six uniquely named fields, then
 * verifies every resulting term occurs in exactly one document.
 */
public void testManyFields() throws IOException {
final RAMDirectory directory = new RAMDirectory();
final IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int docNum = 0; docNum < 100; docNum++) {
final Document document = new Document();
// Fields a/b/c carry a per-document value; d/e/f carry the constant "aaa".
document.add(new Field("a" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("b" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("c" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("d" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("e" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("f" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(document);
}
writer.close();

final IndexReader reader = IndexReader.open(directory, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
for (int docNum = 0; docNum < 100; docNum++) {
// Each field name is unique to one document, so every term's docFreq is 1.
assertEquals(1, reader.docFreq(new Term("a" + docNum, "aaa" + docNum)));
assertEquals(1, reader.docFreq(new Term("b" + docNum, "aaa" + docNum)));
assertEquals(1, reader.docFreq(new Term("c" + docNum, "aaa" + docNum)));
assertEquals(1, reader.docFreq(new Term("d" + docNum, "aaa")));
assertEquals(1, reader.docFreq(new Term("e" + docNum, "aaa")));
assertEquals(1, reader.docFreq(new Term("f" + docNum, "aaa")));
}
reader.close();
directory.close();
}
/**
 * With a tiny RAM buffer every added document should trigger a flush,
 * which we observe as a strictly growing file count in the directory.
 */
public void testSmallRAMBuffer() throws IOException {
final RAMDirectory directory = new RAMDirectory();
final IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.000001));
int previousFileCount = directory.listAll().length;
for (int docNum = 0; docNum < 9; docNum++) {
final Document document = new Document();
document.add(new Field("field", "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(document);
// Verify that with a tiny RAM buffer we see a new segment after every doc.
final int currentFileCount = directory.listAll().length;
assertTrue(currentFileCount > previousFileCount);
previousFileCount = currentFileCount;
}
writer.close();
directory.close();
}
/**
* Make sure it's OK to change RAM buffer size and maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
public void testChangingRAMBuffer() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
int lastFlushCount = -1;
// Walk j through phases, toggling the flush trigger between doc-count and
// RAM size, and assert the writer's flush counter reacts accordingly.
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10)
// No new files should be created
assertEquals(flushCount, lastFlushCount);
else if (10 == j) {
// 10 buffered docs reached: the first flush must have happened.
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
// Switch trigger to a tiny RAM budget: now every doc should flush.
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 20) {
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Large RAM budget and no doc limit: flushing should stop.
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
// Back to a tiny RAM budget: per-doc flushing resumes.
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Doc-count trigger only (every 10 docs); RAM trigger disabled.
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
// 10 more docs buffered since j==40: another flush must occur.
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
public void testChangingRAMBuffer2() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
// First index 51 docs; the phases below then exercise the flush triggers
// using buffered delete terms only.
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
writer.deleteDocuments(new Term("field", "aaa" + j));
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10) {
// No new files should be created
assertEquals(flushCount, lastFlushCount);
} else if (10 == j) {
// 10 buffered delete terms reached: a flush must have happened.
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
// Switch to flushing after every single delete term.
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 20) {
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Large RAM budget and no delete-term limit: flushing should stop.
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
// NOTE(review): the next call is immediately overridden by the one
// after it, so the DISABLE_AUTO_FLUSH setting here appears redundant.
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Delete-term trigger only (every 10 terms); RAM trigger disabled.
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
// 10 more delete terms buffered since j==40: another flush must occur.
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/** Stresses indexing with three very different document shapes per pass. */
public void testDiverseDocs() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
Random rand = newRandom();
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// Only the second group of docs (3 passes x 100 docs) contains the term
// "aaa", so exactly 300 hits are expected.
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Norms must be written correctly even when only a single document in the
// index has them enabled — once pre-flush (10 buffered docs) and once
// post-flush (27 docs spanning multiple flushes of 10).
public void testEnablingNorms() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // First pass: norms enabled only for doc #8, before any flush happens.
  for (int docNum = 0; docNum < 10; docNum++) {
    Document doc = new Document();
    Field field = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (docNum != 8) {
      field.setOmitNorms(true);
    }
    doc.add(field);
    writer.addDocument(doc);
  }
  writer.close();
  Term searchTerm = new Term("field", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(10, hits.length);
  searcher.close();
  // Second pass: recreate the index; norms enabled only for doc #26,
  // which lands after earlier flushes have already occurred.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
  for (int docNum = 0; docNum < 27; docNum++) {
    Document doc = new Document();
    Field field = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (docNum != 26) {
      field.setOmitNorms(true);
    }
    doc.add(field);
    writer.addDocument(doc);
  }
  writer.close();
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(27, hits.length);
  searcher.close();
  // Sanity: the resulting index opens cleanly.
  IndexReader reader = IndexReader.open(dir, true);
  reader.close();
  dir.close();
}
// A single massive document whose one indexed term ("a") occurs 128K times
// must index correctly even with a tiny (0.01 MB) RAM buffer.
public void testHighFreqTerm() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
  // Massive doc that has 128 K a's: 4096 iterations x 32 a's each.
  StringBuilder b = new StringBuilder(1024*1024);
  for(int i=0;i<4096;i++) {
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
  }
  Document doc = new Document();
  doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.maxDoc());
  assertEquals(1, reader.numDocs());
  Term t = new Term("field", "a");
  assertEquals(1, reader.docFreq(t));
  TermDocs td = reader.termDocs(t);
  // Fix: previously the return value of next() was ignored, so a missing
  // posting would have gone unnoticed and freq() would be read from an
  // unpositioned enum. Assert that the enum actually advances to the doc.
  assertTrue(td.next());
  assertEquals(128*1024, td.freq());
  // Fix: close the TermDocs enum (it was leaked).
  td.close();
  reader.close();
  dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
  // Directory with a null LockFactory that does its own locking internally.
  final class MyRAMDirectory extends RAMDirectory {
    private LockFactory myLockFactory;
    MyRAMDirectory() {
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }
  Directory dir = new MyRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  // Fix: close the searcher — it was leaked; the original redundantly
  // called close() a second time on the already-closed writer instead.
  searcher.close();
  // Re-creating the index must also work without a LockFactory.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  dir.close();
}
// Flushing with merging disallowed must leave every flushed segment in place.
public void testFlushWithNoMerging() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  for(int i=0;i<19;i++)
    writer.addDocument(doc);
  // triggerMerge=false: flush the remaining buffered doc but do NOT merge.
  writer.flush(false, true, true);
  writer.close();
  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now have 10 segments:
  // 9 auto-flushes of 2 docs each, plus the final explicit flush of 1 doc.
  // Fix: use assertEquals instead of a bare Java `assert`, which is silently
  // skipped unless the JVM runs with -ea, making the check a no-op.
  assertEquals(10, sis.size());
  // Fix: close the directory (it was leaked).
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // Flush the segment that has norms, then add a completely empty doc.
  writer.commit();
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  // Fix: release leaked resources — neither reader nor dir was closed.
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
public void testBackgroundOptimize() throws IOException {
Directory dir = new MockRAMDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
// Merge factor 101 > 100 flushed segments, so no merge runs during adds.
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off optimize in the background; do not wait for it here.
writer.optimize(false);
if (0 == pass) {
// close() waits for the running optimize, so the index ends optimized.
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
// Exactly 2 segments: the optimized one plus the late-flushed one.
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
// Deletes the direct children of dir, then dir itself. Not recursive:
// subdirectories (and therefore dir) survive if any are non-empty.
// Deletion failures are deliberately ignored (best-effort test cleanup).
private void rmDir(File dir) {
  final File[] entries = dir.listFiles();
  if (entries != null) {
    for (final File entry : entries) {
      entry.delete();
    }
  }
  dir.delete();
}
/**
 * Test that no NullPointerException will be raised
 * when adding one document with a single, empty field
 * and term vectors enabled.
 * @throws IOException
 */
public void testBadSegment() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  // An empty field value with term vectors enabled used to trigger an NPE.
  doc.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(doc);
  writer.close();
  dir.close();
}
// LUCENE-1008
// A field indexed WITHOUT term vectors after one WITH term vectors (in the
// same and in later segments) must not corrupt the index on merge/optimize.
public void testNoTermVectorAfterTermVector() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
// NOTE: `document` is reused, so the third doc carries BOTH fields:
// the TermVector.NO "x y z" field and the TermVector.YES "a b c" field.
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
// Merging the mixed segments must succeed.
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1010
// Like testNoTermVectorAfterTermVector, but an optimize happens between the
// no-term-vector segment and the later with-term-vector doc.
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
// Merge while the index has one TV and one non-TV doc.
iw.optimize();
// NOTE: `document` is reused, so this doc carries both the TermVector.NO
// "x y z" field and the TermVector.YES "a b c" field.
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1036: indexing (including the merges forced by mergeFactor=2 /
// maxBufferedDocs=2) must work even when the calling thread runs at
// MAX_PRIORITY.
public void testMaxThreadPriority() throws IOException {
  int pri = Thread.currentThread().getPriority();
  try {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMaxBufferedDocs(2);
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter iw = new IndexWriter(dir, conf);
    Document document = new Document();
    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
        Field.TermVector.YES));
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    for(int i=0;i<4;i++)
      iw.addDocument(document);
    iw.close();
    // Fix: close the directory (it was leaked).
    dir.close();
  } finally {
    // Always restore the original priority so later tests are unaffected.
    Thread.currentThread().setPriority(pri);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain and run all pending merges serially on the calling thread.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// NOTE(review): bare Java assert — only enforced when the JVM runs
// with -ea; consider assertTrue for an unconditional check.
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013: with maxMergeDocs=20, no merge may ever involve a segment of
// 20+ docs; MyMergeScheduler asserts this for every merge it runs.
public void testSetMaxMergeDocs() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
  lmp.setMaxMergeDocs(20);
  lmp.setMergeFactor(2);
  IndexWriter iw = new IndexWriter(dir, conf);
  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  // 177 docs with 2-doc flushes and mergeFactor 2 drive many merge rounds.
  for(int i=0;i<177;i++)
    iw.addDocument(document);
  iw.close();
  // Fix: close the directory (it was leaked).
  dir.close();
}
// LUCENE-1072
// An exception thrown by the analyzer mid-document must mark that document
// deleted, leave none of its terms searchable, and leave the writer usable.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
// Filter that throws on the 6th token of each stream.
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
// expected: analyzer throws on the 6th token
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
// docFreq counts the deleted doc too (3), while termDocs skips it (2).
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
TermDocs tdocs = reader.termDocs(t);
int count = 0;
while(tdocs.next()) {
count++;
}
assertEquals(2, count);
// "gg" is past the 6th token, so it was never indexed at all.
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// MockRAMDirectory failure that throws an IOException during segment flush —
// specifically once FreqProxTermsWriter.appendPostings and doFlush are both
// on the stack and at least 30 prior qualifying calls have been seen.
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
// counts qualifying (appendPostings+doFlush) evaluations before failing
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Inspect the current stack to detect a flush in progress.
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
sawAppend = true;
if ("doFlush".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
// Disarm before throwing so only ONE flush fails.
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  FailOnlyOnFlush failure = new FailOnlyOnFlush();
  failure.setDoFail();
  dir.failOn(failure);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  String contents = "aa bb cc dd ee ff gg hh ii jj kk";
  doc.add(new Field("content", contents, Field.Store.NO,
      Field.Index.ANALYZED));
  boolean hitError = false;
  for(int i=0;i<200;i++) {
    try {
      writer.addDocument(doc);
    } catch (IOException ioe) {
      // only one flush should fail:
      assertFalse(hitError);
      hitError = true;
    }
  }
  assertTrue(hitError);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  // The single aborted flush loses exactly its 2 buffered docs: 200 - 2.
  assertEquals(198, reader.docFreq(new Term("content", "aa")));
  reader.close();
  // Fix: close the directory (it was leaked).
  dir.close();
}
// TokenFilter that throws an IOException on the 5th token of any field
// named "crash"; all other fields pass through untouched. reset() rearms
// the counter so a reused stream can crash again.
private class CrashingFilter extends TokenFilter {
  String targetField;
  int tokensSeen;
  public CrashingFilter(String fieldName, TokenStream input) {
    super(input);
    this.targetField = fieldName;
  }
  @Override
  public boolean incrementToken() throws IOException {
    // Counter only advances for the "crash" field (short-circuit &&).
    if (this.targetField.equals("crash") && tokensSeen++ >= 4) {
      throw new IOException("I'm experiencing problems");
    }
    return input.incrementToken();
  }
  @Override
  public void reset() throws IOException {
    super.reset();
    tokensSeen = 0;
  }
}
// A document that crashes mid-analysis must be marked deleted without losing
// the docs added before/after it; run twice (i=0 adds 2 extra docs after the
// crash, i=1 adds none) and verify doc counts before and after an optimize.
public void testDocumentsWriterExceptions() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
// Throws on the 5th token of any field named "crash".
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// The same doc now also gets a "crash" field: the next add will throw.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected: CrashingFilter throws during analysis
}
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// 2 pre-crash + 1 crashed (deleted) + 2 post-crash when i==0:
// expected = 5 for i=0, 3 for i=1.
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly the crashed doc is deleted.
assertEquals(1, numDel);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// Optimize merges away the deletion.
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Multi-threaded version of testDocumentsWriterExceptions: 3 threads each
// run 100 iterations of (2 good adds, 1 crashing add, and for i==0 two more
// good adds); every crashed doc must end up deleted and nothing else lost.
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
// Throws on the 5th token of any field named "crash".
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// Adding a "crash" field makes the next add throw mid-analysis.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected: CrashingFilter throws during analysis
}
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
// NOTE(review): synchronized(this) locks the Thread object itself;
// it does not serialize printing across the 3 threads.
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
IndexReader reader = IndexReader.open(dir, true);
// Per iteration: 2 good + 1 crashed (deleted) + 2 more good when i==0.
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly one deleted doc per crashing add.
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// Optimize expunges the deletions.
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Repeatedly indexes docs whose field schema varies per round (different
// fields, store flags, empty values), interleaved with deletions and
// periodic optimizes, verifying merging tolerates a changing schema.
public void testVariableSchema() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  int delID = 0;
  for(int i=0;i<20;i++) {
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
    lmp.setMergeFactor(2);
    lmp.setUseCompoundFile(false);
    lmp.setUseCompoundDocStore(false);
    Document doc = new Document();
    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
    if (i == 7) {
      // Add empty docs here
      doc.add(new Field("content3", "", Field.Store.NO,
          Field.Index.ANALYZED));
    } else {
      // Alternate stored/unstored fields between rounds.
      Field.Store storeVal;
      if (i%2 == 0) {
        doc.add(new Field("content4", contents, Field.Store.YES,
            Field.Index.ANALYZED));
        storeVal = Field.Store.YES;
      } else
        storeVal = Field.Store.NO;
      doc.add(new Field("content1", contents, storeVal,
          Field.Index.ANALYZED));
      doc.add(new Field("content3", "", Field.Store.YES,
          Field.Index.ANALYZED));
      doc.add(new Field("content5", "", storeVal,
          Field.Index.ANALYZED));
    }
    for(int j=0;j<4;j++)
      writer.addDocument(doc);
    writer.close();
    // Delete one doc per round through a reader.
    IndexReader reader = IndexReader.open(dir, false);
    reader.deleteDocument(delID++);
    reader.close();
    if (0 == i % 4) {
      writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
      LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
      lmp2.setUseCompoundFile(false);
      lmp2.setUseCompoundDocStore(false);
      writer.optimize();
      writer.close();
    }
  }
  // Fix: close the directory (it was leaked).
  dir.close();
}
// Stresses close(false) ("don't wait for merges") while a background thread
// keeps adding docs; the writer must abort running merges and close, and the
// index must stay readable/reopenable afterwards.
public void testNoWaitClose() throws Throwable {
RAMDirectory directory = new MockRAMDirectory();
final Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<2;pass++) {
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
.setMaxBufferedDocs(2);
// NOTE(review): dead branch — the loop runs pass=0..1, so pass == 2 never
// holds and the SerialMergeScheduler path is never exercised (the
// commented-out "cms=" + (pass >= 2) below suggests a third pass was
// intended, or the condition should be pass == 1).
if (pass == 2) {
conf.setMergeScheduler(new SerialMergeScheduler());
}
IndexWriter writer = new IndexWriter(directory, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
//System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
for(int iter=0;iter<10;iter++) {
//System.out.println("TEST: iter=" + iter);
for(int j=0;j<199;j++) {
idField.setValue(Integer.toString(iter*201+j));
writer.addDocument(doc);
}
int delID = iter*199;
for(int j=0;j<20;j++) {
writer.deleteDocuments(new Term("id", Integer.toString(delID)));
delID += 5;
}
// Force a bunch of merge threads to kick off so we
// stress out aborting them on close:
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
final IndexWriter finalWriter = writer;
final ArrayList<Throwable> failure = new ArrayList<Throwable>();
// Background thread hammers addDocument until the writer is closed;
// AlreadyClosedException/NPE are the expected shutdown signals.
Thread t1 = new Thread() {
@Override
public void run() {
boolean done = false;
while(!done) {
for(int i=0;i<100;i++) {
try {
finalWriter.addDocument(doc);
} catch (AlreadyClosedException e) {
done = true;
break;
} catch (NullPointerException e) {
done = true;
break;
} catch (Throwable e) {
e.printStackTrace(System.out);
failure.add(e);
done = true;
break;
}
}
Thread.yield();
}
}
};
// NOTE(review): this check runs before t1 starts, so `failure` is always
// empty here; failures from t1 are only visible on later iterations.
if (failure.size() > 0)
throw failure.get(0);
t1.start();
// close(false): abort running merges rather than waiting for them.
writer.close(false);
t1.join();
// Make sure reader can read
IndexReader reader = IndexReader.open(directory, true);
reader.close();
// Reopen
writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
}
writer.close();
}
directory.close();
}
// Used by test cases below
// Worker thread that updateDocument()s as fast as it can for ~200ms,
// tolerating simulated disk-full IOExceptions; any other failure is either
// reported via `error` (when noErrors) or just stops the thread.
private class IndexerThread extends Thread {
// set when a simulated disk-full IOException was observed
boolean diskFull;
// first unexpected Throwable, only recorded when noErrors is set
Throwable error;
// NOTE(review): never assigned or read — appears to be an unused leftover
AlreadyClosedException ace;
IndexWriter writer;
// when true, unexpected exceptions are recorded as test failures
boolean noErrors;
// written only by this thread; polled by the test thread (hence volatile)
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
// These two messages identify simulated failures from MockRAMDirectory.
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
// Give up after 6 simulated disk-full hits.
if (fullCount++ >= 5)
break;
} else {
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// noErrors=false: exceptions hitting the indexer threads are tolerated.
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Close while the indexer threads are still running.
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
int count = 0;
while(tdocs.next()) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
// Cap the directory at (roughly) its current size so the very next
// write hits a simulated disk full.
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: simulated disk full
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: disk is still full
}
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: close still cannot write
}
// Make sure once disk space is avail again, we can
// cleanly close (0 removes the size cap per the usage here):
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// Grow the allowed size a little each iteration so disk full strikes
// at varying points in indexing/merging.
dir.setMaxSizeInBytes(4*1024+20*iter);
// noErrors=true: any exception other than simulated disk full is a failure.
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close (0 removes the size cap per the usage here):
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// MockRAMDirectory failure hook that throws an IOException whenever
// FieldsWriter.flushDocument or DocumentsWriter.abort appears on the call
// stack; with onlyOnce, it disarms itself before the first throw.
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
  private boolean onlyOnce;
  public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;  // hook is currently disarmed
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      String method = frame.getMethodName();
      if ("abort".equals(method) || "flushDocument".equals(method)) {
        if (onlyOnce) {
          doFail = false;  // disarm so we fail exactly once
        }
        //System.out.println(Thread.currentThread().getName() + ": now fail");
        //new Throwable().printStackTrace(System.out);
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException: index a few docs, arm the failure so the next
// flush/abort throws, then verify the writer recovers once it is disarmed.
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
// Seed the index before arming the failure.
for(int i=0;i<6;i++)
writer.addDocument(doc);
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
// commit() forces the flush where the armed failure fires.
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
// expected: the injected failure
}
failure.clearDoFail();
// Writer must still accept docs and close cleanly after the failure.
writer.addDocument(doc);
writer.close(false);
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException: arm the failure while indexer threads
// run, then verify close() either succeeds or, after disarming, succeeds on
// retry — and that the surviving index is intact.
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Let the threads get going before arming the failure.
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
// close hit the injected failure: disarm and retry.
failure.clearDoFail();
writer.close(false);
}
if (success) {
// Index must be fully readable: every live doc and its term vectors.
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130: an initial IOException, followed by a second
// IOException during rollback(), must be handled cleanly.
public void testIOExceptionDuringAbort() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyOnAbortOrFlush(false);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: same scenario, but the injected failure fires only once.
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyOnAbortOrFlush(true);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: same scenario under concurrent indexing threads.
public void testIOExceptionDuringAbortWithThreads() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyOnAbortOrFlush(false);
  _testMultipleThreadsFailure(failure);
}

// LUCENE-1130: concurrent variant where the failure fires only once.
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyOnAbortOrFlush(true);
  _testMultipleThreadsFailure(failure);
}
// Failure that simulates an IOException whenever
// DocumentsWriter.closeDocStore is on the current call stack.
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
  private boolean onlyOnce;

  public FailOnlyInCloseDocStore(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("closeDocStore".equals(frame.getMethodName())) {
        if (onlyOnce) {
          // Disarm before throwing so the failure fires exactly once.
          doFail = false;
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: an IOException thrown from closeDocStore must be handled.
public void testIOExceptionDuringCloseDocStore() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyInCloseDocStore(false);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: closeDocStore failure that fires only a single time.
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyInCloseDocStore(true);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: closeDocStore failure under concurrent indexing threads.
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyInCloseDocStore(false);
  _testMultipleThreadsFailure(failure);
}

// LUCENE-1130: concurrent variant where the failure fires only once.
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyInCloseDocStore(true);
  _testMultipleThreadsFailure(failure);
}
// Failure that simulates an IOException while
// org.apache.lucene.index.DocFieldProcessor.flush is writing a segment.
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
  private boolean onlyOnce;

  public FailOnlyInWriteSegment(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("flush".equals(frame.getMethodName())
          && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName())) {
        if (onlyOnce) {
          // Disarm before throwing so the failure fires exactly once.
          doFail = false;
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: an IOException while writing a segment must be handled.
public void testIOExceptionDuringWriteSegment() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyInWriteSegment(false);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: writeSegment failure that fires only a single time.
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
  MockRAMDirectory.Failure failure = new FailOnlyInWriteSegment(true);
  _testSingleThreadFailure(failure);
}

// LUCENE-1130: writeSegment failure under concurrent indexing threads.
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyInWriteSegment(false);
  _testMultipleThreadsFailure(failure);
}

// LUCENE-1130: concurrent variant where the failure fires only once.
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
  MockRAMDirectory.Failure failure = new FailOnlyInWriteSegment(true);
  _testMultipleThreadsFailure(failure);
}
// LUCENE-1084: with an unlimited field length, the trailing token of a
// very long field must still be indexed.
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  // 10,000 "a" tokens followed by a single "x" sentinel token.
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The sentinel is only findable if the field was not truncated.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-1044: simulate a checksum/corruption error in the newest
// segments_N file and verify the reader falls back to the previous
// (valid) generation instead of failing.
public void testSegmentsChecksumError() throws IOException {
  Directory dir = new MockRAMDirectory();
  // Fix: declare and assign in one step (was a dead "= null" init).
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();

  long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);

  // Fabricate a bogus segments_(N+1): a copy of the current segments
  // file with its final byte corrupted.
  final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
  IndexInput in = dir.openInput(segmentsFileName);
  IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
  out.copyBytes(in, in.length()-1);
  byte b = in.readByte();
  out.writeByte((byte) (1+b));
  out.close();
  in.close();

  // Opening the index must skip the corrupt generation and succeed.
  IndexReader reader = null;
  try {
    reader = IndexReader.open(dir, true);
  } catch (IOException e) {
    e.printStackTrace(System.out);
    fail("segmentInfos failed to retry fallback to correct segments_N file");
  }
  reader.close();
}
// LUCENE-1044: test writer.commit() when ac=false
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// A reader opened before commit() must not see the 23 pending docs.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// After commit(), a reopened reader sees them; the old reader's view
// must be unchanged.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// The 17 new docs stay invisible until the next commit().
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// Now all 23 + 17 = 40 docs are visible.
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// Failure that simulates an IOException raised from inside
// MockRAMDirectory.sync(); records whether it ever fired.
private static class FailOnlyInSync extends MockRAMDirectory.Failure {
  boolean didFail;

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    StackTraceElement[] trace = new Exception().getStackTrace();
    for (StackTraceElement frame : trace) {
      // doFail is re-checked per frame, as in the original, in case it
      // is cleared concurrently while we walk the stack.
      if (doFail
          && "org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName())
          && "sync".equals(frame.getMethodName())) {
        didFail = true;
        throw new IOException("now failing on purpose during sync");
      }
    }
  }
}
// LUCENE-1044: test exception during sync
public void testExceptionDuringSync() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
failure.setDoFail();
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
for (int i = 0; i < 23; i++) {
addDoc(writer);
// Commit on every other add; some of these commits will hit the
// injected sync() failure.
if ((i-1)%2 == 0) {
try {
writer.commit();
} catch (IOException ioe) {
// expected
}
}
}
// Wait for background merges so the failure has had a chance to fire.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(failure.didFail);
failure.clearDoFail();
writer.close();
// Despite the sync failures, all 23 docs must have made it in.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
dir.close();
}
// LUCENE-1168
public void testTermVectorCorruption() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Two docs without term vectors, then one with, so the merge must
// handle segments whose term-vector files differ.
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
writer.optimize();
writer.close();
// Reading every doc and its term vectors must not throw.
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<reader.numDocs();i++) {
reader.document(i);
reader.getTermFreqVectors(i);
}
reader.close();
// Also exercise addIndexes from a copy of this same index.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Directory[] indexDirs = {new MockRAMDirectory(dir)};
writer.addIndexes(indexDirs);
writer.optimize();
writer.close();
}
dir.close();
}
// LUCENE-1168
public void testTermVectorCorruption2() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Docs 0 and 1 have no term vectors; doc 2 does.
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
writer.optimize();
writer.close();
// After optimize, term vectors must still be attributed to the
// correct documents (only doc 2 has them).
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.getTermFreqVectors(0)==null);
assertTrue(reader.getTermFreqVectors(1)==null);
assertTrue(reader.getTermFreqVectors(2)!=null);
reader.close();
}
dir.close();
}
// LUCENE-1168: term vectors must survive reopening a writer and merging
// segments written by two separate writer sessions.
public void testTermVectorCorruption3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
      IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
      new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));
  // Fix: dropped the dead duplicate "new Document()" assignment.
  // One stored field plus one term-vector field per document.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();

  // Reopen and add more docs so merges combine old and new segments.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()).setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
      new LogDocMergePolicy()));
  for(int i=0;i<6;i++)
    writer.addDocument(document);
  writer.optimize();
  writer.close();

  // Every document must still expose its term vectors and stored field.
  IndexReader reader = IndexReader.open(dir, true);
  for(int i=0;i<10;i++) {
    reader.getTermFreqVectors(i);
    reader.document(i);
  }
  reader.close();
  dir.close();
}
// LUCENE-1084: a user-specified max field length large enough for the
// whole field must keep the trailing token indexable.
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));

  // 10,000 "a" tokens, then a single "x" sentinel at the very end.
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The sentinel is only findable if no truncation happened.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: expungeDeletes must physically remove deleted docs even
// when 2 singular merges are required.
public void testExpungeDeletes() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
      IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // Fix: dropped the dead duplicate "new Document()" assignment.
  // One stored field plus one term-vector field per document.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();

  // Delete two of the ten docs through a read/write reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();

  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();

  // After expunging, maxDoc shrinks down to the live doc count.
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
      IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // Large merge factor at index time so many small segments survive.
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
  // Fix: dropped the dead duplicate "new Document()" assignment.
  // One stored field plus one term-vector field per document.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();

  // Delete every other doc (49 of the 98).
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();

  // Small merge factor here forces many adjacent merges in expunge.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();

  // After expunging, only live docs remain.
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
      IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // Large merge factor at index time so many small segments survive.
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
  // Fix: dropped the dead duplicate "new Document()" assignment.
  // One stored field plus one term-vector field per document.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();

  // Delete every other doc (49 of the 98).
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();

  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Force many merges to happen
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  // doWait=false: close() below must still wait for the merges.
  writer.expungeDeletes(false);
  writer.close();

  // After expunging, only live docs remain.
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179: indexing a field whose name is the empty string must work.
public void testEmptyFieldName() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // Fix: release the directory (every sibling test closes its dir;
  // this one leaked it).
  dir.close();
}
// LUCENE-1198: writer whose testPoint hook can be armed (via doFail) to
// throw while a DocumentsWriter thread state is being initialized.
private static final class MockIndexWriter extends IndexWriter {
  boolean doFail;

  public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    if (doFail && name.equals("DocumentsWriter.ThreadState.init start")) {
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1198: an exception thrown from thread-state initialization
// must leave both the writer and the on-disk index consistent.
public void testExceptionDocumentsWriterInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter writer = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(doc);

  // Arm the failure: the next add must throw from testPoint().
  writer.doFail = true;
  try {
    writer.addDocument(doc);
    fail("did not hit exception");
  } catch (RuntimeException re) {
    // expected
  }
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
// Analyzer whose token stream (CrashingFilter) throws partway through
// the document, simulating a failure just before the flush.
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected
}
// The writer must remain usable after the aborted document.
w.addDocument(doc);
w.close();
dir.close();
}
// Writer whose testPoint hook throws at the "startMergeInit" test
// point when armed via doFail, recording in failed that it did so.
private static final class MockIndexWriter2 extends IndexWriter {
  boolean doFail;
  boolean failed;

  public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    if (doFail && name.equals("startMergeInit")) {
      failed = true;
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1210: an exception thrown during merge initialization must be
// recorded and must not corrupt the writer.
public void testExceptionOnMergeInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
  ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
  MockIndexWriter2 writer = new MockIndexWriter2(dir, conf);
  writer.doFail = true;
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED));
  // Keep adding until the injected "startMergeInit" failure surfaces.
  for (int i = 0; i < 10; i++) {
    try {
      writer.addDocument(doc);
    } catch (RuntimeException re) {
      break;
    }
  }
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  assertTrue(writer.failed);
  writer.close();
  dir.close();
}
// Writer that records whether the doBeforeFlush/doAfterFlush hooks
// were invoked (exercised by testDoBeforeAfterFlush).
private static final class MockIndexWriter3 extends IndexWriter {
  boolean beforeWasCalled;
  boolean afterWasCalled;

  public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }

  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }
}
// LUCENE-1222: the doBeforeFlush/doAfterFlush hooks must fire both for
// a commit after adds and for a commit after deletes.
public void testDoBeforeAfterFlush() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter3 writer = new MockIndexWriter3(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED));

  writer.addDocument(doc);
  writer.commit();
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);

  // Reset the flags; a delete-then-commit must trigger them again.
  writer.beforeWasCalled = false;
  writer.afterWasCalled = false;
  writer.deleteDocuments(new Term("field", "field"));
  writer.commit();
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);
  writer.close();

  // The delete went through: one slot, zero live docs.
  IndexReader ir = IndexReader.open(dir, true);
  assertEquals(1, ir.maxDoc());
  assertEquals(0, ir.numDocs());
  ir.close();
  dir.close();
}
private static class FailOnlyInCommit extends MockRAMDirectory.Failure {
// fail1 fires while SegmentInfos.prepareCommit is on the stack with no
// delete in progress; fail2 fires during the file delete that follows.
// testExceptionsDuringCommit asserts both eventually fire.
boolean fail1, fail2;
@Override
public void eval(MockRAMDirectory dir) throws IOException {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean isCommit = false;
boolean isDelete = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
isCommit = true;
if ("org.apache.lucene.store.MockRAMDirectory".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
isDelete = true;
}
if (isCommit) {
if (!isDelete) {
// First failure: abort the commit itself.
fail1 = true;
throw new RuntimeException("now fail first");
} else {
// Second failure: fail again while rolling back (deleting files).
fail2 = true;
throw new IOException("now fail during delete");
}
}
}
}
// LUCENE-1214
public void testExceptionsDuringCommit() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInCommit failure = new FailOnlyInCommit();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
dir.failOn(failure);
try {
w.close();
fail();
} catch (IOException ioe) {
// The original RuntimeException must propagate; it must not be
// replaced by the secondary IOException thrown during rollback.
fail("expected only RuntimeException");
} catch (RuntimeException re) {
// Expected
}
// Both injected failures (commit + rollback delete) must have fired.
assertTrue(failure.fail1 && failure.fail2);
w.rollback();
dir.close();
}
// Pairs of (input, expected): each even entry contains invalid UTF-16
// (unpaired or backwards surrogates); the following odd entry is the
// same string after each invalid surrogate is replaced with U+FFFD,
// which is what indexing/storing is expected to produce.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510: strings containing invalid UTF-16 (unpaired or backwards
// surrogates) must be indexed and stored in their U+FFFD-sanitized form.
public void testInvalidUTF16() throws Throwable {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  // One field per (input, expected) pair from utf8Data.
  final int count = utf8Data.length / 2;
  Document doc = new Document();
  for (int i = 0; i < count; i++) {
    doc.add(new Field("f" + i, utf8Data[2 * i], Field.Store.YES, Field.Index.ANALYZED));
  }
  writer.addDocument(doc);
  writer.close();

  IndexReader ir = IndexReader.open(dir, true);
  Document stored = ir.document(0);
  for (int i = 0; i < count; i++) {
    String expected = utf8Data[2 * i + 1];
    assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f" + i, expected)));
    assertEquals("field " + i + " is incorrect", expected, stored.getField("f" + i).stringValue());
  }
  ir.close();
  dir.close();
}
// LUCENE-510
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
// Round-trip every code point through UTF16toUTF8/UTF8toUTF16 and
// compare against the JDK's own UTF-8 conversion.
// NOTE(review): the bound excludes U+10FFFF itself ("<" rather than
// "<="); confirm whether skipping the last code point is intentional.
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode supplementary code points as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// The encoded bytes must match the JDK encoder byte-for-byte.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Random source for the unicode fuzzing tests below; each test
// initializes it via newRandom().
Random r;
// Returns a uniformly random int in [0, lim).
private int nextInt(int lim) {
return r.nextInt(lim);
}
// Returns a uniformly random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 data and writes
// the chars expected after sanitization into expected[]. Returns true
// if an illegal (unpaired) surrogate was generated, in which case the
// matching expected[] entry is U+FFFD.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t picks what to generate: surrogate pair, ASCII, 2-byte, 3-byte,
// above-surrogate BMP, or a possibly-illegal lone surrogate.
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (r.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The lone surrogate must decode to the replacement char U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
public void testRandomUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
for(int iter=0;iter<100000*_TestUtil.getRandomMultiplier();iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
// Only compare against the JDK encoder for legal UTF-16 input; the
// JDK may treat illegal surrogates differently.
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Decoding back must always yield the expected (sanitized) chars.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
public void testIncrementalUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
for(int iter=0;iter<100000*_TestUtil.getRandomMultiplier();iter++) {
// Keep a random prefix of the previous string so the incremental
// (suffix-only) decode path is exercised; restart after illegal data.
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
// Legal input must encode identically to the JDK encoder.
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that changed vs. the previous iteration; only
// the bytes from there on need re-decoding.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode (from bytePrefix) and full decode must both
// produce the expected chars.
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
// Token stream yielding "a", "b", "c" where the FIRST token has a
// position increment of 0 (the "negative position" scenario).
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
// The phrase "a b c" must still match despite the zero first increment.
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// "a" must be recorded at position 0, not at a negative position.
TermPositions tps = s.getIndexReader().termPositions(new Term("field", "a"));
assertTrue(tps.next());
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
assertTrue(_TestUtil.checkIndex(dir));
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
// prepareCommit() alone must not make the 23 docs visible...
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// ...only the following commit() does; existing readers are unchanged.
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
// Second batch: again invisible after prepareCommit()...
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
// ...and visible after commit().
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// LUCENE-1274: rollback() after prepareCommit() must discard the prepared
// (but uncommitted) changes, leaving the index usable for a fresh writer.
public void testPrepareCommitRollback() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
// rollback re-writes files from the prior commit; let the mock dir allow it
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// the prepared-but-uncommitted docs must not be visible
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// after rollback every reader view stays at 0 docs
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
// a fresh writer over the rolled-back index commits normally
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
// LUCENE-1274: prepareCommit() immediately followed by commit() on a writer
// with no pending changes must still yield a valid, empty commit point.
public void testPrepareCommitNoChanges() throws IOException {
MockRAMDirectory directory = new MockRAMDirectory();
IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.prepareCommit();
w.commit();
w.close();
IndexReader r = IndexReader.open(directory, true);
assertEquals(0, r.numDocs());
r.close();
directory.close();
}
// Harness for the addIndexes-vs-commit/close stress tests: builds a small
// source index (dir), a destination writer (writer2) over dir2, then runs
// NUM_THREADS threads each looping over the subclass-defined doBody().
private abstract static class RunAddIndexesThreads {
Directory dir, dir2; // dir: source index; dir2: destination index
final static int NUM_INIT_DOCS = 17; // docs seeded into the source index
IndexWriter writer2; // shared destination writer the threads hammer
final List<Throwable> failures = new ArrayList<Throwable>(); // guarded by synchronized(failures)
volatile boolean didClose; // set once close()/rollback of writer2 was requested
final IndexReader[] readers; // open readers over the source index
final int NUM_COPY; // number of source-index copies each iteration adds
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
public RunAddIndexesThreads(int numCopy) throws Throwable {
NUM_COPY = numCopy;
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
dir2 = new MockRAMDirectory();
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.commit();
readers = new IndexReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++)
readers[i] = IndexReader.open(dir, true);
}
// Starts NUM_THREADS workers; each runs doBody() numIter times, or forever
// when numIter <= 0 (until an exception is routed to handle()).
void launchThreads(final int numIter) {
for(int i=0;i<NUM_THREADS;i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
// each thread adds from its own private copies of the source dir
final Directory[] dirs = new Directory[NUM_COPY];
for(int k=0;k<NUM_COPY;k++)
dirs[k] = new MockRAMDirectory(dir);
int j=0;
while(true) {
// System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
if (numIter > 0 && j == numIter)
break;
doBody(j++, dirs);
}
} catch (Throwable t) {
handle(t);
}
}
};
}
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
}
void joinThreads() throws Exception {
for(int i=0;i<NUM_THREADS;i++)
threads[i].join();
}
// Closes the destination writer, flagging didClose first so handle()
// implementations can tolerate close-induced exceptions.
void close(boolean doWait) throws Throwable {
didClose = true;
writer2.close(doWait);
}
void closeDir() throws Throwable {
for(int i=0;i<NUM_COPY;i++)
readers[i].close();
dir2.close();
}
// One stress iteration; j is the iteration counter, dirs the thread-local copies.
abstract void doBody(int j, Directory[] dirs) throws Throwable;
// Invoked with any throwable escaping doBody(); decides whether it is a failure.
abstract void handle(Throwable t);
}
// Worker mixing addIndexes (Directory and IndexReader variants), optimize,
// maybeMerge and commit; in this scenario nothing is expected to fail, so
// every throwable is recorded as a failure.
private class CommitAndAddIndexes extends RunAddIndexesThreads {
public CommitAndAddIndexes(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void handle(Throwable t) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
// Cycles through five operations keyed on the iteration number.
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.addIndexes(dirs);
writer2.maybeMerge();
break;
case 4:
writer2.commit();
}
}
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: addIndexes and commit racing from multiple threads must leave
// an index containing exactly the documents that were added.
public void testAddIndexesWithThreads() throws Throwable {
final int NUM_ITER = 15;
final int NUM_COPY = 3;
CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
c.launchThreads(NUM_ITER);
for(int i=0;i<100;i++)
addDoc(c.writer2);
c.joinThreads();
// 100 docs from this thread, plus: per thread, 4 of every 5 iterations
// call addIndexes, each adding NUM_COPY copies of the 17-doc source index
int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
assertEquals(expectedNumDocs, c.writer2.numDocs());
c.close(true);
assertTrue(c.failures.size() == 0);
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
assertEquals(expectedNumDocs, reader.numDocs());
reader.close();
c.closeDir();
}
// Variant of CommitAndAddIndexes for the concurrent-close scenario:
// AlreadyClosedException and NullPointerException are expected side effects
// of closing the writer under the threads, so they are tolerated; any other
// throwable is recorded as a real failure.
private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
public CommitAndAddIndexes2(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void handle(Throwable t) {
final boolean tolerated = (t instanceof AlreadyClosedException) || (t instanceof NullPointerException);
if (tolerated) {
return;
}
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: closing the writer while addIndexes runs on other threads
// must not corrupt the index or produce unexpected exceptions.
public void testAddIndexesWithClose() throws Throwable {
final int NUM_COPY = 3;
CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
//c.writer2.setInfoStream(System.out);
c.launchThreads(-1);
// Close w/o first stopping/joining the threads
c.close(true);
//c.writer2.close();
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// Variant used for the abort-while-running scenarios (close(false)/rollback):
// tolerates close- and abort-induced exceptions once didClose is set.
private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
public CommitAndAddIndexes3(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.optimize();
// NOTE(review): no break here — case 3 falls through into commit().
// Looks deliberate (optimize then commit in one iteration) but confirm.
case 4:
writer2.commit();
}
}
// Aborted merges and close-related exceptions are expected once the writer
// is being closed/rolled back; only report them if didClose is not yet set.
@Override
void handle(Throwable t) {
boolean report = true;
if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
report = !didClose;
} else if (t instanceof IOException) {
Throwable t2 = t.getCause();
if (t2 instanceof MergePolicy.MergeAbortedException) {
report = !didClose;
}
}
if (report) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
}
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: close(false) — abort running merges instead of waiting —
// while addIndexes runs on other threads must leave a consistent index.
public void testAddIndexesWithCloseNoWait() throws Throwable {
final int NUM_COPY = 50;
CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
c.launchThreads(-1);
// let the threads get some work in flight before aborting
Thread.sleep(500);
// Close w/o first stopping/joining the threads
c.close(false);
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: rollback() while addIndexes runs on other threads must leave
// a consistent index; didClose is set manually so handle() tolerates aborts.
public void testAddIndexesWithRollback() throws Throwable {
final int NUM_COPY = 50;
CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
c.launchThreads(-1);
// let the threads get some work in flight before aborting
Thread.sleep(500);
// Close w/o first stopping/joining the threads
c.didClose = true;
c.writer2.rollback();
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// LUCENE-1347
// LUCENE-1347: writer that injects a RuntimeException at the
// "rollback before checkpoint" test point when doFail is set.
private static final class MockIndexWriter4 extends IndexWriter {
public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
boolean doFail; // when true, the next rollback test point throws
@Override
boolean testPoint(String name) {
if (doFail && name.equals("rollback before checkpoint"))
throw new RuntimeException("intentionally failing");
return true;
}
}
// LUCENE-1347
// LUCENE-1347: if rollback() throws (here via an injected test-point
// failure), the writer must not be left hung/locked — a subsequent
// rollback() must still succeed.
public void testRollbackExceptionHang() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter4 w = new MockIndexWriter4(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(w);
w.doFail = true;
try {
w.rollback();
fail("did not hit intentional RuntimeException");
} catch (RuntimeException re) {
// expected
}
w.doFail = false;
// second rollback must succeed and closes the writer
w.rollback();
// fixed: release the directory, as every sibling test does
dir.close();
}
// LUCENE-1219
// LUCENE-1219: a binary field built from (value, offset, length) must store
// exactly the [offset, offset+length) slice, and that slice must round-trip
// through indexing and retrieval.
public void testBinaryFieldOffsetLength() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
// store bytes 10..26 of b (first stored byte is 10+77 = 87)
Field f = new Field("binary", b, 10, 17);
byte[] bx = f.getBinaryValue();
assertTrue(bx != null);
assertEquals(50, bx.length);
assertEquals(10, f.getBinaryOffset());
assertEquals(17, f.getBinaryLength());
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
// fixed: was assertEquals(17, b.length, 17) — the double-overload with a
// delta of 17, which would have accepted any length from 0 to 34
assertEquals(17, b.length);
assertEquals(87, b[0]);
ir.close();
dir.close();
}
// LUCENE-1382
// LUCENE-1382: user data passed to commit(Map) must be readable from the
// index and must survive later commits that pass no user data.
public void testCommitUserData() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// the user data is visible both via the static helper and via a reader
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// optimize+close commits again without user data; previous data must persist
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// Injects random IOExceptions during optimize() and verifies that any
// IOException surfaced to the caller carries a root cause.
public void testOptimizeExceptions() throws IOException {
RAMDirectory startDir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
IndexWriter w = new IndexWriter(startDir, conf);
for(int i=0;i<27;i++)
addDoc(w);
w.close();
for(int i=0;i<200;i++) {
// fresh copy of the baseline index for each attempt
MockRAMDirectory dir = new MockRAMDirectory(startDir);
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
w = new IndexWriter(dir, conf);
// fail roughly half of all IO operations after the first 100
dir.setRandomIOExceptionRate(0.5, 100);
try {
w.optimize();
} catch (IOException ioe) {
if (ioe.getCause() == null)
fail("optimize threw IOException without root cause");
}
w.close();
dir.close();
}
}
// LUCENE-1429
// LUCENE-1429: an OutOfMemoryError during close() must propagate, and a
// second close() afterwards must succeed rather than throw IllegalStateException.
public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
final List<Throwable> thrown = new ArrayList<Throwable>();
final IndexWriter writer = new IndexWriter(new MockRAMDirectory(),
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())) {
@Override
public void message(final String message) {
// throw a fake OOME exactly once, at the start of the close flush
if (message.startsWith("now flush at close") && 0 == thrown.size()) {
thrown.add(null);
throw new OutOfMemoryError("fake OOME at " + message);
}
}
};
// need to set an info stream so message is called
writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
try {
writer.close();
fail("OutOfMemoryError expected");
}
catch (final OutOfMemoryError expected) {}
// throws IllegalStateEx w/o bug fix
writer.close();
}
// LUCENE-1442
// LUCENE-1442: with NOT_ANALYZED fields, repeated field instances must
// accumulate term-vector offsets correctly, including across an empty value.
public void testDoubleOffsetCounting() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
// empty value between the repeats — must not shift subsequent offsets
Field f2 = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// Token "" occurred once
assertEquals(1, termOffsets.length);
assertEquals(8, termOffsets[0].getStartOffset());
assertEquals(8, termOffsets[0].getEndOffset());
// Token "abcd" occurred three times
termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(1);
assertEquals(3, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(4, termOffsets[1].getStartOffset());
assertEquals(8, termOffsets[1].getEndOffset());
assertEquals(8, termOffsets[2].getStartOffset());
assertEquals(12, termOffsets[2].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1442
// LUCENE-1442: same as above but ANALYZED — the analyzer inserts a position
// gap between the repeated field instances, so the second occurrence starts
// at offset 5, not 4.
public void testDoubleOffsetCounting2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(5, termOffsets[1].getStartOffset());
assertEquals(9, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: trailing whitespace in a repeated field value must be counted
// into the offset of the next field instance ("abcd " is 8 chars consumed).
public void testEndOffsetPositionCharAnalyzer() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd   ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: a CachingTokenFilter-backed field, added twice, must produce
// the same offset accounting as a plain analyzed field.
public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
Analyzer analyzer = new MockAnalyzer();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd   ")));
Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: a trailing stop word ("the") is removed from the token stream
// but its characters must still advance the offset for the next field instance.
public void testEndOffsetPositionStopFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(9, termOffsets[1].getStartOffset());
assertEquals(13, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: offsets in the second field instance must account for the full
// character length of the first instance ("abcd the   " = 11 chars).
public void testEndOffsetPositionStandard() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd the   ", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
// "crunch" starts after the 11 chars of f: 11..17
termOffsets = tpv.getOffsets(1);
assertEquals(11, termOffsets[0].getStartOffset());
assertEquals(17, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(2);
assertEquals(18, termOffsets[0].getStartOffset());
assertEquals(21, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: a leading empty field instance must not add any offset; the
// second instance's tokens start at 0 as if the empty field were absent.
public void testEndOffsetPositionStandardEmptyField() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(6, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(7, termOffsets[0].getStartOffset());
assertEquals(10, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: an empty field instance sandwiched between two non-empty ones
// must not disturb the offsets of the instance that follows it.
public void testEndOffsetPositionStandardEmptyField2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field f2 = new Field("field", "crunch", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(5, termOffsets[0].getStartOffset());
assertEquals(11, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
// LUCENE-1468: opening an IndexWriter with create=true must delete only index
// files, leaving unrelated files in the directory intact.
public void testOtherFiles() throws Throwable {
File indexDir = new File(TEMP_DIR, "otherfiles");
Directory dir = FSDirectory.open(indexDir);
try {
// Create my own random file:
IndexOutput out = dir.createOutput("myrandomfile");
out.writeByte((byte) 42);
out.close();
new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(dir.fileExists("myrandomfile"));
} finally {
dir.close();
_TestUtil.rmDir(indexDir);
}
}
// Regression test: addIndexes of a reader and its clone (two readers over the
// same directory) must not deadlock, and all docs must land in the target.
public void testDeadlock() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
MockRAMDirectory dir2 = new MockRAMDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(new IndexReader[] {r1, r2});
writer.close();
// 3 original docs + 1 from each of the two readers
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Indexing thread that the main test interrupts repeatedly: it must survive
// any number of interrupts (seen as ThreadInterruptedException) and leave a
// consistent index behind. failed is set on any unexpected outcome.
private class IndexerThreadInterrupt extends Thread {
volatile boolean failed;         // set when an unexpected exception/corruption occurs
volatile boolean finish;         // main thread sets this to stop the loop
boolean allowInterrupt = false;  // handshake: main thread only interrupts when true
@Override
public void run() {
RAMDirectory dir = new RAMDirectory();
IndexWriter w = null;
boolean first = true;
while(!finish) {
try {
while(true) {
// close any writer left open by a previous interrupted iteration
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
//((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
if (!first && !allowInterrupt) {
// tell main thread it can interrupt us any time,
// starting now
allowInterrupt = true;
}
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
// commit after every add to maximize interruptible IO points
for(int i=0;i<100;i++) {
w.addDocument(doc);
w.commit();
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
if (first && !allowInterrupt) {
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status.  In java 1.5, it'll throw an
// incorrect ClassNotFoundException.  In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
first = false;
}
}
} catch (ThreadInterruptedException re) {
// this is the expected way an interrupt surfaces from IndexWriter
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
// Make sure IW cleared the interrupted bit
// TODO: remove that false once test is fixed for real
if (false && interrupted()) {
System.out.println("FAILED; InterruptedException hit but thread.interrupted() was true");
e.printStackTrace(System.out);
failed = true;
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
// final consistency check: the index must be readable after all interrupts
if (!failed) {
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
}
}
// Fires up to 100 interrupts at the indexing thread, each only after the
// thread signals readiness via allowInterrupt, then asks it to finish.
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(1);
if (t.allowInterrupt) {
i++;
// reset the handshake before interrupting so the thread re-arms it
t.allowInterrupt = false;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.allowInterrupt = false;
t.finish = true;
// final interrupt breaks the thread out of its loop
t.interrupt();
t.join();
assertFalse(t.failed);
}
// Combines stored-binary and indexed-tokenstream use of the same Field
// instances across three docs (with a flush and a merge in between) and
// verifies both the stored slices and the indexed terms survive.
public void testIndexStoreCombos() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
// stored binary slice [10, 27); also indexed via an explicit token stream
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize();   // force segment merge.
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
// fixed: was assertEquals(17, b.length, 17) — the double-overload with a
// delta of 17, which would have accepted any length from 0 to 34
assertEquals(17, b.length);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
assertTrue(ir.termDocs(new Term("binary","doc1field1")).next());
assertTrue(ir.termDocs(new Term("binary","doc2field1")).next());
assertTrue(ir.termDocs(new Term("binary","doc3field1")).next());
assertTrue(ir.termDocs(new Term("string","doc1field2")).next());
assertTrue(ir.termDocs(new Term("string","doc2field2")).next());
assertTrue(ir.termDocs(new Term("string","doc3field2")).next());
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// LUCENE-1727: stored fields must come back in the exact order they were
// added to the document, even with duplicate field names interleaved.
public void testStoredFieldsOrder() throws Throwable {
Directory d = new MockRAMDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
// iterate stored fields and check add-order, not name order
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// A term containing U+FFFF must be indexed and searchable as-is, and must
// not collide with or corrupt neighboring terms.
public void testEmbeddedFFFF() throws Throwable {
Directory d = new MockRAMDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
// the U+FFFF-embedded token occurs in exactly one doc
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
// An index whose only document has no fields, written without compound
// files, must still pass CheckIndex.
public void testNoDocsIndex() throws Throwable {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
lmp.setUseCompoundDocStore(false);
// capture the info stream instead of printing it
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
// LUCENE-2095: when multiple threads add+commit concurrently, commit() must
// not return before the added document is visible to a reopened reader.
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = new MockRAMDirectory();
final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// each thread writes a unique term per iteration
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// after commit returns, a reopen must observe the new term
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
w.close();
dir.close();
assertFalse(failed.get());
}
// both start & end are inclusive
// Returns a uniformly random int in [start, end]; both bounds inclusive.
private final int getInt(Random r, int start, int end) {
final int span = end - start + 1;
return r.nextInt(span) + start;
}
// Walks all terms of field "f" verifying strict ascending UTF16 code-unit
// order (terms here are at most two chars), that every term was one we
// indexed, and that each seen term can be seeked to again.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
char[] last = new char[2];
int lastLength = 0;
Set<String> seenTerms = new HashSet<String>();
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
UnicodeUtil.UTF8toUTF16(term.bytes, term.offset, term.length, utf16);
assertTrue(utf16.length <= 2);
// Make sure last term comes before current one, in
// UTF16 sort order
int i = 0;
for(i=0;i<lastLength && i<utf16.length;i++) {
assertTrue("UTF16 code unit " + termDesc(new String(utf16.result, 0, utf16.length)) + " incorrectly sorted after code unit " + termDesc(new String(last, 0, lastLength)), last[i] <= utf16.result[i]);
if (last[i] < utf16.result[i]) {
break;
}
}
// Terms should not have been identical
assertTrue(lastLength != utf16.length || i < lastLength);
final String s = new String(utf16.result, 0, utf16.length);
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
// remember this term's code units for the next comparison
System.arraycopy(utf16.result, 0, last, 0, utf16.length);
lastLength = utf16.length;
}
// at the top-level reader we must have seen every indexed term exactly
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a single UTF16 code unit as "U+<hex>" for readable failure messages.
private final String asUnicodeChar(char c) {
  final String hex = Integer.toHexString(c);
  return "U+" + hex;
}
// Describes a one- or two-code-unit term as "U+x" or "U+x,U+y" for messages.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in UTF16 sort order by default.  Indexes random one-code-unit terms
// (deliberately drawn from both sides of the surrogate range) and random
// surrogate pairs, then verifies ordering on every sub-reader, the composite
// reader, and the optimized single-segment index.
public void testTermUTF16SortOrder() throws Throwable {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
  Document d = new Document();
  // NOT_ANALYZED so each value becomes exactly one term.
  Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
  d.add(f);
  char[] chars = new char[2];
  Random rnd = newRandom();
  final Set<String> allTerms = new HashSet<String>();
  for(int i=0;i<200*_TestUtil.getRandomMultiplier();i++) {
    final String s;
    if (rnd.nextBoolean()) {
      // Single char
      if (rnd.nextBoolean()) {
        // Above surrogates
        chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
      } else {
        // Below surrogates
        chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
      }
      s = new String(chars, 0, 1);
    } else {
      // Surrogate pair
      chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
      assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
      chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
      s = new String(chars, 0, 2);
    }
    allTerms.add(s);
    f.setValue(s);
    //System.out.println("add " + termDesc(s));
    writer.addDocument(d);
    // Periodic commits force multiple segments.
    if ((1+i) % 42 == 0) {
      writer.commit();
    }
  }
  IndexReader r = writer.getReader();
  // Test each sub-segment
  final IndexReader[] subs = r.getSequentialSubReaders();
  for(int i=0;i<subs.length;i++) {
    checkTermsOrder(subs[i], allTerms, false);
  }
  // Test multi segment (composite reader)
  checkTermsOrder(r, allTerms, true);
  r.close();
  writer.optimize();
  // Test optimized single segment
  r = writer.getReader();
  checkTermsOrder(r, allTerms, true);
  r.close();
  writer.close();
  dir.close();
}
// Opens a reader with an index divisor of 2 and verifies that all 300 terms
// of the single document are still enumerable and each maps to exactly doc 0.
public void testIndexDivisor() throws Exception {
  Directory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
  StringBuilder s = new StringBuilder();
  // must be > 256 (so the term index actually has multiple entries to skip)
  for(int i=0;i<300;i++) {
    s.append(' ').append(""+i);
  }
  Document d = new Document();
  Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
  d.add(f);
  w.addDocument(d);
  // getReader(2): every 2nd term-index entry is loaded.
  IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
  TermsEnum t = r.fields().terms("field").iterator();
  int count = 0;
  while(t.next() != null) {
    final DocsEnum docs = t.docs(null, null);
    // Each term occurs in exactly the one document (doc 0).
    assertEquals(0, docs.nextDoc());
    assertEquals(DocsEnum.NO_MORE_DOCS, docs.nextDoc());
    count++;
  }
  assertEquals(300, count);
  r.close();
  w.close();
  dir.close();
}
// Verifies deleteUnusedFiles(): a segment file held open by a reader must
// survive until the reader is closed; afterwards the writer (or the NRT
// reader's close, iter 0) may delete it.  iter 0 uses an NRT reader,
// iter 1 a regular committed reader.
public void testDeleteUnusedFiles() throws Exception {
  for(int iter=0;iter<2;iter++) {
    Directory dir = new MockRAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    Document doc = new Document();
    doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
    w.addDocument(doc);
    IndexReader r;
    if (iter == 0) {
      // use NRT
      r = w.getReader();
    } else {
      // don't use NRT
      w.commit();
      r = IndexReader.open(dir);
    }
    List<String> files = Arrays.asList(dir.listAll());
    assertTrue(files.contains("_0.cfs"));
    w.addDocument(doc);
    w.optimize();
    if (iter == 1) {
      w.commit();
    }
    IndexReader r2 = r.reopen();
    assertTrue(r != r2);
    files = Arrays.asList(dir.listAll());
    assertTrue(files.contains("_0.cfs"));
    // optimize created this
    assertTrue(files.contains("_2.cfs"));
    w.deleteUnusedFiles();
    files = Arrays.asList(dir.listAll());
    // r still holds this file open, so deleteUnusedFiles must not remove it
    assertTrue(files.contains("_0.cfs"));
    assertTrue(files.contains("_2.cfs"));
    r.close();
    if (iter == 0) {
      // on closing NRT reader, it calls writer.deleteUnusedFiles
      files = Arrays.asList(dir.listAll());
      assertFalse(files.contains("_0.cfs"));
    } else {
      // now writer can remove it
      w.deleteUnusedFiles();
      files = Arrays.asList(dir.listAll());
      assertFalse(files.contains("_0.cfs"));
    }
    // The live (optimized) segment must still be present.
    assertTrue(files.contains("_2.cfs"));
    w.close();
    r2.close();
    dir.close();
  }
}
// NOTE(review): method name has a typo ("Unsed" for "Unused"); kept as-is
// because renaming would change the public test name.
public void testDeleteUnsedFiles2() throws Exception {
  // Validates that iw.deleteUnusedFiles() also deletes unused index commits
  // in case a deletion policy which holds onto commits is used.
  Directory dir = new MockRAMDirectory();
  SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setIndexDeletionPolicy(sdp));
  // First commit
  Document doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  assertEquals(1, IndexReader.listCommits(dir).size());
  // Keep that commit alive via the snapshot
  sdp.snapshot("id");
  // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
  doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  assertEquals(2, IndexReader.listCommits(dir).size());
  // Releasing the snapshot makes the first commit unreferenced;
  // deleteUnusedFiles should then delete it.
  sdp.release("id");
  writer.deleteUnusedFiles();
  assertEquals(1, IndexReader.listCommits(dir).size());
  writer.close();
}
private static class FlushCountingIndexWriter extends IndexWriter {
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
public void doAfterFlush() {
flushCount++;
}
}
// With a 0.5 MB RAM buffer, alternately adds documents and buffers deletes
// until a flush triggers, asserting in both cases that a substantial amount
// of work (> 2500 ops) fits in the buffer before each flush.
public void testIndexingThenDeleting() throws Exception {
  final Random r = newRandom();
  Directory dir = new MockRAMDirectory();
  FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  //w.setInfoStream(System.out);
  Document doc = new Document();
  doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
  for(int iter=0;iter<6*_TestUtil.getRandomMultiplier();iter++) {
    int count = 0;
    final boolean doIndexing = r.nextBoolean();
    if (doIndexing) {
      // Add docs until a flush is triggered
      final int startFlushCount = w.flushCount;
      while(w.flushCount == startFlushCount) {
        w.addDocument(doc);
        count++;
      }
    } else {
      // Delete docs until a flush is triggered; "foo" never matches, so
      // this only exercises the buffered-deletes accounting.
      final int startFlushCount = w.flushCount;
      while(w.flushCount == startFlushCount) {
        w.deleteDocuments(new Term("foo", ""+count));
        count++;
      }
    }
    assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
  }
  w.close();
  dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  try {
    // Before the first commit there is no segments file to list.
    IndexReader.listCommits(dir);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException e) {
    // that's expected !
  }
  // No changes still should generate a commit, because it's a new index.
  writer.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
}
// LUCENE-2386: opening an IndexWriter over an empty FSDirectory that uses a
// NoLockFactory must succeed (listAll() inside IndexFileDeleter used to fail).
public void testEmptyFSDirWithNoLock() throws Exception {
  final File path = new File(TEMP_DIR, "emptyFSDirNoLock");
  final FSDirectory dir = FSDirectory.open(path, NoLockFactory.getNoLockFactory());
  final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.close();
}
public void testEmptyDirRollback() throws Exception {
  // Tests that if IW is created over an empty Directory, some documents are
  // indexed, flushed (but not committed) and then IW rolls back, then no
  // files are left in the Directory.
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2));
  // Creating over empty dir should not create any files.
  assertEquals(0, dir.listAll().length);
  Document doc = new Document();
  // create as many files as possible (stored fields + term vectors)
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // Adding just one document does not call flush yet (maxBufferedDocs=2).
  assertEquals("only the stored and term vector files should exist in the directory", 5, dir.listAll().length);
  doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // The second document should cause a flush.
  assertTrue("flush should have occurred and files created", dir.listAll().length > 5);
  // After rollback, IW should remove all files
  writer.rollback();
  assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
  // Since we rolled-back above, that close should be a no-op
  writer.close();
  assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
}
}
Left
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// Exercises maxDoc()/numDocs() bookkeeping across adds, deletes, optimize,
// and re-creating the index with OpenMode.CREATE.
public void testDocCount() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = null;
  IndexReader reader = null;
  int i;
  // Temporarily lower the global write-lock timeout; restore it in finally so
  // other tests are not affected even if writer construction throws.
  long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
  try {
    IndexWriterConfig.setDefaultWriteLockTimeout(2000);
    assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  } finally {
    IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
  }
  // add 100 documents
  for (i = 0; i < 100; i++) {
    addDoc(writer);
  }
  assertEquals(100, writer.maxDoc());
  writer.close();
  // delete 40 documents
  reader = IndexReader.open(dir, false);
  for (i = 0; i < 40; i++) {
    reader.deleteDocument(i);
  }
  reader.close();
  // test doc count before segments are merged/index is optimized
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(100, writer.maxDoc());
  writer.close();
  reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(60, reader.numDocs());
  reader.close();
  // optimize the index and check that the new doc count is correct
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(100, writer.maxDoc());
  assertEquals(60, writer.numDocs());
  writer.optimize();
  // optimize expunges the 40 deleted docs, so maxDoc drops to 60
  assertEquals(60, writer.maxDoc());
  assertEquals(60, writer.numDocs());
  writer.close();
  // check that the index reader gives the same numbers.
  reader = IndexReader.open(dir, true);
  assertEquals(60, reader.maxDoc());
  assertEquals(60, reader.numDocs());
  reader.close();
  // make sure opening a new index for create over
  // this existing one works correctly:
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  assertEquals(0, writer.maxDoc());
  assertEquals(0, writer.numDocs());
  writer.close();
  dir.close();
}
// Indexes one trivial document: an analyzed, unstored "content" field
// holding the single token "aaa".
private void addDoc(IndexWriter writer) throws IOException {
  final Document d = new Document();
  d.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Indexes one document tagged with its ordinal: a stored/analyzed "content"
// field ("aaa <index>") plus a stored/analyzed "id" field.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  final Document d = new Document();
  d.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Fails with the given message if instantiating an IndexFileDeleter over the
// current commit would delete any file, i.e. if the directory contains files
// not referenced by the last commit.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  String[] startFiles = dir.listAll();
  SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  // Side effect: the IndexFileDeleter constructor deletes any files that the
  // deletion policy considers unreferenced.  The instance itself is unused.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
  String[] endFiles = dir.listAll();
  // Sort both listings so the comparison is order-independent.
  Arrays.sort(startFiles);
  Arrays.sort(endFiles);
  if (!Arrays.equals(startFiles, endFiles)) {
    fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
  }
}
// optimize(3) must leave at most 3 segments (or fewer, if the index already
// had fewer), across a range of index sizes.
public void testOptimizeMaxNumSegments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  final Document doc = new Document();
  doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  for(int numDocs=38;numDocs<500;numDocs += 38) {
    LogDocMergePolicy ldmp = new LogDocMergePolicy();
    ldmp.setMinMergeDocs(1);
    ldmp.setMergeFactor(5);
    // Re-create the index from scratch for each size.
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
            ldmp));
    for(int j=0;j<numDocs;j++)
      writer.addDocument(doc);
    writer.close();
    // Record how many segments exist before the bounded optimize.
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    ldmp = new LogDocMergePolicy();
    ldmp.setMergeFactor(5);
    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
        new MockAnalyzer()).setMergePolicy(ldmp));
    writer.optimize(3);
    writer.close();
    sis = new SegmentInfos();
    sis.read(dir);
    final int optSegCount = sis.size();
    // optimize(3) never grows the segment count and caps it at 3.
    if (segCount < 3)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(3, optSegCount);
  }
  dir.close();
}
// Same contract as testOptimizeMaxNumSegments, but with optimize(7) against a
// continuously growing index and a concurrent merge scheduler.
public void testOptimizeMaxNumSegments2() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  final Document doc = new Document();
  doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  LogDocMergePolicy ldmp = new LogDocMergePolicy();
  ldmp.setMinMergeDocs(1);
  ldmp.setMergeFactor(4);
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
  for(int iter=0;iter<10;iter++) {
    for(int i=0;i<19;i++)
      writer.addDocument(doc);
    // Commit, let background merges finish, then commit their result so the
    // on-disk SegmentInfos reflects a quiesced index.
    writer.commit();
    writer.waitForMerges();
    writer.commit();
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    writer.optimize(7);
    writer.commit();
    writer.waitForMerges();
    sis = new SegmentInfos();
    sis.read(dir);
    final int optSegCount = sis.size();
    if (segCount < 7)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(7, optSegCount);
  }
  writer.close();
  dir.close();
}
/**
 * Make sure optimize doesn't use any more than 1X
 * starting index size as its temporary free space
 * required.
 * (The assertion below actually allows up to 4X starting usage as max
 * transient space, to account for compound-file building overhead.)
 */
public void testOptimizeTempSpaceUsage() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  if (VERBOSE) {
    System.out.println("TEST: config1=" + writer.getConfig());
  }
  for(int j=0;j<500;j++) {
    addDocWithIndex(writer, j);
  }
  final int termIndexInterval = writer.getConfig().getTermIndexInterval();
  // force one extra segment w/ different doc store so
  // we see the doc stores get merged
  writer.commit();
  addDocWithIndex(writer, 500);
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: start disk usage");
  }
  // Measure the starting index size so the temp-space bound is relative.
  long startDiskUsage = 0;
  String[] files = dir.listAll();
  for(int i=0;i<files.length;i++) {
    startDiskUsage += dir.fileLength(files[i]);
    if (VERBOSE) {
      System.out.println(files[i] + ": " + dir.fileLength(files[i]));
    }
  }
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  // Import to use same term index interval else a
  // smaller one here could increase the disk usage and
  // cause a false failure:
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
  writer.setInfoStream(VERBOSE ? System.out : null);
  writer.optimize();
  writer.close();
  long maxDiskUsage = dir.getMaxUsedSizeInBytes();
  assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
             maxDiskUsage <= 4*startDiskUsage);
  dir.close();
}
// Joins the given names, one per line (separator "\n "), for readable
// assertion-failure output.
// Uses StringBuilder instead of the original String "+=" in a loop,
// which copied the accumulated string on every iteration (O(n^2)).
static String arrayToString(String[] l) {
  StringBuilder sb = new StringBuilder();
  for(int i=0;i<l.length;i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
  Directory dir = newDirectory();
  // add one document & close writer
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  writer.close();
  // now open reader:
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals("should be one document", reader.numDocs(), 1);
  // now open index for create:
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  assertEquals("should be zero documents", writer.maxDoc(), 0);
  addDoc(writer);
  writer.close();
  // the old reader still sees its original point-in-time view
  assertEquals("should be one document", reader.numDocs(), 1);
  IndexReader reader2 = IndexReader.open(dir, true);
  assertEquals("should be one document", reader2.numDocs(), 1);
  reader.close();
  reader2.close();
  dir.close();
}
// Any mutation after close() must throw AlreadyClosedException.
public void testChangesAfterClose() throws IOException {
  Directory dir = newDirectory();
  // Declare-and-initialize in one step (the original null-then-assign
  // indirection served no purpose).
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  // close
  writer.close();
  try {
    addDoc(writer);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException e) {
    // expected
  }
  dir.close();
}
/*
 * Simple test for "commit on close": open writer then
 * add a bunch of docs, making sure reader does not see
 * these docs until writer is closed.
 */
public void testCommitOnClose() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 14; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  searcher.close();
  IndexReader reader = IndexReader.open(dir, true);
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  for(int i=0;i<3;i++) {
    for(int j=0;j<11;j++) {
      addDoc(writer);
    }
    // A fresh searcher must still only see the 14 committed docs.
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
    assertTrue("reader should have still been current", reader.isCurrent());
  }
  // Now, close the writer:
  writer.close();
  assertFalse("reader should not be current now", reader.isCurrent());
  // After close, all 14 + 33 docs become visible.
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader did not see changes after writer was closed", 47, hits.length);
  searcher.close();
  reader.close();
  dir.close();
}
/*
 * Simple test for "commit on close": open writer, then
 * add a bunch of docs, making sure reader does not see
 * them until writer has closed. Then instead of
 * closing the writer, call abort and verify reader sees
 * nothing was added. Then verify we can open the index
 * and add docs to it.
 */
public void testCommitOnCloseAbort() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for (int i = 0; i < 14; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  searcher.close();
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  for(int j=0;j<17;j++) {
    addDoc(writer);
  }
  // Delete all docs:
  writer.deleteDocuments(searchTerm);
  // Uncommitted adds/deletes must be invisible to a fresh searcher.
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
  searcher.close();
  // Now, close the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("saw changes after writer.abort", 14, hits.length);
  searcher.close();
  // Now make sure we can re-open the index, add docs,
  // and all is good:
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  // On abort, writer in fact may write to the same
  // segments_N file:
  dir.setPreventDoubleWrite(false);
  for(int i=0;i<12;i++) {
    for(int j=0;j<17;j++) {
      addDoc(writer);
    }
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
  }
  writer.close();
  // 14 committed originally + 12*17 added after the rollback.
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("didn't see changes after close", 218, hits.length);
  searcher.close();
  dir.close();
}
/*
 * Verify that a writer with "commit on close" indeed
 * cleans up the temp segments created after opening
 * that are not referenced by the starting segments
 * file. We check this by using MockDirectoryWrapper to
 * measure max temp disk space used.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(10).
          setReaderPooling(false).
          setMergePolicy(newLogMergePolicy(10))
  );
  // Build the small "starting" index whose size anchors the 150X bound.
  for(int j=0;j<30;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  long startDiskUsage = dir.getMaxUsedSizeInBytes();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
          .setOpenMode(OpenMode.APPEND).
          setMaxBufferedDocs(10).
          setMergeScheduler(new SerialMergeScheduler()).
          setReaderPooling(false).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<1470;j++) {
    addDocWithIndex(writer, j);
  }
  long midDiskUsage = dir.getMaxUsedSizeInBytes();
  dir.resetMaxUsedSizeInBytes();
  writer.optimize();
  writer.close();
  IndexReader.open(dir, true).close();
  long endDiskUsage = dir.getMaxUsedSizeInBytes();
  // Ending index is 50X as large as starting index; due
  // to 3X disk usage normally we allow 150X max
  // transient usage. If something is wrong w/ deleter
  // and it doesn't delete intermediate segments then it
  // will exceed this 150X:
  // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
  assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
             midDiskUsage < 150*startDiskUsage);
  assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
             endDiskUsage < 150*startDiskUsage);
  dir.close();
}
/*
 * Verify that calling optimize when writer is open for
 * "commit on close" works correctly both for rollback()
 * and close().
 */
public void testCommitOnCloseOptimize() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Must disable throwing exc on double-write: this
  // test uses IW.rollback which easily results in
  // writing to same file more than once
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(10).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<17;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.optimize();
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  // Open a reader before closing (commiting) the writer:
  IndexReader reader = IndexReader.open(dir, true);
  // Reader should see index as unoptimized at this
  // point, because the optimize has not been committed:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  // Abort the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  if (VERBOSE) {
    System.out.println("TEST: do real optimize");
  }
  // This time optimize and commit via close().
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  writer.optimize();
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: writer closed");
  }
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after closing writer; now the commit is visible:
  reader = IndexReader.open(dir, true);
  assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
  reader.close();
  dir.close();
}
// Committing a writer that never added a document must yield a valid,
// empty index — both for a fresh index and for OpenMode.APPEND.
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.commit();
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.commit();
  writer.close();
  reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
  dir.close();
}
// Indexes 100 docs each with 6 distinct per-doc field names (600 fields
// total) and verifies every term is searchable with docFreq == 1.
public void testManyFields() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for(int j=0;j<100;j++) {
    Document doc = new Document();
    // Field names are unique per document (a0..f0, a1..f1, ...).
    doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for(int j=0;j<100;j++) {
    assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
    assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
    assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
  }
  reader.close();
  dir.close();
}
// With an absurdly small RAM buffer every addDocument must trigger a flush,
// observable as the directory's file count growing after each add.
public void testSmallRAMBuffer() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setRAMBufferSizeMB(0.000001).
          setMergePolicy(newLogMergePolicy(10))
  );
  int lastNumFile = dir.listAll().length;
  for(int j=0;j<9;j++) {
    Document doc = new Document();
    doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    int numFile = dir.listAll().length;
    // Verify that with a tiny RAM buffer we see new
    // segment after every doc
    assertTrue(numFile > lastNumFile);
    lastNumFile = numFile;
  }
  writer.close();
  dir.close();
}
// Stresses the in-RAM indexing buffer with three very different document
// shapes (many unique terms, one repeated term, very long terms) and then
// checks the repeated-term docs are all searchable.
public void testDiverseDocs() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  for(int i=0;i<3;i++) {
    // First, docs where every term is unique (heavy on
    // Posting instances)
    for(int j=0;j<100;j++) {
      Document doc = new Document();
      for(int k=0;k<100;k++) {
        doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
      }
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs (heavy on byte blocks)
    for(int j=0;j<100;j++) {
      Document doc = new Document();
      doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs but the terms are very long (heavy on
    // char[] arrays)
    for(int j=0;j<100;j++) {
      StringBuilder b = new StringBuilder();
      String x = Integer.toString(j) + ".";
      for(int k=0;k<1000;k++)
        b.append(x);
      String longTerm = b.toString();
      Document doc = new Document();
      doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
  }
  writer.close();
  // 3 outer iterations x 100 "aaa" docs = 300 hits expected.
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
  assertEquals(300, hits.length);
  searcher.close();
  dir.close();
}
/**
 * Verifies searching still works when omitNorms is set on all but one
 * document, first within a single buffered segment (pre-flush) and then
 * across multiple flushed segments (post-flush).
 */
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
// Recreate the index; with maxBufferedDocs=10 and 27 docs this spans
// multiple flushed segments.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
/**
 * Indexes one massive document containing 128K occurrences of the term "a"
 * (with a tiny RAM buffer to force many flush cycles) and verifies the
 * within-document term frequency survives indexing intact.
 */
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
// 4096 iterations x 32 "a"s appended per iteration = 128K occurrences
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
/**
 * A Directory that nulls out its LockFactory and does its own private locking
 * (by overriding makeLock) must still work with IndexWriter.
 */
public void testNullLockFactory() throws IOException {
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
// Deliberately disable the standard LockFactory; locking is handled
// privately via myLockFactory below.
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
// NOTE(review): this closes the same searcher a second time; appears
// harmless here but looks redundant — confirm intent.
searcher.close();
dir.close();
}
/**
 * Flushing with merging explicitly disallowed must leave every flushed
 * segment unmerged: 19 docs at maxBufferedDocs=2 yields 10 segments.
 */
public void testFlushWithNoMerging() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setMergePolicy(newLogMergePolicy(10))
  );
  Document doc = new Document();
  doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  for (int docNum = 0; docNum < 19; docNum++) {
    writer.addDocument(doc);
  }
  // Flush buffered docs but do not trigger merges.
  writer.flush(false, true);
  writer.close();
  SegmentInfos segmentInfos = new SegmentInfos();
  segmentInfos.read(dir);
  // Since we flushed w/o allowing merging we should now have 10 segments.
  assertEquals(10, segmentInfos.size());
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
/**
 * Flushing a segment that has norms, then adding an empty doc (no norms) and
 * flushing again, must yield a checkable index with both docs present.
 */
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
/**
 * Tests optimize(false): the optimize is kicked off in the background and not
 * waited for, but writer.close() does wait for it. Pass 0 verifies the index
 * ends up optimized; pass 1 verifies docs added after optimize(false) land in
 * a separate segment NOT included in the optimization.
 */
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// optimized segment + the late-flushed segment
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
/**
 * Adding one document whose single field is empty, with term vectors
 * enabled, must not raise NullPointerException.
 */
public void testBadSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document emptyVectorDoc = new Document();
  emptyVectorDoc.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(emptyVectorDoc);
  writer.close();
  dir.close();
}
// LUCENE-1036
/**
 * LUCENE-1036: indexing (and the merges it triggers) must work when the
 * calling thread runs at MAX_PRIORITY. The original priority is always
 * restored in the finally block.
 */
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
// Restore the caller's original priority no matter what happened.
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// NOTE: the check uses a Java `assert`, so it only fires when
// assertions are enabled (-ea), as they are in Lucene's test runner.
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
/**
 * LUCENE-1013: with maxMergeDocs=20 and mergeFactor=2, no merge may ever
 * involve a segment of 20 or more docs; MyMergeScheduler asserts this on
 * every intercepted merge.
 */
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
/**
 * Indexes 20 iterations of docs whose field schema varies between iterations
 * (different stored/empty field combinations), interleaved with deletes and
 * periodic optimizes, to verify merging copes with heterogeneous segments.
 */
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(newField("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(newField("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(newField("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(newField("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(newField("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per iteration via the reader.
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
// Every 4th iteration, force merging of the mixed-schema segments.
if (0 == i % 4) {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
/**
 * Verifies that IndexWriter.close(false) — abort pending merges, don't wait —
 * works while another thread is concurrently adding documents, and that the
 * index remains readable and reopenable in APPEND mode afterwards.
 */
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for(int pass=0;pass<2;pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            // have to use compound file to prevent running out of
            // descripters when newDirectory returns a file-system
            // backed directory:
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for(int iter=0;iter<10;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for(int j=0;j<199;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while(!done) {
            for(int i=0;i<100;i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // expected once the main thread closes the writer
                done = true;
                break;
              } catch (NullPointerException e) {
                // tolerated: racing the closing writer's internals
                done = true;
                break;
              } catch (Throwable e) {
                // anything else is a real failure; record for the main thread
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // BUGFIX: check for unexpected thread failures AFTER the adder thread
      // has run and been joined; previously this check executed before
      // t1.start(), so a recorded failure could never be observed.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1084: test unlimited field length
/**
 * LUCENE-1084: with the default (unlimited) max field length, a term past
 * position 10,000 must still be indexed and findable.
 */
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  // 10,000 "a" tokens followed by a single trailing "x".
  StringBuilder contents = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    contents.append(" a");
  }
  contents.append(" x");
  Document doc = new Document();
  doc.add(newField("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  // The trailing "x" must have been indexed (no truncation occurred).
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-1044: test writer.commit() when ac=false
/**
 * LUCENE-1044: writer.commit() must make buffered changes visible to newly
 * opened / reopened readers, while already-open readers keep their
 * point-in-time view.
 */
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Opened before commit: must not see the 23 uncommitted docs.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// Reopen after commit: the new reader sees them; the old one doesn't.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1084: test user-specified field length
/**
 * LUCENE-1084: a user-specified maxFieldLength of 100,000 must admit the
 * 10,001st token ("x") of a long field.
 */
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
  // 10,000 "a" tokens plus a trailing "x" — well under the 100,000 limit.
  StringBuilder contents = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    contents.append(" a");
  }
  contents.append(" x");
  Document doc = new Document();
  doc.add(newField("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
/**
 * LUCENE-325: expungeDeletes must merge away deleted docs when 2 singular
 * merges are required, so afterwards maxDoc == numDocs.
 */
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // CLEANUP: removed a redundant second "document = new Document()"
  // assignment that immediately discarded the first allocation.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // After expunging, the deleted slots are gone: maxDoc shrinks to numDocs.
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
/**
 * LUCENE-325: expungeDeletes must work when many adjacent merges are
 * required (mergeFactor 3 over 49 live docs out of 98).
 */
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // CLEANUP: removed a redundant second "document = new Document()"
  // assignment that immediately discarded the first allocation.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // Delete every even-numbered doc, leaving 49 live docs.
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
/**
 * LUCENE-325: expungeDeletes(false) — kick off without waiting — must still
 * have fully expunged by the time the writer is closed (close waits).
 */
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // CLEANUP: removed a redundant second "document = new Document()"
  // assignment that immediately discarded the first allocation.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // Delete every even-numbered doc, leaving 49 live docs.
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
/** LUCENE-1179: a field whose name is the empty string must be indexable. */
public void testEmptyFieldName() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document emptyNameDoc = new Document();
  emptyNameDoc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(emptyNameDoc);
  writer.close();
  dir.close();
}
// IndexWriter subclass that records whether the doBeforeFlush/doAfterFlush
// hooks were invoked; used by testDoBeforeAfterFlush (LUCENE-1222).
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// Set to true when the corresponding flush hook fires; reset by the test.
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
}
// LUCENE-1222
/**
 * LUCENE-1222: doBeforeFlush/doAfterFlush hooks must fire on commit, both
 * for an add-triggered flush and for a delete-triggered flush.
 */
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset the flags and check that a delete-only commit also fires the hooks.
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// Pairs of (input containing invalid UTF-16, expected indexed form): each
// invalid surrogate is replaced by U+FFFD during indexing. Consumed in
// adjacent pairs (input at 2*i, expected at 2*i+1) by testInvalidUTF16.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
/**
 * LUCENE-510: indexing strings with invalid UTF-16 (unpaired/backwards
 * surrogates) must replace the bad units with U+FFFD; verifies both the
 * indexed terms and the stored field values against utf8Data expectations.
 */
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
/**
 * LUCENE-510: round-trips every valid Unicode code point through
 * UnicodeUtil's UTF16->UTF8 and UTF8->UTF16 conversions and compares the
 * results against the JDK's own UTF-8 encoding.
 */
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
// NOTE(review): the exclusive bound skips U+10FFFF itself — confirm
// whether that last code point is intentionally untested.
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode supplementary code points as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Random int in [0, lim) drawn from the test's shared random instance.
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
/**
 * Fills buffer[offset..offset+count) with random UTF-16 content — plain
 * chars, valid surrogate pairs, and (occasionally) illegal unpaired
 * surrogates. Writes into expected[] the form the content should take after
 * indexing, with each illegal surrogate replaced by U+FFFD.
 *
 * @return true if any illegal (unpaired) surrogate was generated
 */
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The unpaired surrogate is expected to decode as U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
/**
 * LUCENE-510: round-trips many random UTF-16 strings (including ones with
 * illegal surrogates) through UnicodeUtil and checks agreement with the
 * JDK's UTF-8 encoder for the legal cases.
 */
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Only legal sequences can be compared byte-for-byte with the JDK.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
/**
 * LUCENE-510: exercises UnicodeUtil.UTF8toUTF16's incremental decoding —
 * re-decoding only from the first byte that changed relative to the previous
 * iteration must produce the same result as a full decode.
 */
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
// Keep a random prefix from last iteration; refill the rest.
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that differs from the previous encoding; the
// incremental decode starts there.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// Full (non-incremental) decode must agree too.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
/**
 * LUCENE-1255: a TokenStream whose first token has positionIncrement 0
 * (yielding position -1 internally) must still index and be searchable via
 * phrase, span, and positions APIs.
 */
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
// First token gets increment 0; all tokens end up at the same position.
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * LUCENE-1274: prepareCommit() must not make changes visible to readers;
 * only the subsequent commit() does.
 */
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// A reader opened between prepareCommit and commit still sees nothing.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * LUCENE-1274: rollback() after prepareCommit() must discard the prepared
 * changes entirely; a fresh writer then starts from the last real commit.
 */
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Rollback rewrites files the mock dir would otherwise flag as
// double-written.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// After rollback, even a reopened reader sees none of the 23 docs.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
/**
 * LUCENE-1274: prepareCommit() followed by commit() on a writer with no
 * pending changes must succeed and leave a valid empty index.
 */
public void testPrepareCommitNoChanges() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.prepareCommit();
  writer.commit();
  writer.close();
  IndexReader emptyReader = IndexReader.open(dir, true);
  assertEquals(0, emptyReader.numDocs());
  emptyReader.close();
  dir.close();
}
// LUCENE-1219: a binary field created with an (offset, length) slice must
// store only that slice; on retrieval the stored value is exactly the slice.
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // Slice [10, 10+17) of b; before indexing the field still exposes the
  // full backing array plus offset/length.
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // Was assertEquals(17, b.length, 17): that resolves to the
  // (expected, actual, delta) overload, which tolerated any length in
  // [0, 34]. Assert the exact slice length instead.
  assertEquals(17, b.length);
  assertEquals(87, b[0]); // first stored byte is original b[10] == 10+77
  ir.close();
  dir.close();
}
// LUCENE-1382
// Commit user data: absent until commit(Map) is called, then readable via
// IndexReader.getCommitUserData, and carried forward by later commits that
// don't supply their own map.
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
// no user data yet on the latest commit
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// optimize + close commit again without user data; "label" must survive
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529: the analyzer's position increment gap must be applied even
// when the first instance of a multi-valued field produces no tokens.
public void testPositionIncrementGapEmptyField() throws Exception {
  Directory dir = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer();
  analyzer.setPositionIncrementGap( 100 );
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, analyzer));
  // Empty first value, then a two-token second value of the same field.
  Field emptyField = newField("field", "", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
  Field filledField = newField("field", "crunch man", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
  Document doc = new Document();
  doc.add(emptyField);
  doc.add(filledField);
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  TermPositionVector vector = ((TermPositionVector) reader.getTermFreqVector(0, "field"));
  // The gap shifted both tokens: "crunch" at 100, "man" at 101.
  int[] positions = vector.getTermPositions(0);
  assertEquals(1, positions.length);
  assertEquals(100, positions[0]);
  positions = vector.getTermPositions(1);
  assertEquals(1, positions.length);
  assertEquals(101, positions[0]);
  reader.close();
  dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
  Directory dir = newDirectory();
  try {
    // Drop a foreign (non-index) file into the directory.
    IndexOutput foreign = dir.createOutput("myrandomfile");
    foreign.writeByte((byte) 42);
    foreign.close();
    // Creating a brand-new index must leave the foreign file untouched.
    new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
  }
}
// Verifies addIndexes(IndexReader...) completes without deadlock when the
// same underlying index is passed twice (a reader and its clone), and that
// all docs end up in the target index.
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
// clone shares dir2; addIndexes must handle both readers safely
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 docs committed locally + 1 from each of r1/r2 = 5 total
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Background indexer used by testThreadInterruptDeadlock: repeatedly
// builds and checks an index until interrupted, tolerating interrupts
// at any point and verifying the index stays consistent afterwards.
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;
  volatile boolean finish;
  // Set true once one full index/check cycle completes; the parent thread
  // only interrupts after this (see class-loader comment below).
  volatile boolean allowInterrupt = false;
  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while(!finish) {
      try {
        while(true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = newIndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for(int i=0;i<100;i++) {
            w.addDocument(doc);
            if (i%10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        // An interrupt is the expected way out of the inner loop.
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      // w is null if we were interrupted/finished before the first writer
      // was created; guard so an NPE here doesn't mask the real result.
      if (w != null) {
        try {
          w.rollback();
        } catch (IOException ioe) {
          throw new RuntimeException(ioe);
        }
      }
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
// Hammers the background indexer with interrupts and asserts it neither
// deadlocks nor corrupts its index.
public void testThreadInterruptDeadlock() throws Exception {
  IndexerThreadInterrupt t = new IndexerThreadInterrupt();
  t.setDaemon(true);
  t.start();
  // Force class loader to load ThreadInterruptedException
  // up front... else we can see a false failure if 2nd
  // interrupt arrives while class loader is trying to
  // init this class (in servicing a first interrupt):
  assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
  // issue 100 interrupts to child thread
  for (int issued = 0; issued < 100 && t.isAlive(); ) {
    Thread.sleep(10);
    if (t.allowInterrupt) {
      issued++;
      t.interrupt();
    }
  }
  t.finish = true;
  t.join();
  assertFalse(t.failed);
}
// Mixed stored+tokenstream fields: binary stored slices indexed via custom
// token streams must survive in-memory merge, flush, and segment merge,
// and the custom streams' terms must be searchable.
public void testIndexStoreCombos() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // stored binary slice [10, 10+17); indexed via the explicit token stream
  Field f = new Field("binary", b, 10, 17);
  f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
  Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
  f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
  doc.add(f);
  doc.add(f2);
  w.addDocument(doc);
  // add 2 docs to test in-memory merging
  f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  // force segment flush so we can force a segment merge with doc3 later.
  w.commit();
  f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  w.commit();
  w.optimize(); // force segment merge.
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // Was assertEquals(17, b.length, 17): the (expected, actual, delta)
  // overload tolerated any length in [0, 34]. Assert the exact length.
  assertEquals(17, b.length);
  assertEquals(87, b[0]); // first stored byte is original b[10] == 10+77
  assertTrue(ir.document(0).getFieldable("binary").isBinary());
  assertTrue(ir.document(1).getFieldable("binary").isBinary());
  assertTrue(ir.document(2).getFieldable("binary").isBinary());
  assertEquals("value", ir.document(0).get("string"));
  assertEquals("value", ir.document(1).get("string"));
  assertEquals("value", ir.document(2).get("string"));
  // test that the terms were indexed.
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  ir.close();
  dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  doc = r.document(0);
  // Stored fields must come back in exactly insertion order, even when a
  // field name ("zzz") repeats and sorts after another ("aaa").
  final String[][] expected = {
    {"zzz", "a b c"},
    {"aaa", "a b c"},
    {"zzz", "1 2 3"},
  };
  Iterator<Fieldable> it = doc.getFields().iterator();
  for (String[] pair : expected) {
    assertTrue(it.hasNext());
    Field f = (Field) it.next();
    assertEquals(f.name(), pair[0]);
    assertEquals(f.stringValue(), pair[1]);
  }
  assertFalse(it.hasNext());
  r.close();
  w.close();
  d.close();
}
// A term containing an embedded U+FFFF code unit must be indexed and
// searchable, and the index must pass CheckIndex.
public void testEmbeddedFFFF() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  // One doc whose term embeds U+FFFF, one plain doc.
  Document withFFFF = new Document();
  withFFFF.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(withFFFF);
  Document plain = new Document();
  plain.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(plain);
  IndexReader reader = writer.getReader();
  assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
  reader.close();
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// Indexing a document with no fields at all must still produce a valid
// index (non-compound files, info stream enabled).
public void testNoDocsIndex() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  ((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
  // Route the info stream into a buffer instead of stdout.
  w.setInfoStream(new PrintStream(new ByteArrayOutputStream(1024)));
  // A document with no fields.
  w.addDocument(new Document());
  w.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
// initial commit so the threads can open a reader
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
// stop early if a sibling thread already failed
if (failed.get()) break;
for(int j=0;j<10;j++) {
// value is unique across threads and iterations
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// once commit() returns, a reopened reader MUST see the doc
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  // nextInt's bound is exclusive, so widen the span by one.
  return start + r.nextInt(end - start + 1);
}
// Verifies that field "f"'s terms enumerate in strictly increasing BytesRef
// order, that each enumerated term was actually indexed, and (for the
// top-level reader) that every indexed term was seen; finally re-seeks each
// seen term on the same enum.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// strictly ascending byte order
assertTrue(last.compareTo(term) < 0);
// copy the bytes: the enum may reuse its BytesRef on the next call
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
// a sub-reader may hold only a subset; only the top reader must see all
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a UTF-16 code unit as "U+<hex>" for failure messages.
private final String asUnicodeChar(char c) {
  return "U+".concat(Integer.toHexString(c));
}
// Describes a 1- or 2-code-unit term as its unicode escape(s), for
// readable assertion messages.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
// (UTF-16 code-unit order differs from codepoint order for supplementary
// characters, so surrogate-pair terms are the interesting case here).
public void testTermUTF16SortOrder() throws Throwable {
Random rnd = random;
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
// periodic commit spreads the terms over multiple segments
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// With a small term index interval, every one of >256 distinct terms must
// still be enumerable and resolve to exactly doc 0.
public void testIndexDivisor() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
  config.setTermIndexInterval(2);
  IndexWriter w = new IndexWriter(dir, config);
  // Build one field value holding 300 distinct tokens.
  StringBuilder tokens = new StringBuilder();
  // must be > 256
  for (int i = 0; i < 300; i++) {
    tokens.append(' ').append(i);
  }
  Document d = new Document();
  d.add(newField("field", tokens.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  IndexReader sub = w.getReader().getSequentialSubReaders()[0];
  TermsEnum termsEnum = sub.fields().terms("field").iterator();
  int seen = 0;
  for (; termsEnum.next() != null; seen++) {
    // each term occurs exactly once, in doc 0
    final DocsEnum docs = termsEnum.docs(null, null);
    assertEquals(0, docs.nextDoc());
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
  }
  assertEquals(300, seen);
  sub.close();
  w.close();
  dir.close();
}
// Files referenced by an open reader must survive deleteUnusedFiles();
// once the reader is closed (NRT case, iter 0) or deleteUnusedFiles() is
// called again (commit case, iter 1), the superseded _0.cfs is removed.
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
// reopening sees the post-optimize index; old reader r stays open
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed" -> "Unused"); kept as-is
// to avoid changing the public test name.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook;
// used by testIndexingThenDeleting to detect when a flush occurred.
private static class FlushCountingIndexWriter extends IndexWriter {
// number of flushes observed
// NOTE(review): not volatile — assumes writes and reads happen on the
// same (test) thread; confirm if used concurrently.
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
// Alternates add-until-flush and delete-until-flush cycles and asserts
// that RAM accounting doesn't trigger flushes too early (each cycle must
// absorb more than 2500 ops at a 0.5 MB buffer).
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
// doc/delete-term count limits disabled: flushing is driven purely by RAM
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  boolean threw = false;
  try {
    IndexReader.listCommits(dir);
  } catch (IndexNotFoundException e) {
    // expected: no segments file exists before the first commit
    threw = true;
  }
  assertTrue("listCommits should have thrown an exception over empty index", threw);
  // No changes still should generate a commit, because it's a new index.
  writer.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
  dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  Directory fsDir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
  IndexWriter w = new IndexWriter(fsDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.close();
  fsDir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
// NOTE(review): the magic 5 = doc-store + term-vector files written for
// the buffered doc; confirm against the codec's file set if it changes.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
// A writer with only buffered (uncommitted) docs has written no segments
// file; opening a second writer with OpenMode.CREATE over the same
// directory (locking disabled) must still succeed.
public void testNoSegmentFile() throws IOException {
Directory dir = newDirectory();
// disable locking so two writers can be open simultaneously
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
}
// With NoDeletionPolicy every commit is kept. Opening a writer (or a
// writable reader) on an OLDER commit and committing from it must not
// delete commits made after that point.
public void testFutureCommit() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
// writer sees only the 1 doc present at commit "first"
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
// Randomized stored-fields round-trip: index docs with random field values
// (and random deletions), then verify every surviving doc's stored fields
// match what was recorded, both before and after optimize().
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  //w.w.setInfoStream(System.out);
  //w.w.setUseCompoundFile(false);
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  // expected contents keyed by doc id; deletions remove entries
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      // 3-in-4 chance the field is present with a random unicode value
      if (rand.nextInt(4) != 3) {
        doc.add(newField("f"+field, _TestUtil.randomUnicodeString(rand, 1000), Field.Store.YES, Field.Index.NO));
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often
      // (use the seeded random so the test stays reproducible; the bare
      // Collections.shuffle overload used its own unseeded Random)
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    // x==0 checks the multi-segment index, x==1 the optimized one
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // was "field f" + fieldCount: the message named the wrong field
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
// No field enables term vectors, so after indexing (with many flushes
// forced by a tiny RAM buffer) no tv* files may exist in the directory.
public void testNoUnwantedTVFiles() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
  ((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
  // Large field values force flushes at the tiny RAM buffer size.
  String big = "alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
  big = big + big + big + big;
  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    String value = Integer.toString(i) + big;
    doc.add(new Field("id", value, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    doc.add(new Field("str", value, Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("str2", value, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("str3", value, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
    w.addDocument(doc);
  }
  w.close();
  _TestUtil.checkIndex(dir);
  assertNoUnreferencedFiles(dir, "no tv files");
  for (String file : dir.listAll()) {
    assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
    assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
    assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
  }
  dir.close();
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
/**
 * Verifies maxDoc/numDocs accounting across the writer/reader lifecycle:
 * adds 100 docs, deletes 40 via an IndexReader, confirms the writer still
 * reports maxDoc=100 until optimize() compacts deletions down to 60, and
 * finally confirms OpenMode.CREATE wipes the index back to 0 docs.
 */
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily change the *static* default lock timeout; the finally block
// restores it so other tests are not affected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// After optimize, deleted docs are physically removed, so maxDoc == numDocs.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
/** Adds one document with a single unstored, analyzed "content" field containing "aaa". */
private void addDoc(IndexWriter writer) throws IOException
{
final Document d = new Document();
d.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
/** Adds one document whose "content" and "id" fields both embed the given index number. */
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
final Document d = new Document();
d.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
/**
 * Asserts that running the IndexFileDeleter (with a keep-only-last-commit
 * policy) over {@code dir} deletes nothing, i.e. every file present is
 * referenced by the current commit. Fails with {@code message} plus a
 * before/after file listing otherwise.
 */
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
String[] startFiles = dir.listAll();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// Constructing the deleter is enough: it removes unreferenced files as a
// side effect of initialization.
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
String[] endFiles = dir.listAll();
Arrays.sort(startFiles);
Arrays.sort(endFiles);
if (!Arrays.equals(startFiles, endFiles)) {
fail(message + ": before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
}
}
/**
 * Checks optimize(maxNumSegments): for indexes of increasing size, after
 * optimize(3) the segment count must be exactly 3 — unless the index already
 * had fewer than 3 segments, in which case it must stay unchanged.
 */
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Record the pre-optimize segment count straight from disk.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
/**
 * Same contract as {@link #testOptimizeMaxNumSegments()} but with a single
 * long-lived writer, a ConcurrentMergeScheduler, and optimize(7) invoked
 * repeatedly while docs keep accumulating across iterations.
 */
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// commit + waitForMerges + commit ensures background merges are fully
// reflected on disk before we read the segment count.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
/**
 * Builds a ~500-doc index, then re-opens it and optimizes while
 * MockDirectoryWrapper tracks peak disk usage, asserting optimize never
 * needs more than 4X the starting index size in transient space.
 */
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Baseline: total bytes of the fully-built index before optimizing.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Import to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
/**
 * Joins the given strings with "\n " separators for readable failure
 * messages. Returns the empty string for an empty array.
 *
 * Replaced the original O(n^2) String "+=" accumulation loop with
 * String.join, which builds the result in a single pass.
 */
static String arrayToString(String[] l) {
return String.join("\n ", l);
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
/**
 * Verifies an index can be re-created (OpenMode.CREATE) while an IndexReader
 * still holds the old commit open — the open reader keeps seeing its original
 * point-in-time view (1 doc) throughout. This failed pre-lock-less-commits on
 * Windows, where open files cannot be deleted.
 */
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
// Old reader is pinned to the pre-create commit; a fresh reader sees the
// newly created index with its single doc.
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
}
/** Verifies that adding a document after close() throws AlreadyClosedException. */
public void testChangesAfterClose() throws IOException {
final Directory dir = newDirectory();
final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
// close
writer.close();
boolean sawACE = false;
try {
addDoc(writer);
} catch (AlreadyClosedException e) {
// expected
sawACE = true;
}
assertTrue("did not hit AlreadyClosedException", sawACE);
dir.close();
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
/**
 * "Commit on close": a reader opened against the last commit must not see
 * docs buffered by a live writer; only after writer.close() do searches see
 * the additional 33 docs (14 + 33 = 47 total hits).
 */
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
// While the writer is open, fresh searchers still only see the 14
// committed docs, and the earlier reader remains current.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
/**
 * "Commit on close" with rollback(): uncommitted adds and deletes are
 * discarded by rollback (index stays at 14 docs, no stray files), and the
 * index remains fully usable afterwards — 12*17 more docs close to a final
 * count of 218.
 */
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
// 14 original + 12*17 new docs = 218 total after the final commit-on-close.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
/**
 * Verifies the "commit on close" writer cleans up temp segments not
 * referenced by the starting commit: with a 30-doc baseline index, adding
 * 1470 more docs and optimizing must stay under 150X the starting size in
 * peak transient disk usage (the 50X index growth times ~3X merge overhead).
 */
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
/**
 * Verifies optimize() interacts correctly with commit-on-close: an
 * un-committed optimize discarded via rollback() leaves readers seeing the
 * index as unoptimized (and leaves no unreferenced files), while an
 * optimize followed by close() makes readers see it as optimized.
 */
public void testCommitOnCloseOptimize() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
if (VERBOSE) {
writer.setInfoStream(System.out);
}
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
if (VERBOSE) {
System.out.println("TEST: do real optimize");
}
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
if (VERBOSE) {
writer.setInfoStream(System.out);
}
writer.optimize();
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// This time the optimize was committed by close(), so the reader must
// see the index as optimized:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
dir.close();
}
/**
 * Verifies committing a writer that never added documents (both on a fresh
 * index and re-opened with OpenMode.APPEND) yields a readable, empty index.
 */
public void testIndexNoDocuments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
w.commit();
w.close();
IndexReader r = IndexReader.open(dir, true);
assertEquals(0, r.maxDoc());
assertEquals(0, r.numDocs());
r.close();
// Same check after re-opening the (empty) index for append.
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
w.commit();
w.close();
r = IndexReader.open(dir, true);
assertEquals(0, r.maxDoc());
assertEquals(0, r.numDocs());
r.close();
dir.close();
}
/**
 * Indexes 100 docs that each carry six uniquely-named fields (a0..f99) and
 * verifies every per-field term has docFreq 1 when read back.
 */
public void testManyFields() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
for(int j=0;j<100;j++) {
assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
}
reader.close();
dir.close();
}
/**
 * With a near-zero RAM buffer (0.000001 MB) every added document must
 * trigger a flush, observable as the directory's file count growing after
 * each addDocument call.
 */
public void testSmallRAMBuffer() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setRAMBufferSizeMB(0.000001).
setMergePolicy(newLogMergePolicy(10))
);
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.listAll().length;
// Verify that with a tiny RAM buffer we see new
// segment after every doc
assertTrue(numFile > lastNumFile);
lastNumFile = numFile;
}
writer.close();
dir.close();
}
/**
 * Stresses the indexing RAM accounting with three very different doc shapes
 * (all-unique terms, one repeated short term, one very long term) under a
 * small 0.5 MB RAM buffer, then verifies the 300 "aaa" docs are searchable.
 */
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// 3 outer iterations x 100 docs containing "aaa" = 300 expected hits.
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
/**
 * Verifies that enabling norms on just one document — once before the first
 * flush (doc 8 of 10) and once after flushes have happened (doc 26 of 27,
 * on a re-created index) — does not break indexing or searching.
 */
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
/**
 * Indexes a single massive document containing the term "a" 128K times
 * (under a tiny 0.01 MB RAM buffer to force flushing) and verifies the
 * stored term frequency is exactly 128*1024.
 */
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
// 4096 iterations x 32 "a" tokens per iteration = 131072 occurrences.
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
/**
 * A Directory that bypasses the LockFactory mechanism entirely (null
 * lockFactory field, private SingleInstanceLockFactory behind makeLock)
 * must still support indexing, searching, and re-creating the index.
 * Raised on java-dev as a backwards-compatibility concern.
 */
public void testNullLockFactory() throws IOException {
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
// Null out the inherited factory so all locking goes through makeLock.
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
searcher.close();
dir.close();
}
/**
 * flush(triggerMerge=false, ...) must not merge: with maxBufferedDocs=2 and
 * 19 docs, the 9 flushed segments plus the final close-time flush leave
 * exactly 10 segments on disk.
 */
public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
writer.flush(false, true);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
// Since we flushed w/o allowing merging we should now
// have 10 segments
assertEquals(10, sis.size());
dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
/**
 * Flushes a segment containing a normal doc (with norms and term vectors),
 * then adds a completely empty doc — the index must stay consistent and
 * report 2 docs.
 */
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Commit so the empty doc lands in a fresh segment without norms/vectors.
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
/**
 * Tests optimize(false) (kick off but don't wait). Pass 0: close() waits for
 * the background optimize, so the reader sees an optimized index. Pass 1:
 * two docs added after the optimize started form a second segment that must
 * NOT be swept into the optimize — leaving 2 segments and an unoptimized index.
 */
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
/**
 * Regression test: adding a document whose only field is empty, with term
 * vectors enabled, must not raise a NullPointerException.
 */
public void testBadSegment() throws IOException {
final Directory dir = newDirectory();
final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
final Document doc = new Document();
doc.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
writer.addDocument(doc);
writer.close();
dir.close();
}
// LUCENE-1036
// LUCENE-1036: indexing (and its merges) must work even when the calling
// thread runs at Thread.MAX_PRIORITY; the finally block restores the
// original priority so other tests are unaffected.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
// mergeFactor=2 with maxBufferedDocs=2 forces merges within 4 docs.
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs.
// Runs merges synchronously on the calling thread; the docCount check uses
// a plain `assert`, so it only fires when tests run with -ea enabled.
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain and execute every pending merge serially.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
// LUCENE-1013: with maxMergeDocs=20 the merge policy must never select a
// segment of 20+ docs for merging; MyMergeScheduler asserts this invariant
// on every merge it executes.
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
/**
 * Indexes 20 rounds of documents whose field schema varies per round (empty
 * vs. populated fields, stored vs. unstored), interleaving deletes and the
 * occasional optimize, to verify merging copes with heterogeneous segments.
 */
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(newField("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(newField("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(newField("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(newField("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(newField("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per round so segments carry deletions into merges.
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
/**
 * Closes a writer with {@code close(false)} (abort pending merges, don't
 * wait) while a background thread keeps adding documents, and verifies the
 * index stays readable and re-openable afterwards.
 *
 * <p>BUGFIX: the check of the background thread's collected failures used
 * to run <em>before</em> {@code t1.start()}, so it could never observe
 * anything. It now runs after {@code t1.join()}.
 */
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            // NOTE(review): the original comment claimed compound files are
            // needed to avoid running out of descriptors, yet 'false' is
            // passed here -- confirm the intended CFS setting.
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for (int iter = 0; iter < 10; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // expected once the main thread closes the writer
                done = true;
                break;
              } catch (NullPointerException e) {
                // can also surface when the writer is torn down mid-add
                done = true;
                break;
              } catch (Throwable e) {
                // anything else is a real failure; record it for the main thread
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // BUGFIX: inspect failures only after the thread has run and joined;
      // previously this check preceded t1.start() and was dead code.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1084: with the default (unlimited) max field length, a very long
// field must be indexed in full -- including its final token.
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));

  // Build a long field: 10000 "a" tokens followed by a single trailing "x".
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(newField("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The trailing "x" must have survived indexing (no truncation).
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-1044: test writer.commit() when ac=false
/**
 * Verifies point-in-time reader semantics around explicit commit():
 * readers only see changes committed before they were (re)opened.
 */
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// reader opened before the docs are committed sees the empty index
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopened reader sees the 23 committed docs; the old one still sees none
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// uncommitted docs stay invisible to the already-open reader
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// after the second commit, all 23 + 17 = 40 docs are visible
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1084: an explicit, sufficiently large maxFieldLength must also
// keep the last token of a long field.
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));

  // 10000 "a" tokens plus a trailing "x" -- well under the 100000 limit.
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(newField("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The final token must be findable.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
/**
 * Verifies that expungeDeletes() physically removes deleted doc slots:
 * after it runs, maxDoc equals numDocs.
 *
 * <p>Cleanup: removed a dead store -- the original created a Document and
 * immediately overwrote it with a second {@code new Document()}.
 */
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  // Disable RAM-based auto-flush so flushing is driven purely by doc count.
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++) {
    writer.addDocument(document);
  }
  writer.close();
  // Delete two docs via a writable reader: 8 live docs in 10 slots.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // After expungeDeletes the deleted slots are gone: maxDoc == numDocs == 8.
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
/**
 * Like testExpungeDeletes but with 98 docs across many small segments, so
 * expunging requires merging many adjacent segments (mergeFactor=3).
 *
 * <p>Cleanup: removed a dead store -- the original created a Document and
 * immediately overwrote it with a second {@code new Document()}.
 */
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++) {
    writer.addDocument(document);
  }
  writer.close();
  // Delete every other doc: 49 live docs in 98 slots.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2) {
    ir.deleteDocument(i);
  }
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  // Deleted slots must be gone after expunging.
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
/**
 * Same scenario as testExpungeDeletes2 but calls
 * {@code expungeDeletes(false)} (don't wait for merges) and then closes,
 * which must still complete the expunge before close returns.
 *
 * <p>Cleanup: removed a dead store -- the original created a Document and
 * immediately overwrote it with a second {@code new Document()}.
 */
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++) {
    writer.addDocument(document);
  }
  writer.close();
  // Delete every other doc: 49 live docs in 98 slots.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2) {
    ir.deleteDocument(i);
  }
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179: an empty string is a legal field name; indexing a doc with
// such a field must not throw.
public void testEmptyFieldName() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));

  Document doc = new Document();
  Field emptyNamed = newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED);
  doc.add(emptyNamed);
  writer.addDocument(doc);

  writer.close();
  dir.close();
}
/**
 * IndexWriter subclass that records whether the before/after flush hooks
 * were invoked; used by testDoBeforeAfterFlush.
 */
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// set true when the corresponding hook fires; reset by the test between flushes
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
}
// LUCENE-1222: doBeforeFlush/doAfterFlush must both fire on every
// flush-inducing commit, whether triggered by added docs or by deletes.
public void testDoBeforeAfterFlush() throws IOException {
  Directory dir = newDirectory();
  MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));

  Document doc = new Document();
  doc.add(newField("field", "a field", Field.Store.YES,
      Field.Index.ANALYZED));
  writer.addDocument(doc);

  // First flush: triggered by the added document.
  writer.commit();
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);

  // Reset the flags, then flush again via a delete.
  writer.beforeWasCalled = false;
  writer.afterWasCalled = false;
  writer.deleteDocuments(new Term("field", "field"));
  writer.commit();
  assertTrue(writer.beforeWasCalled);
  assertTrue(writer.afterWasCalled);
  writer.close();

  // The lone document was deleted: one slot remains, zero live docs.
  IndexReader ir = IndexReader.open(dir, true);
  assertEquals(1, ir.maxDoc());
  assertEquals(0, ir.numDocs());
  ir.close();
  dir.close();
}
// (input, expected) pairs consumed by testInvalidUTF16: even entries are
// strings containing ill-formed UTF-16; the following odd entry is what
// should come back from the index after bad surrogates are replaced
// with U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
// note: a backwards pair followed by a valid pair keeps the valid pair intact
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510: ill-formed UTF-16 (unpaired/backwards surrogates) must be
// replaced with U+FFFD consistently in both the terms dict and stored fields.
public void testInvalidUTF16() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));

  // utf8Data holds (input, expected) pairs at even/odd offsets.
  final int count = utf8Data.length / 2;
  Document doc = new Document();
  for (int i = 0; i < count; i++) {
    doc.add(newField("f" + i, utf8Data[2 * i], Field.Store.YES, Field.Index.ANALYZED));
  }
  w.addDocument(doc);
  w.close();

  IndexReader ir = IndexReader.open(dir, true);
  Document stored = ir.document(0);
  for (int i = 0; i < count; i++) {
    final String expectedValue = utf8Data[2 * i + 1];
    // indexed term must be the sanitized form...
    assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f" + i, expectedValue)));
    // ...and so must the stored value.
    assertEquals("field " + i + " is incorrect", expectedValue, stored.getField("f" + i).stringValue());
  }
  ir.close();
  dir.close();
}
// LUCENE-510
/**
 * Round-trips every valid Unicode code point through
 * UnicodeUtil.UTF16toUTF8 / UTF8toUTF16 and compares against the JDK's
 * own UTF-8 conversion.
 * NOTE(review): the loop bound uses {@code <} 0x0010FFFF, so the maximum
 * code point U+10FFFF itself is never tested -- confirm whether that is
 * intentional.
 */
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
// Skip the surrogate range U+D800..U+DFFF (not valid code points)
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
// BMP: single UTF-16 code unit
chars[len++] = (char) ch;
} else {
// supplementary: encode as a surrogate pair
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
// compare Lucene's UTF-16 -> UTF-8 against the JDK's
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
// and the reverse direction
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
/** Returns a uniformly random int in {@code [0, bound)}. */
private int nextInt(int bound) {
  return random.nextInt(bound);
}
/** Returns a uniformly random int in {@code [start, end)}. */
private int nextInt(int start, int end) {
  return nextInt(end - start) + start;
}
/**
 * Fills buffer[offset..offset+count) with random UTF-16 data (mixing BMP
 * chars, surrogate pairs and occasional deliberately-broken surrogates)
 * and writes into expected[] what a UTF-8 round trip should produce
 * (broken surrogates become U+FFFD).
 *
 * @return true if any illegal (unpaired) surrogate was written
 */
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which kind of char to emit at position i
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range below the surrogates
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// BMP above the surrogates
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
// write a lone surrogate; the round trip should yield U+FFFD
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
/**
 * Round-trips many random UTF-16 strings (including deliberately broken
 * surrogates from fillUnicode) through UnicodeUtil and checks both
 * directions against the expected sanitized output.
 */
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// legal input: Lucene's UTF-8 must match the JDK's byte-for-byte
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// decoding must reproduce the expected (sanitized) UTF-16
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
/**
 * Exercises UnicodeUtil's incremental UTF-8 -> UTF-16 decode: each
 * iteration keeps a random prefix of the previous string, re-decodes only
 * from the first differing byte, and checks the result matches both the
 * expected chars and a full (non-incremental) decode.
 */
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
// how many leading chars to keep from the previous iteration
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// legal input: encoded bytes must match the JDK's UTF-8
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// find the first byte that differs from last iteration's encoding
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// incremental decode starting at the first changed byte
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// full decode as a cross-check
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
/**
 * Indexes a token stream whose first token has position increment 0
 * (so the first position is -1 + 0... i.e. "negative"/zero-start) and
 * verifies phrase/span queries and stored positions still behave.
 */
public void testNegativePositions() throws Throwable {
// hand-rolled stream emitting "a" (posIncr 0), then "b", "c" (posIncr 1)
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
// only the very first token gets increment 0
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
// exact phrase "a b c" must still match
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// the first token's stored position must be 0
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * Verifies two-phase commit visibility: changes become visible to new
 * readers only after commit(), not after prepareCommit().
 */
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// prepared but not committed: still invisible to a fresh reader
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// only after commit() does a reopened reader see the 23 docs
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
// again: prepareCommit alone exposes nothing
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * Verifies that rollback() after prepareCommit() discards the prepared
 * changes entirely: no reader ever sees the 23 rolled-back docs.
 */
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// rollback will rewrite files; disable the double-write check
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// rolled back: reopened reader still sees an empty index
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
// a new writer on the rolled-back index starts from the empty commit
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
// only the 17 post-rollback docs survive
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: prepareCommit() followed by commit() on a writer holding no
// changes must still leave a valid, empty index behind.
public void testPrepareCommitNoChanges() throws IOException {
  Directory dir = newDirectory();

  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.prepareCommit();
  writer.commit();
  writer.close();

  // The resulting index opens fine and contains nothing.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.numDocs());
  reader.close();

  dir.close();
}
// LUCENE-1219
/**
 * A binary field created with (value, offset=10, length=17) must store
 * exactly the 17-byte slice, and the stored value read back must start at
 * the original byte b[10] == 10 + 77 == 87.
 *
 * <p>BUGFIX: {@code assertEquals(17, b.length, 17)} invoked the
 * double-overload with delta=17, so any length from 0 to 34 passed.
 * Replaced with an exact integer assertion.
 */
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for (int i = 0; i < 50; i++) {
    b[i] = (byte) (i + 77);
  }
  Document doc = new Document();
  // field wraps the full array but marks only bytes [10, 27)
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: exact length check (was a vacuous delta-assert)
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
/**
 * Verifies commit user-data: absent until commit(Map) is called, then
 * retrievable via IndexReader, and preserved across a later optimize.
 */
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
// attach user data to this commit
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// optimize must carry the user data forward to the new commit
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
/**
 * With a positionIncrementGap of 100, an empty first field instance must
 * still advance positions: the second instance's tokens land at 100/101.
 */
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// first instance is empty; gap must still be applied before the second
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
// "crunch" at position 100 (gap), "man" at 101
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1468 -- opening an IndexWriter with create=true must not remove
// files in the directory that don't belong to the index.
public void testOtherFiles() throws Throwable {
  Directory dir = newDirectory();
  try {
    // Drop a non-index file into the directory.
    IndexOutput out = dir.createOutput("myrandomfile");
    out.writeByte((byte) 42);
    out.close();

    // Create (and immediately close) a brand-new index in the same dir.
    new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();

    // The stray file must have survived index creation.
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
  }
}
/**
 * Adds two readers over the same external index (one a clone of the other)
 * into a multi-segment target writer; this must complete without
 * deadlocking and yield 3 + 2 = 5 docs.
 */
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
// add the same one-doc index twice via a reader and its clone
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
/**
 * Worker used by testThreadInterruptDeadlock: repeatedly builds a small
 * index and must survive Thread.interrupt() arriving at arbitrary points,
 * always surfacing it as ThreadInterruptedException.
 *
 * <p>BUGFIX: {@code w.rollback()} in the cleanup path could NPE if an
 * interrupt arrived before the first IndexWriter was ever constructed
 * (w still null); a null guard was added.
 */
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;                 // set on any unexpected exception
  volatile boolean finish;                 // set by the test to stop the loop
  volatile boolean allowInterrupt = false; // test only interrupts once this is true

  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while (!finish) {
      try {
        while (true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = newIndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for (int i = 0; i < 100; i++) {
            w.addDocument(doc);
            if (i % 10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        // expected path: the interrupt surfaced through Lucene
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      try {
        // BUGFIX: w is null if an interrupt landed before the first
        // writer was created; guard before rolling back.
        if (w != null) {
          w.rollback();
        }
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
/**
 * Fires up to 100 interrupts at an indexing thread and verifies the
 * thread neither deadlocks nor corrupts its index.
 */
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// only interrupt once the worker has signalled it is safe to do so
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
/**
 * Exercises fields that are simultaneously stored-binary and indexed via a
 * custom token stream, across in-memory merges and a forced segment merge,
 * verifying both the stored slices and the indexed terms round-trip.
 *
 * <p>BUGFIX: {@code assertEquals(17, b.length, 17)} invoked the
 * double-overload with delta=17, so any length from 0 to 34 passed.
 * Replaced with an exact integer assertion.
 */
public void testIndexStoreCombos() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for (int i = 0; i < 50; i++) {
    b[i] = (byte) (i + 77);
  }
  Document doc = new Document();
  // binary-stored slice [10, 27) that is ALSO indexed via a token stream
  Field f = new Field("binary", b, 10, 17);
  f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
  Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
  f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
  doc.add(f);
  doc.add(f2);
  w.addDocument(doc);
  // add 2 docs to test in-memory merging
  f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  // force segment flush so we can force a segment merge with doc3 later.
  w.commit();
  f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  w.commit();
  w.optimize(); // force segment merge.
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: exact length check (was a vacuous delta-assert)
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  assertTrue(ir.document(0).getFieldable("binary").isBinary());
  assertTrue(ir.document(1).getFieldable("binary").isBinary());
  assertTrue(ir.document(2).getFieldable("binary").isBinary());
  assertEquals("value", ir.document(0).get("string"));
  assertEquals("value", ir.document(1).get("string"));
  assertEquals("value", ir.document(2).get("string"));
  // test that the terms were indexed.
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  ir.close();
  dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
/**
 * Stored fields must come back in the exact order they were added,
 * including two instances of the same field name interleaved with another.
 *
 * <p>BUGFIX: the assertEquals calls had (actual, expected) reversed
 * relative to the JUnit convention, which produces misleading failure
 * messages; arguments are now expected-first.
 */
public void testStoredFieldsOrder() throws Throwable {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  doc = r.document(0);
  Iterator<Fieldable> it = doc.getFields().iterator();
  // field order must be: zzz, aaa, zzz -- exactly as added
  assertTrue(it.hasNext());
  Field f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("aaa", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("1 2 3", f.stringValue());
  assertFalse(it.hasNext());
  r.close();
  w.close();
  d.close();
}
// U+FFFF embedded inside a token must round-trip through the index
// without being confused with internal sentinel values.
public void testEmbeddedFFFF() throws Throwable {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));

  // One doc containing the term "a\uffffb", one doc without it.
  Document doc = new Document();
  doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(doc);
  doc = new Document();
  doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(doc);

  // Exactly one doc must match the U+FFFF-bearing term.
  IndexReader r = w.getReader();
  assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
  r.close();

  w.close();
  _TestUtil.checkIndex(d);
  d.close();
}
// Indexing an empty Document (no fields) must still produce a valid index.
public void testNoDocsIndex() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
  lmp.setUseCompoundFile(false);
  // Capture the writer's info stream in memory so verbose output doesn't
  // pollute the test logs.
  ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
  PrintStream infoStream = new PrintStream(bos);
  writer.setInfoStream(infoStream);
  writer.addDocument(new Document());
  writer.close();
  // Fix: the PrintStream was previously never closed (resource leak);
  // close it once the writer no longer uses it.
  infoStream.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
// set by any thread that hits an error, so the others can stop early
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// each thread writes unique terms "<threadId>_<counter>"
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// after commit returns, a reopened reader MUST see the new doc;
// this is the actual thread-safety assertion of the test
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  // Random.nextInt(bound) excludes the bound, so widen by one to include 'end'.
  final int span = 1 + end - start;
  return start + r.nextInt(span);
}
// Verifies that the "f" field's terms enumerate in strictly increasing
// BytesRef (UTF-8 byte / codepoint) order, that every enumerated term was
// one we indexed, and that each seen term can be seek'd back to exactly.
// isTop=true additionally requires the reader to expose ALL indexed terms
// (only true for the composite reader, not for individual sub-segments).
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// strictly ascending: each term must compare greater than its predecessor
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
// NOTE(review): reuses the exhausted enum for seeking — presumably
// TermsEnum.seek repositions regardless of prior iteration; confirm
// against the TermsEnum contract.
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Formats a UTF-16 code unit as "U+<lowercase hex>", e.g. 'A' -> "U+41".
private final String asUnicodeChar(char c) {
  return "U+" + Integer.toHexString((int) c);
}
// Human-readable description of a 1- or 2-code-unit term for assertion
// messages, e.g. "U+d800,U+dc00" for a surrogate pair.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  StringBuilder sb = new StringBuilder(asUnicodeChar(s.charAt(0)));
  if (s.length() != 1) {
    sb.append(',').append(asUnicodeChar(s.charAt(1)));
  }
  return sb.toString();
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
Random rnd = random;
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
// commit periodically so the index ends up with multiple segments,
// letting the per-sub-reader checks below actually cover segment merges
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// With a small term-index interval (2), all 300 unique terms of a single
// document must still enumerate and match that one document.
public void testIndexDivisor() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
  config.setTermIndexInterval(2);
  IndexWriter writer = new IndexWriter(dir, config);
  // must be > 256
  StringBuilder content = new StringBuilder();
  for (int i = 0; i < 300; i++) {
    content.append(' ').append(i);
  }
  Document doc = new Document();
  doc.add(newField("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader().getSequentialSubReaders()[0];
  TermsEnum termsEnum = reader.fields().terms("field").iterator();
  int termCount = 0;
  // Every term occurs in exactly one doc: doc 0.
  while (termsEnum.next() != null) {
    DocsEnum docs = termsEnum.docs(null, null);
    assertEquals(0, docs.nextDoc());
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
    termCount++;
  }
  assertEquals(300, termCount);
  reader.close();
  writer.close();
  dir.close();
}
// Verifies that deleteUnusedFiles() does not remove files an open reader
// still references, and that they are removed once the reader is closed.
// iter 0 exercises the NRT-reader path, iter 1 the committed-index path.
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed" -> "Unused"); kept as-is
// since JUnit 3 discovers tests by name and renaming changes the interface.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook;
// used by testIndexingThenDeleting to detect when a flush occurred.
private static class FlushCountingIndexWriter extends IndexWriter {
// number of flushes observed so far (polled by the test loop)
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
// Alternates randomly between pure-indexing and pure-deleting phases and
// checks that the 0.5 MB RAM buffer does not flush too eagerly: each phase
// must accept well over 2500 ops before triggering a flush.
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
// maxBufferedDocs/DeleteTerms of -1 disable count-based flushing, so only
// the 0.5 MB RAM accounting can trigger a flush
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  boolean sawExpectedException = false;
  try {
    IndexReader.listCommits(dir);
  } catch (IndexNotFoundException e) {
    // expected: nothing has been committed yet
    sawExpectedException = true;
  }
  assertTrue("listCommits should have thrown an exception over empty index", sawExpectedException);
  // Closing the writer commits, because this is a brand-new index.
  writer.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
  dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  File indexDir = new File(TEMP_DIR, "emptyFSDirNoLock");
  Directory dir = newFSDirectory(indexDir, NoLockFactory.getNoLockFactory());
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.close();
  dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
// NOTE(review): the magic 5 counts the per-segment stored-fields and
// term-vector files created eagerly on addDocument — confirm if the
// codec's file set changes.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
// Opening a second writer with OpenMode.CREATE must succeed even though a
// first writer has buffered docs but never wrote a segments file.
public void testNoSegmentFile() throws IOException {
  Directory dir = newDirectory();
  dir.setLockFactory(NoLockFactory.getNoLockFactory());
  IndexWriter firstWriter = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  // Buffer two docs without committing.
  Document doc = new Document();
  doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  firstWriter.addDocument(doc);
  firstWriter.addDocument(doc);
  IndexWriter secondWriter = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
      .setOpenMode(OpenMode.CREATE));
  secondWriter.close();
  // If we don't do that, the test fails on Windows
  firstWriter.rollback();
  dir.close();
}
// With NoDeletionPolicy, opening a writer on an older commit and committing
// again must not destroy commits that came after the one we forked from.
public void testFutureCommit() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
  Document doc = new Document();
  w.addDocument(doc);
  // commit to "first"
  Map<String,String> commitData = new HashMap<String,String>();
  commitData.put("tag", "first");
  w.commit(commitData);
  // commit to "second"
  w.addDocument(doc);
  commitData.put("tag", "second");
  w.commit(commitData);
  w.close();
  // open "first" with IndexWriter
  IndexCommit commit = findCommitByTag(dir, "first");
  assertNotNull(commit);
  w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
  assertEquals(1, w.numDocs());
  // commit IndexWriter to "third"
  w.addDocument(doc);
  commitData.put("tag", "third");
  w.commit(commitData);
  w.close();
  // make sure "second" commit is still there
  commit = findCommitByTag(dir, "second");
  assertNotNull(commit);
  IndexReader r = IndexReader.open(commit, true);
  assertEquals(2, r.numDocs());
  r.close();
  // open "second", w/ writeable IndexReader & commit
  r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
  assertEquals(2, r.numDocs());
  r.deleteDocument(0);
  r.deleteDocument(1);
  commitData.put("tag", "fourth");
  r.commit(commitData);
  r.close();
  // make sure "third" commit is still there
  commit = findCommitByTag(dir, "third");
  assertNotNull(commit);
  dir.close();
}

/**
 * Returns the commit in {@code dir} whose user data carries the given
 * "tag" value, or null if none matches.
 *
 * Extracted from testFutureCommit, which repeated this loop three times;
 * also fixes the latent NPE of {@code getUserData().get("tag").equals(...)}
 * on a commit that has no "tag" entry by comparing in null-safe order.
 */
private static IndexCommit findCommitByTag(Directory dir, String tag) throws IOException {
  for (IndexCommit c : IndexReader.listCommits(dir)) {
    if (tag.equals(c.getUserData().get("tag"))) {
      return c;
    }
  }
  return null;
}
// Indexes docs with random stored-only fields, randomly deleting some, then
// verifies every surviving doc's stored fields round-trip through search.
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  //w.w.setInfoStream(System.out);
  //w.w.setUseCompoundFile(false);
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  // expected state: id -> the Document we indexed under that id
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      // 3-in-4 chance to add a stored-only random-unicode field f<field>
      // (fix: removed the dead `s = null` branch of the original)
      if (rand.nextInt(4) != 3) {
        String s = _TestUtil.randomUnicodeString(rand, 1000);
        doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often
      // (fix: shuffle with the test's seeded Random so runs are reproducible;
      // the no-arg overload uses an unseeded internal Random)
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    // two passes: once on the multi-segment index, once after optimize()
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // fix: the failure message previously printed fieldCount instead of
          // the failing field index i
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
// None of the fields below request term vectors, so after indexing no
// term-vector files (.tvf/.tvx/.tvd extensions) may exist in the directory.
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
// tiny RAM buffer forces frequent flushes; compound files disabled so the
// individual per-extension files are visible to the listAll() check below
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
dir.close();
}
}
Right
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// JUnit-3 style constructor: forwards the test name to LuceneTestCase.
public TestIndexWriter(String name) {
super(name);
}
// Verifies maxDoc()/numDocs() accounting across adds, reader-side deletes,
// optimize(), and re-creating the index with OpenMode.CREATE.
public void testDocCount() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// temporarily lower the global default write-lock timeout; restored in
// finally so other tests are unaffected even if the ctor throws
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// after optimize, deleted docs are purged so maxDoc == numDocs
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
}
/** Adds one unstored, analyzed "content":"aaa" document to the writer. */
private static void addDoc(IndexWriter writer) throws IOException {
  Document doc = new Document();
  Field content = new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED);
  doc.add(content);
  writer.addDocument(doc);
}
/** Adds a stored document whose "content" and "id" fields embed {@code index}. */
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  Document doc = new Document();
  doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  doc.add(new Field("id", String.valueOf(index), Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(doc);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
public void testAddIndexOnDiskFull() throws IOException
{
int START_COUNT = 57;
int NUM_DIR = 50;
int END_COUNT = START_COUNT + NUM_DIR*25;
// Build up a bunch of dirs that have indexes which we
// will then merge together by calling addIndexes(*):
Directory[] dirs = new Directory[NUM_DIR];
long inputDiskUsage = 0;
for(int i=0;i<NUM_DIR;i++) {
dirs[i] = new RAMDirectory();
IndexWriter writer = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<25;j++) {
addDocWithIndex(writer, 25*i+j);
}
writer.close();
String[] files = dirs[i].listAll();
for(int j=0;j<files.length;j++) {
inputDiskUsage += dirs[i].fileLength(files[j]);
}
}
// Now, build a starting index that has START_COUNT docs. We
// will then try to addIndexesNoOptimize into a copy of this:
RAMDirectory startDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<START_COUNT;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Make sure starting index seems to be working properly:
Term searchTerm = new Term("content", "aaa");
IndexReader reader = IndexReader.open(startDir, true);
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 57, hits.length);
searcher.close();
reader.close();
// Iterate with larger and larger amounts of free
// disk space. With little free disk space,
// addIndexes will certainly run out of space &
// fail. Verify that when this happens, index is
// not corrupt and index in fact has added no
// documents. Then, we increase disk space by 2000
// bytes each iteration. At some point there is
// enough free disk space and addIndexes should
// succeed and index should show all documents were
// added.
// String[] files = startDir.listAll();
long diskUsage = startDir.sizeInBytes();
long startDiskUsage = 0;
String[] files = startDir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += startDir.fileLength(files[i]);
}
// three outer iterations, one per addIndexes variant (see methodName below)
for(int iter=0;iter<3;iter++) {
if (VERBOSE)
System.out.println("TEST: iter=" + iter);
// Start with 100 bytes more than we are currently using:
long diskFree = diskUsage+100;
int method = iter;
boolean success = false;
boolean done = false;
String methodName;
if (0 == method) {
methodName = "addIndexes(Directory[]) + optimize()";
} else if (1 == method) {
methodName = "addIndexes(IndexReader[])";
} else {
methodName = "addIndexes(Directory[])";
}
while(!done) {
// Make a new dir that will enforce disk usage:
MockRAMDirectory dir = new MockRAMDirectory(startDir);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IOException err = null;
MergeScheduler ms = writer.getConfig().getMergeScheduler();
// x==0: constrained disk + random IOExceptions; x==1: unlimited retry
for(int x=0;x<2;x++) {
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
if (0 == x)
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
else
((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
// Two loops: first time, limit disk space &
// throw random IOExceptions; second time, no
// disk space limit:
double rate = 0.05;
double diskRatio = ((double) diskFree)/diskUsage;
long thisDiskFree;
String testName = null;
// NOTE(review): testName is only assigned under VERBOSE, so the
// fail(testName + ...) calls below print "null: ..." in quiet runs.
if (0 == x) {
thisDiskFree = diskFree;
// the more headroom we have, the less often we inject IOExceptions
if (diskRatio >= 2.0) {
rate /= 2;
}
if (diskRatio >= 4.0) {
rate /= 2;
}
if (diskRatio >= 6.0) {
rate = 0.0;
}
if (VERBOSE)
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
} else {
thisDiskFree = 0;
rate = 0.0;
if (VERBOSE)
testName = "disk full test " + methodName + " with unlimited disk space";
}
if (VERBOSE)
System.out.println("\ncycle: " + testName);
dir.setMaxSizeInBytes(thisDiskFree);
dir.setRandomIOExceptionRate(rate, diskFree);
try {
if (0 == method) {
writer.addIndexes(dirs);
writer.optimize();
} else if (1 == method) {
IndexReader readers[] = new IndexReader[dirs.length];
for(int i=0;i<dirs.length;i++) {
readers[i] = IndexReader.open(dirs[i], true);
}
try {
writer.addIndexes(readers);
} finally {
for(int i=0;i<dirs.length;i++) {
readers[i].close();
}
}
} else {
writer.addIndexes(dirs);
}
success = true;
if (VERBOSE) {
System.out.println("  success!");
}
if (0 == x) {
done = true;
}
} catch (IOException e) {
success = false;
err = e;
if (VERBOSE) {
System.out.println("  hit IOException: " + e);
e.printStackTrace(System.out);
}
if (1 == x) {
// the retry ran with unlimited disk, so any IOException is a real failure
e.printStackTrace(System.out);
fail(methodName + " hit IOException after disk space was freed up");
}
}
// Make sure all threads from
// ConcurrentMergeScheduler are done
_TestUtil.syncConcurrentMerges(writer);
if (VERBOSE) {
System.out.println("  now test readers");
}
// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs added, and if we
// failed, we see either all docs or no docs added
// (transactional semantics):
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when creating IndexReader: " + e);
}
int result = reader.docFreq(searchTerm);
if (success) {
if (result != START_COUNT) {
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result != START_COUNT && result != END_COUNT) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
}
}
searcher = new IndexSearcher(reader);
try {
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when searching: " + e);
}
int result2 = hits.length;
if (success) {
if (result2 != result) {
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
} else {
// On hitting exception we still may have added
// all docs:
if (result2 != result) {
err.printStackTrace(System.out);
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
}
}
searcher.close();
reader.close();
if (VERBOSE) {
System.out.println("  count is " + result);
}
if (done || result == END_COUNT) {
break;
}
}
if (VERBOSE) {
System.out.println("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
}
if (done) {
// Javadocs state that temp free Directory space
// required is at most 2X total input size of
// indices so let's make sure:
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
"starting disk usage = " + startDiskUsage + " bytes; " +
"input index disk usage = " + inputDiskUsage + " bytes",
(dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
}
// Make sure we don't hit disk full during close below:
dir.setMaxSizeInBytes(0);
dir.setRandomIOExceptionRate(0.0, 0);
writer.close();
// Wait for all BG threads to finish else
// dir.close() will throw IOException because
// there are still open files
_TestUtil.syncConcurrentMerges(ms);
dir.close();
// Try again with 5000 more bytes of free space:
diskFree += 5000;
}
}
startDir.close();
}
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
public void testAddDocumentOnDiskFull() throws IOException {
// Pass 0 closes the writer after the failure; pass 1 rolls it back.
for(int pass=0;pass<2;pass++) {
if (VERBOSE)
System.out.println("TEST: pass=" + pass);
boolean doAbort = pass == 1;
long diskFree = 200;
// Retry with progressively more simulated disk space until all
// 200 adds succeed without hitting the limit.
while(true) {
if (VERBOSE)
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
MergeScheduler ms = writer.getConfig().getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
boolean hitError = false;
try {
for(int i=0;i<200;i++) {
addDoc(writer);
}
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on addDoc");
e.printStackTrace(System.out);
}
hitError = true;
}
if (hitError) {
if (doAbort) {
writer.rollback();
} else {
try {
writer.close();
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on close");
e.printStackTrace(System.out);
}
// close() itself ran out of (simulated) space; lift the
// limit so the second close can complete cleanly.
dir.setMaxSizeInBytes(0);
writer.close();
}
}
_TestUtil.syncConcurrentMerges(ms);
// Whatever happened, no stray files may remain:
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
// Make sure reader can open the index:
IndexReader.open(dir, true).close();
dir.close();
// Now try again w/ more space:
diskFree += 500;
} else {
_TestUtil.syncConcurrentMerges(writer);
dir.close();
break;
}
}
}
}
/**
 * Asserts that every file in {@code dir} is referenced by the last
 * commit: running IndexFileDeleter must not remove anything.
 */
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  // Snapshot the directory contents before the deleter runs.
  final String[] before = dir.listAll();
  final SegmentInfos segmentInfos = new SegmentInfos();
  segmentInfos.read(dir);
  // Instantiating IndexFileDeleter prunes any file not referenced
  // by the surviving commit point.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), segmentInfos, null, null, CodecProvider.getDefault());
  final String[] after = dir.listAll();
  Arrays.sort(before);
  Arrays.sort(after);
  // Any difference means the index held unreferenced files.
  if (!Arrays.equals(before, after)) {
    fail(message + ": before delete:\n " + arrayToString(before) + "\n after delete:\n " + arrayToString(after));
  }
}
/**
 * Verifies optimize(3) reduces the index to at most 3 segments,
 * or leaves the segment count alone when it is already below 3.
 */
public void testOptimizeMaxNumSegments() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  final Document doc = new Document();
  doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  for(int numDocs=38;numDocs<500;numDocs += 38) {
    LogDocMergePolicy ldmp = new LogDocMergePolicy();
    ldmp.setMinMergeDocs(1);
    ldmp.setMergeFactor(5);
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
            ldmp));
    for(int j=0;j<numDocs;j++)
      writer.addDocument(doc);
    writer.close();
    // Record how many segments the freshly built index has:
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    ldmp = new LogDocMergePolicy();
    ldmp.setMergeFactor(5);
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer()).setMergePolicy(ldmp));
    writer.optimize(3);
    writer.close();
    sis = new SegmentInfos();
    sis.read(dir);
    final int optSegCount = sis.size();
    // optimize(3) never increases the segment count, and caps it at 3:
    if (segCount < 3)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(3, optSegCount);
  }
  // Fix: the directory was never closed (resource leak in the
  // original; every other test in this file closes its directory).
  dir.close();
}
/**
 * Same as testOptimizeMaxNumSegments but with a long-lived writer:
 * repeatedly adds docs, then checks optimize(7) caps the segment
 * count at 7 (or leaves it alone when already below).
 */
public void testOptimizeMaxNumSegments2() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  final Document doc = new Document();
  doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  LogDocMergePolicy ldmp = new LogDocMergePolicy();
  ldmp.setMinMergeDocs(1);
  ldmp.setMergeFactor(4);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setMergePolicy(ldmp));
  for(int iter=0;iter<10;iter++) {
    for(int i=0;i<19;i++)
      writer.addDocument(doc);
    // Wait for background merges so the observed segment count is stable:
    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
    writer.commit();
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    writer.optimize(7);
    writer.commit();
    sis = new SegmentInfos();
    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
    sis.read(dir);
    final int optSegCount = sis.size();
    if (segCount < 7)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(7, optSegCount);
  }
  // Fix: the writer and directory were never closed (resource leak
  // in the original; siblings in this file close both).
  writer.close();
  dir.close();
}
/**
* Make sure optimize doesn't use any more than 2X the
* starting index size as its temporary free space
* required.  (The assertion below allows up to 2X; the
* previous "1X" wording here did not match the check.)
*/
public void testOptimizeTempSpaceUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Measure the on-disk size of the starting index:
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
}
// Reset the high-water mark so only optimize's usage is measured:
dir.resetMaxUsedSizeInBytes();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
// Peak usage during optimize must stay within 2X the input size:
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2*startDiskUsage) + " (= 2X starting usage)",
maxDiskUsage <= 2*startDiskUsage);
dir.close();
}
/**
 * Joins the given file names into a single string, one name per
 * line with a leading space on continuation lines, for use in
 * failure messages. Returns the empty string for an empty array.
 */
static String arrayToString(String[] l) {
  // String.join replaces the original quadratic += concatenation loop
  // while producing the identical separator layout.
  return String.join("\n ", l);
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
  File indexDir = _TestUtil.getTempDir("lucenetestindexwriter");
  try {
    Directory dir = FSDirectory.open(indexDir);
    // add one document & close writer
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    addDoc(writer);
    writer.close();
    // now open reader:
    IndexReader reader = IndexReader.open(dir, true);
    // Fix: assertEquals takes (message, expected, actual); the
    // original passed (actual, expected), which yields misleading
    // failure messages.
    assertEquals("should be one document", 1, reader.numDocs());
    // now open index for create:
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
    assertEquals("should be zero documents", 0, writer.maxDoc());
    addDoc(writer);
    writer.close();
    // The open reader keeps its point-in-time view of the old index:
    assertEquals("should be one document", 1, reader.numDocs());
    IndexReader reader2 = IndexReader.open(dir, true);
    assertEquals("should be one document", 1, reader2.numDocs());
    reader.close();
    reader2.close();
  } finally {
    rmDir(indexDir);
  }
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Copy all but the final byte, leaving a truncated segments_N:
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// Opening must fall back to the previous, intact segments file:
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
// A writer must also be able to re-create the index in place:
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Write a truncated copy of the current segments file under the
// next generation's name:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Copy all but the final byte:
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// Remove the intact original so only the corrupt copy remains:
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: the truncated segments file must not be readable
}
if (reader != null) {
reader.close();
}
}
/**
 * Verifies that a closed IndexWriter rejects further additions
 * with AlreadyClosedException.
 */
public void testChangesAfterClose() throws IOException {
  final Directory dir = new RAMDirectory();
  final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  writer.close();
  // Any mutation after close() must fail fast:
  try {
    addDoc(writer);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException e) {
    // expected
  }
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
public void testSimulatedCorruptIndex2() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Corrupt the index by deleting the first compound segment file:
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
if (files[i].endsWith(".cfs")) {
dir.deleteFile(files[i]);
break;
}
}
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: a segment file referenced by the commit is missing
}
if (reader != null) {
reader.close();
}
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
public void testCommitOnClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Add 3 x 11 = 33 more docs without closing; freshly opened
// searchers must keep seeing only the 14 committed docs:
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 + 33 = 47 docs visible once the writer commits on close:
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
public void testCommitOnCloseAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Re-open and make uncommitted changes: 17 adds plus a delete-all:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
// A fresh searcher still sees only the 14 committed docs:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
// After rollback the index must look exactly as before:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
// 14 original + 12 * 17 new docs = 218 once committed:
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockRAMDirectory to
* measure max temp disk space used.
*/
public void testCommitOnCloseDiskUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Baseline: peak usage of the small starting index:
dir.resetMaxUsedSizeInBytes();
long startDiskUsage = dir.getMaxUsedSizeInBytes();
// Re-open for append with a serial merge scheduler so merges run
// deterministically in this thread:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10).setMergeScheduler(
new SerialMergeScheduler()));
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 2X disk usage normally we allow 100X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 100X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
midDiskUsage < 100*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
endDiskUsage < 100*startDiskUsage);
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
public void testCommitOnCloseOptimize() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Now optimize again, and this time commit via close():
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
// (message text is historical; this checks the closed/committed
// writer left no unreferenced files)
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after closing (committing) the writer:
reader = IndexReader.open(dir, true);
// Reader should now see the index as optimized:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
}
/**
 * An index committed with zero documents must open cleanly and
 * report zero docs, both after initial creation and after an
 * empty APPEND-mode commit.
 */
public void testIndexNoDocuments() throws IOException {
  final RAMDirectory dir = new RAMDirectory();
  // Commit an empty, freshly created index:
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.commit();
  writer.close();
  for (int round = 0; round < 2; round++) {
    final IndexReader reader = IndexReader.open(dir, true);
    assertEquals(0, reader.maxDoc());
    assertEquals(0, reader.numDocs());
    reader.close();
    if (round == 0) {
      // Between the two checks, commit once more in APPEND mode:
      writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.commit();
      writer.close();
    }
  }
}
/**
 * Indexes 100 docs that each carry six uniquely named fields and
 * verifies every field/term combination has docFreq == 1.
 */
public void testManyFields() throws IOException {
  final RAMDirectory dir = new RAMDirectory();
  final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // Field-name prefixes whose values embed the doc number ...
  final String[] numberedPrefixes = {"a", "b", "c"};
  // ... and prefixes whose value is always the constant "aaa".
  final String[] constantPrefixes = {"d", "e", "f"};
  for (int docNum = 0; docNum < 100; docNum++) {
    final Document doc = new Document();
    for (final String prefix : numberedPrefixes) {
      doc.add(new Field(prefix + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    }
    for (final String prefix : constantPrefixes) {
      doc.add(new Field(prefix + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    }
    writer.addDocument(doc);
  }
  writer.close();
  final IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  // Every per-document field/term pair must occur exactly once:
  for (int docNum = 0; docNum < 100; docNum++) {
    for (final String prefix : numberedPrefixes) {
      assertEquals(1, reader.docFreq(new Term(prefix + docNum, "aaa" + docNum)));
    }
    for (final String prefix : constantPrefixes) {
      assertEquals(1, reader.docFreq(new Term(prefix + docNum, "aaa")));
    }
  }
  reader.close();
  dir.close();
}
/**
 * With an absurdly small RAM buffer every added document must
 * force a flush, so the directory's file count grows after each add.
 */
public void testSmallRAMBuffer() throws IOException {
  final RAMDirectory dir = new RAMDirectory();
  final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.000001));
  int previousFileCount = dir.listAll().length;
  for (int docNum = 0; docNum < 9; docNum++) {
    final Document doc = new Document();
    doc.add(new Field("field", "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    // Verify that with a tiny RAM buffer we see new
    // segment after every doc
    final int currentFileCount = dir.listAll().length;
    assertTrue(currentFileCount > previousFileCount);
    previousFileCount = currentFileCount;
  }
  writer.close();
  dir.close();
}
/**
* Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
public void testChangingRAMBuffer() throws IOException {
RAMDirectory dir = new RAMDirectory();
// Start with doc-count flushing only (every 10 docs), RAM trigger off:
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10)
// No new files should be created
assertEquals(flushCount, lastFlushCount);
else if (10 == j) {
// The 10-doc trigger fires exactly here:
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
// Switch to a tiny RAM trigger; every add should now flush:
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 20) {
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Large RAM trigger, no doc trigger: flushing should stop:
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
// Back to the tiny RAM trigger; per-doc flushing resumes:
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Doc-count trigger only again (every 10 docs):
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
// The doc trigger fires once 10 docs have accumulated:
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/**
* Same flush-trigger ladder as testChangingRAMBuffer, but driven by
* buffered delete terms instead of added documents.
*
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
public void testChangingRAMBuffer2() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
// Seed the index with 51 docs; the loop below deletes them one by one:
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
writer.deleteDocuments(new Term("field", "aaa" + j));
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
if (j == 1)
lastFlushCount = flushCount;
else if (j < 10) {
// No new files should be created
assertEquals(flushCount, lastFlushCount);
} else if (10 == j) {
// The 10-delete-term trigger fires exactly here:
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
// Tiny RAM trigger plus per-term trigger: flush on every delete:
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 20) {
assertTrue(flushCount > lastFlushCount);
lastFlushCount = flushCount;
} else if (20 == j) {
// Large RAM trigger, term trigger off: flushing should stop:
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 30) {
assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
// NOTE(review): the previous line is immediately overridden here;
// presumably only the final setting (1) is meant to take effect.
writer.setMaxBufferedDeleteTerms(1);
} else if (j < 40) {
assertTrue(flushCount> lastFlushCount);
lastFlushCount = flushCount;
} else if (40 == j) {
// Term-count trigger only again (every 10 delete terms):
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
lastFlushCount = flushCount;
} else if (j < 50) {
assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
dir.close();
}
/**
 * Stresses the indexing buffers with three very different document
 * shapes per round, then verifies the constant-term docs are all
 * searchable.
 */
public void testDiverseDocs() throws IOException {
  final RAMDirectory dir = new RAMDirectory();
  final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  final Random rand = newRandom();
  for (int round = 0; round < 3; round++) {
    // First, docs where every term is unique (heavy on
    // Posting instances)
    for (int docNum = 0; docNum < 100; docNum++) {
      final Document uniqueTermsDoc = new Document();
      for (int termNum = 0; termNum < 100; termNum++) {
        uniqueTermsDoc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
      }
      writer.addDocument(uniqueTermsDoc);
    }
    // Next, many single term docs where only one term
    // occurs (heavy on byte blocks)
    for (int docNum = 0; docNum < 100; docNum++) {
      final Document repeatedTermDoc = new Document();
      repeatedTermDoc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(repeatedTermDoc);
    }
    // Next, many single term docs where only one term
    // occurs but the terms are very long (heavy on
    // char[] arrays)
    for (int docNum = 0; docNum < 100; docNum++) {
      final StringBuilder sb = new StringBuilder();
      final String chunk = Integer.toString(docNum) + ".";
      for (int rep = 0; rep < 1000; rep++) {
        sb.append(chunk);
      }
      final Document longTermDoc = new Document();
      longTermDoc.add(new Field("field", sb.toString(), Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(longTermDoc);
    }
  }
  writer.close();
  // Each round wrote 100 docs containing the term "aaa": expect 300 hits.
  final IndexSearcher searcher = new IndexSearcher(dir, false);
  final ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
  assertEquals(300, hits.length);
  searcher.close();
  dir.close();
}
/**
* Verifies that a single non-omitNorms doc mixed in with omitNorms
* docs does not break searching, whether that doc arrives before or
* after the first flush.
*/
public void testEnablingNorms() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
// Rebuild from scratch, this time enabling norms only for a doc
// that arrives after the first flush (maxBufferedDocs is 10):
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
// The index must still open cleanly:
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
/**
* Indexes one massive document in which a single term occurs 128K
* times, and verifies the postings report the correct in-document
* frequency.
*/
public void testHighFreqTerm() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
// 4096 iterations x 32 "a"s appended per iteration = 131072 terms:
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
// Walk the postings for "a" and check its within-document frequency:
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
  final class MyRAMDirectory extends RAMDirectory {
    private LockFactory myLockFactory;
    MyRAMDirectory() {
      // Null out the inherited factory; all locking goes through
      // our private one via makeLock below.
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }
  Directory dir = new MyRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  // Fix: the original redundantly called close() on the
  // already-closed writer here and never closed the searcher.
  searcher.close();
  // Re-creating the index exercises lock acquisition again:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  dir.close();
}
/**
 * Flushes 19 docs (maxBufferedDocs=2) without allowing merges and
 * verifies exactly 10 segments result.
 */
public void testFlushWithNoMerging() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  for (int i = 0; i < 19; i++) {
    writer.addDocument(doc);
  }
  // Flush the buffered docs but do not trigger a merge:
  writer.flush(false, true);
  writer.close();
  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now
  // have 10 segments
  // Fix: the original used the Java "assert" statement, which is a
  // silent no-op unless the JVM runs with -ea; assertEquals always
  // checks and reports the actual value on failure.
  assertEquals(10, sis.size());
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // Commit so the real doc is flushed (with norms) first...
  writer.commit();
  // ...then add a completely empty doc (no fields, so no norms):
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  // Fix: the reader and directory were never closed in the original.
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
// Kicks off optimize(false) (returns without waiting) and verifies:
// pass 0: close() waits for the background optimize, so a fresh reader sees
// an optimized index; pass 1: docs added after optimize(false) are flushed
// as a new segment that is NOT folded into the running optimize, so the
// index ends up with exactly 2 segments and is not "optimized".
public void testBackgroundOptimize() throws IOException {
Directory dir = new MockRAMDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
// High merge factor so the 200 adds leave many small segments behind for
// the optimize to work on.
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Start the optimize but do not wait for it to finish.
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// The optimized segment plus the segment flushed after optimize(false).
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
 * Deletes every file directly inside {@code dir} (non-recursive), then
 * deletes {@code dir} itself. A null listing (dir missing or not a
 * directory) is tolerated: we still attempt to delete {@code dir}.
 */
private void rmDir(File dir) {
  final File[] contents = dir.listFiles();
  if (contents != null) {
    for (final File f : contents) {
      f.delete();
    }
  }
  dir.delete();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
/**
 * Verifies that adding a single document whose only field is empty, with
 * term vectors enabled, does not raise a NullPointerException.
 *
 * @throws IOException on unexpected index I/O failure
 */
public void testBadSegment() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(doc);
  writer.close();
  dir.close();
}
// LUCENE-1008
public void testNoTermVectorAfterTermVector() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1010
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
iw.optimize();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.commit();
iw.optimize();
iw.close();
dir.close();
}
// LUCENE-1036
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
} finally {
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs.
private class MyMergeScheduler extends MergeScheduler {
  @Override
  synchronized public void merge(IndexWriter writer)
    throws CorruptIndexException, IOException {
    // Drain and run every pending merge on the calling thread.
    while (true) {
      MergePolicy.OneMerge merge = writer.getNextMerge();
      if (merge == null) {
        break;
      }
      for (int i = 0; i < merge.segments.size(); i++) {
        // Fix: use a real JUnit assertion -- a bare `assert` is silently
        // skipped when the JVM runs without -ea, so the maxMergeDocs check
        // could never fail.
        assertTrue(merge.segments.info(i).docCount < 20);
      }
      writer.merge(merge);
    }
  }
  @Override
  public void close() {}
}
// LUCENE-1013
// LUCENE-1013: with maxMergeDocs=20 and MyMergeScheduler intercepting every
// merge, no selected merge may ever contain a segment of >= 20 docs.
public void testSetMaxMergeDocs() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
  lmp.setMaxMergeDocs(20);
  lmp.setMergeFactor(2);
  IndexWriter iw = new IndexWriter(dir, conf);
  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  // Enough docs to force many merges through MyMergeScheduler's check.
  for (int i = 0; i < 177; i++) {
    iw.addDocument(document);
  }
  iw.close();
  // Fix: release the directory (the original leaked it).
  dir.close();
}
// LUCENE-1072
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// MockRAMDirectory failure hook: once armed via setDoFail(), throws an
// IOException during segment flush -- but only when BOTH
// FreqProxTermsWriterPerField.flush and a flushSegment frame are on the
// current stack, and only after roughly 30 such qualifying writes have
// occurred. Disarms itself after throwing once.
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Inspect the stack to make sure we are inside a real flush.
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
// Let some writes through first (count only advances on qualifying
// calls), then fail exactly once.
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyOnFlush failure = new FailOnlyOnFlush();
failure.setDoFail();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
writer.addDocument(doc);
} catch (IOException ioe) {
// only one flush should fail:
assertFalse(hitError);
hitError = true;
}
}
assertTrue(hitError);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(198, reader.docFreq(new Term("content", "aa")));
reader.close();
}
// TokenFilter that throws IOException on the 5th token of any field named
// "crash"; all other fields pass through untouched. reset() rearms the
// counter so the filter can be reused across documents.
private class CrashingFilter extends TokenFilter {
String fieldName;
int count;
public CrashingFilter(String fieldName, TokenStream input) {
super(input);
this.fieldName = fieldName;
}
@Override
public boolean incrementToken() throws IOException {
// count++ only evaluates for the "crash" field (short-circuit &&), so
// other fields never advance the counter.
if (this.fieldName.equals("crash") && count++ >= 4)
throw new IOException("I'm experiencing problems");
return input.incrementToken();
}
@Override
public void reset() throws IOException {
super.reset();
count = 0;
}
}
// Per-document exceptions (CrashingFilter on the "crash" field) must abort
// only the offending document: it is kept as a deleted doc, the writer stays
// usable, and the index remains consistent. Pass i==0 adds 2 extra good docs
// after the failure; i==1 does not -- hence the (1-i)*2 terms below.
public void testDocumentsWriterExceptions() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// The "crash" field makes CrashingFilter throw after 4 terms; "other" is
// processed after it and so never gets indexed for this doc.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected -- thrown by CrashingFilter
}
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// docFreq counts deleted docs, so the aborted doc still contributes.
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly the one aborted doc is marked deleted.
assertEquals(1, numDel);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// optimize() expunges the deleted doc, so numDel drops to 0 below.
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Threaded version of testDocumentsWriterExceptions: NUM_THREAD writers each
// run NUM_ITER iterations of (2 good docs, 1 crashing doc, and on pass i==0
// another 2 good docs). Every crashing doc must become exactly one deleted
// doc; the index stays consistent throughout.
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// The "crash" field trips CrashingFilter after 4 terms.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected -- injected by CrashingFilter
}
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
IndexReader reader = IndexReader.open(dir, true);
// docFreq counts deleted docs; each iteration contributed 3 (or 5 on
// pass i==0) docs containing "here", including the aborted one.
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly one deleted doc per crashing add.
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// optimize() expunges all deletions.
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Index 20 rounds of docs whose field set varies per round (empty fields,
// stored vs. unstored, a missing-fields round at i==7), deleting one doc per
// round and periodically optimizing -- merges must cope with the changing
// "schema" without compound files.
public void testVariableSchema() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
// Alternate stored/unstored and presence of content4 between rounds.
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
// NOTE(review): dir is never closed here -- TODO confirm whether that is
// intentional.
}
// Stresses close(false) ("no-wait close"): a background thread keeps adding
// docs while the test aborts the writer mid-merge; the index must stay
// readable and reopenable afterwards.
public void testNoWaitClose() throws Throwable {
  RAMDirectory directory = new MockRAMDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
        .setMaxBufferedDocs(2);
    // NOTE(review): unreachable -- pass only takes values 0 and 1. Looks
    // like a leftover from a three-pass variant of this test; confirm the
    // intended loop bound before removing.
    if (pass == 2) {
      conf.setMergeScheduler(new SerialMergeScheduler());
    }
    IndexWriter writer = new IndexWriter(directory, conf);
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
    for (int iter = 0; iter < 10; iter++) {
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // expected once close(false) aborts the writer
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Fix: check for unexpected failures AFTER the thread has run. The
      // original checked before t1.start(), when the list was necessarily
      // empty, so a failure could never be reported.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
    }
    writer.close();
  }
  directory.close();
}
// Used by test cases below
// Used by test cases below
// Worker that repeatedly updateDocument()s for ~200ms, counting successes in
// addCount (volatile so the test thread can poll it). Simulated disk-full
// IOExceptions are tolerated up to 6 times before the thread stops; any
// other failure stops the thread and, when noErrors is set (the test
// expects a clean run), is recorded in `error` for the test to assert on.
private class IndexerThread extends Thread {
boolean diskFull;
Throwable error;
AlreadyClosedException ace;
IndexWriter writer;
boolean noErrors;
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
// Run for roughly 200ms of wall-clock time.
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
// Messages produced by MockRAMDirectory's disk-full / injected
// failure machinery are expected in the disk-full tests.
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
// Give up after 6 disk-full hits.
if (fullCount++ >= 5)
break;
} else {
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
// LUCENE-1130: close(false) must not hang while indexer threads are still
// adding docs (they should hit AlreadyClosedException instead), and the
// index must remain readable afterwards.
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Abort while the indexer threads are still running.
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
// At least one add succeeded before close, so "aaa" must be findable.
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK: each add fails cleanly instead of hanging, and once space is
// available again the writer can close.
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
// Cap the directory at its current size so the very next write "fills" it.
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: simulated disk full
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: simulated disk full
}
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: simulated disk full
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK: no thread may hang or see an unexpected error.
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// Grow the allowed size a little each iteration to vary where the
// simulated disk-full hits.
dir.setMaxSizeInBytes(4*1024+20*iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
// Failure hook that throws an IOException whenever an "abort" or
// "finishDocument" frame is on the stack (i.e. during FieldsWriter
// flushDocument or DocumentsWriter abort). With onlyOnce, disarms itself
// after the first throw.
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Only fail when called from within abort/finishDocument.
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("abort".equals(trace[i].getMethodName()) ||
"finishDocument".equals(trace[i].getMethodName())) {
if (onlyOnce)
doFail = false;
//System.out.println(Thread.currentThread().getName() + ": now fail");
//new Throwable().printStackTrace(System.out);
throw new IOException("now failing on purpose");
}
}
}
}
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Indexes 6 docs cleanly, arms `failure`, expects the next add/commit to
// throw, then disarms it and verifies the writer is still usable.
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<6;i++)
writer.addDocument(doc);
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
// expected -- injected by `failure`
}
failure.clearDoFail();
// Writer must have recovered: one more add and a close succeed.
writer.addDocument(doc);
writer.close(false);
// NOTE(review): dir is not closed here -- TODO confirm intentional.
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// Starts NUM_THREADS IndexerThreads, arms `failure` while they run, then
// verifies no thread saw an unexpected error and the index (if the close
// succeeded) is fully readable.
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Let the threads get going before arming the failure.
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable ", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
// close hit the injected failure; disarm and retry so the index is
// left in a closed state either way.
failure.clearDoFail();
writer.close(false);
}
if (success) {
// Close succeeded despite the failure hook: every live doc must be
// fully retrievable.
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130 wrappers: exercise FailOnlyOnAbortOrFlush in single- and
// multi-threaded variants; the boolean argument is `onlyOnce` (whether the
// hook disarms after its first throw).
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbort() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
}
// Throws IOException during DocumentsWriter.closeDocStore
// NOTE(review): the surrounding comment says "during
// DocumentsWriter.closeDocStore", but the trigger below actually matches
// DocFieldProcessor.finishDocument -- confirm which is intended.
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
// When true, disarm after the first throw.
private boolean onlyOnce;
public FailOnlyInCloseDocStore(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Only fail when called from DocFieldProcessor.finishDocument.
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("finishDocument".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
// LUCENE-1130 wrappers for FailOnlyInCloseDocStore; the boolean argument is
// `onlyOnce` (whether the hook disarms after its first throw).
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStore() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
}
// Throws IOException during DocumentsWriter.writeSegment
// Failure hook that throws an IOException when called from
// DocFieldProcessor.flush (i.e. while writing a segment). With onlyOnce,
// disarms itself after the first throw.
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInWriteSegment(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
// Only fail from within DocFieldProcessor.flush.
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
// LUCENE-1130 wrappers for FailOnlyInWriteSegment; the boolean argument is
// `onlyOnce` (whether the hook disarms after its first throw).
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegment() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
public void testSegmentsChecksumError() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
IndexInput in = dir.openInput(segmentsFileName);
IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
out.copyBytes(in, in.length()-1);
byte b = in.readByte();
out.writeByte((byte) (1+b));
out.close();
in.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("segmentInfos failed to retry fallback to correct segments_N file");
}
reader.close();
}
// LUCENE-1044: test writer.commit() when ac=false
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// A reader opened now must see only the committed (empty) state.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// Reopening after commit() exposes the 23 added docs...
IndexReader reader2 = reader.reopen();
// ...while the original reader still sees the old commit point.
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// Uncommitted adds stay invisible to existing and newly opened readers.
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// After the second commit all 23+17 docs are visible.
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// Throws IOException during MockRAMDirectory.sync
private static class FailOnlyInSync extends MockRAMDirectory.Failure {
  boolean didFail;

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    // Only fail when we are being invoked from MockRAMDirectory.sync().
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      boolean inSync = doFail
          && "org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName())
          && "sync".equals(frame.getMethodName());
      if (inSync) {
        didFail = true;
        throw new IOException("now failing on purpose during sync");
      }
    }
  }
}
// LUCENE-1044: test exception during sync
public void testExceptionDuringSync() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
failure.setDoFail();
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
for (int i = 0; i < 23; i++) {
addDoc(writer);
// Commit every other doc; some commits are expected to fail in sync().
if ((i-1)%2 == 0) {
try {
writer.commit();
} catch (IOException ioe) {
// expected
}
}
}
// Let background merges finish so failure.didFail is settled.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(failure.didFail);
failure.clearDoFail();
writer.close();
// Despite the injected sync failures, no documents may be lost.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
dir.close();
}
// LUCENE-1168
// Mixing docs with and without term vectors must not corrupt the
// term-vector files across flush, optimize, and addIndexes.
public void testTermVectorCorruption() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Two docs with only a stored field (no term vectors)...
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
writer.addDocument(document);
// ...then one doc that also carries a term-vector field.
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
writer.optimize();
writer.close();
// Reading every doc and its term vectors must not throw.
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<reader.numDocs();i++) {
reader.document(i);
reader.getTermFreqVectors(i);
}
reader.close();
// Also exercise addIndexes from a copy of this index.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Directory[] indexDirs = {new MockRAMDirectory(dir)};
writer.addIndexes(indexDirs);
writer.optimize();
writer.close();
}
dir.close();
}
// LUCENE-1168
// After optimize, the docs indexed without term vectors must report null
// vectors, and the one doc with vectors must still return them.
public void testTermVectorCorruption2() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Docs 0 and 1: stored field only, no term vectors.
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
writer.addDocument(document);
// Doc 2: additionally carries a term-vector field.
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.getTermFreqVectors(0)==null);
assertTrue(reader.getTermFreqVectors(1)==null);
assertTrue(reader.getTermFreqVectors(2)!=null);
reader.close();
}
dir.close();
}
// LUCENE-1168
// Term vectors must survive two writer sessions plus an optimize.
public void testTermVectorCorruption3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
          new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));

  // One document carrying both a stored field and a term-vector field.
  // (The original created a throwaway Document and immediately replaced it.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();

  // A second writer session adds more docs, then merges down to one segment.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()).setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
          new LogDocMergePolicy()));
  for(int i=0;i<6;i++)
    writer.addDocument(document);
  writer.optimize();
  writer.close();

  // Reading term vectors and stored fields must not hit corruption.
  IndexReader reader = IndexReader.open(dir, true);
  for(int i=0;i<10;i++) {
    reader.getTermFreqVectors(i);
    reader.document(i);
  }
  reader.close();
  dir.close();
}
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));

  // 10,000 filler terms followed by the marker term "x" — well under the
  // configured limit of 100,000 terms.
  StringBuilder text = new StringBuilder();
  for (int count = 0; count < 10000; count++) {
    text.append(" a");
  }
  text.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", text.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  // The trailing term must have survived indexing.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));

  // One document with a stored field and a term-vector field.
  // (The original created a throwaway Document and immediately replaced it.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();

  // Delete two documents through a reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();

  // expungeDeletes must physically drop the deleted docs, keeping the 8 live ones.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();

  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);

  // One document with a stored field and a term-vector field.
  // (The original created a throwaway Document and immediately replaced it.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();

  // Delete every other document.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();

  // With mergeFactor=3, expungeDeletes needs many adjacent merges.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();

  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);

  // One document with a stored field and a term-vector field.
  // (The original created a throwaway Document and immediately replaced it.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();

  // Delete every other document.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();

  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Force many merges to happen
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  // doWait=false: don't block for the merges; close() finishes them.
  writer.expungeDeletes(false);
  writer.close();

  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
// An empty field name must be accepted and indexed without error.
public void testEmptyFieldName() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  dir.close(); // was leaked in the original (every sibling test closes dir)
}
// LUCENE-1198
// IndexWriter whose testPoint hook can be made to throw while
// DocumentsWriterPerThread is initializing.
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// When true, the next "DocumentsWriterPerThread.init start" test point throws.
boolean doFail;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
}
// An exception thrown during DocumentsWriter initialization must leave
// the writer closable and the index consistent (checkIndex passes).
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
// An analyzer exception on the doc that triggers a flush (maxBufferedDocs=2)
// must not corrupt the writer; it must keep accepting documents.
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
// Analyzer whose token stream throws partway through the "crash" field.
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected
}
// The writer must still work after the failed add.
w.addDocument(doc);
w.close();
dir.close();
}
// IndexWriter whose testPoint hook throws at merge initialization
// ("startMergeInit"), recording that the failure actually fired.
private static final class MockIndexWriter2 extends IndexWriter {
public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// Enables the injected failure.
boolean doFail;
// Set once the failure has been triggered at least once.
boolean failed;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("startMergeInit")) {
failed = true;
throw new RuntimeException("intentionally failing");
}
return true;
}
}
// LUCENE-1210
// An exception during merge initialization must be surfaced (failed flag)
// and must not prevent closing the writer.
public void testExceptionOnMergeInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
  ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
  MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
  w.doFail = true;
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
      Field.Index.ANALYZED));
  // Adding docs triggers merges (mergeFactor=2); startMergeInit then throws.
  // (Braces added: the original had an unbraced for-loop wrapping try/catch.)
  for (int i = 0; i < 10; i++) {
    try {
      w.addDocument(doc);
    } catch (RuntimeException re) {
      break;
    }
  }
  // Wait for background merge threads so the failure flag is settled.
  ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
  assertTrue(w.failed);
  w.close();
  dir.close();
}
// IndexWriter that records whether the doBeforeFlush/doAfterFlush hooks
// were invoked (used by testDoBeforeAfterFlush).
private static final class MockIndexWriter3 extends IndexWriter {
public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// Set when the corresponding hook runs.
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
}
// LUCENE-1222
// commit() must invoke both doBeforeFlush and doAfterFlush, for a flush
// caused by an add as well as one caused by a delete.
public void testDoBeforeAfterFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter3 w = new MockIndexWriter3(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset the flags and verify the hooks fire again for a delete-only flush.
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
// The deleted doc is gone but still counted in maxDoc.
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// Injects two failures during commit: a RuntimeException inside
// SegmentInfos.prepareCommit, then an IOException while the failed
// commit's files are being deleted.
private static class FailOnlyInCommit extends MockRAMDirectory.Failure {
  boolean fail1, fail2;

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    // Inspect the call stack: are we inside a commit, and/or a file delete?
    boolean isCommit = false;
    boolean isDelete = false;
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("org.apache.lucene.index.SegmentInfos".equals(frame.getClassName())
          && "prepareCommit".equals(frame.getMethodName())) {
        isCommit = true;
      }
      if ("org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName())
          && "deleteFile".equals(frame.getMethodName())) {
        isDelete = true;
      }
    }
    if (!isCommit) {
      return;
    }
    if (isDelete) {
      // Second failure: during cleanup of the failed commit.
      fail2 = true;
      throw new IOException("now fail during delete");
    }
    // First failure: during the commit itself.
    fail1 = true;
    throw new RuntimeException("now fail first");
  }
}
// LUCENE-1214
// The primary RuntimeException from the failed commit must be thrown;
// the secondary IOException from cleanup must not mask it.
public void testExceptionsDuringCommit() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInCommit failure = new FailOnlyInCommit();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
dir.failOn(failure);
try {
w.close();
fail();
} catch (IOException ioe) {
fail("expected only RuntimeException");
} catch (RuntimeException re) {
// Expected
}
// Both injected failures must have fired.
assertTrue(failure.fail1 && failure.fail2);
w.rollback();
dir.close();
}
// Pairs of (raw input, expected value after indexing): invalid UTF-16
// sequences (unpaired or reversed surrogates) must come back with the
// offending code units replaced by U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Indexes the invalid-UTF-16 inputs (even entries of utf8Data) and checks
// that both the indexed terms and the stored values come back as the
// sanitized strings (odd entries).
public void testInvalidUTF16() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point through UTF16toUTF8 and
// UTF8toUTF16 and compares against the JDK's own UTF-8 conversion.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
// BMP code point: a single UTF-16 code unit.
chars[len++] = (char) ch;
} else {
// Supplementary code point: encode as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte agreement with the JDK encoder.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Shared randomness for the unicode round-trip tests below.
Random r;

// Uniform value in [0, lim).
private int nextInt(int lim) {
  return r.nextInt(lim);
}

// Uniform value in [start, end).
private int nextInt(int start, int end) {
  final int span = end - start;
  return start + r.nextInt(span);
}
// Fills buffer[offset..offset+count) with random UTF-16 content and writes
// the chars expected after a UTF-8 round trip into `expected` (unpaired
// surrogates become U+FFFD). Returns true if any illegal (unpaired)
// surrogate was generated.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which kind of char/sequence to generate at position i.
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (r.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The unpaired surrogate is expected to decode as the replacement char.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Round-trips random (sometimes intentionally illegal) UTF-16 buffers
// through UTF16toUTF8/UTF8toUTF16, comparing against the JDK conversion
// for the fully-legal cases.
public void testRandomUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: our UTF-8 must match the JDK's byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Decoding back must yield the expected chars (U+FFFD for illegal input).
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Like testRandomUnicodeStrings, but keeps a random prefix of the previous
// iteration's buffer to exercise the incremental UTF8toUTF16 path
// (conversion starting at a non-zero byte offset).
public void testIncrementalUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find where this iteration's UTF-8 first differs from the previous
// one, and decode incrementally from that byte onward.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// The full (non-incremental) conversion must agree as well.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
// A token stream whose first token has position increment 0 must still be
// indexable, searchable by phrase/span queries, and pass checkIndex.
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
// First token gets increment 0; the rest get the normal 1.
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
// The phrase "a b c" must match despite the zero first increment.
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// The stored position of "a" must be 0.
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
assertTrue(_TestUtil.checkIndex(dir));
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
// prepareCommit() alone must not make the changes visible to readers.
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// commit() completes the two-phase commit and publishes the 23 docs.
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
// A second prepareCommit() again stays invisible until commit().
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// rollback() after prepareCommit() must discard the prepared changes.
public void testPrepareCommitRollback() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// Abort the prepared commit: the 23 docs must never become visible.
writer.rollback();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
// A fresh writer on the rolled-back index starts from the empty commit.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
// This time the commit goes through: only the 17 new docs are visible.
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
public void testPrepareCommitNoChanges() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();

  // prepareCommit/commit on a writer with no pending changes must succeed.
  IndexWriter w = new IndexWriter(dir,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.prepareCommit();
  w.commit();
  w.close();

  // The resulting (empty) commit point must be readable.
  IndexReader ir = IndexReader.open(dir, true);
  assertEquals(0, ir.numDocs());
  ir.close();
  dir.close();
}
// Base harness for the addIndexes stress tests: NUM_THREADS threads
// repeatedly run doBody() against writer2 (on dir2), copying from a source
// index (dir) holding NUM_INIT_DOCS docs. Subclasses define the
// per-iteration work and how escaping Throwables are handled.
private abstract static class RunAddIndexesThreads {
Directory dir, dir2;
final static int NUM_INIT_DOCS = 17;
IndexWriter writer2;
final List<Throwable> failures = new ArrayList<Throwable>();
// Set (before writer2.close) so handlers can ignore expected errors.
volatile boolean didClose;
final IndexReader[] readers;
final int NUM_COPY;
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
public RunAddIndexesThreads(int numCopy) throws Throwable {
NUM_COPY = numCopy;
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
dir2 = new MockRAMDirectory();
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.commit();
readers = new IndexReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++)
readers[i] = IndexReader.open(dir, true);
}
// Starts NUM_THREADS workers; numIter <= 0 means loop until an exception
// (e.g. the writer being closed concurrently) ends the thread.
void launchThreads(final int numIter) {
for(int i=0;i<NUM_THREADS;i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
// Each thread works on its own copies of the source index.
final Directory[] dirs = new Directory[NUM_COPY];
for(int k=0;k<NUM_COPY;k++)
dirs[k] = new MockRAMDirectory(dir);
int j=0;
while(true) {
// System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
if (numIter > 0 && j == numIter)
break;
doBody(j++, dirs);
}
} catch (Throwable t) {
handle(t);
}
}
};
}
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
}
void joinThreads() throws Exception {
for(int i=0;i<NUM_THREADS;i++)
threads[i].join();
}
void close(boolean doWait) throws Throwable {
didClose = true;
writer2.close(doWait);
}
void closeDir() throws Throwable {
for(int i=0;i<NUM_COPY;i++)
readers[i].close();
dir2.close();
}
// Work performed on iteration j by every thread.
abstract void doBody(int j, Directory[] dirs) throws Throwable;
// Called with any Throwable escaping doBody().
abstract void handle(Throwable t);
}
// Worker that cycles through addIndexes variants and commit; every
// escaping Throwable is recorded as a test failure.
private class CommitAndAddIndexes extends RunAddIndexesThreads {
public CommitAndAddIndexes(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void handle(Throwable t) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
// Add from directories, then fully merge.
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
// Add via the open readers instead of directories.
writer2.addIndexes(readers);
break;
case 3:
writer2.addIndexes(dirs);
writer2.maybeMerge();
break;
case 4:
// One iteration in five only commits (adds no docs).
writer2.commit();
}
}
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
public void testAddIndexesWithThreads() throws Throwable {
final int NUM_ITER = 15;
final int NUM_COPY = 3;
CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
c.launchThreads(NUM_ITER);
// Concurrently add docs directly to writer2 while the threads run.
for(int i=0;i<100;i++)
addDoc(c.writer2);
c.joinThreads();
// 100 direct adds, plus: 4 of every 5 iterations add NUM_COPY copies of
// the NUM_INIT_DOCS-doc source index, per thread.
int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
assertEquals(expectedNumDocs, c.writer2.numDocs());
c.close(true);
assertTrue(c.failures.size() == 0);
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
assertEquals(expectedNumDocs, reader.numDocs());
reader.close();
c.closeDir();
}
// Variant that tolerates AlreadyClosedException/NullPointerException,
// which are expected when the writer is closed while the threads run.
private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
public CommitAndAddIndexes2(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void handle(Throwable t) {
if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
}
// LUCENE-1335: test simultaneous addIndexes & close
public void testAddIndexesWithClose() throws Throwable {
final int NUM_COPY = 3;
CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
//c.writer2.setInfoStream(System.out);
c.launchThreads(-1);
// Close w/o first stopping/joining the threads
c.close(true);
//c.writer2.close();
c.joinThreads();
// Closing under concurrent addIndexes must leave a consistent index.
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// Worker whose handle() tolerates the exceptions expected once the
// writer has been closed or rolled back mid-operation (didClose == true).
private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
public CommitAndAddIndexes3(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.optimize();
// NOTE(review): no break here -- case 3 falls through into
// commit().  Confirm the fall-through is intentional.
case 4:
writer2.commit();
}
}
@Override
void handle(Throwable t) {
// Once close()/rollback() has begun, closed-writer and aborted-merge
// exceptions are expected and must not be reported as failures.
boolean report = true;
if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
report = !didClose;
} else if (t instanceof IOException) {
// MergeAbortedException may arrive wrapped in an IOException
Throwable t2 = t.getCause();
if (t2 instanceof MergePolicy.MergeAbortedException) {
report = !didClose;
}
}
if (report) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
}
// LUCENE-1335: test simultaneous addIndexes & close
// Closes the writer WITHOUT waiting for merges while addIndexes threads
// are running; aborted merges are tolerated via CommitAndAddIndexes3.
public void testAddIndexesWithCloseNoWait() throws Throwable {
final int NUM_COPY = 50;
CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
c.launchThreads(-1);
// let the workers make some progress before pulling the rug out
Thread.sleep(500);
// Close w/o first stopping/joining the threads
c.close(false);
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// LUCENE-1335: test simultaneous addIndexes & close
// Rolls the writer back while addIndexes threads are still running.
public void testAddIndexesWithRollback() throws Throwable {
final int NUM_COPY = 50;
CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
c.launchThreads(-1);
Thread.sleep(500);
// Rollback w/o first stopping/joining the threads; setting didClose
// first makes the workers treat close-related exceptions as expected.
c.didClose = true;
c.writer2.rollback();
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// LUCENE-1347
// IndexWriter whose "rollback before checkpoint" testpoint can be made
// to throw, simulating a failure in the middle of rollback.
private static final class MockIndexWriter4 extends IndexWriter {
public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// when true, the next "rollback before checkpoint" testpoint throws
boolean doFail;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("rollback before checkpoint"))
throw new RuntimeException("intentionally failing");
return true;
}
}
// LUCENE-1347
// LUCENE-1347: a testpoint failure during rollback must not leave the
// writer in a state where a subsequent rollback hangs.
public void testRollbackExceptionHang() throws Throwable {
  final MockRAMDirectory dir = new MockRAMDirectory();
  final MockIndexWriter4 writer =
      new MockIndexWriter4(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);

  // First rollback attempt: the injected testpoint failure fires.
  writer.doFail = true;
  try {
    writer.rollback();
    fail("did not hit intentional RuntimeException");
  } catch (RuntimeException expectedFailure) {
    // intentional failure injected by the testpoint
  }

  // With injection disabled, rollback must now complete (no hang).
  writer.doFail = false;
  writer.rollback();
}
// LUCENE-1219
// LUCENE-1219: a binary field added with an offset/length must store
// exactly the requested slice of the source array.
public void testBinaryFieldOffsetLength() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // store bytes [10, 10+17) of b
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  // before flush, getBinaryValue returns the full backing array plus
  // the slice description via offset/length
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: was assertEquals(17, b.length, 17), which resolves to the
  // (expected, actual, delta) double overload with delta=17 -- it would
  // accept any length from 0 to 34.  Assert the exact stored length.
  assertEquals(17, b.length);
  assertEquals(87, b[0]); // first stored byte is the original b[10] == 10+77
  ir.close();
  dir.close();
}
// LUCENE-1382
// LUCENE-1382: user data passed to commit(Map) must be readable from the
// directory and from readers, and must survive later commits/optimize.
public void testCommitUserData() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
// commit(Map) was never called, so user data must be empty
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// user data is visible both via the directory and via a reader
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
// optimize + close must not drop the previously committed user data
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// Repeatedly optimizes copies of a multi-segment index under random IO
// failures; any IOException thrown by optimize must carry a root cause.
public void testOptimizeExceptions() throws IOException {
RAMDirectory startDir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
IndexWriter w = new IndexWriter(startDir, conf);
// build the source index with several segments (maxBufferedDocs=2)
for(int i=0;i<27;i++)
addDoc(w);
w.close();
for(int i=0;i<200;i++) {
// fresh copy each iteration so a previous failure can't poison it
MockRAMDirectory dir = new MockRAMDirectory(startDir);
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
w = new IndexWriter(dir, conf);
// half of IO operations fail randomly during the optimize below
dir.setRandomIOExceptionRate(0.5, 100);
try {
w.optimize();
} catch (IOException ioe) {
if (ioe.getCause() == null)
fail("optimize threw IOException without root cause");
}
w.close();
dir.close();
}
}
// LUCENE-1429
// An OOME during close()'s flush must propagate, and a second close()
// must then succeed rather than throw IllegalStateException.
public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
final List<Throwable> thrown = new ArrayList<Throwable>();
final IndexWriter writer = new IndexWriter(new MockRAMDirectory(),
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())) {
@Override
public void message(final String message) {
// throw a fake OOME exactly once, from inside close()'s flush;
// thrown doubles as the "already fired" flag
if (message.startsWith("now flush at close") && 0 == thrown.size()) {
thrown.add(null);
throw new OutOfMemoryError("fake OOME at " + message);
}
}
};
// need to set an info stream so message is called
writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
try {
writer.close();
fail("OutOfMemoryError expected");
}
catch (final OutOfMemoryError expected) {}
// throws IllegalStateEx w/o bug fix
writer.close();
}
// LUCENE-1442
// LUCENE-1442: offsets must keep advancing correctly when the same
// NOT_ANALYZED field instance is added multiple times, with an empty
// field value mixed in.
public void testDoubleOffsetCounting() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
// same field instance twice, then an empty value, then a third time
doc.add(f);
doc.add(f);
Field f2 = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// Token "" occurred once
assertEquals(1, termOffsets.length);
assertEquals(8, termOffsets[0].getStartOffset());
assertEquals(8, termOffsets[0].getEndOffset());
// Token "abcd" occurred three times
termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(1);
assertEquals(3, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(4, termOffsets[1].getStartOffset());
assertEquals(8, termOffsets[1].getEndOffset());
assertEquals(8, termOffsets[2].getStartOffset());
assertEquals(12, termOffsets[2].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1442
// LUCENE-1442: offsets must keep advancing when the same ANALYZED field
// instance is added twice to one document.
public void testDoubleOffsetCounting2() throws Exception {
  final MockRAMDirectory dir = new MockRAMDirectory();
  final IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  final Field field = new Field("field", "abcd", Field.Store.NO,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  final Document document = new Document();
  document.add(field);
  document.add(field); // same field instance twice
  writer.addDocument(document);
  writer.close();

  final IndexReader reader = IndexReader.open(dir, true);
  final TermVectorOffsetInfo[] offsets =
      ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  // "abcd" occurs twice; second occurrence continues past the first value
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(5, offsets[1].getStartOffset());
  assertEquals(9, offsets[1].getEndOffset());
  reader.close();
  dir.close();
}
// LUCENE-1448
// LUCENE-1448: end offsets must account for trailing characters the
// analyzer consumes but does not tokenize (the trailing space here).
public void testEndOffsetPositionCharAnalyzer() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// note the trailing space in the field value
Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// "abcd" occurs twice; the second occurrence's offsets continue past
// the full 5-char first value (and position gap)
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: same as testEndOffsetPositionCharAnalyzer, but the token
// stream is wrapped in a CachingTokenFilter, which must not break the
// offset accounting when the stream is replayed for the second add.
public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
Analyzer analyzer = new MockAnalyzer();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// expected offsets are identical to the plain-analyzer variant
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: offsets must account for stopped tokens ("the" is removed
// by the stop set but its characters still advance the offset).
public void testEndOffsetPositionStopFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// only "abcd" survives the stop filter, once per field instance
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
// second occurrence starts after the full "abcd the" first value
assertEquals(9, termOffsets[1].getStartOffset());
assertEquals(13, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: offsets of a second field instance must continue from the
// end of the first instance's full value (including trailing spaces).
public void testEndOffsetPositionStandard() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// first instance ends with "the " -- trailing chars count toward offsets
Field f = new Field("field", "abcd the ", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
// term 0: "abcd"
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
// term 1: "crunch", offset continues past the 9-char first value
termOffsets = tpv.getOffsets(1);
assertEquals(11, termOffsets[0].getStartOffset());
assertEquals(17, termOffsets[0].getEndOffset());
// term 2: "man"
termOffsets = tpv.getOffsets(2);
assertEquals(18, termOffsets[0].getStartOffset());
assertEquals(21, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: an empty first field instance must not corrupt the offsets
// of tokens contributed by the following instance.
public void testEndOffsetPositionStandardEmptyField() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// empty first value produces no tokens
Field f = new Field("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
// "crunch" starts at 0: the empty first instance contributed nothing
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(6, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(7, termOffsets[0].getStartOffset());
assertEquals(10, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// LUCENE-1448: an empty field instance BETWEEN two non-empty instances
// must not corrupt the offsets of the tokens around it.
public void testEndOffsetPositionStandardEmptyField2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
// empty middle instance -- contributes no tokens
doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field f2 = new Field("field", "crunch", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
// term 0: "abcd"
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
// term 1: "crunch", directly after "abcd" plus the field boundary
termOffsets = tpv.getOffsets(1);
assertEquals(5, termOffsets[0].getStartOffset());
assertEquals(11, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
  final File indexDir = new File(TEMP_DIR, "otherfiles");
  final Directory dir = FSDirectory.open(indexDir);
  try {
    // Drop a non-index file into the directory before any writer opens it.
    final IndexOutput out = dir.createOutput("myrandomfile");
    out.writeByte((byte) 42);
    out.close();

    // Opening (and closing) a fresh writer must leave the foreign file alone.
    new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
    _TestUtil.rmDir(indexDir);
  }
}
// addIndexes with a reader and its clone must not deadlock and must add
// one document per reader.
public void testDeadlock() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
MockRAMDirectory dir2 = new MockRAMDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
// second reader is a clone of the first, over the same single-doc index
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(new IndexReader[] {r1, r2});
writer.close();
// 3 original docs + 1 from each of the two readers
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Thread that repeatedly builds and checks a small index while expecting
// to be interrupted by the main test thread at arbitrary points.  An
// interrupt must surface as ThreadInterruptedException (wrapping
// InterruptedException) and must leave the index intact.
private class IndexerThreadInterrupt extends Thread {
volatile boolean failed;
volatile boolean finish;
// main thread only interrupts while this is true
boolean allowInterrupt = false;
@Override
public void run() {
RAMDirectory dir = new RAMDirectory();
IndexWriter w = null;
boolean first = true;
while(!finish) {
try {
// inner loop runs until an interrupt (or failure) breaks out
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
//((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
if (!first && !allowInterrupt) {
// tell main thread it can interrupt us any time,
// starting now
allowInterrupt = true;
}
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<100;i++) {
w.addDocument(doc);
w.commit();
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
if (first && !allowInterrupt) {
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
first = false;
}
}
} catch (ThreadInterruptedException re) {
// the expected way an interrupt surfaces from IndexWriter
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
// Make sure IW cleared the interrupted bit
// TODO: remove that false once test is fixed for real
if (false && interrupted()) {
System.out.println("FAILED; InterruptedException hit but thread.interrupted() was true");
e.printStackTrace(System.out);
failed = true;
break;
}
} catch (Throwable t) {
// anything other than ThreadInterruptedException is a real failure
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
// after the interrupt storm the index must still be healthy and openable
if (!failed) {
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
}
}
// Drives IndexerThreadInterrupt: fires 100 interrupts at the indexing
// thread, then asks it to finish and asserts it saw no failures.
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(1);
// only interrupt once the child has signalled it is ready
if (t.allowInterrupt) {
i++;
t.allowInterrupt = false;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.allowInterrupt = false;
// ask the child to stop, waking it with one final interrupt
t.finish = true;
t.interrupt();
t.join();
assertFalse(t.failed);
}
// Binary stored fields that are ALSO indexed via an explicit token
// stream: verify stored bytes and indexed terms across flush and merge.
public void testIndexStoreCombos() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
// stored binary slice [10, 10+17), indexed with a custom token stream
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
// BUGFIX: was assertEquals(17, b.length, 17), which resolves to the
// (expected, actual, delta) double overload with delta=17 -- it would
// accept any length from 0 to 34.  Assert the exact stored length.
assertEquals(17, b.length);
assertEquals(87, b[0]); // first stored byte is the original b[10] == 10+77
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// LUCENE-1727: stored fields must come back in the order they were
// added, including repeated field names.
public void testStoredFieldsOrder() throws Throwable {
Directory d = new MockRAMDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
// FIX: assertEquals takes (expected, actual); the original passed the
// arguments reversed, which yields misleading failure messages.
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals("zzz", f.name());
assertEquals("a b c", f.stringValue());
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals("aaa", f.name());
assertEquals("a b c", f.stringValue());
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals("zzz", f.name());
assertEquals("1 2 3", f.stringValue());
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// A term containing an embedded U+FFFF must round-trip through the index.
public void testEmbeddedFFFF() throws Throwable {
  final Directory dir = new MockRAMDirectory();
  final IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  final Document first = new Document();
  first.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(first);

  final Document second = new Document();
  second.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(second);

  final IndexReader reader = writer.getReader();
  // exactly one document contains the term with the embedded U+FFFF
  assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
  reader.close();
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// An index holding a single empty document (no compound files) must
// still pass CheckIndex.
public void testNoDocsIndex() throws Throwable {
  final Directory dir = new MockRAMDirectory();
  final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  final LogMergePolicy mergePolicy = (LogMergePolicy) writer.getConfig().getMergePolicy();
  mergePolicy.setUseCompoundFile(false);

  // Swallow the info-stream output; we only care that indexing succeeds.
  final ByteArrayOutputStream sink = new ByteArrayOutputStream(1024);
  writer.setInfoStream(new PrintStream(sink));

  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
// LUCENE-2095: commit() must not return before all changes made so far
// are visible; each thread verifies its own docs via reopened readers.
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = new MockRAMDirectory();
final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// each thread writes unique terms: "<threadIdx>_<counter>"
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// after commit, a reopened reader MUST see the new doc
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
// flag failure so other threads stop; rethrow for visibility
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
w.close();
dir.close();
assertFalse(failed.get());
}
// both start & end are inclusive
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  final int span = 1 + end - start; // number of admissible values
  return start + r.nextInt(span);
}
// Verifies the "f" terms of reader r are strictly increasing in binary
// (UTF-8 byte) order, are all members of allTerms, and (for the top-level
// reader) cover allTerms exactly; then verifies each seen term is seekable.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// strictly increasing: every term sorts after the previous one
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
// only the top-level reader is required to see every term
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a single UTF-16 code unit as "U+<hex>", e.g. 'a' -> "U+61".
private final String asUnicodeChar(char c) {
  final int codeUnit = c;
  return "U+" + Integer.toHexString(codeUnit);
}
// Describes a 1- or 2-code-unit term by its units, e.g. "U+61" or
// "U+d800,U+dc00" (used in assertion messages for surrogate tests).
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
// periodic commits produce multiple segments to check individually
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// With a terms-index divisor of 2, enumeration must still visit every
// term and find the single document for each.
public void testIndexDivisor() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(""+i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
// open the reader with terms-index divisor 2
IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
// every term occurs in exactly the one document we added
final DocsEnum docs = t.docs(null, null);
assertEquals(0, docs.nextDoc());
assertEquals(DocsEnum.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
// deleteUnusedFiles must not delete segment files still held open by a
// reader; once the reader is closed the files become deletable.  Run
// twice: iter 0 uses an NRT reader, iter 1 a normal directory reader.
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
// the live segment must never be deleted
assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed" -> "Unused"); left as-is
// because renaming would change the publicly visible test name.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = new MockRAMDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush hook.
private static class FlushCountingIndexWriter extends IndexWriter {
// incremented on each doAfterFlush callback
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
// NOTE(review): no @Override annotation -- confirm this signature
// actually matches IndexWriter's flush hook, otherwise flushCount
// would never be incremented.
public void doAfterFlush() {
flushCount++;
}
}
// nocommit - TODO: enable when flushing by RAM is implemented
// Disabled (leading underscore) until flush-by-RAM is implemented: adds
// or deletes until a flush triggers, asserting flushes are not premature.
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
// randomly alternate between indexing and deleting phases
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
// with a 0.5 MB buffer, a flush before 2500 ops would be premature
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = new RAMDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  try {
    IndexReader.listCommits(dir);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException e) {
    // expected: nothing has been committed yet
  }
  // Closing the writer on a brand-new index does produce a commit.
  iw.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  File path = new File(TEMP_DIR, "emptyFSDirNoLock");
  FSDirectory dir = FSDirectory.open(path, NoLockFactory.getNoLockFactory());
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  iw.close();
}
public void testEmptyDirRollback() throws Exception {
  // Tests that if IW is created over an empty Directory, some documents are
  // indexed, flushed (but not committed) and then IW rolls back, then no
  // files are left in the Directory.
  Directory dir = new MockRAMDirectory();
  IndexWriter iw = new IndexWriter(dir,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  // Creating over empty dir should not create any files.
  assertEquals(0, dir.listAll().length);

  // create as many files as possible
  Document d = new Document();
  d.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  iw.addDocument(d);
  // Adding just one document does not call flush yet.
  assertEquals("only the stored and term vector files should exist in the directory", 5, dir.listAll().length);

  d = new Document();
  d.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  iw.addDocument(d);
  // The second document should cause a flush.
  assertTrue("flush should have occurred and files created", dir.listAll().length > 0);

  // After rollback, IW should remove all files
  iw.rollback();
  assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);

  // Since we rolled-back above, that close should be a no-op
  iw.close();
  assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
}
public void testNoSegmentFile() throws IOException {
  // With locking disabled, a second writer opened with OpenMode.CREATE must
  // succeed even though the first writer has only buffered (unflushed) docs
  // and no segments file exists yet.
  File tempDir = _TestUtil.getTempDir("noSegmentFile");
  try {
    Directory dir = FSDirectory.open(tempDir);
    dir.setLockFactory(NoLockFactory.getNoLockFactory());
    IndexWriter first = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
    Document d = new Document();
    d.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
    first.addDocument(d);
    first.addDocument(d);
    IndexWriter second = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
            .setOpenMode(OpenMode.CREATE));
    second.close();
    // If we don't do that, the test fails on Windows
    first.rollback();
    dir.close();
  } finally {
    _TestUtil.rmDir(tempDir);
  }
}
// Verifies that with NoDeletionPolicy an IndexWriter opened on an older
// commit (via setIndexCommit) can commit without destroying later commits,
// and that a writeable IndexReader can commit as well.  Each commit is
// tagged through its userData map so it can be found again below.
public void testFutureCommit() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
// Writer forked from the "first" commit must see only that one doc.
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// JUnit 3-style constructor: forwards the test name to LuceneTestCase.
public TestIndexWriter(String name) {
super(name);
}
// Exercises maxDoc()/numDocs() across adds, deletes, optimize and
// re-create, and checks the static default write-lock timeout can be
// changed and restored.
public void testDocCount() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily change the global default write lock timeout; restore it
// in the finally block so other tests are unaffected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// After optimize, deleted docs are purged, so maxDoc == numDocs.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
}
// Indexes a single unstored document containing the term content:aaa.
private static void addDoc(IndexWriter writer) throws IOException
{
  Document d = new Document();
  d.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Indexes one stored document whose content and id fields embed the
// given index number, so individual docs can be found later.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
public void testAddIndexOnDiskFull() throws IOException
{
  int START_COUNT = 57;
  int NUM_DIR = 50;
  int END_COUNT = START_COUNT + NUM_DIR*25;
  // Build up a bunch of dirs that have indexes which we
  // will then merge together by calling addIndexes(*):
  Directory[] dirs = new Directory[NUM_DIR];
  long inputDiskUsage = 0;
  for(int i=0;i<NUM_DIR;i++) {
    dirs[i] = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    for(int j=0;j<25;j++) {
      addDocWithIndex(writer, 25*i+j);
    }
    writer.close();
    String[] files = dirs[i].listAll();
    for(int j=0;j<files.length;j++) {
      inputDiskUsage += dirs[i].fileLength(files[j]);
    }
  }
  // Now, build a starting index that has START_COUNT docs. We
  // will then try to addIndexesNoOptimize into a copy of this:
  RAMDirectory startDir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  for(int j=0;j<START_COUNT;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  // Make sure starting index seems to be working properly:
  Term searchTerm = new Term("content", "aaa");
  IndexReader reader = IndexReader.open(startDir, true);
  assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
  IndexSearcher searcher = new IndexSearcher(reader);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 57, hits.length);
  searcher.close();
  reader.close();
  // Iterate with larger and larger amounts of free
  // disk space. With little free disk space,
  // addIndexes will certainly run out of space &
  // fail. Verify that when this happens, index is
  // not corrupt and index in fact has added no
  // documents. Then, we increase disk space by 2000
  // bytes each iteration. At some point there is
  // enough free disk space and addIndexes should
  // succeed and index should show all documents were
  // added.
  long diskUsage = startDir.sizeInBytes();
  long startDiskUsage = 0;
  String[] files = startDir.listAll();
  for(int i=0;i<files.length;i++) {
    startDiskUsage += startDir.fileLength(files[i]);
  }
  for(int iter=0;iter<3;iter++) {
    if (VERBOSE)
      System.out.println("TEST: iter=" + iter);
    // Start with 100 bytes more than we are currently using:
    long diskFree = diskUsage+100;
    int method = iter;
    boolean success = false;
    boolean done = false;
    String methodName;
    if (0 == method) {
      methodName = "addIndexes(Directory[]) + optimize()";
    } else if (1 == method) {
      methodName = "addIndexes(IndexReader[])";
    } else {
      methodName = "addIndexes(Directory[])";
    }
    while(!done) {
      // Make a new dir that will enforce disk usage:
      MockRAMDirectory dir = new MockRAMDirectory(startDir);
      writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      IOException err = null;
      MergeScheduler ms = writer.getConfig().getMergeScheduler();
      for(int x=0;x<2;x++) {
        if (ms instanceof ConcurrentMergeScheduler)
          // This test intentionally produces exceptions
          // in the threads that CMS launches; we don't
          // want to pollute test output with these.
          if (0 == x)
            ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
          else
            ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
        // Two loops: first time, limit disk space &
        // throw random IOExceptions; second time, no
        // disk space limit:
        double rate = 0.05;
        double diskRatio = ((double) diskFree)/diskUsage;
        long thisDiskFree;
        // FIX: testName used to be assigned only when VERBOSE, which made the
        // fail() messages below print "null" in quiet runs. Always build it.
        String testName;
        if (0 == x) {
          thisDiskFree = diskFree;
          if (diskRatio >= 2.0) {
            rate /= 2;
          }
          if (diskRatio >= 4.0) {
            rate /= 2;
          }
          if (diskRatio >= 6.0) {
            rate = 0.0;
          }
          testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
        } else {
          thisDiskFree = 0;
          rate = 0.0;
          testName = "disk full test " + methodName + " with unlimited disk space";
        }
        if (VERBOSE)
          System.out.println("\ncycle: " + testName);
        dir.setMaxSizeInBytes(thisDiskFree);
        dir.setRandomIOExceptionRate(rate, diskFree);
        try {
          if (0 == method) {
            writer.addIndexes(dirs);
            writer.optimize();
          } else if (1 == method) {
            IndexReader readers[] = new IndexReader[dirs.length];
            for(int i=0;i<dirs.length;i++) {
              readers[i] = IndexReader.open(dirs[i], true);
            }
            try {
              writer.addIndexes(readers);
            } finally {
              for(int i=0;i<dirs.length;i++) {
                readers[i].close();
              }
            }
          } else {
            writer.addIndexes(dirs);
          }
          success = true;
          if (VERBOSE) {
            System.out.println(" success!");
          }
          if (0 == x) {
            done = true;
          }
        } catch (IOException e) {
          success = false;
          err = e;
          if (VERBOSE) {
            System.out.println(" hit IOException: " + e);
            e.printStackTrace(System.out);
          }
          if (1 == x) {
            // Second pass has unlimited disk space; any IOException is a bug.
            e.printStackTrace(System.out);
            fail(methodName + " hit IOException after disk space was freed up");
          }
        }
        // Make sure all threads from
        // ConcurrentMergeScheduler are done
        _TestUtil.syncConcurrentMerges(writer);
        if (VERBOSE) {
          System.out.println(" now test readers");
        }
        // Finally, verify index is not corrupt, and, if
        // we succeeded, we see all docs added, and if we
        // failed, we see either all docs or no docs added
        // (transactional semantics):
        try {
          reader = IndexReader.open(dir, true);
        } catch (IOException e) {
          e.printStackTrace(System.out);
          fail(testName + ": exception when creating IndexReader: " + e);
        }
        int result = reader.docFreq(searchTerm);
        if (success) {
          if (result != START_COUNT) {
            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
          }
        } else {
          // On hitting exception we still may have added
          // all docs:
          if (result != START_COUNT && result != END_COUNT) {
            err.printStackTrace(System.out);
            fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
          }
        }
        searcher = new IndexSearcher(reader);
        try {
          hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
        } catch (IOException e) {
          e.printStackTrace(System.out);
          fail(testName + ": exception when searching: " + e);
        }
        int result2 = hits.length;
        if (success) {
          if (result2 != result) {
            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
          }
        } else {
          // On hitting exception we still may have added
          // all docs:
          if (result2 != result) {
            err.printStackTrace(System.out);
            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
          }
        }
        searcher.close();
        reader.close();
        if (VERBOSE) {
          System.out.println(" count is " + result);
        }
        if (done || result == END_COUNT) {
          break;
        }
      }
      if (VERBOSE) {
        System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
      }
      if (done) {
        // Javadocs state that temp free Directory space
        // required is at most 2X total input size of
        // indices so let's make sure:
        assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
            ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
            "starting disk usage = " + startDiskUsage + " bytes; " +
            "input index disk usage = " + inputDiskUsage + " bytes",
            (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
      }
      // Make sure we don't hit disk full during close below:
      dir.setMaxSizeInBytes(0);
      dir.setRandomIOExceptionRate(0.0, 0);
      writer.close();
      // Wait for all BG threads to finish else
      // dir.close() will throw IOException because
      // there are still open files
      _TestUtil.syncConcurrentMerges(ms);
      dir.close();
      // Try again with 5000 more bytes of free space:
      diskFree += 5000;
    }
  }
  startDir.close();
}
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
public void testAddDocumentOnDiskFull() throws IOException {
// Pass 0 closes the writer after the failure; pass 1 aborts via rollback().
for(int pass=0;pass<2;pass++) {
if (VERBOSE)
System.out.println("TEST: pass=" + pass);
boolean doAbort = pass == 1;
long diskFree = 200;
// Grow the disk budget by 500 bytes per cycle until 200 adds succeed.
while(true) {
if (VERBOSE)
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
MergeScheduler ms = writer.getConfig().getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
boolean hitError = false;
try {
for(int i=0;i<200;i++) {
addDoc(writer);
}
} catch (IOException e) {
// Expected while the simulated disk is too small.
if (VERBOSE) {
System.out.println("TEST: exception on addDoc");
e.printStackTrace(System.out);
}
hitError = true;
}
if (hitError) {
if (doAbort) {
writer.rollback();
} else {
try {
writer.close();
} catch (IOException e) {
if (VERBOSE) {
System.out.println("TEST: exception on close");
e.printStackTrace(System.out);
}
// close() itself hit disk-full; lift the limit and retry the close.
dir.setMaxSizeInBytes(0);
writer.close();
}
}
_TestUtil.syncConcurrentMerges(ms);
// Whether closed or aborted, no stray files may remain.
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
// Make sure reader can open the index:
IndexReader.open(dir, true).close();
dir.close();
// Now try again w/ more space:
diskFree += 500;
} else {
_TestUtil.syncConcurrentMerges(writer);
dir.close();
break;
}
}
}
}
// Asserts that running IndexFileDeleter over the last commit removes
// nothing: every file in the directory is referenced by the index.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  String[] before = dir.listAll();
  SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  // Instantiating the deleter prunes anything unreferenced as a side effect.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null, CodecProvider.getDefault());
  String[] after = dir.listAll();
  Arrays.sort(before);
  Arrays.sort(after);
  if (!Arrays.equals(before, after)) {
    fail(message + ": before delete:\n " + arrayToString(before) + "\n after delete:\n " + arrayToString(after));
  }
}
// optimize(3) must leave at most 3 segments, or leave the count unchanged
// when the index already has fewer, across a range of doc counts.
public void testOptimizeMaxNumSegments() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Record the segment count before the partial optimize.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// Already below the target: unchanged. Otherwise exactly 3 remain.
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
}
// Same as testOptimizeMaxNumSegments but with a single long-lived writer
// and optimize(7), syncing the ConcurrentMergeScheduler before each
// SegmentInfos read so background merges cannot skew the counts.
public void testOptimizeMaxNumSegments2() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Wait for background merges before counting segments.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
sis = new SegmentInfos();
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
sis.read(dir);
final int optSegCount = sis.size();
// Already below the target: unchanged. Otherwise exactly 7 remain.
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
public void testOptimizeTempSpaceUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Measure the on-disk size of the starting index.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
}
// Reset the high-water mark so only the optimize below is measured.
dir.resetMaxUsedSizeInBytes();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2*startDiskUsage) + " (= 2X starting usage)",
maxDiskUsage <= 2*startDiskUsage);
dir.close();
}
/**
 * Joins the given strings with the separator used by the
 * assertNoUnreferencedFiles failure message.
 *
 * @param l strings to join; an empty array yields "".
 * @return the joined string.
 */
static String arrayToString(String[] l) {
  // StringBuilder avoids the O(n^2) cost of += on String in a loop.
  StringBuilder sb = new StringBuilder();
  for(int i=0;i<l.length;i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
  File indexDir = _TestUtil.getTempDir("lucenetestindexwriter");
  try {
    Directory dir = FSDirectory.open(indexDir);
    // Write a single document, then hold a reader open on it.
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    addDoc(iw);
    iw.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals("should be one document", reader.numDocs(), 1);
    // Re-create the index while the reader is still open.
    iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
    assertEquals("should be zero documents", iw.maxDoc(), 0);
    addDoc(iw);
    iw.close();
    // The old reader keeps its point-in-time view; a new reader sees one doc.
    assertEquals("should be one document", reader.numDocs(), 1);
    IndexReader reader2 = IndexReader.open(dir, true);
    assertEquals("should be one document", reader2.numDocs(), 1);
    reader.close();
    reader2.close();
  } finally {
    rmDir(indexDir);
  }
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Copy all but the final byte to produce a truncated segments_N.
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// Open must fall back to the previous, intact segments file.
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
// Write a truncated next-generation segments file (all but last byte).
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
// Unlike testSimulatedCrashedWriter, the intact older segments file is
// removed, so there is nothing valid to fall back to.
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: the only segments file is truncated
}
if (reader != null) {
reader.close();
}
}
// Adding a document after close() must throw AlreadyClosedException.
public void testChangesAfterClose() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(iw);
  iw.close();
  try {
    addDoc(iw);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException e) {
    // expected
  }
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
public void testSimulatedCorruptIndex2() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Delete the first compound file we find; the segments file still
// references it, so the index is now corrupt.
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
if (files[i].endsWith(".cfs")) {
dir.deleteFile(files[i]);
break;
}
}
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
// expected: a referenced .cfs file is missing
}
if (reader != null) {
reader.close();
}
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
public void testCommitOnClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Add 33 more docs in 3 batches; without a commit no searcher or the
// open reader may observe them.
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
// After close (an implicit commit), all 14 + 33 docs are visible.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
public void testCommitOnCloseAbort() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // Baseline: 14 committed docs matching "content:aaa"
  for (int i = 0; i < 14; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  searcher.close();
  // Reopen for append; add docs and delete everything — all uncommitted:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  for (int j = 0; j < 17; j++) {
    addDoc(writer);
  }
  // Delete all docs:
  writer.deleteDocuments(searchTerm);
  // New searcher must still see only the 14 committed docs:
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
  searcher.close();
  // Now, close the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
  // Rollback must have discarded both the adds and the deletes:
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("saw changes after writer.abort", 14, hits.length);
  searcher.close();
  // Now make sure we can re-open the index, add docs,
  // and all is good:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  // On abort, writer in fact may write to the same
  // segments_N file:
  dir.setPreventDoubleWrite(false);
  for (int i = 0; i < 12; i++) {
    for (int j = 0; j < 17; j++) {
      addDoc(writer);
    }
    // Still uncommitted, so searchers keep seeing the original 14:
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
  }
  writer.close();
  // After close (commit): 14 + 12*17 = 218 docs visible
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("didn't see changes after close", 218, hits.length);
  searcher.close();
  dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockRAMDirectory to
* measure max temp disk space used.
*/
public void testCommitOnCloseDiskUsage() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Build a small starting index to measure baseline disk usage:
  for (int j = 0; j < 30; j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  dir.resetMaxUsedSizeInBytes();
  long startDiskUsage = dir.getMaxUsedSizeInBytes();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10).setMergeScheduler(
          new SerialMergeScheduler()));
  for (int j = 0; j < 1470; j++) {
    addDocWithIndex(writer, j);
  }
  long midDiskUsage = dir.getMaxUsedSizeInBytes();
  dir.resetMaxUsedSizeInBytes();
  writer.optimize();
  writer.close();
  IndexReader.open(dir, true).close();
  long endDiskUsage = dir.getMaxUsedSizeInBytes();
  // Ending index is 50X as large as starting index; due
  // to 2X disk usage normally we allow 100X max
  // transient usage. If something is wrong w/ deleter
  // and it doesn't delete intermediate segments then it
  // will exceed this 100X:
  // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
  assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
      midDiskUsage < 100*startDiskUsage);
  assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
      endDiskUsage < 100*startDiskUsage);
  // Was leaked before:
  dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
public void testCommitOnCloseOptimize() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // 17 docs with maxBufferedDocs=10 => more than one segment:
  for (int j = 0; j < 17; j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.optimize();
  // Open a reader before closing (commiting) the writer:
  IndexReader reader = IndexReader.open(dir, true);
  // Reader should see index as unoptimized at this
  // point, because the optimize is not committed yet:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  // Abort the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized,
  // since rollback() discarded the optimize:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  // Optimize again, but this time commit via close():
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.optimize();
  writer.close();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after the writer committed on close:
  reader = IndexReader.open(dir, true);
  // Reader should now see the index as optimized:
  assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
  reader.close();
}
// An index committed with zero documents must open cleanly and report
// maxDoc == numDocs == 0, both after CREATE and after APPEND.
public void testIndexNoDocuments() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.commit();
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.commit();
  writer.close();
  reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();
  // Was leaked before:
  dir.close();
}
// Index 100 docs, each with six uniquely-named fields; verify every
// (field, term) pair has docFreq == 1.
public void testManyFields() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // a/b/c carry the doc number in their value; d/e/f share the value "aaa".
  final String[] numberedValueFields = {"a", "b", "c"};
  final String[] sharedValueFields = {"d", "e", "f"};
  for (int docNum = 0; docNum < 100; docNum++) {
    Document doc = new Document();
    for (String prefix : numberedValueFields) {
      doc.add(new Field(prefix + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    }
    for (String prefix : sharedValueFields) {
      doc.add(new Field(prefix + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    }
    writer.addDocument(doc);
  }
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for (int docNum = 0; docNum < 100; docNum++) {
    for (String prefix : numberedValueFields) {
      assertEquals(1, reader.docFreq(new Term(prefix + docNum, "aaa" + docNum)));
    }
    for (String prefix : sharedValueFields) {
      assertEquals(1, reader.docFreq(new Term(prefix + docNum, "aaa")));
    }
  }
  reader.close();
  dir.close();
}
// With a microscopic RAM buffer every added document must force a flush,
// which we observe as the directory's file count growing per doc.
public void testSmallRAMBuffer() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.000001));
  int prevFileCount = dir.listAll().length;
  for (int docNum = 0; docNum < 9; docNum++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    // Verify that with a tiny RAM buffer we see a new segment after
    // every single doc:
    final int fileCount = dir.listAll().length;
    assertTrue(fileCount > prevFileCount);
    prevFileCount = fileCount;
  }
  writer.close();
  dir.close();
}
/**
 * Make sure it's OK to change RAM buffer size and maxBufferedDocs in a
 * write session.
 *
 * @deprecated after all the setters on IW go away (4.0), this test can be
 *             removed because changing ram buffer settings during a write
 *             session won't be possible.
 */
public void testChangingRAMBuffer() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  // Start with flush-by-doc-count (10 docs); RAM trigger disabled:
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setRAMBufferSizeMB(
      IndexWriterConfig.DISABLE_AUTO_FLUSH));
  int lastFlushCount = -1;
  for (int j = 1; j < 52; j++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    _TestUtil.syncConcurrentMerges(writer);
    int flushCount = writer.getFlushCount();
    if (j == 1)
      lastFlushCount = flushCount;
    else if (j < 10)
      // No new files should be created
      assertEquals(flushCount, lastFlushCount);
    else if (10 == j) {
      // 10th doc hit the doc-count trigger; now switch to a tiny RAM
      // buffer so every subsequent doc flushes:
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
      writer.setRAMBufferSizeMB(0.000001);
      writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    } else if (j < 20) {
      // Tiny RAM buffer: a flush on every doc
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
    } else if (20 == j) {
      // Big RAM buffer, no doc-count trigger: expect no flushes
      writer.setRAMBufferSizeMB(16);
      writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      lastFlushCount = flushCount;
    } else if (j < 30) {
      assertEquals(flushCount, lastFlushCount);
    } else if (30 == j) {
      // Back to tiny RAM buffer: flush on every doc again
      writer.setRAMBufferSizeMB(0.000001);
      writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    } else if (j < 40) {
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
    } else if (40 == j) {
      // Doc-count trigger (10) again, RAM trigger disabled:
      writer.setMaxBufferedDocs(10);
      writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      lastFlushCount = flushCount;
    } else if (j < 50) {
      assertEquals(flushCount, lastFlushCount);
      writer.setMaxBufferedDocs(10);
      writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    } else if (50 == j) {
      // j==50 is the 10th doc since the flush at j==40:
      assertTrue(flushCount > lastFlushCount);
    }
  }
  writer.close();
  dir.close();
}
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
public void testChangingRAMBuffer2() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  // Same state machine as testChangingRAMBuffer, but driven by buffered
  // delete terms instead of added documents.
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
      10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // Seed 51 docs so there is something to delete:
  for (int j = 1; j < 52; j++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }
  int lastFlushCount = -1;
  for (int j = 1; j < 52; j++) {
    writer.deleteDocuments(new Term("field", "aaa" + j));
    _TestUtil.syncConcurrentMerges(writer);
    int flushCount = writer.getFlushCount();
    if (j == 1)
      lastFlushCount = flushCount;
    else if (j < 10) {
      // No new files should be created
      assertEquals(flushCount, lastFlushCount);
    } else if (10 == j) {
      // 10th delete term hit the trigger; switch to flushing on every
      // delete (tiny RAM buffer + maxBufferedDeleteTerms=1):
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
      writer.setRAMBufferSizeMB(0.000001);
      writer.setMaxBufferedDeleteTerms(1);
    } else if (j < 20) {
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
    } else if (20 == j) {
      // Big RAM buffer, delete-term trigger disabled: expect no flushes
      writer.setRAMBufferSizeMB(16);
      writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      lastFlushCount = flushCount;
    } else if (j < 30) {
      assertEquals(flushCount, lastFlushCount);
    } else if (30 == j) {
      writer.setRAMBufferSizeMB(0.000001);
      // NOTE: the second call supersedes the first — the effective
      // setting here is maxBufferedDeleteTerms=1:
      writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      writer.setMaxBufferedDeleteTerms(1);
    } else if (j < 40) {
      assertTrue(flushCount > lastFlushCount);
      lastFlushCount = flushCount;
    } else if (40 == j) {
      // Delete-term trigger (10), RAM trigger disabled:
      writer.setMaxBufferedDeleteTerms(10);
      writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      lastFlushCount = flushCount;
    } else if (j < 50) {
      assertEquals(flushCount, lastFlushCount);
      writer.setMaxBufferedDeleteTerms(10);
      writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    } else if (50 == j) {
      // j==50 is the 10th delete since the flush at j==40:
      assertTrue(flushCount > lastFlushCount);
    }
  }
  writer.close();
  dir.close();
}
// Stress the indexer's internal memory pools with three very different
// document shapes, under a small (0.5 MB) RAM buffer.
public void testDiverseDocs() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  Random rand = newRandom();
  for (int i = 0; i < 3; i++) {
    // First, docs where every term is unique (heavy on
    // Posting instances)
    for (int j = 0; j < 100; j++) {
      Document doc = new Document();
      for (int k = 0; k < 100; k++) {
        doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
      }
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs (heavy on byte blocks)
    for (int j = 0; j < 100; j++) {
      Document doc = new Document();
      doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs but the terms are very long (heavy on
    // char[] arrays)
    for (int j = 0; j < 100; j++) {
      StringBuilder b = new StringBuilder();
      String x = Integer.toString(j) + ".";
      for (int k = 0; k < 1000; k++)
        b.append(x);
      String longTerm = b.toString();
      Document doc = new Document();
      doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
  }
  writer.close();
  // 3 outer iterations x 100 docs containing "aaa" => 300 hits:
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
  assertEquals(300, hits.length);
  searcher.close();
  dir.close();
}
// Verify that enabling norms on just one document (all others omit
// norms) works both before and after a flush boundary.
public void testEnablingNorms() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // Enable norms for only 1 doc, pre flush
  for (int j = 0; j < 10; j++) {
    Document doc = new Document();
    Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (j != 8) {
      // all docs except #8 omit norms
      f.setOmitNorms(true);
    }
    doc.add(f);
    writer.addDocument(doc);
  }
  writer.close();
  Term searchTerm = new Term("field", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(10, hits.length);
  searcher.close();
  // Recreate the index; with maxBufferedDocs=10 and 27 docs, the
  // norms-enabled doc (#26) lands after a flush has already happened:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
  // Enable norms for only 1 doc, post flush
  for (int j = 0; j < 27; j++) {
    Document doc = new Document();
    Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (j != 26) {
      f.setOmitNorms(true);
    }
    doc.add(f);
    writer.addDocument(doc);
  }
  writer.close();
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(27, hits.length);
  searcher.close();
  IndexReader reader = IndexReader.open(dir, true);
  reader.close();
  dir.close();
}
// A single doc containing one term 128K times must index correctly
// under a tiny (0.01 MB) RAM buffer and report the right frequency.
public void testHighFreqTerm() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
  // Massive doc that has 128 K a's
  StringBuilder b = new StringBuilder(1024*1024);
  for (int i = 0; i < 4096; i++) {
    // 4 x 8 = 32 a's per iteration, 4096 iterations = 131072 total
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
  }
  Document doc = new Document();
  doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.maxDoc());
  assertEquals(1, reader.numDocs());
  Term t = new Term("field", "a");
  assertEquals(1, reader.docFreq(t));
  DocsEnum td = MultiFields.getTermDocsEnum(reader,
      MultiFields.getDeletedDocs(reader),
      "field",
      new BytesRef("a"));
  td.nextDoc();
  // within-document frequency of "a" must be exactly 128K:
  assertEquals(128*1024, td.freq());
  reader.close();
  dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
  // Directory with a null public LockFactory but working private locking:
  final class MyRAMDirectory extends RAMDirectory {
    private LockFactory myLockFactory;
    MyRAMDirectory() {
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }
  Directory dir = new MyRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  // Was leaked before (and writer.close() was redundantly called twice):
  searcher.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  dir.close();
}
public void testFlushWithNoMerging() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  for (int i = 0; i < 19; i++)
    writer.addDocument(doc);
  // Flush without triggering merges:
  writer.flush(false, true);
  writer.close();
  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now
  // have 10 segments. Use a JUnit assertion, not the `assert`
  // keyword, so the check runs even when -ea is not enabled:
  assertEquals(10, sis.size());
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Make sure we can flush a segment w/ norms, then add an
// empty doc (no norms) and flush again without corruption.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  // Both were leaked before:
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (though
// writer.close() does wait)
public void testBackgroundOptimize() throws IOException {
  Directory dir = new MockRAMDirectory();
  // pass 0: close() waits for the background optimize to finish.
  // pass 1: extra docs added after optimize(false) must NOT be merged in.
  for (int pass = 0; pass < 2; pass++) {
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
    // Huge merge factor so the 100 flushed segments aren't merged early:
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
    Document doc = new Document();
    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for (int i = 0; i < 200; i++)
      writer.addDocument(doc);
    // Kick off optimize but don't wait for it:
    writer.optimize(false);
    if (0 == pass) {
      writer.close();
      IndexReader reader = IndexReader.open(dir, true);
      assertTrue(reader.isOptimized());
      reader.close();
    } else {
      // Get another segment to flush so we can verify it is
      // NOT included in the optimization
      writer.addDocument(doc);
      writer.addDocument(doc);
      writer.close();
      IndexReader reader = IndexReader.open(dir, true);
      assertTrue(!reader.isOptimized());
      reader.close();
      // Expect exactly: the optimized segment + the late flush:
      SegmentInfos infos = new SegmentInfos();
      infos.read(dir);
      assertEquals(2, infos.size());
    }
  }
  dir.close();
}
/**
 * Deletes every file directly inside {@code dir}, then removes the
 * directory itself. Non-recursive; deletion failures are ignored.
 */
private void rmDir(File dir) {
  File[] entries = dir.listFiles();
  if (entries != null) {
    for (File entry : entries) {
      entry.delete();
    }
  }
  dir.delete();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
// Adding one document whose single field is empty but has term vectors
// enabled must not raise a NullPointerException.
public void testBadSegment() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document emptyFieldDoc = new Document();
  emptyFieldDoc.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(emptyFieldDoc);
  writer.close();
  dir.close();
}
// LUCENE-1008
public void testNoTermVectorAfterTermVector() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  iw.addDocument(document);
  // Fresh Document: second doc has the field WITHOUT term vectors
  document = new Document();
  document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.NO));
  iw.addDocument(document);
  // Make first segment
  iw.commit();
  // NOTE: the same Document instance is reused, so this doc carries BOTH
  // the TermVector.NO and the TermVector.YES field instances:
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  iw.addDocument(document);
  // Make 2nd segment
  iw.commit();
  // Merging the mixed-term-vector segments must not fail (LUCENE-1008):
  iw.optimize();
  iw.close();
  dir.close();
}
// LUCENE-1010
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  iw.addDocument(document);
  iw.commit();
  // Fresh Document: this doc has the field WITHOUT term vectors
  document = new Document();
  document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.NO));
  iw.addDocument(document);
  // Make first segment
  iw.commit();
  // Optimize while segments disagree about term vectors:
  iw.optimize();
  // NOTE: same Document instance reused — now carries both the NO and
  // YES term-vector field instances:
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  iw.addDocument(document);
  // Make 2nd segment
  iw.commit();
  // Second optimize after the mixed doc must also succeed (LUCENE-1010):
  iw.optimize();
  iw.close();
  dir.close();
}
// LUCENE-1036
// Indexing (and the merges forced by mergeFactor=2) must work even when
// the calling thread runs at MAX_PRIORITY (LUCENE-1036).
public void testMaxThreadPriority() throws IOException {
  // remember the original priority so it can be restored
  int pri = Thread.currentThread().getPriority();
  try {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMaxBufferedDocs(2);
    // aggressive merging: every 2 segments get merged
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter iw = new IndexWriter(dir, conf);
    Document document = new Document();
    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
        Field.TermVector.YES));
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    for (int i = 0; i < 4; i++)
      iw.addDocument(document);
    iw.close();
    // NOTE(review): dir is not closed here — presumably harmless for a
    // MockRAMDirectory, but worth confirming
  } finally {
    // always restore the thread's original priority
    Thread.currentThread().setPriority(pri);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
  @Override
  synchronized public void merge(IndexWriter writer)
    throws CorruptIndexException, IOException {
    // Drain and run pending merges one at a time:
    while (true) {
      MergePolicy.OneMerge merge = writer.getNextMerge();
      if (merge == null)
        break;
      for (int i = 0; i < merge.segments.size(); i++)
        // JUnit assertion instead of the `assert` keyword so the check
        // also fires when assertions (-ea) are disabled:
        assertTrue(merge.segments.info(i).docCount < 20);
      writer.merge(merge);
    }
  }
  @Override
  public void close() {}
}
// LUCENE-1013
// LUCENE-1013: maxMergeDocs must be honored — MyMergeScheduler asserts
// that no merged segment ever reaches 20 docs.
public void testSetMaxMergeDocs() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
  lmp.setMaxMergeDocs(20);
  lmp.setMergeFactor(2);
  IndexWriter iw = new IndexWriter(dir, conf);
  Document document = new Document();
  document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  for (int i = 0; i < 177; i++)
    iw.addDocument(document);
  iw.close();
  // Was leaked before:
  dir.close();
}
// LUCENE-1072
// LUCENE-1072: an IOException thrown from the token stream must abort
// only the offending document; the writer stays usable and the bad doc
// is marked deleted.
public void testExceptionFromTokenStream() throws IOException {
  RAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      // Filter that throws after emitting 5 tokens:
      return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
        private int count = 0;
        @Override
        public boolean incrementToken() throws IOException {
          if (count++ == 5) {
            throw new IOException();
          }
          return input.incrementToken();
        }
      };
    }
  });
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  String contents = "aa bb cc dd ee ff gg hh ii jj kk";
  doc.add(new Field("content", contents, Field.Store.NO,
      Field.Index.ANALYZED));
  try {
    writer.addDocument(doc);
    fail("did not hit expected exception");
  } catch (IOException e) {
    // expected: was `catch (Exception)`, narrowed to the IOException the
    // token stream above actually throws
  }
  // Make sure we can add another normal document
  doc = new Document();
  doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
      Field.Index.ANALYZED));
  writer.addDocument(doc);
  // Make sure we can add another normal document
  doc = new Document();
  doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
      Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  final Term t = new Term("content", "aa");
  assertEquals(reader.docFreq(t), 3);
  // Make sure the doc that hit the exception was marked
  // as deleted:
  DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
      MultiFields.getDeletedDocs(reader),
      t.field(),
      new BytesRef(t.text()));
  int count = 0;
  while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
    count++;
  }
  assertEquals(2, count);
  // "gg" came after the exception point, so it was never indexed:
  assertEquals(reader.docFreq(new Term("content", "gg")), 0);
  reader.close();
  dir.close();
}
// MockRAMDirectory failure hook that throws an IOException during a
// segment flush — but only once, and only after 30 qualifying calls.
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
  boolean doFail = false;
  // number of times we saw a flush-related call before failing
  int count;
  @Override
  public void setDoFail() {
    this.doFail = true;
  }
  @Override
  public void clearDoFail() {
    this.doFail = false;
  }
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (doFail) {
      // Inspect the current stack to decide whether this directory
      // operation is happening inside a segment flush:
      StackTraceElement[] trace = new Exception().getStackTrace();
      boolean sawAppend = false;
      boolean sawFlush = false;
      for (int i = 0; i < trace.length; i++) {
        if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
          sawAppend = true;
        if ("flushSegment".equals(trace[i].getMethodName()))
          sawFlush = true;
      }
      // Fail only once (doFail is cleared) and only after the 30th hit,
      // so some flushes succeed first:
      if (sawAppend && sawFlush && count++ >= 30) {
        doFail = false;
        throw new IOException("now failing during flush");
      }
    }
  }
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  FailOnlyOnFlush failure = new FailOnlyOnFlush();
  failure.setDoFail();
  dir.failOn(failure);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  String contents = "aa bb cc dd ee ff gg hh ii jj kk";
  doc.add(new Field("content", contents, Field.Store.NO,
      Field.Index.ANALYZED));
  boolean hitError = false;
  for (int i = 0; i < 200; i++) {
    try {
      writer.addDocument(doc);
    } catch (IOException ioe) {
      // only one flush should fail:
      assertFalse(hitError);
      hitError = true;
    }
  }
  assertTrue(hitError);
  writer.close();
  // One 2-doc flush was lost, so 198 of 200 docs survive:
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(198, reader.docFreq(new Term("content", "aa")));
  reader.close();
  // Was leaked before:
  dir.close();
}
/**
 * TokenFilter that passes tokens through unchanged, except that for the
 * field named "crash" it throws an IOException once 4 tokens have been
 * emitted. reset() re-arms the failure for the next use.
 */
private class CrashingFilter extends TokenFilter {
  String fieldName;
  int count;
  public CrashingFilter(String fieldName, TokenStream input) {
    super(input);
    this.fieldName = fieldName;
  }
  @Override
  public boolean incrementToken() throws IOException {
    // Only count tokens for the "crash" field; 5th request blows up.
    if (this.fieldName.equals("crash") && count++ >= 4) {
      throw new IOException("I'm experiencing problems");
    }
    return input.incrementToken();
  }
  @Override
  public void reset() throws IOException {
    super.reset();
    // re-arm for reuse on the next token stream
    count = 0;
  }
}
// A document whose "crash" field throws mid-analysis must be marked
// deleted, while all other docs survive; after optimize() the deleted
// doc is physically removed. Runs twice: with (i==0) and without (i==1)
// extra docs added after the crash.
public void testDocumentsWriterExceptions() throws IOException {
  Analyzer analyzer = new Analyzer() {
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      // throws IOException after 4 tokens of any "crash" field
      return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    }
  };
  for (int i = 0; i < 2; i++) {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    //writer.setInfoStream(System.out);
    Document doc = new Document();
    doc.add(new Field("contents", "here are some contents", Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
    writer.addDocument(doc);
    // Same Document instance gains the crashing field plus one more:
    doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    try {
      writer.addDocument(doc);
      fail("did not hit expected exception");
    } catch (IOException ioe) {
      // expected: CrashingFilter fired
    }
    if (0 == i) {
      // first pass only: prove the writer still works after the crash
      doc = new Document();
      doc.add(new Field("contents", "here are some contents", Field.Store.YES,
          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
      writer.addDocument(doc);
      writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    // i==0: 2 + crashed + 2 = 5 docs; i==1: 2 + crashed = 3 docs
    int expected = 3+(1-i)*2;
    assertEquals(expected, reader.docFreq(new Term("contents", "here")));
    assertEquals(expected, reader.maxDoc());
    int numDel = 0;
    for (int j = 0; j < reader.maxDoc(); j++) {
      if (reader.isDeleted(j))
        numDel++;
      else {
        // live docs must be fully readable:
        reader.document(j);
        reader.getTermFreqVectors(j);
      }
    }
    reader.close();
    // exactly the crashed doc is deleted:
    assertEquals(1, numDel);
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
        analyzer).setMaxBufferedDocs(10));
    doc = new Document();
    doc.add(new Field("contents", "here are some contents", Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for (int j = 0; j < 17; j++)
      writer.addDocument(doc);
    // optimize expunges the deleted doc:
    writer.optimize();
    writer.close();
    reader = IndexReader.open(dir, true);
    expected = 19+(1-i)*2;
    assertEquals(expected, reader.docFreq(new Term("contents", "here")));
    assertEquals(expected, reader.maxDoc());
    numDel = 0;
    for (int j = 0; j < reader.maxDoc(); j++) {
      if (reader.isDeleted(j))
        numDel++;
      else {
        reader.document(j);
        reader.getTermFreqVectors(j);
      }
    }
    reader.close();
    assertEquals(0, numDel);
    dir.close();
  }
}
// Stress test: NUM_THREAD threads each run NUM_ITER iterations that add two
// good docs, then a doc whose "crash" field makes the analyzer throw (aborting
// that doc), then — on pass i==0 only — two more good docs. The writer must
// stay usable throughout and the final doc counts must match exactly.
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
// Every field is wrapped in CrashingFilter, which throws while
// tokenizing the "crash" field.
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
// Adding the "crash" field makes the next addDocument abort.
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
IndexReader reader = IndexReader.open(dir, true);
// Per thread-iteration: 2 good docs + 1 aborted (deleted) slot, plus 2 more
// good docs when i==0 -> (3+(1-i)*2) entries in maxDoc.
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly one aborted (deleted) doc per thread-iteration.
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
// Optimize merges away the deleted slots; afterwards no deletions remain.
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Writes 20 passes of 4 identical docs whose field schema varies by pass
// (different stored/indexed field combinations; only an empty field at pass
// 7), deletes one doc per pass, and optimizes every 4th pass — exercising
// merges across segments with non-identical field schemas.
public void testVariableSchema() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
// Even passes store content1/content5 and add an extra content4 field;
// odd passes index content1/content5 unstored.
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per pass through a writable reader.
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
}
// Closes the writer with close(false) — abort pending work, don't wait —
// while a background thread keeps adding docs. The adder thread must
// terminate (via AlreadyClosedException or an NPE from the closed writer)
// and the index must remain readable and reopenable afterwards.
public void testNoWaitClose() throws Throwable {
  RAMDirectory directory = new MockRAMDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
        .setMaxBufferedDocs(2);
    if (pass == 2) {
      // NOTE(review): unreachable — the loop only runs pass = 0..1; most
      // likely "pass == 1" was intended so one pass exercises
      // SerialMergeScheduler. Confirm intent before changing.
      conf.setMergeScheduler(new SerialMergeScheduler());
    }
    IndexWriter writer = new IndexWriter(directory, conf);
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
    //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
    for (int iter = 0; iter < 10; iter++) {
      //System.out.println("TEST: iter=" + iter);
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // Expected once the main thread closes the writer.
                done = true;
                break;
              } catch (NullPointerException e) {
                // Tolerated: the closed writer may NPE mid-operation.
                done = true;
                break;
              } catch (Throwable e) {
                // Anything else is a genuine failure; record it for the
                // main thread to rethrow.
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Surface any unexpected exception from the adder thread. BUGFIX: the
      // original performed this check before t1.start(), when the list was
      // necessarily still empty, so failures were silently dropped.
      if (failure.size() > 0)
        throw failure.get(0);
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
    }
    writer.close();
  }
  directory.close();
}
// Used by test cases below
// Updates documents in a tight loop for ~200ms, tolerating up to 6 simulated
// disk-full IOExceptions; any other exception stops the thread and (when
// noErrors is set) is recorded in `error`.
private class IndexerThread extends Thread {
boolean diskFull; // set when a simulated disk-full/purpose-failure IOException was seen
Throwable error; // first unexpected exception (recorded only when noErrors)
AlreadyClosedException ace; // NOTE(review): never assigned or read in this class — apparently unused
IndexWriter writer;
boolean noErrors; // when true, unexpected exceptions are printed and stored in `error`
volatile int addCount; // number of successful updateDocument calls
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
// "now failing on purpose" is thrown by the Failure classes in this
// file; "fake disk full at" presumably comes from MockRAMDirectory's
// size limit — both are expected, injected failures.
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
// Give up after 6 injected failures.
if (fullCount++ >= 5)
break;
} else {
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid use of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Close while the indexer threads are still running.
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
// NOTE(review): join() without a timeout only returns once the thread is
// dead, so isAlive() can never be true here — this check is vacuous.
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
// Cap the directory at its current size so the very next write hits "disk full".
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// Allow slightly more room each iteration before "disk full" hits.
dir.setMaxSizeInBytes(4*1024+20*iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
  private final boolean onlyOnce;

  public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  // Fires when the current call stack contains an "abort" or "finishDocument"
  // frame; when onlyOnce is set, disarms itself after the first hit.
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      String method = frame.getMethodName();
      if ("abort".equals(method) || "finishDocument".equals(method)) {
        if (onlyOnce) {
          doFail = false;
        }
        //System.out.println(Thread.currentThread().getName() + ": now fail");
        //new Throwable().printStackTrace(System.out);
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
// Seed the index before arming the failure.
for(int i=0;i<6;i++)
writer.addDocument(doc);
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
}
failure.clearDoFail();
// The writer must still be usable after the injected failure.
writer.addDocument(doc);
writer.close(false);
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Let the indexer threads make some progress before arming the failure.
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable ", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
// close may legitimately fail while the failure is armed; disarm and retry.
failure.clearDoFail();
writer.close(false);
}
if (success) {
// When close succeeded, every surviving doc must be fully readable.
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbort() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false)); // failure fires every time
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true)); // failure fires only once
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false)); // failure fires every time
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true)); // failure fires only once
}
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
  private final boolean onlyOnce;

  public FailOnlyInCloseDocStore(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  // Fires only when called from DocFieldProcessor.finishDocument; when
  // onlyOnce is set, disarms itself after the first hit.
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("finishDocument".equals(frame.getMethodName())
          && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName())) {
        if (onlyOnce) {
          doFail = false;
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStore() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false)); // failure fires every time
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true)); // failure fires only once
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false)); // failure fires every time
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true)); // failure fires only once
}
// Throws IOException during DocumentsWriter.writeSegment
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
  private final boolean onlyOnce;

  public FailOnlyInWriteSegment(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  // Fires only when called from DocFieldProcessor.flush; when onlyOnce is
  // set, disarms itself after the first hit.
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("flush".equals(frame.getMethodName())
          && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName())) {
        if (onlyOnce) {
          doFail = false;
        }
        throw new IOException("now failing on purpose");
      }
    }
  }
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegment() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(false)); // failure fires every time
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(true)); // failure fires only once
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false)); // failure fires every time
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true)); // failure fires only once
}
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Build one field of 10,000 "a" tokens followed by a single trailing "x".
  StringBuilder contents = new StringBuilder();
  for (int token = 0; token < 10000; token++) {
    contents.append(" a");
  }
  contents.append(" x");
  Document doc = new Document();
  doc.add(new Field("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // The trailing "x" must have been indexed — i.e. no silent truncation.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
public void testSegmentsChecksumError() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Forge a corrupt segments_(N+1): copy segments_N but flip its last byte.
final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
IndexInput in = dir.openInput(segmentsFileName);
IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
out.copyBytes(in, in.length()-1);
byte b = in.readByte();
out.writeByte((byte) (1+b));
out.close();
in.close();
// Opening must fall back to the intact segments_N instead of failing on
// the corrupt newer generation.
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("segmentInfos failed to retry fallback to correct segments_N file");
}
reader.close();
}
// LUCENE-1044: test writer.commit() when ac=false
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened before commit sees none of the 23 added docs.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// After commit: the old reader is unchanged, the reopened one sees 23.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// Still 23 until the next commit.
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// Throws IOException during MockRAMDirectory.sync
private static class FailOnlyInSync extends MockRAMDirectory.Failure {
  boolean didFail;

  // Fires whenever the call stack shows MockRAMDirectory.sync; records the
  // hit in didFail so tests can assert the failure actually triggered.
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (!doFail) {
      return;
    }
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if (doFail
          && "org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName())
          && "sync".equals(frame.getMethodName())) {
        didFail = true;
        throw new IOException("now failing on purpose during sync");
      }
    }
  }
}
// LUCENE-1044: test exception during sync
public void testExceptionDuringSync() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
failure.setDoFail();
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
for (int i = 0; i < 23; i++) {
addDoc(writer);
// (i-1)%2 == 0 holds for odd i (Java's % yields -1 at i==0): commit on odd i.
if ((i-1)%2 == 0) {
try {
writer.commit();
} catch (IOException ioe) {
// expected
}
}
}
// Wait for background merges so the injected sync failure has had a chance to fire.
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(failure.didFail);
failure.clearDoFail();
writer.close();
// All 23 docs must survive despite the sync failures.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
dir.close();
}
// LUCENE-1168
// Mixes docs without term vectors and one doc with them, optimizes, and
// verifies every doc's stored fields and term vectors are still readable —
// then repeats the exercise via addIndexes from a copy of the same index.
public void testTermVectorCorruption() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
// Two docs with only a stored field (no term vectors)...
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
// ...then one doc that does carry term vectors.
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<reader.numDocs();i++) {
reader.document(i);
reader.getTermFreqVectors(i);
}
reader.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
// Merge in a copy of this same index.
Directory[] indexDirs = {new MockRAMDirectory(dir)};
writer.addIndexes(indexDirs);
writer.optimize();
writer.close();
}
dir.close();
}
// LUCENE-1168
// Like testTermVectorCorruption, but verifies which docs carry term vectors:
// the two stored-only docs must return null, the term-vector doc must not.
public void testTermVectorCorruption2() throws IOException {
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
new SerialMergeScheduler()).setMergePolicy(
new LogDocMergePolicy()));
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
writer.addDocument(document);
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// Docs 0 and 1 were indexed without term vectors; doc 2 with them.
assertTrue(reader.getTermFreqVectors(0)==null);
assertTrue(reader.getTermFreqVectors(1)==null);
assertTrue(reader.getTermFreqVectors(2)!=null);
reader.close();
}
dir.close();
}
// LUCENE-1168
// Writes term-vector docs in two separate writer sessions (forcing merges of
// segments from different sessions), optimizes, and verifies every doc's
// stored fields and term vectors are readable.
public void testTermVectorCorruption3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
          new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));
  // One shared document with a stored field and a term-vector field.
  // (The original allocated a Document and immediately overwrote it with a
  // second `new Document()`; the dead first allocation is removed.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++)
    writer.addDocument(document);
  writer.close();
  // Second writer session appends more docs before the optimize.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()).setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
          new LogDocMergePolicy()));
  for (int i = 0; i < 6; i++)
    writer.addDocument(document);
  writer.optimize();
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  for (int i = 0; i < 10; i++) {
    reader.getTermFreqVectors(i);
    reader.document(i);
  }
  reader.close();
  dir.close();
}
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
  // 10,000 "a" tokens plus a trailing "x"; a 100,000-token limit must keep it all.
  StringBuilder contents = new StringBuilder();
  for (int token = 0; token < 10000; token++) {
    contents.append(" a");
  }
  contents.append(" x");
  Document doc = new Document();
  doc.add(new Field("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // The trailing "x" must be searchable — nothing was truncated.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // One shared document with a stored field and a term-vector field.
  // (The dead duplicate `new Document()` from the original is removed.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++)
    writer.addDocument(document);
  writer.close();
  // Delete two of the ten docs through a writable reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  // expungeDeletes must drop the deleted slots: maxDoc shrinks to numDocs.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
  // One shared document with a stored field and a term-vector field.
  // (The dead duplicate `new Document()` from the original is removed.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // A low merge factor forces many adjacent merges during expungeDeletes.
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
      new MockAnalyzer()));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
  // One shared document with a stored field and a term-vector field.
  // (The dead duplicate `new Document()` from the original is removed.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Force many merges to happen
  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
  // doWait=false: don't block for the merges; close() completes them.
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
// An empty string is a legal field name; indexing it must not throw.
public void testEmptyFieldName() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // BUGFIX(cleanup): release the directory, matching every other test here.
  dir.close();
}
// LUCENE-1198
// IndexWriter subclass whose testPoint hook throws a RuntimeException when
// doFail is set and the "DocumentsWriterPerThread.init start" point is hit.
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
boolean doFail; // armed by the test before the add that should fail
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
}
// Verifies that an exception thrown while the documents writer initializes
// does not corrupt the index: the writer still closes and checkIndex passes.
public void testExceptionDocumentsWriterInit() throws IOException {
  MockRAMDirectory directory = new MockRAMDirectory();
  MockIndexWriter writer = new MockIndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED));
  writer.doFail = true;
  try {
    writer.addDocument(document);
    fail("did not hit exception");
  } catch (RuntimeException re) {
    // expected: armed testPoint throws during init
  }
  writer.close();
  _TestUtil.checkIndex(directory);
  directory.close();
}
// LUCENE-1208
// An analyzer exception thrown while buffering the doc that would trigger a
// flush (maxBufferedDocs=2) must not corrupt the writer: subsequent adds
// must still succeed.
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
// Analyzer whose token stream crashes partway through tokenization.
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected
}
// Writer must still accept documents after the failed add.
w.addDocument(doc);
w.close();
dir.close();
}
// IndexWriter subclass that can be armed (doFail) to throw when a merge is
// being initialized; records in 'failed' whether the failure actually fired.
private static final class MockIndexWriter2 extends IndexWriter {
  boolean doFail;
  boolean failed;

  public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    boolean hit = doFail && "startMergeInit".equals(name);
    if (hit) {
      failed = true;
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1210: an exception thrown while a merge is being initialized must be
// recorded and must not hang or corrupt the writer.
public void testExceptionOnMergeInit() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
  ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
  MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
  w.doFail = true;
  Document doc = new Document();
  doc.add(new Field("field", "a field", Field.Store.YES,
                    Field.Index.ANALYZED));
  // Add docs until the armed testPoint aborts a merge; mergeFactor=2 and
  // maxBufferedDocs=2 guarantee a merge is attempted within 10 adds.
  // (Was an unbraced for-loop directly wrapping the try/catch — braced for
  // clarity and safety; behavior unchanged.)
  for (int i = 0; i < 10; i++) {
    try {
      w.addDocument(doc);
    } catch (RuntimeException re) {
      break;
    }
  }
  // Wait for background merge threads so w.failed is reliably observable.
  ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
  assertTrue(w.failed);
  w.close();
  dir.close();
}
// IndexWriter subclass that records whether the before/after flush hooks ran.
private static final class MockIndexWriter3 extends IndexWriter {
  boolean afterWasCalled;
  boolean beforeWasCalled;

  public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  // Flush-completed hook: just note the call.
  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }

  // Pre-flush hook: just note the call.
  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }
}
// LUCENE-1222
// Verifies doBeforeFlush/doAfterFlush hooks fire both for a commit after an
// add and for a commit after buffered deletes.
public void testDoBeforeAfterFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter3 w = new MockIndexWriter3(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset flags, then confirm hooks also fire when flushing a delete.
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
// One doc was added then deleted: maxDoc 1, numDocs 0.
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// MockRAMDirectory.Failure that fires only while a commit is in progress:
// throws a RuntimeException before any file delete happens (fail1), and an
// IOException once the commit reaches its delete phase (fail2).
private static class FailOnlyInCommit extends MockRAMDirectory.Failure {
  boolean fail1, fail2;

  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    boolean isCommit = false;
    boolean isDelete = false;
    // Inspect the current stack to see which phase we are in.
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if ("org.apache.lucene.index.SegmentInfos".equals(frame.getClassName()) && "prepareCommit".equals(frame.getMethodName())) {
        isCommit = true;
      }
      if ("org.apache.lucene.store.MockRAMDirectory".equals(frame.getClassName()) && "deleteFile".equals(frame.getMethodName())) {
        isDelete = true;
      }
    }
    if (!isCommit) {
      return;
    }
    if (isDelete) {
      fail2 = true;
      throw new IOException("now fail during delete");
    }
    fail1 = true;
    throw new RuntimeException("now fail first");
  }
}
// LUCENE-1214
// close() triggers a commit; FailOnlyInCommit first throws a RuntimeException
// (before deletes) and then an IOException (during deletes). Only the
// RuntimeException should escape, and rollback must still work afterwards.
public void testExceptionsDuringCommit() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInCommit failure = new FailOnlyInCommit();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
dir.failOn(failure);
try {
w.close();
fail();
} catch (IOException ioe) {
fail("expected only RuntimeException");
} catch (RuntimeException re) {
// Expected
}
// Both failure points must have been exercised.
assertTrue(failure.fail1 && failure.fail2);
w.rollback();
dir.close();
}
// Pairs of (input, expected) strings: each even-index entry contains
// malformed UTF-16 (unpaired or reversed surrogates) and the odd-index entry
// that follows is the same text with the invalid code units replaced by
// U+FFFD, which is what indexing should store.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates (low before high; the well-formed inner pair is kept)
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Indexes the malformed-UTF-16 inputs from utf8Data and verifies each is both
// indexed and stored as its U+FFFD-replaced counterpart.
public void testInvalidUTF16() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// utf8Data is laid out as (input, expected) pairs.
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
// Indexed term and stored value must both equal the replaced form.
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point (skipping the surrogate block)
// through UnicodeUtil's UTF16->UTF8 and UTF8->UTF16 conversions, comparing
// each result against the JDK's own UTF-8 codec.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points (the surrogate range 0xD800-0xDFFF)
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode a supplementary code point as a UTF-16 surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte agreement with the JDK encoder.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Randomness source shared by the unicode round-trip tests below.
Random r;

// Returns a uniform value in [0, lim).
private int nextInt(int lim) {
  return r.nextInt(lim);
}

// Returns a uniform value in [start, end).
private int nextInt(int start, int end) {
  return start + r.nextInt(end - start);
}
// Fills buffer[offset..offset+count) with random UTF-16 content, mirroring
// into expected[] what a decoder should produce; returns true if an
// intentionally illegal (unpaired) surrogate was written, in which case
// expected[] holds U+FFFD at that position.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
// NOTE(review): len was computed from the original offset, so backing up
// here extends the filled span by one char — presumably intended; confirm.
offset--;
for(int i=offset;i<len;i++) {
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII (1-byte UTF-8) range
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range below the surrogates
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte UTF-8 range above the surrogates
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (r.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// Decoder is expected to replace the lone surrogate with U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Random UTF-16 buffers (possibly containing illegal surrogates) must
// round-trip through UnicodeUtil; when the input is legal the UTF-8 bytes
// must match the JDK encoder exactly.
public void testRandomUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: byte-for-byte identical to the JDK's UTF-8 encoding.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Decoding must always reproduce expected[] (U+FFFD for illegal input).
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Exercises incremental UTF8->UTF16 decoding: only the suffix of the UTF-8
// bytes that changed since the previous iteration is re-decoded, and the
// result must match a full decode.
public void testIncrementalUnicodeStrings() throws Throwable {
r = newRandom();
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
// Keep a random char prefix from the previous iteration, unless it
// contained an illegal surrogate (then start fresh).
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first UTF-8 byte that differs from last iteration.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode of just the changed suffix...
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// ...must agree with a full decode from scratch.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
// A token stream whose first token has position increment 0 must still index
// and be searchable by phrase, span and positions APIs.
public void testNegativePositions() throws Throwable {
// Emits "a" (posIncr 0), then "b" and "c" (posIncr 1 each).
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// "a" must be stored at position 0 despite the 0 increment.
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
assertTrue(_TestUtil.checkIndex(dir));
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// prepareCommit() must make nothing visible to newly opened or reopened
// readers until the matching commit() completes.
public void testPrepareCommit() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// Still invisible after prepareCommit...
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// ...visible only after commit, and only to the reopened reader.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// rollback() after prepareCommit() must discard the pending commit so no
// reader ever observes the prepared docs.
public void testPrepareCommitRollback() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
// Rollback then reuse of the directory rewrites files; disable the
// double-write check.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// Rolled back: the 23 prepared docs are gone everywhere.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
// Only the 17 docs added after the rollback survive.
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: prepareCommit/commit with no pending changes must still
// produce a valid, empty index.
public void testPrepareCommitNoChanges() throws IOException {
  MockRAMDirectory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.prepareCommit();
  writer.commit();
  writer.close();
  IndexReader reader = IndexReader.open(directory, true);
  assertEquals(0, reader.numDocs());
  reader.close();
  directory.close();
}
// Harness for the concurrent addIndexes tests: builds a source index (dir)
// with NUM_INIT_DOCS docs, a destination writer (writer2 on dir2), and runs
// NUM_THREADS threads that repeatedly invoke doBody(); any throwable is
// routed to handle().
private abstract static class RunAddIndexesThreads {
Directory dir, dir2;
final static int NUM_INIT_DOCS = 17;
IndexWriter writer2;
// Guarded by synchronized(failures) in subclasses' handle().
final List<Throwable> failures = new ArrayList<Throwable>();
// Set before close/rollback so handle() can suppress expected exceptions.
volatile boolean didClose;
final IndexReader[] readers;
final int NUM_COPY;
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
public RunAddIndexesThreads(int numCopy) throws Throwable {
NUM_COPY = numCopy;
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
dir2 = new MockRAMDirectory();
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.commit();
readers = new IndexReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++)
readers[i] = IndexReader.open(dir, true);
}
// Starts NUM_THREADS workers; numIter <= 0 means run until an exception.
void launchThreads(final int numIter) {
for(int i=0;i<NUM_THREADS;i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
// Each thread gets its own copies of the source directory.
final Directory[] dirs = new Directory[NUM_COPY];
for(int k=0;k<NUM_COPY;k++)
dirs[k] = new MockRAMDirectory(dir);
int j=0;
while(true) {
// System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
if (numIter > 0 && j == numIter)
break;
doBody(j++, dirs);
}
} catch (Throwable t) {
handle(t);
}
}
};
}
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
}
void joinThreads() throws Exception {
for(int i=0;i<NUM_THREADS;i++)
threads[i].join();
}
// Closes the destination writer, flagging didClose first.
void close(boolean doWait) throws Throwable {
didClose = true;
writer2.close(doWait);
}
void closeDir() throws Throwable {
for(int i=0;i<NUM_COPY;i++)
readers[i].close();
dir2.close();
}
// One unit of work per iteration; j selects the operation.
abstract void doBody(int j, Directory[] dirs) throws Throwable;
// Called with any throwable escaping doBody.
abstract void handle(Throwable t);
}
// Worker that cycles through addIndexes variants (from dirs and readers),
// optimize, maybeMerge and commit; every throwable is recorded as a failure.
private class CommitAndAddIndexes extends RunAddIndexesThreads {
public CommitAndAddIndexes(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void handle(Throwable t) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.addIndexes(dirs);
writer2.maybeMerge();
break;
case 4:
writer2.commit();
}
}
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
public void testAddIndexesWithThreads() throws Throwable {
final int NUM_ITER = 15;
final int NUM_COPY = 3;
CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
c.launchThreads(NUM_ITER);
for(int i=0;i<100;i++)
addDoc(c.writer2);
c.joinThreads();
// 4 of every 5 iterations (cases 0-3) add NUM_COPY indexes of
// NUM_INIT_DOCS docs each, per thread, plus the 100 docs added above.
int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
assertEquals(expectedNumDocs, c.writer2.numDocs());
c.close(true);
assertTrue(c.failures.size() == 0);
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
assertEquals(expectedNumDocs, reader.numDocs());
reader.close();
c.closeDir();
}
// Variant that tolerates AlreadyClosedException/NullPointerException, which
// are expected when addIndexes races with a concurrent close of writer2.
private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
  public CommitAndAddIndexes2(int numCopy) throws Throwable {
    super(numCopy);
  }

  @Override
  void handle(Throwable t) {
    boolean expected = (t instanceof AlreadyClosedException) || (t instanceof NullPointerException);
    if (expected) {
      return;
    }
    t.printStackTrace(System.out);
    synchronized (failures) {
      failures.add(t);
    }
  }
}
// LUCENE-1335: closing the writer while addIndexes threads are still running
// must neither corrupt the index nor record unexpected failures.
public void testAddIndexesWithClose() throws Throwable {
  final int NUM_COPY = 3;
  CommitAndAddIndexes2 runner = new CommitAndAddIndexes2(NUM_COPY);
  runner.launchThreads(-1);
  // Close w/o first stopping/joining the threads
  runner.close(true);
  runner.joinThreads();
  _TestUtil.checkIndex(runner.dir2);
  runner.closeDir();
  assertTrue(runner.failures.size() == 0);
}
// Worker variant used by the close(false)/rollback races: same operation mix
// as CommitAndAddIndexes, and handle() suppresses exceptions that are
// expected once didClose is set.
private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
public CommitAndAddIndexes3(int numCopy) throws Throwable {
super(numCopy);
}
@Override
void doBody(int j, Directory[] dirs) throws Throwable {
switch(j%5) {
case 0:
writer2.addIndexes(dirs);
writer2.optimize();
break;
case 1:
writer2.addIndexes(dirs);
break;
case 2:
writer2.addIndexes(readers);
break;
case 3:
writer2.optimize();
// NOTE(review): no break here — case 3 falls through to commit();
// verify the fall-through is intentional.
case 4:
writer2.commit();
}
}
@Override
void handle(Throwable t) {
boolean report = true;
// After close/rollback these exceptions are expected and not failures.
if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
report = !didClose;
} else if (t instanceof IOException) {
Throwable t2 = t.getCause();
if (t2 instanceof MergePolicy.MergeAbortedException) {
report = !didClose;
}
}
if (report) {
t.printStackTrace(System.out);
synchronized(failures) {
failures.add(t);
}
}
}
}
// LUCENE-1335: abrupt close(false) while many addIndexes threads run must
// leave a consistent index and no unexpected failures.
public void testAddIndexesWithCloseNoWait() throws Throwable {
  final int NUM_COPY = 50;
  CommitAndAddIndexes3 runner = new CommitAndAddIndexes3(NUM_COPY);
  runner.launchThreads(-1);
  Thread.sleep(500);
  // Close w/o first stopping/joining the threads
  runner.close(false);
  runner.joinThreads();
  _TestUtil.checkIndex(runner.dir2);
  runner.closeDir();
  assertTrue(runner.failures.size() == 0);
}
// LUCENE-1335: test simultaneous addIndexes & close
// rollback() while addIndexes threads run must leave a consistent index.
public void testAddIndexesWithRollback() throws Throwable {
final int NUM_COPY = 50;
CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
c.launchThreads(-1);
Thread.sleep(500);
// Close w/o first stopping/joining the threads
// didClose is set manually so handle() suppresses the expected exceptions.
c.didClose = true;
c.writer2.rollback();
c.joinThreads();
_TestUtil.checkIndex(c.dir2);
c.closeDir();
assertTrue(c.failures.size() == 0);
}
// LUCENE-1347: IndexWriter subclass armed to throw at the
// "rollback before checkpoint" test point.
private static final class MockIndexWriter4 extends IndexWriter {
  // When true, the next hit of the armed test point throws.
  boolean doFail;

  public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  boolean testPoint(String name) {
    boolean shouldFail = doFail && "rollback before checkpoint".equals(name);
    if (shouldFail) {
      throw new RuntimeException("intentionally failing");
    }
    return true;
  }
}
// LUCENE-1347: an exception thrown during rollback must not hang the writer;
// a subsequent rollback must succeed.
public void testRollbackExceptionHang() throws Throwable {
  MockRAMDirectory dir = new MockRAMDirectory();
  MockIndexWriter4 w = new MockIndexWriter4(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(w);
  w.doFail = true;
  try {
    w.rollback();
    fail("did not hit intentional RuntimeException");
  } catch (RuntimeException re) {
    // expected
  }
  w.doFail = false;
  w.rollback();
  // Release the directory (was leaked; every sibling test closes its dir).
  dir.close();
}
// LUCENE-1219: storing a slice (offset/length) of a byte[] must round-trip:
// only the 17-byte window starting at offset 10 (value 87) is stored.
public void testBinaryFieldOffsetLength() throws IOException {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  Field f = new Field("binary", b, 10, 17);
  // Before indexing, the field still exposes the full backing array plus
  // the offset/length of the slice.
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // Was assertEquals(17, b.length, 17): that resolves to the
  // double-with-delta overload, which would accept any length in [0, 34].
  // Assert the exact stored length instead.
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
// commit(Map) user data must be readable via IndexReader and must survive a
// later optimize+close that commits without user data.
public void testCommitUserData() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// A plain optimize/close must preserve the previously committed user data.
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// optimize() hit by random IOExceptions must always carry a root cause, and
// the writer/directory must remain closeable afterwards.
public void testOptimizeExceptions() throws IOException {
RAMDirectory startDir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
IndexWriter w = new IndexWriter(startDir, conf);
for(int i=0;i<27;i++)
addDoc(w);
w.close();
for(int i=0;i<200;i++) {
// Fresh copy of the seed index for each attempt.
MockRAMDirectory dir = new MockRAMDirectory(startDir);
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
w = new IndexWriter(dir, conf);
// 50% chance of an IOException per I/O op after the first 100 ops.
dir.setRandomIOExceptionRate(0.5, 100);
try {
w.optimize();
} catch (IOException ioe) {
if (ioe.getCause() == null)
fail("optimize threw IOException without root cause");
}
w.close();
dir.close();
}
}
// LUCENE-1429
// An OutOfMemoryError during close()'s flush must propagate, and a second
// close() must then succeed instead of throwing IllegalStateException.
public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
final List<Throwable> thrown = new ArrayList<Throwable>();
// Writer that fakes an OOME from the first "now flush at close" message.
final IndexWriter writer = new IndexWriter(new MockRAMDirectory(),
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())) {
@Override
public void message(final String message) {
if (message.startsWith("now flush at close") && 0 == thrown.size()) {
thrown.add(null);
throw new OutOfMemoryError("fake OOME at " + message);
}
}
};
// need to set an info stream so message is called
writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
try {
writer.close();
fail("OutOfMemoryError expected");
}
catch (final OutOfMemoryError expected) {}
// throws IllegalStateEx w/o bug fix
writer.close();
}
// LUCENE-1442
// Term-vector offsets must accumulate across repeated field instances,
// including an empty-valued instance in the middle.
public void testDoubleOffsetCounting() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
// Empty value between the second and third "abcd" instance.
Field f2 = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// Token "" occurred once
assertEquals(1, termOffsets.length);
assertEquals(8, termOffsets[0].getStartOffset());
assertEquals(8, termOffsets[0].getEndOffset());
// Token "abcd" occurred three times
termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(1);
assertEquals(3, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(4, termOffsets[1].getStartOffset());
assertEquals(8, termOffsets[1].getEndOffset());
assertEquals(8, termOffsets[2].getStartOffset());
assertEquals(12, termOffsets[2].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1442: with an analyzed field added twice, the second occurrence's
// offsets must continue after the first ([0,4) then [5,9) per the asserted
// values) rather than restarting at zero.
public void testDoubleOffsetCounting2() throws Exception {
  MockRAMDirectory directory = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  Field repeated = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(repeated);
  document.add(repeated);
  writer.addDocument(document);
  writer.close();
  IndexReader reader = IndexReader.open(directory, true);
  TermVectorOffsetInfo[] offsets = ((TermPositionVector) reader.getTermFreqVector(0, "field")).getOffsets(0);
  assertEquals(2, offsets.length);
  assertEquals(0, offsets[0].getStartOffset());
  assertEquals(4, offsets[0].getEndOffset());
  assertEquals(5, offsets[1].getStartOffset());
  assertEquals(9, offsets[1].getEndOffset());
  reader.close();
  directory.close();
}
// LUCENE-1448
// Trailing whitespace in "abcd " must advance the offset base for the second
// field instance (second "abcd" at [8,12), not [5,9)).
public void testEndOffsetPositionCharAnalyzer() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// Same offset accumulation as the char-analyzer case, but with the token
// stream wrapped in a CachingTokenFilter and the field built from the stream.
public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
Analyzer analyzer = new MockAnalyzer();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(8, termOffsets[1].getStartOffset());
assertEquals(12, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// A stop word ("the") removed by the stop filter must still advance the
// offset base: the repeated field's "abcd" lands at [9,13).
public void testEndOffsetPositionStopFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
assertEquals(9, termOffsets[1].getStartOffset());
assertEquals(13, termOffsets[1].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// Offsets for a second field instance must continue from the full length of
// the first value ("abcd the " = 9 chars; "crunch" at [11,17), "man" at
// [18,21) per the assertions, which include the inter-instance gap).
public void testEndOffsetPositionStandard() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd the ", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(11, termOffsets[0].getStartOffset());
assertEquals(17, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(2);
assertEquals(18, termOffsets[0].getStartOffset());
assertEquals(21, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// A completely empty first field instance must not shift the second
// instance's offsets incorrectly ("crunch" at [0,6), "man" at [7,10)).
public void testEndOffsetPositionStandardEmptyField() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(6, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(7, termOffsets[0].getStartOffset());
assertEquals(10, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1448
// Offset accounting with an empty field instance in the MIDDLE of the
// per-name field sequence ("abcd", "", "crunch").
public void testEndOffsetPositionStandardEmptyField2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field f2 = new Field("field", "crunch", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
assertEquals(4, termOffsets[0].getEndOffset());
termOffsets = tpv.getOffsets(1);
assertEquals(5, termOffsets[0].getStartOffset());
assertEquals(11, termOffsets[0].getEndOffset());
r.close();
dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
File indexDir = new File(TEMP_DIR, "otherfiles");
Directory dir = FSDirectory.open(indexDir);
try {
// Create my own random file:
IndexOutput out = dir.createOutput("myrandomfile");
out.writeByte((byte) 42);
out.close();
// Opening (and immediately closing) a writer must leave the foreign file alone.
new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(dir.fileExists("myrandomfile"));
} finally {
dir.close();
_TestUtil.rmDir(indexDir);
}
}
// Regression test: addIndexes(IndexReader...) with two readers that are
// clones of each other must not deadlock, and all docs must be merged in.
public void testDeadlock() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
MockRAMDirectory dir2 = new MockRAMDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(new IndexReader[] {r1, r2});
writer.close();
// 3 original docs + 1 from each of the two (cloned) readers = 5
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Worker thread used by testThreadInterruptDeadlock: indexes in a loop while
// the main thread repeatedly interrupts it. IndexWriter must surface the
// interrupt as ThreadInterruptedException and leave the index consistent.
// The exact statement order around 'first'/'allowInterrupt' is deliberate —
// see the in-line comments about class-loader interrupt quirks.
private class IndexerThreadInterrupt extends Thread {
// set when an unexpected failure occurred; read by the main thread
volatile boolean failed;
// set by the main thread to ask this thread to stop
volatile boolean finish;
// handshake flag: main thread only interrupts while this is true
boolean allowInterrupt = false;
@Override
public void run() {
RAMDirectory dir = new RAMDirectory();
IndexWriter w = null;
boolean first = true;
while(!finish) {
try {
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
//((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
if (!first && !allowInterrupt) {
// tell main thread it can interrupt us any time,
// starting now
allowInterrupt = true;
}
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<100;i++) {
w.addDocument(doc);
w.commit();
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
if (first && !allowInterrupt) {
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
first = false;
}
}
} catch (ThreadInterruptedException re) {
// expected path: IndexWriter wraps the InterruptedException
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
// Make sure IW cleared the interrupted bit
// TODO: remove that false once test is fixed for real
if (false && interrupted()) {
System.out.println("FAILED; InterruptedException hit but thread.interrupted() was true");
e.printStackTrace(System.out);
failed = true;
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
// Whatever happened, the index must still be openable and consistent.
if (!failed) {
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
}
}
// Drives IndexerThreadInterrupt: fires up to 100 interrupts at the indexing
// thread (only while it signals allowInterrupt) and asserts it never failed.
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(1);
if (t.allowInterrupt) {
i++;
// clear the handshake so we interrupt at most once per permission
t.allowInterrupt = false;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.allowInterrupt = false;
t.finish = true;
// final interrupt wakes the thread so it can observe 'finish'
t.interrupt();
t.join();
assertFalse(t.failed);
}
// Tests a binary stored field that is ALSO indexed via an explicit token
// stream, across in-memory merge, flush, and a forced segment merge.
// The binary field stores bytes b[10..26] (offset 10, length 17), so the
// first stored byte is 77 + 10 = 87.
public void testIndexStoreCombos() throws Exception {
  MockRAMDirectory dir = new MockRAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  Field f = new Field("binary", b, 10, 17);
  f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
  Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
  f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
  doc.add(f);
  doc.add(f2);
  w.addDocument(doc);
  // add 2 docs to test in-memory merging
  f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  // force segment flush so we can force a segment merge with doc3 later.
  w.commit();
  f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
  f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
  w.addDocument(doc);
  w.commit();
  w.optimize(); // force segment merge.
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: was assertEquals(17, b.length, 17) — the (double, double, delta)
  // overload, which passes vacuously with delta 17. Assert the exact length.
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  assertTrue(ir.document(0).getFieldable("binary").isBinary());
  assertTrue(ir.document(1).getFieldable("binary").isBinary());
  assertTrue(ir.document(2).getFieldable("binary").isBinary());
  assertEquals("value", ir.document(0).get("string"));
  assertEquals("value", ir.document(1).get("string"));
  assertEquals("value", ir.document(2).get("string"));
  // test that the terms were indexed.
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
  ir.close();
  dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// (insertion order, even when the same field name repeats non-contiguously).
public void testStoredFieldsOrder() throws Throwable {
Directory d = new MockRAMDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
// iterate stored fields: must come back zzz, aaa, zzz — exactly as added
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// Makes sure a term containing an embedded U+FFFF char indexes and is
// searchable (U+FFFF was historically used as an internal sentinel).
public void testEmbeddedFFFF() throws Throwable {
Directory d = new MockRAMDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
// Indexes a single empty document (no fields, non-compound) with the info
// stream enabled and verifies the resulting index passes CheckIndex.
public void testNoDocsIndex() throws Throwable {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// capture the info stream output in memory (contents not asserted)
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = new MockRAMDirectory();
final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// each thread writes unique terms: "<threadId>_<counter>"
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// after commit returns, a reopened reader MUST see the new doc
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
w.close();
dir.close();
assertFalse(failed.get());
}
// Returns a uniformly random int in [start, end]; both bounds are inclusive.
private final int getInt(Random r, int start, int end) {
  final int span = end - start + 1;
  return start + r.nextInt(span);
}
// Walks the TermsEnum for field "f" and asserts: terms are strictly
// ascending (BytesRef order), every term was one we added, and — when
// isTop is true (whole-index reader) — that ALL added terms were seen.
// Then re-seeks each seen term to verify seek() finds it exactly.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// strictly increasing: each term must sort after the previous one
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as its Unicode codepoint label, e.g. 'A' -> "U+41".
private final String asUnicodeChar(char c) {
  final StringBuilder label = new StringBuilder("U+");
  label.append(Integer.toHexString(c));
  return label.toString();
}
// Describes a 1- or 2-char term as its Unicode codepoint(s) for test
// failure messages, e.g. "U+41" or "U+d800,U+dc00".
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
// periodic commits produce multiple segments to exercise sub-readers
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// Verifies a reader opened with termInfosIndexDivisor=2 still enumerates
// every term; 300 terms forces more than one term-index block (> 256).
public void testIndexDivisor() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(""+i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
// getReader(2): NRT reader with index divisor 2; take its only sub-reader
IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
assertEquals(0, docs.nextDoc());
assertEquals(DocsEnum.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
// Verifies deleteUnusedFiles(): files referenced by an open reader must
// survive, and are only removed once the reader releases them. Iteration 0
// uses an NRT reader (close triggers the delete); iteration 1 a regular one.
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE: method name typo ("Unsed") kept — tests are discovered by name.
public void testDeleteUnsedFiles2() throws Exception {
  // Validates that iw.deleteUnusedFiles() also deletes unused index commits
  // in case a deletion policy which holds onto commits is used.
  Directory dir = new MockRAMDirectory();
  SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setIndexDeletionPolicy(sdp));
  // First commit
  Document doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  assertEquals(1, IndexReader.listCommits(dir).size());
  // Keep that commit
  sdp.snapshot("id");
  // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
  doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.commit();
  assertEquals(2, IndexReader.listCommits(dir).size());
  // Should delete the unreferenced commit
  sdp.release("id");
  writer.deleteUnusedFiles();
  assertEquals(1, IndexReader.listCommits(dir).size());
  writer.close();
  // BUGFIX: the directory was never closed (resource leak; every sibling
  // test closes its Directory).
  dir.close();
}
private static class FlushCountingIndexWriter extends IndexWriter {
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
public void doAfterFlush() {
flushCount++;
}
}
// nocommit - TODO: enable when flushing by RAM is implemented
// (leading underscore keeps this out of the JUnit test-name scan)
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
// with a 0.5 MB RAM buffer, a flush before 2500 ops means RAM accounting
// is wrong (flushing far too eagerly)
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  try {
    IndexReader.listCommits(dir);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException e) {
    // that's expected !
  }
  // No changes still should generate a commit, because it's a new index.
  writer.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
  // BUGFIX: close the directory (was leaked; all sibling tests close theirs).
  dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  FSDirectory dir = FSDirectory.open(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
  new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
  // BUGFIX: release the FSDirectory's file handles (was leaked).
  dir.close();
}
public void testEmptyDirRollback() throws Exception {
  // Tests that if IW is created over an empty Directory, some documents are
  // indexed, flushed (but not committed) and then IW rolls back, then no
  // files are left in the Directory.
  Directory dir = new MockRAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2));
  // Creating over empty dir should not create any files.
  assertEquals(0, dir.listAll().length);
  Document doc = new Document();
  // create as many files as possible
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // Adding just one document does not call flush yet.
  assertEquals("only the stored and term vector files should exist in the directory", 5, dir.listAll().length);
  doc = new Document();
  doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // The second document should cause a flush.
  assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
  // After rollback, IW should remove all files
  writer.rollback();
  assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
  // Since we rolled-back above, that close should be a no-op
  writer.close();
  assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
  // BUGFIX: close the directory (was leaked; all sibling tests close theirs).
  dir.close();
}
// Opens a second writer with OpenMode.CREATE over a directory whose first
// writer has uncommitted docs and no segments file yet; both writers share
// the dir via NoLockFactory. Must not fail.
public void testNoSegmentFile() throws IOException {
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
}
// With NoDeletionPolicy, opens an IndexWriter on a PAST commit ("first"),
// commits from there ("third"), and verifies the later "second" commit
// still exists; then commits from a writeable IndexReader ("fourth") and
// verifies "third" survives too. Commits are tagged via user data.
public void testFutureCommit() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
// writer opened on "first" sees only that commit's single doc
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
}
// MergeMethods (non-code section marker left by the dataset/merge tooling;
// commented out so it no longer breaks Java parsing)
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
<<<<<<< MINE
=======
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInCloseDocStore(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("finishDocument".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("abort".equals(trace[i].getMethodName()) ||
"finishDocument".equals(trace[i].getMethodName())) {
if (onlyOnce)
doFail = false;
//System.out.println(Thread.currentThread().getName() + ": now fail");
//new Throwable().printStackTrace(System.out);
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
>>>>>>> YOURS
// Exercises maxDoc()/numDocs() across add, delete-via-reader, optimize, and
// re-create; also verifies the default write-lock timeout can be set and is
// restored afterwards.
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// temporarily raise the global default lock timeout; restore in finally
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// optimize expunges deletes, so maxDoc drops to the live doc count
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
// Adds one minimal document (single unstored, analyzed "content" field
// with value "aaa") to the given writer.
private void addDoc(IndexWriter writer) throws IOException
{
  final Document doc = new Document();
  doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
}
// Adds one document carrying the given index number in both a stored
// "content" field ("aaa <index>") and a stored "id" field, so individual
// docs can be located later.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  final Document doc = new Document();
  doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(doc);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Asserts that instantiating an IndexFileDeleter with the
// keep-only-last-commit policy deletes nothing: the directory listing must
// be identical before and after.  Fails with a listing diff otherwise.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  final String[] filesBefore = dir.listAll();
  final SegmentInfos segmentInfos = new SegmentInfos();
  segmentInfos.read(dir);
  // The deleter prunes unreferenced files as a side effect of construction.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), segmentInfos, null, CodecProvider.getDefault());
  final String[] filesAfter = dir.listAll();
  Arrays.sort(filesBefore);
  Arrays.sort(filesAfter);
  if (!Arrays.equals(filesBefore, filesAfter)) {
    fail(message + ": before delete:\n " + arrayToString(filesBefore) + "\n after delete:\n " + arrayToString(filesAfter));
  }
}
// optimize(3) must leave at most 3 segments — or exactly the original
// count when the index already had fewer than 3.  Exercised over many
// starting doc counts to vary the pre-optimize segment structure.
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Segment count before optimize(3):
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
// Segment count after optimize(3):
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
// Same contract as testOptimizeMaxNumSegments but with a concurrent merge
// scheduler and repeated add/optimize(7) cycles on the SAME writer, to
// verify the max-segments bound holds while background merges are running.
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Commit and wait so the segment count read below is stable.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// optimize(7) must not merge below the original count when it was
// already under 7, and must not leave more than 7 otherwise.
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
* Make sure optimize doesn't use any more than 4X the
* starting index size as its temporary free space
* required (matching the 4X bound asserted below).
*/
// Builds a ~500-doc index, then optimizes it in a fresh writer while
// MockDirectoryWrapper tracks peak disk usage; asserts the transient
// space used by optimize stays within 4X the starting index size.
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Baseline: total size of all index files before optimizing.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Important to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
/**
 * Joins the given strings with the {@code "\n "} separator used in
 * failure messages.
 *
 * <p>Improvement: the original concatenated {@code String}s inside the
 * loop, which copies the accumulated result on every iteration (O(n^2)
 * in total output length); a {@link StringBuilder} makes it linear while
 * producing byte-identical output.
 *
 * @param l the strings to join (must be non-null; elements unchanged)
 * @return the elements of {@code l} separated by {@code "\n "}, or the
 *         empty string when {@code l} is empty
 */
static String arrayToString(String[] l) {
  final StringBuilder s = new StringBuilder();
  for (int i = 0; i < l.length; i++) {
    if (i > 0) {
      s.append("\n ");
    }
    s.append(l[i]);
  }
  return s.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testChangesAfterClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
// close
writer.close();
try {
addDoc(writer);
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException e) {
// expected
}
dir.close();
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
// "Commit on close": docs added by an open writer must be invisible to
// readers/searchers (and the reader stays current) until the writer is
// closed; after close the new docs appear.
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Add 33 more docs in 3 batches; searchers opened mid-session must only
// see the original 14.
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 3*11 added = 47 total.
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
// "Commit on close" + rollback: adds and deletes performed by an open
// writer are invisible to searchers, rollback() discards them entirely
// (leaving no unreferenced files), and the index remains fully usable
// for subsequent add sessions.
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// Neither the 17 adds nor the delete-all are visible pre-commit:
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 surviving docs + 12*17 newly added = 218.
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
// Verifies the "commit on close" writer cleans up temporary segments not
// referenced by the starting segments file, by bounding peak transient
// disk usage (measured via MockDirectoryWrapper) at 150X the starting
// index size during adds and after close.
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
// Optimize under "commit on close": an uncommitted optimize that is
// rolled back leaves the index unoptimized (and leaves no unreferenced
// files), while an optimize followed by close() commits and yields an
// optimized index.
public void testCommitOnCloseOptimize() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
if (VERBOSE) {
writer.setInfoStream(System.out);
}
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
if (VERBOSE) {
System.out.println("TEST: do real optimize");
}
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
if (VERBOSE) {
writer.setInfoStream(System.out);
}
writer.optimize();
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// This time the optimize was committed by close(), so the reader must
// see an optimized index:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
dir.close();
}
// An index committed with zero documents must open cleanly with
// maxDoc == numDocs == 0, both after the initial session and after an
// additional empty APPEND session.
public void testIndexNoDocuments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.commit();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.maxDoc());
assertEquals(0, reader.numDocs());
reader.close();
// Re-open in append mode and commit again, still without docs:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.commit();
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.maxDoc());
assertEquals(0, reader.numDocs());
reader.close();
dir.close();
}
// Indexes 100 docs that each use 6 unique field names (600 distinct
// fields total) and verifies docFreq for every field/term pair, stressing
// per-field data structures.
public void testManyFields() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
// Every field name is unique to one doc, so each term has docFreq 1:
for(int j=0;j<100;j++) {
assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
}
reader.close();
dir.close();
}
// With a (practically zero) RAM buffer, every single added document must
// trigger a flush, observable as the directory's file count growing after
// each addDocument call.
public void testSmallRAMBuffer() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setRAMBufferSizeMB(0.000001).
setMergePolicy(newLogMergePolicy(10))
);
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.listAll().length;
// Verify that with a tiny RAM buffer we see new
// segment after every doc
assertTrue(numFile > lastNumFile);
lastNumFile = numFile;
}
writer.close();
dir.close();
}
/**
* Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
// Stresses the in-RAM indexing buffer (0.5 MB) with three very different
// document shapes — all-unique terms, one highly repeated short term, and
// one extremely long term — then checks the repeated term is searchable
// in all 300 of its docs.
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
// "aaa" appears in 100 docs per outer iteration, 3 iterations = 300.
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Verifies that enabling norms on only a single document — once within a
// flushed segment (pre flush) and once across a flush boundary (post
// flush) — does not break searching; all docs remain findable.
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
// Indexes one massive document containing 128K occurrences of the term
// "a" (with positions/offsets) under a tiny RAM buffer, then verifies the
// postings report the full within-doc frequency.
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
// 4096 iterations * 32 a's per iteration = 128K occurrences.
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
// Directory with lockFactory == null that supplies locks solely through
// its own overridden makeLock:
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
// Re-creating over the same directory also has to acquire the lock:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
searcher.close();
dir.close();
}
// flush(triggerMerge=false, ...) must write buffered docs without kicking
// off merges: 19 docs with maxBufferedDocs=2 should leave 10 segments
// (9 full flushes + the final partial one).
public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
writer.flush(false, true);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
// Since we flushed w/o allowing merging we should now
// have 10 segments
assertEquals(10, sis.size());
dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Commit so the first (real) doc lands in its own flushed segment.
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
// checkIndex would fail here if the empty doc corrupted the segment.
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
// optimize(false) kicks off optimization without waiting.  Pass 0: close()
// waits for it, so the reopened index is optimized.  Pass 1: extra docs
// added after optimize(false) create a segment that must NOT be swept into
// the optimize, leaving 2 segments and an unoptimized index.
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// The optimized segment plus the late-flushed one:
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
// A document whose only field has an empty value, with term vectors
// enabled, must index without raising a NullPointerException.
public void testBadSegment() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  final Document document = new Document();
  document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  iw.addDocument(document);
  iw.close();
  dir.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
// Indexing (including merges, mergeFactor=2 forces them) must work even
// when the calling thread runs at MAX_PRIORITY; the original priority is
// always restored in the finally block.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain the writer's pending merge queue, running each merge inline
// on this thread; null signals the queue is empty.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// Every segment participating in a merge must be under maxMergeDocs.
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
// LUCENE-1013: with maxMergeDocs=20 and mergeFactor=2, MyMergeScheduler's
// assertions (above) verify that no segment with >= 20 docs is ever
// selected for merging while 177 docs are added.
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1072
// LUCENE-1072: an exception thrown from a TokenStream mid-document must
// only take out that one document — it is marked deleted, later adds
// succeed, and terms the failing doc never reached (e.g. "gg") are absent.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
// Analyzer whose token filter throws after producing 5 tokens:
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
// Only the 2 successfully added docs remain live for "aa":
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// LUCENE-1072: indexes 20 generations of docs whose field schema varies per
// iteration (stored vs. unstored, empty vs. non-empty fields), deleting one
// doc each round and periodically optimizing, to verify flushing/merging
// copes with a variable schema.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
// even iterations additionally index a stored content4 field
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
// four identical docs per iteration
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// delete a different doc each iteration via the reader
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
// every 4th iteration, optimize down to a single segment
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
/**
 * Verifies that close(false) — abort pending work, don't wait for merges —
 * leaves a consistent, readable index even while another thread is
 * concurrently adding documents.
 *
 * BUGFIX: the check of the {@code failure} list used to run BEFORE
 * {@code t1.start()}, when the list was necessarily empty, so any real
 * failure recorded by the indexing thread was silently ignored. The check
 * now runs after {@code t1.join()}.
 */
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setOpenMode(OpenMode.CREATE).
        setMaxBufferedDocs(2).
        // have to use compound file to prevent running out of
        // descripters when newDirectory returns a file-system
        // backed directory:
        setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for (int iter = 0; iter < 10; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // writer was closed underneath us: expected, stop adding
                done = true;
                break;
              } catch (NullPointerException e) {
                // tolerated when the writer is torn down mid-add
                done = true;
                break;
              } catch (Throwable e) {
                // anything else is a real failure; record for the main thread
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Rethrow any real failure the indexing thread hit (must be checked
      // after join, once the thread has actually run).
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
/**
 * LUCENE-1130: close(false) must succeed even while threads are still
 * adding documents, without hanging those threads or corrupting the index.
 * Consistency fix: the iteration sentinel is now referenced as
 * DocIdSetIterator.NO_MORE_DOCS (its declaring class), matching the other
 * doc-iteration loops in this file; the value is identical since DocsEnum
 * inherits the constant.
 */
public void testCloseWithThreads() throws Exception {
  int NUM_THREADS = 3;
  for (int iter = 0; iter < 7; iter++) {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
    // We expect AlreadyClosedException
    ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
    IndexWriter writer = new IndexWriter(dir, conf);
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
    IndexerThread[] threads = new IndexerThread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++)
      threads[i] = new IndexerThread(writer, false);
    for (int i = 0; i < NUM_THREADS; i++)
      threads[i].start();
    boolean done = false;
    while (!done) {
      Thread.sleep(100);
      for (int i = 0; i < NUM_THREADS; i++)
        // only stop when at least one thread has added a doc
        if (threads[i].addCount > 0) {
          done = true;
          break;
        }
    }
    writer.close(false);
    // Make sure threads that are adding docs are not hung:
    for (int i = 0; i < NUM_THREADS; i++) {
      // Without fix for LUCENE-1130: one of the
      // threads will hang
      threads[i].join();
      if (threads[i].isAlive())
        fail("thread seems to be hung");
    }
    // Quick test to make sure index is not corrupt:
    IndexReader reader = IndexReader.open(dir, true);
    DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
        MultiFields.getDeletedDocs(reader),
        "field",
        new BytesRef("aaa"));
    int count = 0;
    while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    assertTrue(count > 0);
    reader.close();
    dir.close();
  }
}
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1084: test unlimited field length
/** LUCENE-1084: with no field-length limit, a term that appears after
 * 10000 preceding tokens is still indexed and searchable. */
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // 10000 copies of "a" followed by a single trailing "x"
  StringBuilder text = new StringBuilder();
  for (int n = 0; n < 10000; n++) {
    text.append(" a");
  }
  text.append(" x");
  Document d = new Document();
  d.add(new Field("field", text.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  Term term = new Term("field", "x");
  assertEquals(1, r.docFreq(term));
  r.close();
  directory.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// LUCENE-1044: with autoCommit=false semantics, readers see changes only
// after an explicit commit(); readers already open are unaffected until
// reopened.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// a reader opened before commit sees the empty index
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopening picks up the committed 23 docs; the old reader is unchanged
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// uncommitted adds stay invisible to reader2
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// after the second commit all 40 docs are visible
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
/** LUCENE-1084: with a user-specified maxFieldLength of 100000, a term at
 * position ~10001 is within the limit and must be indexed. */
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
  // 10000 copies of "a" followed by a single trailing "x"
  StringBuilder text = new StringBuilder();
  for (int n = 0; n < 10000; n++) {
    text.append(" a");
  }
  text.append(" x");
  Document d = new Document();
  d.add(new Field("field", text.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  Term term = new Term("field", "x");
  assertEquals(1, r.docFreq(term));
  r.close();
  directory.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
/**
 * LUCENE-325: expungeDeletes() must drop deleted docs (maxDoc shrinks to
 * numDocs) when two singular merges are required.
 * Cleanup: removed a dead {@code new Document()} whose result was
 * immediately overwritten by a second allocation.
 */
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  // delete two docs so expungeDeletes has work to do
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // after expunging, maxDoc equals the live doc count
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
/**
 * LUCENE-325: expungeDeletes() with many adjacent merges required
 * (mergeFactor 3 over 49 live docs of 98).
 * Cleanup: removed a dead {@code new Document()} whose result was
 * immediately overwritten by a second allocation.
 */
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // delete every other doc, leaving 49 live
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  // after expunging, maxDoc equals the live doc count
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
/**
 * LUCENE-325: expungeDeletes(false) — don't wait for the merges — must
 * still eventually produce an index with no deleted docs by the time the
 * writer is closed.
 * Cleanup: removed a dead {@code new Document()} whose result was
 * immediately overwritten by a second allocation.
 */
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // delete every other doc, leaving 49 live
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
/** LUCENE-1179: indexing a field whose name is the empty string must not
 * throw. */
public void testEmptyFieldName() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document d = new Document();
  d.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  directory.close();
}
/**
 * IndexWriter subclass used by the flush/init tests: records whether the
 * doBeforeFlush/doAfterFlush hooks ran, and can be made to throw from the
 * "DocumentsWriterPerThread.init start" test point.
 *
 * BUGFIX: {@code doFail} was read by testPoint (and set by
 * testExceptionDocumentsWriterInit) but never declared in this class;
 * declare it so the class compiles.
 */
private static final class MockIndexWriter extends IndexWriter {
  public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }
  // When true, testPoint throws to simulate a failure during
  // DocumentsWriterPerThread init.
  boolean doFail;
  boolean afterWasCalled;
  boolean beforeWasCalled;
  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }
  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }
  @Override
  boolean testPoint(String name) {
    if (doFail && name.equals("DocumentsWriterPerThread.init start"))
      throw new RuntimeException("intentionally failing");
    return true;
  }
}
/** Forces a RuntimeException from the "DocumentsWriterPerThread.init start"
 * test point and verifies the writer still closes and the index checks out. */
public void testExceptionDocumentsWriterInit() throws IOException {
  MockRAMDirectory directory = new MockRAMDirectory();
  MockIndexWriter mockWriter = new MockIndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("field", "a field", Field.Store.YES,
      Field.Index.ANALYZED));
  mockWriter.doFail = true;
  boolean hitFailure = false;
  try {
    mockWriter.addDocument(document);
  } catch (RuntimeException re) {
    // expected: the test point throws intentionally
    hitFailure = true;
  }
  if (!hitFailure) {
    fail("did not hit exception");
  }
  mockWriter.close();
  _TestUtil.checkIndex(directory);
  directory.close();
}
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// LUCENE-1222: the doBeforeFlush/doAfterFlush hooks must fire both for a
// document-triggered flush and for a flush triggered by deletes.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// reset the flags, then flush again via a delete + commit
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
// one doc was added then deleted: maxDoc 1, numDocs 0
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// LUCENE-1214: flat array of (input, expected) pairs — even indices are raw
// inputs containing invalid UTF-16, odd indices are what the index must
// contain after the invalid sequences are replaced with U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
// the interior \ud917\udc17 below is a VALID pair and is preserved
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// LUCENE-510: invalid UTF-16 (unpaired/backwards surrogates) must be
// replaced with U+FFFD both in the inverted index and in stored fields.
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
// even entries of utf8Data are inputs; odd entries the expected fixups
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// LUCENE-510: round-trips every valid Unicode code point through
// UnicodeUtil's UTF16<->UTF8 converters and checks agreement with the
// JDK's own UTF-8 codec.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// supplementary code point: encode as a surrogate pair
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// byte-for-byte agreement with the JDK's UTF-8 encoder
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
/** Returns a uniformly random int in [0, bound). */
private int nextInt(int bound) {
  return random.nextInt(bound);
}
/** Returns a uniformly random int in [lo, hi). */
private int nextInt(int lo, int hi) {
  final int span = hi - lo;
  return lo + nextInt(span);
}
// Fills buffer[offset..offset+count) with random UTF-16 content and writes
// the chars a decoder should produce into "expected". Returns true if an
// illegal (unpaired) surrogate was emitted, in which case the matching
// expected entry is U+FFFD.
// If offset lands on a low surrogate, offset is rewound by one so the loop
// also regenerates the preceding high surrogate instead of splitting a
// previously generated valid pair (len was computed from the original
// offset, so the loop then covers one extra position).
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which kind of char to emit at position i
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// a decoder must replace the lone surrogate with U+FFFD
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// LUCENE-510: random 20-char UTF-16 strings (possibly containing illegal
// surrogates) must round-trip through UnicodeUtil, and legal inputs must
// match the JDK's UTF-8 encoding byte-for-byte.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// legal input: encoding must equal the JDK's
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// decoding back must yield the expected (fixed-up) chars
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// LUCENE-510: exercises UTF8toUTF16's incremental mode — decoding only from
// the first byte that changed vs. the previous iteration must give the same
// chars as a full decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
// keep a random char prefix from the previous string, unless it had
// illegal surrogates (then regenerate from scratch)
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// find how many leading bytes are shared with the previous encoding
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// incremental decode starting at the first differing byte
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// a full decode must agree as well
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// LUCENE-1274: prepareCommit() must not make changes visible to readers;
// only the subsequent commit() does.
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// a reader opened after prepareCommit still sees nothing
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// only after commit() does a reopen observe the 23 docs
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
// still 23 visible while the second commit is only prepared
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// LUCENE-1274: rollback() after prepareCommit() must discard the pending
// commit — readers never observe the rolled-back docs.
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// rollback may rewrite files already written by prepareCommit
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// the prepared-but-rolled-back 23 docs must never become visible
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
// only the 17 post-rollback docs get committed
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
/** LUCENE-1274: prepareCommit() then commit() with no pending changes must
 * still leave a readable (empty) index. */
public void testPrepareCommitNoChanges() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.prepareCommit();
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  assertEquals(0, r.numDocs());
  r.close();
  directory.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
/**
 * LUCENE-1219: a binary field created with an offset/length must round-trip:
 * before indexing the full array plus offset/length are kept; after storage
 * only the 17-byte slice is retained.
 *
 * BUGFIX: the retrieval check was {@code assertEquals(17, b.length, 17)} —
 * the third argument selected the (expected, actual, delta) floating-point
 * overload with a delta of 17, so lengths 0..34 all passed. It now asserts
 * the exact length.
 */
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for (int i = 0; i < 50; i++)
    b[i] = (byte) (i + 77);
  Document doc = new Document();
  // field referencing bytes [10, 10+17) of b
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  assertEquals(17, b.length);
  // first stored byte is b[10] of the original array: 10 + 77 = 87
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
// LUCENE-1382: user data passed to commit(Map) must be retrievable via
// IndexReader.getCommitUserData and survive a later optimize/close.
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// the user data is visible both statically and via an open reader
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// user data survives an optimize + the implicit commit on close
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
// LUCENE-2529: the analyzer's positionIncrementGap must still be applied
// when the first instance of a multi-valued field produced no tokens.
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// first field instance is empty; second contributes two tokens
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
// the gap (100) was applied even though the first instance was empty
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1442
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
/** LUCENE-1468: opening an IndexWriter with create=true must not delete
 * files in the directory that are not part of the index. */
public void testOtherFiles() throws Throwable {
  Directory directory = newDirectory();
  try {
    // Drop a non-index file into the directory...
    IndexOutput output = directory.createOutput("myrandomfile");
    output.writeByte((byte) 42);
    output.close();
    // ...then create a fresh index over it.
    new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    // The stray file must survive index creation.
    assertTrue(directory.fileExists("myrandomfile"));
  } finally {
    directory.close();
  }
}
// Adds external indexes (a reader of a second index plus its clone) into a
// writer that already has committed segments; named testDeadlock because
// this sequence is the scenario being checked for hangs — TODO confirm the
// original issue reference.
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
// import the 1-doc index twice: once via the reader, once via its clone
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 original docs + 2 imported = 5
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
/**
 * Background indexer that repeatedly builds/commits/checks a small index
 * while the main test thread fires Thread.interrupt() at it. Interrupts
 * surface as ThreadInterruptedException; anything else is a failure.
 * Thread-safety: flags are volatile and written by one side, read by the other.
 */
private class IndexerThreadInterrupt extends Thread {
volatile boolean failed;
volatile boolean finish;
// Set true only after one full un-interrupted pass; see comment below.
volatile boolean allowInterrupt = false;
@Override
public void run() {
// LUCENE-2239: won't work with NIOFS/MMAP
Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter w = null;
while(!finish) {
try {
// Inner loop runs until an interrupt (or test shutdown) breaks out.
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
// 100 adds with a commit every 10 docs; small buffers force flushes/merges.
for(int i=0;i<100;i++) {
w.addDocument(doc);
if (i%10 == 0) {
w.commit();
}
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
}
} catch (ThreadInterruptedException re) {
// Expected path: Lucene wraps the InterruptedException.
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
if (!failed) {
// clear interrupt state:
Thread.interrupted();
try {
// Discard whatever uncommitted state the interrupted writer holds.
w.rollback();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
try {
// Index must still be consistent after all those interrupts.
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
try {
dir.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
 * Drives IndexerThreadInterrupt: issues up to 100 interrupts (one per 10ms,
 * only once the child signals it is safe via allowInterrupt), then asks it
 * to finish and asserts it never recorded a failure.
 */
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
/**
 * LUCENE-1727: stored-only fields must come back from a retrieved document
 * in the exact order they were added, including repeated field names.
 */
public void testStoredFieldsOrder() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// Deliberately interleaved: zzz, aaa, zzz — order, not name, must win.
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
/**
 * Indexes a token containing U+FFFF and verifies it round-trips: the term
 * "a\uffffb" must be findable (docFreq == 1) and CheckIndex must pass.
 */
public void testEmbeddedFFFF() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
/**
 * Writes a single empty document with compound files disabled and verifies
 * the resulting index passes CheckIndex.
 */
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Capture the writer's info stream in memory so it doesn't pollute stdout;
// the captured bytes are not asserted on.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
/**
 * LUCENE-2095: with multiple threads each doing add+commit+reopen, every
 * commit must make the just-added term visible to a reopened reader.
 * Each thread uses a distinct term prefix (finalI) so threads only verify
 * their own documents.
 */
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
// Bail out quickly if another thread already failed.
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// reopen() must return a NEW reader that sees the commit.
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
// Returns a uniformly random int drawn from the inclusive range [start, end].
private final int getInt(Random r, int start, int end) {
final int span = 1 + end - start;
return start + r.nextInt(span);
}
/**
 * Walks all terms of field "f" and asserts they (a) come back in strictly
 * increasing BytesRef order, (b) were all actually added (subset of
 * allTerms; equal to it when isTop), and (c) each seen term can be
 * re-found via TermsEnum.seek.
 */
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
// Starts empty, which compares less than any non-empty term.
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Strictly increasing: enforces codepoint sort order.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
// A top-level (composite) reader must see every added term.
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as its hex codepoint for readable failure messages,
// e.g. 'A' -> "U+41" (lowercase hex, no zero padding — same as
// Integer.toHexString).
private final String asUnicodeChar(char c) {
return String.format("U+%x", (int) c);
}
// Describes a 1- or 2-char term as its unicode codepoint(s) — used to make
// assertion messages readable for surrogate-pair terms.
private final String termDesc(String s) {
assertTrue(s.length() <= 2);
if (s.length() == 2) {
return asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
return asUnicodeChar(s.charAt(0));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
/**
 * Sets a small term-index interval (2) and indexes one document containing
 * 300 distinct terms (more than the 256 default interval), then verifies
 * every term enumerates exactly one matching doc.
 */
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
// One doc => single sub-reader holds the whole index.
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
/**
 * Verifies deleteUnusedFiles(): a CFS segment still held open by a reader
 * must not be deleted, and is removed only once the reader releases it —
 * automatically on NRT-reader close (iter 0) or by an explicit
 * deleteUnusedFiles() call (iter 1).
 */
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed"); left as-is since JUnit
// discovers tests by the "test" prefix and external tooling may reference
// the current name.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
/**
 * IndexWriter that counts flushes via the doAfterFlush() hook.
 * NOTE(review): flushCount is a plain int read from the test thread —
 * assumes flushes happen on the calling thread; confirm before reusing
 * in a multi-threaded test.
 */
private static class FlushCountingIndexWriter extends IndexWriter {
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
/**
 * Alternates between adding docs and deleting by term until a flush
 * triggers, asserting each phase absorbs a minimum amount of work before
 * flushing under a 0.5MB RAM buffer.
 * NOTE: the leading underscore keeps JUnit's "test" prefix discovery from
 * running this — it is effectively disabled.
 */
public void _testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
/**
 * Post-LUCENE-2386 semantics: opening a writer over a fresh directory
 * creates no commit until close(), at which point a new index gets exactly
 * one; listCommits over the still-empty index must throw.
 */
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
try {
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
/**
 * Regression for LUCENE-2386: constructing an IndexWriter over an FSDir
 * opened with NoLockFactory must succeed (listAll() in IndexFileDeleter
 * used to fail).
 */
public void testEmptyFSDirWithNoLock() throws Exception {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
/**
 * Randomized round-trip of stored fields: builds a shadow map id -> expected
 * Document while adding/deleting, then (before and after optimize) searches
 * each surviving id and compares every stored field value against the map.
 */
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setInfoStream(System.out);
//w.w.setUseCompoundFile(false);
if (VERBOSE) {
w.w.setInfoStream(System.out);
}
final int docCount = 200*RANDOM_MULTIPLIER;
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
// Expected state: id -> the Document we believe is in the index.
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
for(int field: fieldIDs) {
final String s;
// ~3/4 of the time, store a random unicode value for this field.
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc " + delID);
}
// Keep the shadow map in sync with the deletion.
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
// x=0: as-is; x=1: after optimize() at the end of the first pass.
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.optimize();
}
}
w.close();
dir.close();
}
/**
 * Indexes documents that request NO term vectors and asserts the segment
 * files contain none of the three term-vector extensions afterwards.
 * The tiny RAM buffer (0.01MB) plus the large BIG value forces flushing.
 */
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
dir.close();
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// Merge resolution (MINE side, empty): the other branch's
// FailOnlyInCloseDocStore fault injector extended MockRAMDirectory.Failure,
// but MockRAMDirectory is not imported by this file (it uses
// MockDirectoryWrapper/RAMDirectory), so the class was removed.
// Merge resolution (MINE side, empty): removed the other branch's
// FailOnlyOnAbortOrFlush fault injector — it depends on
// MockRAMDirectory.Failure, and MockRAMDirectory is not available in this
// file's imports.
<<<<<<< MINE
=======
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
>>>>>>> YOURS
// Verifies maxDoc()/numDocs() bookkeeping across adds, deletes, re-opens,
// optimize (which expunges deletes) and re-creation of the index.
public void testDocCount() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = null;
  IndexReader reader = null;
  int i;
  // Temporarily lower the default write-lock timeout; the finally block
  // restores it even if writer construction throws.
  long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
  try {
    IndexWriterConfig.setDefaultWriteLockTimeout(2000);
    assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  } finally {
    IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
  }
  // add 100 documents
  for (i = 0; i < 100; i++) {
    addDoc(writer);
  }
  assertEquals(100, writer.maxDoc());
  writer.close();
  // delete 40 documents
  reader = IndexReader.open(dir, false);
  for (i = 0; i < 40; i++) {
    reader.deleteDocument(i);
  }
  reader.close();
  // test doc count before segments are merged/index is optimized:
  // maxDoc still counts deleted docs
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(100, writer.maxDoc());
  writer.close();
  reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(60, reader.numDocs());
  reader.close();
  // optimize the index and check that the new doc count is correct
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(100, writer.maxDoc());
  assertEquals(60, writer.numDocs());
  writer.optimize();
  // after optimize the 40 deleted docs are gone, so maxDoc drops to 60
  assertEquals(60, writer.maxDoc());
  assertEquals(60, writer.numDocs());
  writer.close();
  // check that the index reader gives the same numbers.
  reader = IndexReader.open(dir, true);
  assertEquals(60, reader.maxDoc());
  assertEquals(60, reader.numDocs());
  reader.close();
  // make sure opening a new index for create over
  // this existing one works correctly:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  assertEquals(0, writer.maxDoc());
  assertEquals(0, writer.numDocs());
  writer.close();
  dir.close();
}
// Adds one document holding a single analyzed, unstored "content" field.
private void addDoc(IndexWriter writer) throws IOException
{
  final Document d = new Document();
  final Field contentField = new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED);
  d.add(contentField);
  writer.addDocument(d);
}
// Adds one document whose stored "content" and "id" fields embed the
// given index number.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  final Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Asserts the directory contains no unreferenced files: constructing an
// IndexFileDeleter (which removes files not referenced by the current
// commit point) must leave the directory's file listing unchanged.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  String[] startFiles = dir.listAll();
  SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  // side effect only: the deleter prunes unreferenced files on construction
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
  String[] endFiles = dir.listAll();
  // sort so the comparison is independent of listing order
  Arrays.sort(startFiles);
  Arrays.sort(endFiles);
  if (!Arrays.equals(startFiles, endFiles)) {
    fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
  }
}
// optimize(3) must leave at most 3 segments — or exactly the original
// segment count when the index already had fewer — across many index sizes.
public void testOptimizeMaxNumSegments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  final Document doc = new Document();
  doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  for(int numDocs=38;numDocs<500;numDocs += 38) {
    LogDocMergePolicy ldmp = new LogDocMergePolicy();
    ldmp.setMinMergeDocs(1);
    ldmp.setMergeFactor(5);
    // small maxBufferedDocs forces many small segments
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
            ldmp));
    for(int j=0;j<numDocs;j++)
      writer.addDocument(doc);
    writer.close();
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    ldmp = new LogDocMergePolicy();
    ldmp.setMergeFactor(5);
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer()).setMergePolicy(ldmp));
    // cap the optimized index at 3 segments
    writer.optimize(3);
    writer.close();
    sis = new SegmentInfos();
    sis.read(dir);
    final int optSegCount = sis.size();
    if (segCount < 3)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(3, optSegCount);
  }
  dir.close();
}
// Same contract as testOptimizeMaxNumSegments but with a concurrent merge
// scheduler and repeated optimize(7) calls on a growing index.
public void testOptimizeMaxNumSegments2() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  final Document doc = new Document();
  doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
  LogDocMergePolicy ldmp = new LogDocMergePolicy();
  ldmp.setMinMergeDocs(1);
  ldmp.setMergeFactor(4);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
  for(int iter=0;iter<10;iter++) {
    for(int i=0;i<19;i++)
      writer.addDocument(doc);
    // commit + wait so the segment count read below is stable
    writer.commit();
    writer.waitForMerges();
    writer.commit();
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    final int segCount = sis.size();
    // cap the index at 7 segments
    writer.optimize(7);
    writer.commit();
    writer.waitForMerges();
    sis = new SegmentInfos();
    sis.read(dir);
    final int optSegCount = sis.size();
    if (segCount < 7)
      assertEquals(segCount, optSegCount);
    else
      assertEquals(7, optSegCount);
  }
  writer.close();
  dir.close();
}
/**
 * Make sure optimize doesn't use any more than 4X
 * starting index size as its temporary free space
 * required (matching the 4X bound asserted below).
 */
// Measures peak disk usage during optimize and asserts it stays within the
// 4X-starting-size bound enforced below.
public void testOptimizeTempSpaceUsage() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  if (VERBOSE) {
    System.out.println("TEST: config1=" + writer.getConfig());
  }
  for(int j=0;j<500;j++) {
    addDocWithIndex(writer, j);
  }
  final int termIndexInterval = writer.getConfig().getTermIndexInterval();
  // force one extra segment w/ different doc store so
  // we see the doc stores get merged
  writer.commit();
  addDocWithIndex(writer, 500);
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: start disk usage");
  }
  // record the starting index size so the bound below can be relative
  long startDiskUsage = 0;
  String[] files = dir.listAll();
  for(int i=0;i<files.length;i++) {
    startDiskUsage += dir.fileLength(files[i]);
    if (VERBOSE) {
      System.out.println(files[i] + ": " + dir.fileLength(files[i]));
    }
  }
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  // Important to use same term index interval else a
  // smaller one here could increase the disk usage and
  // cause a false failure:
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
  writer.setInfoStream(VERBOSE ? System.out : null);
  writer.optimize();
  writer.close();
  long maxDiskUsage = dir.getMaxUsedSizeInBytes();
  assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
      maxDiskUsage <= 4*startDiskUsage);
  dir.close();
}
/**
 * Joins the given file names for readable failure messages, separating
 * entries with a newline plus indent.
 *
 * @param l names to join; may be empty
 * @return the elements of {@code l} separated by "\n " ("" when empty)
 */
static String arrayToString(String[] l) {
  // String.join replaces the original += loop, avoiding O(n^2) string
  // concatenation while producing the identical result.
  return String.join("\n ", l);
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
// Opening an index for CREATE while a reader still holds the old commit
// must work, and the already-open reader must keep seeing its snapshot.
public void testCreateWithReader() throws IOException {
  Directory dir = newDirectory();
  // add one document & close writer
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  writer.close();
  // now open reader:
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals("should be one document", reader.numDocs(), 1);
  // now open index for create:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  assertEquals("should be zero documents", writer.maxDoc(), 0);
  addDoc(writer);
  writer.close();
  // the old reader still sees its original point-in-time snapshot
  assertEquals("should be one document", reader.numDocs(), 1);
  IndexReader reader2 = IndexReader.open(dir, true);
  assertEquals("should be one document", reader2.numDocs(), 1);
  reader.close();
  reader2.close();
  dir.close();
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
// Simulates a writer that crashed mid-write of the segments_N file (by
// copying it minus its last byte under the next generation's name) and
// verifies the index still opens, falls back gracefully, and accepts docs.
public void testSimulatedCrashedWriter() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = null;
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();
  long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
  // Make the next segments file, with last byte
  // missing, to simulate a writer that crashed while
  // writing segments file:
  String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
  String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
      "",
      1+gen);
  IndexInput in = dir.openInput(fileNameIn);
  IndexOutput out = dir.createOutput(fileNameOut);
  long length = in.length();
  // copy all but the final byte, leaving a truncated segments file
  for(int i=0;i<length-1;i++) {
    out.writeByte(in.readByte());
  }
  in.close();
  out.close();
  IndexReader reader = null;
  try {
    reader = IndexReader.open(dir, true);
  } catch (Exception e) {
    fail("reader failed to open on a crashed index");
  }
  reader.close();
  try {
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  } catch (Exception e) {
    e.printStackTrace(System.out);
    fail("writer failed to open on a crashed index");
  }
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();
}
// Make sure that adding a document to a writer that
// has already been closed throws
// AlreadyClosedException:
// Adding a document to a closed IndexWriter must throw
// AlreadyClosedException.
public void testChangesAfterClose() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  writer.close();
  // Any further mutation must now be rejected.
  boolean sawAlreadyClosed = false;
  try {
    addDoc(writer);
  } catch (AlreadyClosedException e) {
    sawAlreadyClosed = true;
  }
  if (!sawAlreadyClosed) {
    fail("did not hit AlreadyClosedException");
  }
  dir.close();
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
/*
 * "Commit on close": docs added by an open writer must stay invisible to
 * searchers and existing readers until the writer is closed.
 */
public void testCommitOnClose() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 14; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  searcher.close();
  IndexReader reader = IndexReader.open(dir, true);
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  for(int i=0;i<3;i++) {
    for(int j=0;j<11;j++) {
      addDoc(writer);
    }
    // while the writer stays open, searchers see only the 14 committed docs
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
    assertTrue("reader should have still been current", reader.isCurrent());
  }
  // Now, close the writer:
  writer.close();
  assertFalse("reader should not be current now", reader.isCurrent());
  // after close, all 14 + 3*11 = 47 docs are visible
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader did not see changes after writer was closed", 47, hits.length);
  searcher.close();
  reader.close();
  dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
/*
 * "Commit on close" + rollback: uncommitted adds/deletes must vanish on
 * rollback(), leave no unreferenced files, and the index must remain
 * usable for further writing afterwards.
 */
public void testCommitOnCloseAbort() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for (int i = 0; i < 14; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  searcher.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  for(int j=0;j<17;j++) {
    addDoc(writer);
  }
  // Delete all docs:
  writer.deleteDocuments(searchTerm);
  // uncommitted adds/deletes are invisible to a fresh searcher
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
  searcher.close();
  // Now, close the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
  // rollback discarded everything since the last commit
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("saw changes after writer.abort", 14, hits.length);
  searcher.close();
  // Now make sure we can re-open the index, add docs,
  // and all is good:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
  // On abort, writer in fact may write to the same
  // segments_N file:
  dir.setPreventDoubleWrite(false);
  for(int i=0;i<12;i++) {
    for(int j=0;j<17;j++) {
      addDoc(writer);
    }
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
  }
  writer.close();
  // 14 original + 12*17 newly committed docs = 218
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("didn't see changes after close", 218, hits.length);
  searcher.close();
  dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
/*
 * Verify the deleter cleans up temp segments created after open that are
 * not referenced by the starting segments file, by bounding max temp disk
 * usage measured via MockDirectoryWrapper.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(10).
          setReaderPooling(false).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<30;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  long startDiskUsage = dir.getMaxUsedSizeInBytes();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
          .setOpenMode(OpenMode.APPEND).
          setMaxBufferedDocs(10).
          setMergeScheduler(new SerialMergeScheduler()).
          setReaderPooling(false).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<1470;j++) {
    addDocWithIndex(writer, j);
  }
  long midDiskUsage = dir.getMaxUsedSizeInBytes();
  dir.resetMaxUsedSizeInBytes();
  writer.optimize();
  writer.close();
  IndexReader.open(dir, true).close();
  long endDiskUsage = dir.getMaxUsedSizeInBytes();
  // Ending index is 50X as large as starting index; due
  // to 3X disk usage normally we allow 150X max
  // transient usage. If something is wrong w/ deleter
  // and it doesn't delete intermediate segments then it
  // will exceed this 150X:
  // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
  assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
      midDiskUsage < 150*startDiskUsage);
  assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
      endDiskUsage < 150*startDiskUsage);
  dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
/*
 * Optimize under "commit on close": an uncommitted optimize rolled back via
 * rollback() must leave the index unoptimized and clean; a committed
 * optimize (writer.close()) must leave it optimized.
 */
public void testCommitOnCloseOptimize() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Must disable throwing exc on double-write: this
  // test uses IW.rollback which easily results in
  // writing to same file more than once
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(10).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<17;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  // optimize but do NOT commit yet
  writer.optimize();
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  // Open a reader before closing (commiting) the writer:
  IndexReader reader = IndexReader.open(dir, true);
  // Reader should see index as unoptimized at this
  // point:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  // Abort the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  if (VERBOSE) {
    System.out.println("TEST: do real optimize");
  }
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  writer.optimize();
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: writer closed");
  }
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized:
  assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
  reader.close();
  dir.close();
}
// An index committed with zero documents must open as an empty, readable
// index — both when freshly created and after re-opening in APPEND mode.
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Round 0 creates the empty index; round 1 re-opens it for append.
  for (int round = 0; round < 2; round++) {
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
    if (round == 1) {
      conf.setOpenMode(OpenMode.APPEND);
    }
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.commit();
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(0, reader.maxDoc());
    assertEquals(0, reader.numDocs());
    reader.close();
  }
  dir.close();
}
// Indexes 100 docs that each use six uniquely-named fields and verifies
// every resulting term is searchable with docFreq == 1.
public void testManyFields() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for(int j=0;j<100;j++) {
    Document doc = new Document();
    // a/b/c carry per-doc values; d/e/f share the constant value "aaa"
    doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for(int j=0;j<100;j++) {
    assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
    assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
    assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
    assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
  }
  reader.close();
  dir.close();
}
// With a near-zero RAM buffer, every added document must trigger a flush,
// observable as a growing file count in the directory.
public void testSmallRAMBuffer() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setRAMBufferSizeMB(0.000001).
          setMergePolicy(newLogMergePolicy(10))
  );
  int lastNumFile = dir.listAll().length;
  for(int j=0;j<9;j++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    int numFile = dir.listAll().length;
    // Verify that with a tiny RAM buffer we see new
    // segment after every doc
    assertTrue(numFile > lastNumFile);
    lastNumFile = numFile;
  }
  writer.close();
  dir.close();
}
/**
* Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
// Stresses the indexing chain's in-RAM data structures with three doc
// shapes: all-unique terms, one highly repeated short term, and very long
// terms — then checks the repeated term's hit count.
public void testDiverseDocs() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
  for(int i=0;i<3;i++) {
    // First, docs where every term is unique (heavy on
    // Posting instances)
    for(int j=0;j<100;j++) {
      Document doc = new Document();
      for(int k=0;k<100;k++) {
        doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
      }
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs (heavy on byte blocks)
    for(int j=0;j<100;j++) {
      Document doc = new Document();
      doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    // Next, many single term docs where only one term
    // occurs but the terms are very long (heavy on
    // char[] arrays)
    for(int j=0;j<100;j++) {
      StringBuilder b = new StringBuilder();
      String x = Integer.toString(j) + ".";
      for(int k=0;k<1000;k++)
        b.append(x);
      String longTerm = b.toString();
      Document doc = new Document();
      doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
  }
  writer.close();
  // 3 iterations x 100 docs containing "aaa" = 300 expected hits
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
  assertEquals(300, hits.length);
  searcher.close();
  dir.close();
}
// Mixes omitNorms=true/false docs in the same field, both pre- and
// post-flush, and verifies the index stays searchable.
public void testEnablingNorms() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // Enable norms for only 1 doc, pre flush
  for(int j=0;j<10;j++) {
    Document doc = new Document();
    Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    // doc 8 is the only one that keeps norms
    if (j != 8) {
      f.setOmitNorms(true);
    }
    doc.add(f);
    writer.addDocument(doc);
  }
  writer.close();
  Term searchTerm = new Term("field", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(10, hits.length);
  searcher.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
  // Enable norms for only 1 doc, post flush
  for(int j=0;j<27;j++) {
    Document doc = new Document();
    Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    // doc 26 is the only one that keeps norms (past the 10-doc flush point)
    if (j != 26) {
      f.setOmitNorms(true);
    }
    doc.add(f);
    writer.addDocument(doc);
  }
  writer.close();
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(27, hits.length);
  searcher.close();
  IndexReader reader = IndexReader.open(dir, true);
  reader.close();
  dir.close();
}
// One massive doc with 128K occurrences of the term "a": docFreq must be 1
// and the within-doc frequency must be exactly 128*1024.
public void testHighFreqTerm() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
  // Massive doc that has 128 K a's
  StringBuilder b = new StringBuilder(1024*1024);
  // 4096 iterations x 32 a's per iteration = 131072 occurrences
  for(int i=0;i<4096;i++) {
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
    b.append(" a a a a a a a a");
  }
  Document doc = new Document();
  doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.maxDoc());
  assertEquals(1, reader.numDocs());
  Term t = new Term("field", "a");
  assertEquals(1, reader.docFreq(t));
  DocsEnum td = MultiFields.getTermDocsEnum(reader,
      MultiFields.getDeletedDocs(reader),
      "field",
      new BytesRef("a"));
  td.nextDoc();
  assertEquals(128*1024, td.freq());
  reader.close();
  dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
// A Directory that nulls out its LockFactory and implements its own
// private locking via makeLock must still work with IndexWriter.
public void testNullLockFactory() throws IOException {
  final class MyRAMDirectory extends MockDirectoryWrapper {
    private LockFactory myLockFactory;
    MyRAMDirectory(Directory delegate) {
      super(random, delegate);
      // disable the inherited lock factory; all locking goes through
      // the private SingleInstanceLockFactory below
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }
  Directory dir = new MyRAMDirectory(new RAMDirectory());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  searcher.close();
  // re-creating the index exercises the write lock path again
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  searcher.close();
  dir.close();
}
// flush(triggerMerge=false, ...) must write segments without merging them,
// so 19 docs at maxBufferedDocs=2 leave exactly 10 segments.
public void testFlushWithNoMerging() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setMergePolicy(newLogMergePolicy(10))
  );
  Document doc = new Document();
  doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  for(int i=0;i<19;i++)
    writer.addDocument(doc);
  // flush the final partial buffer but do not trigger merges
  writer.flush(false, true);
  writer.close();
  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now
  // have 10 segments
  assertEquals(10, sis.size());
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Flush a segment that has norms/vectors, then add a completely empty doc
// (no norms) and close — the resulting index must pass checkIndex and
// contain both docs.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.setInfoStream(VERBOSE ? System.out : null);
  Document doc = new Document();
  doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(doc);
  // commit so the empty doc lands in a separate flush
  writer.commit();
  if (VERBOSE) {
    System.out.println("\nTEST: now add empty doc");
  }
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
// optimize(false) kicks off a background optimize without waiting; close()
// must wait for it. Pass 0 checks the optimized result; pass 1 verifies a
// segment flushed after the optimize started is NOT swept into it.
public void testBackgroundOptimize() throws IOException {
  Directory dir = newDirectory();
  for(int pass=0;pass<2;pass++) {
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            setMergePolicy(newLogMergePolicy(101))
    );
    Document doc = new Document();
    doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
    for(int i=0;i<200;i++)
      writer.addDocument(doc);
    // doWait=false: returns immediately, optimize runs in background
    writer.optimize(false);
    if (0 == pass) {
      writer.close();
      IndexReader reader = IndexReader.open(dir, true);
      assertTrue(reader.isOptimized());
      reader.close();
    } else {
      // Get another segment to flush so we can verify it is
      // NOT included in the optimization
      writer.addDocument(doc);
      writer.addDocument(doc);
      writer.close();
      IndexReader reader = IndexReader.open(dir, true);
      assertTrue(!reader.isOptimized());
      reader.close();
      SegmentInfos infos = new SegmentInfos();
      infos.read(dir);
      // one optimized segment + the late-flushed one
      assertEquals(2, infos.size());
    }
  }
  dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
// Indexing a single empty field with term vectors enabled must not raise
// NullPointerException (see javadoc above).
public void testBadSegment() throws IOException {
  Directory directory = newDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  Field emptyTvField = new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES);
  doc.add(emptyTvField);
  writer.addDocument(doc);
  writer.close();
  directory.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
// Indexing (and merging, via mergeFactor=2) must work while the calling
// thread runs at MAX_PRIORITY; the original priority is always restored.
public void testMaxThreadPriority() throws IOException {
  int pri = Thread.currentThread().getPriority();
  try {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMaxBufferedDocs(2);
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter iw = new IndexWriter(dir, conf);
    Document document = new Document();
    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
        Field.TermVector.YES));
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    // 4 docs at maxBufferedDocs=2 + mergeFactor=2 forces flushes and merges
    for(int i=0;i<4;i++)
      iw.addDocument(document);
    iw.close();
    dir.close();
  } finally {
    // restore the thread priority even if the test body throws
    Thread.currentThread().setPriority(pri);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain all pending merges, checking each incoming segment first.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// Java-language assert (tests run with -ea): no segment selected for
// merging may have reached the 20-doc limit configured by the caller.
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
// Indexes 177 docs with MyMergeScheduler installed; that scheduler asserts
// no merged segment ever reaches the maxMergeDocs limit (20) set here.
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1072
// An analyzer that throws IOException mid-stream must only lose the
// document being added; later adds succeed and the index stays intact.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
// Fail on the 6th token produced by this filter.
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
// expected: the analyzer threw while tokenizing the first doc
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
// docFreq of 3 includes the failed (deleted) doc — "aa" was tokenized
// before the exception point.
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
// "gg" appears only after the failure point, so it was never indexed.
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// Iterates 20 rounds, varying the per-doc field schema (stored/unstored,
// empty/non-empty), deleting one doc each round and periodically
// optimizing; the index must stay healthy throughout.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
// Every 4th round, optimize with compound files disabled.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// close(false) must abort in-flight merges and racing addDocument calls
// cleanly: the surviving index stays readable and re-openable for append.
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for(int pass=0;pass<2;pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setOpenMode(OpenMode.CREATE).
        setMaxBufferedDocs(2).
        // have to use compound file to prevent running out of
        // descriptors when newDirectory returns a file-system
        // backed directory:
        setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for(int iter=0;iter<10;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for(int j=0;j<199;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while(!done) {
            for(int i=0;i<100;i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // expected once close(false) completes
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                // anything else is a real failure — record it for the
                // main thread to rethrow
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // BUGFIX: this check previously ran BEFORE t1.start(), where the
      // list was necessarily empty; rethrow any unexpected failure the
      // indexing thread recorded, after it has finished.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid use of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Wait until at least one indexer thread has made progress before
// closing out from under them.
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Close without waiting for merges while indexer threads still run.
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Build a very long field value: 10000 "a" tokens followed by one "x".
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    sb.append(" a");
  }
  sb.append(" x");
  Document bigDoc = new Document();
  bigDoc.add(new Field("field", sb.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(bigDoc);
  writer.close();
  // The trailing "x" token must have been indexed (no truncation).
  IndexReader reader = IndexReader.open(dir, true);
  Term t = new Term("field", "x");
  assertEquals(1, reader.docFreq(t));
  reader.close();
  dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// Readers must not see buffered docs until an explicit commit().
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Opened against the last commit: sees none of the 23 pending docs.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopen() picks up the new commit; the old reader is unchanged.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
// With an explicit maxFieldLength of 100000 (larger than the 10001
// tokens added), the final token must not be truncated away.
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
Document doc = new Document();
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // CLEANUP: removed a redundant duplicate "document = new Document()"
  // dead store that immediately overwrote the first assignment.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();
  // Delete two docs via a reader, then expunge through a fresh writer.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // After expunging, maxDoc shrinks to the live doc count.
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  // CLEANUP: removed a redundant duplicate "document = new Document()"
  // dead store that immediately overwrote the first assignment.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc, then expunge with a small merge factor so
  // many adjacent merges are needed.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  // CLEANUP: removed a redundant duplicate "document = new Document()"
  // dead store that immediately overwrote the first assignment.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  // expungeDeletes(false): kick off but don't wait; close() waits.
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179: an empty field name must be accepted without error
public void testEmptyFieldName() throws IOException {
  Directory directory = newDirectory();
  IndexWriter indexWriter = new IndexWriter(
      directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  // Field whose name is the empty string:
  document.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  indexWriter.addDocument(document);
  indexWriter.close();
  directory.close();
}
// Test writer subclass exposing the flush hooks and a switchable failure
// point inside DocumentsWriterPerThread initialization.
private static final class MockIndexWriter extends IndexWriter {
  public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }
  // BUGFIX: this field was referenced below and assigned by callers
  // (w.doFail = true) but never declared — the class did not compile.
  boolean doFail;
  boolean afterWasCalled;
  boolean beforeWasCalled;
  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }
  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }
  @Override
  boolean testPoint(String name) {
    // When armed, fail exactly at DWPT init to exercise error paths.
    if (doFail && name.equals("DocumentsWriterPerThread.init start"))
      throw new RuntimeException("intentionally failing");
    return true;
  }
}
// Arms MockIndexWriter's failure point so DWPT init throws; the failed
// add must leave the index consistent (checkIndex passes).
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// Both flush hooks (doBeforeFlush/doAfterFlush) must fire when commit()
// flushes added docs, and again when it flushes deletes.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset the flags, then trigger another flush via a delete.
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// Pairs of {input containing invalid UTF-16, expected indexed/stored
// form}: malformed surrogates must come back as U+FFFD replacements.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Indexes strings with malformed UTF-16 (even entries of utf8Data) and
// verifies they are indexed and stored as the expected replacement
// forms (odd entries).
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point through UnicodeUtil's
// UTF16->UTF8->UTF16 conversions and cross-checks the JDK's encoder.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode a supplementary code point as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte agreement with the JDK's UTF-8 encoder:
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
/** Returns a pseudo-random int in {@code [0, lim)} from the test's shared random. */
private int nextInt(int lim) {
return random.nextInt(lim);
}
/** Returns a pseudo-random int in {@code [start, end)}. */
private int nextInt(int start, int end) {
  final int span = end - start;
  return start + nextInt(span);
}
/**
 * Fills {@code buffer[offset..offset+count)} with random UTF-16 data and
 * writes into {@code expected} the form the chars should take after a
 * UTF-16 -> UTF-8 -> UTF-16 round trip (unpaired surrogates become U+FFFD).
 *
 * @return true if an illegal (unpaired) surrogate was inserted
 */
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
// NOTE(review): len is computed before this possible decrement, so the
// decrement widens the filled region by one char rather than shifting it.
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII range
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The round trip replaces the lone surrogate with U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Random 20-char strings, some containing illegal unpaired surrogates:
// for legal input the UTF-8 bytes must match the JDK encoder, and
// decoding back must always yield the expected (replacement) chars.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Only fully-legal input can be compared against the JDK encoder.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Incremental decoding: keep a random shared prefix from the previous
// iteration, decode starting at the first changed byte, and verify the
// result matches both `expected` and a from-scratch decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Locate the first byte that differs from the previous iteration.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// A full (non-incremental) decode must agree as well.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// Two-phase commit: prepareCommit() must not make pending docs visible
// to new readers until commit() completes.
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// A reader opened between prepare and commit still sees nothing.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// rollback() after prepareCommit() must discard the pending commit and
// all buffered docs.
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Disable the double-write check — presumably the rollback path
// re-creates files of the abandoned commit.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// Even after reopen, nothing from the rolled-back commit is visible.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: prepareCommit() followed by commit() on a brand-new
// (empty) writer must produce a readable, empty index.
public void testPrepareCommitNoChanges() throws IOException {
  Directory directory = newDirectory();
  IndexWriter writer = new IndexWriter(
      directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.prepareCommit();
  writer.commit();
  writer.close();
  IndexReader reader = IndexReader.open(directory, true);
  assertEquals(0, reader.numDocs());
  reader.close();
  directory.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
// Stores a binary field as a slice (offset 10, length 17) of a larger
// array and verifies that exactly that slice round-trips.
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  Field f = new Field("binary", b, 10, 17);
  // Before indexing, the field still exposes the full backing array
  // plus the offset/length describing the slice.
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: was assertEquals(17, b.length, 17) — the stray third
  // argument selected the (expected, actual, delta) overload with a
  // delta of 17, so any length from 0 to 34 would have passed.
  assertEquals(17, b.length);
  // First stored byte is the one at original offset 10: 77 + 10 = 87.
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
// User data passed to commit(Map) must be retrievable both statically
// (IndexReader.getCommitUserData(dir)) and from an open reader, and
// must survive a later optimize().
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// optimize() must carry the user data forward to the new commit.
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
// An empty first field instance must still advance positions by the
// analyzer's positionIncrementGap before the next instance's tokens.
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
// Gap applied even though the first field instance was empty.
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
  Directory dir = newDirectory();
  try {
    // Drop a non-index file into the directory.
    IndexOutput out = dir.createOutput("myrandomfile");
    out.writeByte((byte) 42);
    out.close();
    // Creating (and immediately closing) a writer must leave it untouched.
    new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
  }
}
/** Regression test: addIndexes of a reader and its clone must not deadlock
 *  and must merge all documents into the target index. */
public void testDeadlock() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
  Document doc = new Document();
  doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  for (int docNum = 0; docNum < 3; docNum++) {
    writer.addDocument(doc);
  }
  writer.commit();
  // index has 2 segments
  Directory dir2 = newDirectory();
  IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer2.addDocument(doc);
  writer2.close();
  IndexReader r1 = IndexReader.open(dir2, true);
  IndexReader r2 = (IndexReader) r1.clone();
  // Pull the single-doc index in twice (a reader and its clone): 3 + 1 + 1 docs.
  writer.addIndexes(r1, r2);
  writer.close();
  IndexReader r3 = IndexReader.open(dir, true);
  assertEquals(5, r3.numDocs());
  r3.close();
  r1.close();
  r2.close();
  dir2.close();
  dir.close();
}
/** Worker thread that indexes in a loop and tolerates being interrupted at
 *  arbitrary points; any unexpected exception or index corruption sets
 *  {@link #failed}. The driver only interrupts once {@link #allowInterrupt}
 *  flips true, to avoid class-loader interrupt weirdness (see run()). */
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;
  volatile boolean finish;
  volatile boolean allowInterrupt = false;
  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while(!finish) {
      try {
        while(true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = new IndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for(int i=0;i<100;i++) {
            w.addDocument(doc);
            if (i%10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        // Expected path: an interrupt surfaces as ThreadInterruptedException.
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      // BUGFIX: w may still be null if this thread never reached the first
      // IndexWriter construction before finish/failure; guard against an
      // NPE here masking the real test outcome.
      if (w != null) {
        try {
          w.rollback();
        } catch (IOException ioe) {
          throw new RuntimeException(ioe);
        }
      }
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
/** Repeatedly interrupts an indexing thread and verifies it recovers
 *  without deadlocking or corrupting the index. */
public void testThreadInterruptDeadlock() throws Exception {
  IndexerThreadInterrupt t = new IndexerThreadInterrupt();
  t.setDaemon(true);
  t.start();
  // Force class loader to load ThreadInterruptedException
  // up front... else we can see a false failure if 2nd
  // interrupt arrives while class loader is trying to
  // init this class (in servicing a first interrupt):
  assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
  // issue 100 interrupts to child thread
  int interruptCount = 0;
  while (interruptCount < 100) {
    Thread.sleep(10);
    if (t.allowInterrupt) {
      interruptCount++;
      t.interrupt();
    }
    if (!t.isAlive()) {
      break;
    }
  }
  t.finish = true;
  t.join();
  assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  doc = reader.document(0);
  // Stored fields must come back in insertion order, not sorted by name.
  final String[][] expected = {
    {"zzz", "a b c"},
    {"aaa", "a b c"},
    {"zzz", "1 2 3"},
  };
  Iterator<Fieldable> it = doc.getFields().iterator();
  for (String[] pair : expected) {
    assertTrue(it.hasNext());
    Field stored = (Field) it.next();
    assertEquals(stored.name(), pair[0]);
    assertEquals(stored.stringValue(), pair[1]);
  }
  assertFalse(it.hasNext());
  reader.close();
  writer.close();
  dir.close();
}
/** A term containing U+FFFF must be indexed and searchable (no clash with
 *  any internal sentinel value). */
public void testEmbeddedFFFF() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
  reader.close();
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
/** Indexing a single empty document must still yield a well-formed index. */
public void testNoDocsIndex() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  LogMergePolicy mergePolicy = (LogMergePolicy) writer.getConfig().getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  // Capture (and discard) the info stream; only successful indexing matters.
  ByteArrayOutputStream infoBytes = new ByteArrayOutputStream(1024);
  writer.setInfoStream(new PrintStream(infoBytes));
  writer.addDocument(new Document());
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
  final int NUM_THREADS = 5;
  final double RUN_SEC = 0.5;
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random, dir);
  _TestUtil.reduceOpenFiles(w.w);
  w.commit();
  final AtomicBoolean failed = new AtomicBoolean();
  Thread[] threads = new Thread[NUM_THREADS];
  final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
  for(int i=0;i<NUM_THREADS;i++) {
    final int finalI = i;
    threads[i] = new Thread() {
      @Override
      public void run() {
        try {
          final Document doc = new Document();
          IndexReader r = IndexReader.open(dir);
          Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
          doc.add(f);
          int count = 0;
          do {
            if (failed.get()) break;
            for(int j=0;j<10;j++) {
              // Each thread indexes terms in its own namespace ("<i>_<count>")
              // so every thread can independently verify its own commits.
              final String s = finalI + "_" + String.valueOf(count++);
              f.setValue(s);
              w.addDocument(doc);
              w.commit();
              // After commit() returns, a reopened reader MUST see the new
              // term -- this is the thread-safety property under test.
              IndexReader r2 = r.reopen();
              assertTrue(r2 != r);
              r.close();
              r = r2;
              assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
            }
          } while(System.currentTimeMillis() < endTime);
          r.close();
        } catch (Throwable t) {
          // Record failure so sibling threads stop, then rethrow.
          failed.set(true);
          throw new RuntimeException(t);
        }
      }
    };
    threads[i].start();
  }
  for(int i=0;i<NUM_THREADS;i++) {
    threads[i].join();
  }
  assertFalse(failed.get());
  w.close();
  dir.close();
}
// Returns a uniformly random int in [start, end]; both bounds are inclusive.
private final int getInt(Random r, int start, int end) {
  final int span = 1 + end - start;
  return start + r.nextInt(span);
}
/**
 * Verifies the "f" field's terms enumerate in strictly increasing BytesRef
 * order, that every enumerated term was actually indexed, and that each
 * seen term can be seek'd to exactly.
 *
 * @param r        reader to check (sub-reader or top-level reader)
 * @param allTerms every term that was indexed
 * @param isTop    when true, r must surface ALL of allTerms; a sub-reader
 *                 may legitimately contain only a subset
 */
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
  TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
  BytesRef last = new BytesRef();
  Set<String> seenTerms = new HashSet<String>();
  while(true) {
    final BytesRef term = terms.next();
    if (term == null) {
      break;
    }
    // Terms must arrive in strictly increasing byte order.
    assertTrue(last.compareTo(term) < 0);
    last.copy(term);
    final String s = term.utf8ToString();
    assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
    seenTerms.add(s);
  }
  if (isTop) {
    assertTrue(allTerms.equals(seenTerms));
  }
  // Test seeking:
  Iterator<String> it = seenTerms.iterator();
  while(it.hasNext()) {
    BytesRef tr = new BytesRef(it.next());
    assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
                 TermsEnum.SeekStatus.FOUND,
                 terms.seek(tr));
  }
}
/** Formats a UTF-16 code unit as "U+&lt;hex&gt;" for readable failure messages. */
private final String asUnicodeChar(char c) {
  final StringBuilder sb = new StringBuilder("U+");
  sb.append(Integer.toHexString(c));
  return sb.toString();
}
/** Describes a 1- or 2-code-unit term, e.g. "U+61" or "U+d800,U+dc00". */
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
/** With a small term index interval, all 300 terms must still be
 *  enumerable, and each term must match exactly document 0. */
public void testIndexDivisor() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
  config.setTermIndexInterval(2);
  IndexWriter writer = new IndexWriter(dir, config);
  // must be > 256
  final int NUM_TERMS = 300;
  StringBuilder sb = new StringBuilder();
  for (int term = 0; term < NUM_TERMS; term++) {
    sb.append(' ').append(term);
  }
  Document doc = new Document();
  doc.add(new Field("field", sb.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader().getSequentialSubReaders()[0];
  TermsEnum termsEnum = reader.fields().terms("field").iterator();
  int termCount = 0;
  while (termsEnum.next() != null) {
    final DocsEnum docs = termsEnum.docs(null, null);
    assertEquals(0, docs.nextDoc());
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
    termCount++;
  }
  assertEquals(NUM_TERMS, termCount);
  reader.close();
  writer.close();
  dir.close();
}
/** deleteUnusedFiles() must never delete files an open reader still holds;
 *  iter 0 exercises an NRT reader, iter 1 a committed index + regular reader. */
public void testDeleteUnusedFiles() throws Exception {
  for(int iter=0;iter<2;iter++) {
    Directory dir = newDirectory();
    LogMergePolicy mergePolicy = newLogMergePolicy(true);
    mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
    IndexWriter w = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMergePolicy(mergePolicy)
    );
    Document doc = new Document();
    doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
    w.addDocument(doc);
    IndexReader r;
    if (iter == 0) {
      // use NRT
      r = w.getReader();
    } else {
      // don't use NRT
      w.commit();
      r = IndexReader.open(dir);
    }
    List<String> files = Arrays.asList(dir.listAll());
    assertTrue(files.contains("_0.cfs"));
    w.addDocument(doc);
    w.optimize();
    if (iter == 1) {
      w.commit();
    }
    IndexReader r2 = r.reopen();
    assertTrue(r != r2);
    files = Arrays.asList(dir.listAll());
    // _0.cfs is superseded by the optimized segment, but r still has it open.
    assertTrue(files.contains("_0.cfs"));
    // optimize created this
    //assertTrue(files.contains("_2.cfs"));
    w.deleteUnusedFiles();
    files = Arrays.asList(dir.listAll());
    // r still holds this file open
    assertTrue(files.contains("_0.cfs"));
    //assertTrue(files.contains("_2.cfs"));
    r.close();
    if (iter == 0) {
      // on closing NRT reader, it calls writer.deleteUnusedFiles
      files = Arrays.asList(dir.listAll());
      assertFalse(files.contains("_0.cfs"));
    } else {
      // now writer can remove it
      w.deleteUnusedFiles();
      files = Arrays.asList(dir.listAll());
      assertFalse(files.contains("_0.cfs"));
    }
    //assertTrue(files.contains("_2.cfs"));
    w.close();
    r2.close();
    dir.close();
  }
}
public void testDeleteUnsedFiles2() throws Exception {
  // Validates that iw.deleteUnusedFiles() also deletes unused index commits
  // in case a deletion policy which holds onto commits is used.
  Directory dir = newDirectory();
  SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setIndexDeletionPolicy(sdp));
  // Make two commits; snapshotting the first one pins it so that
  // KeepOnlyLastCommit cannot delete it when the second lands.
  for (int commitCount = 1; commitCount <= 2; commitCount++) {
    Document doc = new Document();
    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
    writer.commit();
    assertEquals(commitCount, IndexReader.listCommits(dir).size());
    if (commitCount == 1) {
      sdp.snapshot("id");
    }
  }
  // Releasing the snapshot leaves the old commit unreferenced;
  // deleteUnusedFiles should then reap it.
  sdp.release("id");
  writer.deleteUnusedFiles();
  assertEquals(1, IndexReader.listCommits(dir).size());
  writer.close();
  dir.close();
}
/** IndexWriter subclass that counts completed flushes via the
 *  doAfterFlush() hook; used by _testIndexingThenDeleting. */
private static class FlushCountingIndexWriter extends IndexWriter {
  // Incremented once per flush; read by the test to detect flush boundaries.
  int flushCount;
  public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
    super(dir, iwc);
  }
  @Override
  public void doAfterFlush() {
    flushCount++;
  }
}
// NOTE: disabled (leading underscore). Alternates add-until-flush and
// delete-until-flush cycles against a small RAM buffer and asserts each
// cycle takes more than 2500 operations before a flush triggers --
// sensitive to flush accounting, hence kept disabled.
public void _testIndexingThenDeleting() throws Exception {
  final Random r = random;
  Directory dir = newDirectory();
  FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
  w.setInfoStream(VERBOSE ? System.out : null);
  Document doc = new Document();
  doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
  int num = 6 * RANDOM_MULTIPLIER;
  for (int iter = 0; iter < num; iter++) {
    int count = 0;
    final boolean doIndexing = r.nextBoolean();
    if (VERBOSE) {
      System.out.println("TEST: iter doIndexing=" + doIndexing);
    }
    if (doIndexing) {
      // Add docs until a flush is triggered
      final int startFlushCount = w.flushCount;
      while(w.flushCount == startFlushCount) {
        w.addDocument(doc);
        count++;
      }
    } else {
      // Delete docs until a flush is triggered
      final int startFlushCount = w.flushCount;
      while(w.flushCount == startFlushCount) {
        w.deleteDocuments(new Term("foo", ""+count));
        count++;
      }
    }
    assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
  }
  w.close();
  dir.close();
}
public void testNoCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  boolean threw = false;
  try {
    IndexReader.listCommits(dir);
  } catch (IndexNotFoundException expected) {
    // nothing committed yet, so this is the correct outcome
    threw = true;
  }
  assertTrue("listCommits should have thrown an exception over empty index", threw);
  // No changes still should generate a commit, because it's a new index.
  writer.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
  dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.close();
  dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
/** Randomized round-trip of stored fields: builds docs with random field
 *  values (and random deletes), then verifies every surviving doc's stored
 *  fields match what was indexed, before and after optimize. */
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  //w.w.setInfoStream(System.out);
  //w.w.setUseCompoundFile(false);
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      final String s;
      if (rand.nextInt(4) != 3) {
        s = _TestUtil.randomUnicodeString(rand, 1000);
        doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
      } else {
        s = null;
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often
      // BUGFIX: pass the test's seeded Random; the no-arg shuffle uses its
      // own unseeded source and breaks seed-based reproducibility.
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // BUGFIX: failure message previously printed fieldCount instead
          // of the index of the field that actually mismatched.
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      // Second pass re-checks after a forced merge.
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
/** Indexing fields WITHOUT term vectors must not produce any term-vector
 *  files (.tvx/.tvd/.tvf), even across RAM-pressure flushes. */
public void testNoUnwantedTVFiles() throws Exception {
  Directory dir = newDirectory();
  IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
  ((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
  // Large values force flushes given the tiny RAM buffer above.
  final String chunk = "alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
  final String big = chunk + chunk + chunk + chunk;
  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    doc.add(new Field("id", Integer.toString(i)+big, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    doc.add(new Field("str", Integer.toString(i)+big, Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("str2", Integer.toString(i)+big, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("str3", Integer.toString(i)+big, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
    indexWriter.addDocument(doc);
  }
  indexWriter.close();
  _TestUtil.checkIndex(dir);
  assertNoUnreferencedFiles(dir, "no tv files");
  for (String file : dir.listAll()) {
    assertFalse(file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
    assertFalse(file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
    assertFalse(file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
  }
  dir.close();
}
}
// MERGE-NOTE: stray "KeepBothMethods" marker left by merge tooling; a second
// copy of this file's header (package/imports/class declaration) follows and
// must be reconciled before this file can compile.
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
<<<<<<< MINE
=======
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInCloseDocStore(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("finishDocument".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("abort".equals(trace[i].getMethodName()) ||
"finishDocument".equals(trace[i].getMethodName())) {
if (onlyOnce)
doFail = false;
//System.out.println(Thread.currentThread().getName() + ": now fail");
//new Throwable().printStackTrace(System.out);
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
>>>>>>> YOURS
// Verifies maxDoc()/numDocs() accounting across adds, deletes, optimize, and
// re-create: deletes reduce numDocs immediately but maxDoc only after optimize.
public void testDocCount() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = null;
    IndexReader reader = null;
    int i;
    // Temporarily shrink the default write-lock timeout; restore it in finally
    // so other tests are unaffected even if the writer constructor throws.
    long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
    try {
        IndexWriterConfig.setDefaultWriteLockTimeout(2000);
        assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
        writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    } finally {
        IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
    }
    // add 100 documents
    for (i = 0; i < 100; i++) {
        addDoc(writer);
    }
    assertEquals(100, writer.maxDoc());
    writer.close();
    // delete 40 documents
    reader = IndexReader.open(dir, false);
    for (i = 0; i < 40; i++) {
        reader.deleteDocument(i);
    }
    reader.close();
    // test doc count before segments are merged/index is optimized
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    assertEquals(100, writer.maxDoc());
    writer.close();
    reader = IndexReader.open(dir, true);
    assertEquals(100, reader.maxDoc());
    assertEquals(60, reader.numDocs());
    reader.close();
    // optimize the index and check that the new doc count is correct
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    assertEquals(100, writer.maxDoc());
    assertEquals(60, writer.numDocs());
    writer.optimize();
    // optimize compacts away deleted docs, so maxDoc drops to 60
    assertEquals(60, writer.maxDoc());
    assertEquals(60, writer.numDocs());
    writer.close();
    // check that the index reader gives the same numbers.
    reader = IndexReader.open(dir, true);
    assertEquals(60, reader.maxDoc());
    assertEquals(60, reader.numDocs());
    reader.close();
    // make sure opening a new index for create over
    // this existing one works correctly:
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
    assertEquals(0, writer.maxDoc());
    assertEquals(0, writer.numDocs());
    writer.close();
    dir.close();
}
// Adds one unstored, analyzed document with content "aaa" to the writer.
private void addDoc(IndexWriter writer) throws IOException {
    Document document = new Document();
    Field content = new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED);
    document.add(content);
    writer.addDocument(document);
}
// Adds one stored, analyzed document whose "content" and "id" fields embed
// the given index, so individual docs can be searched for later.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
    Document document = new Document();
    document.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
    document.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(document);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Asserts the directory contains no files that IndexFileDeleter would remove:
// runs the deleter against the current commit and compares the before/after
// file listings, failing with a readable diff if anything was deleted.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
    String[] startFiles = dir.listAll();
    SegmentInfos infos = new SegmentInfos();
    infos.read(dir);
    // Constructing the deleter deletes any unreferenced files as a side effect.
    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
    String[] endFiles = dir.listAll();
    // Sort both listings so the comparison is order-independent.
    Arrays.sort(startFiles);
    Arrays.sort(endFiles);
    if (!Arrays.equals(startFiles, endFiles)) {
        fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
    }
}
// Verifies optimize(3) leaves at most 3 segments (or fewer if the index
// already had fewer), across a range of index sizes.
public void testOptimizeMaxNumSegments() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    final Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    for(int numDocs=38;numDocs<500;numDocs += 38) {
        LogDocMergePolicy ldmp = new LogDocMergePolicy();
        ldmp.setMinMergeDocs(1);
        ldmp.setMergeFactor(5);
        // Recreate the index from scratch each iteration (OpenMode.CREATE).
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
            TEST_VERSION_CURRENT, new MockAnalyzer())
            .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
                ldmp));
        for(int j=0;j<numDocs;j++)
            writer.addDocument(doc);
        writer.close();
        // Record segment count before the bounded optimize.
        SegmentInfos sis = new SegmentInfos();
        sis.read(dir);
        final int segCount = sis.size();
        ldmp = new LogDocMergePolicy();
        ldmp.setMergeFactor(5);
        writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
            new MockAnalyzer()).setMergePolicy(ldmp));
        writer.optimize(3);
        writer.close();
        sis = new SegmentInfos();
        sis.read(dir);
        final int optSegCount = sis.size();
        // optimize(3) never merges an already-small index, hence the branch.
        if (segCount < 3)
            assertEquals(segCount, optSegCount);
        else
            assertEquals(3, optSegCount);
    }
    dir.close();
}
// Same contract as testOptimizeMaxNumSegments but with a single long-lived
// writer, a concurrent merge scheduler, and optimize(7) applied repeatedly
// while docs keep accumulating.
public void testOptimizeMaxNumSegments2() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    final Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    LogDocMergePolicy ldmp = new LogDocMergePolicy();
    ldmp.setMinMergeDocs(1);
    ldmp.setMergeFactor(4);
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
    for(int iter=0;iter<10;iter++) {
        for(int i=0;i<19;i++)
            writer.addDocument(doc);
        // Commit and wait so background merges settle before counting segments.
        writer.commit();
        writer.waitForMerges();
        writer.commit();
        SegmentInfos sis = new SegmentInfos();
        sis.read(dir);
        final int segCount = sis.size();
        writer.optimize(7);
        writer.commit();
        writer.waitForMerges();
        sis = new SegmentInfos();
        sis.read(dir);
        final int optSegCount = sis.size();
        // optimize(7) caps the segment count at 7 unless already smaller.
        if (segCount < 7)
            assertEquals(segCount, optSegCount);
        else
            assertEquals(7, optSegCount);
    }
    writer.close();
    dir.close();
}
/**
 * Make sure optimize doesn't use any more than 4X the
 * starting index size as its temporary free space
 * required (the assertion below allows up to 4X).
 */
// Builds a ~501-doc index, then optimizes it while tracking peak disk usage,
// asserting the transient space stays within 4X the starting index size.
public void testOptimizeTempSpaceUsage() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
    if (VERBOSE) {
        System.out.println("TEST: config1=" + writer.getConfig());
    }
    for(int j=0;j<500;j++) {
        addDocWithIndex(writer, j);
    }
    final int termIndexInterval = writer.getConfig().getTermIndexInterval();
    // force one extra segment w/ different doc store so
    // we see the doc stores get merged
    writer.commit();
    addDocWithIndex(writer, 500);
    writer.close();
    if (VERBOSE) {
        System.out.println("TEST: start disk usage");
    }
    // Sum the sizes of all files to get the starting index footprint.
    long startDiskUsage = 0;
    String[] files = dir.listAll();
    for(int i=0;i<files.length;i++) {
        startDiskUsage += dir.fileLength(files[i]);
        if (VERBOSE) {
            System.out.println(files[i] + ": " + dir.fileLength(files[i]));
        }
    }
    dir.resetMaxUsedSizeInBytes();
    dir.setTrackDiskUsage(true);
    // Important to use same term index interval else a
    // smaller one here could increase the disk usage and
    // cause a false failure:
    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
    writer.setInfoStream(VERBOSE ? System.out : null);
    writer.optimize();
    writer.close();
    long maxDiskUsage = dir.getMaxUsedSizeInBytes();
    assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
        maxDiskUsage <= 4*startDiskUsage);
    dir.close();
}
/**
 * Renders file names one per line (separated by "\n ") for readable
 * assertion-failure messages.
 *
 * @param l the names to join; an empty array yields ""
 * @return the joined string
 */
static String arrayToString(String[] l) {
    // StringBuilder avoids the O(n^2) cost of repeated String concatenation.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < l.length; i++) {
        if (i > 0) {
            sb.append("\n ");
        }
        sb.append(l[i]);
    }
    return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows): the already-open reader keeps seeing its
// point-in-time snapshot (1 doc) throughout.
public void testCreateWithReader() throws IOException {
    Directory dir = newDirectory();
    // add one document & close writer
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    addDoc(writer);
    writer.close();
    // now open reader:
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals("should be one document", reader.numDocs(), 1);
    // now open index for create:
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
    assertEquals("should be zero documents", writer.maxDoc(), 0);
    addDoc(writer);
    writer.close();
    // The old reader still sees its original snapshot.
    assertEquals("should be one document", reader.numDocs(), 1);
    IndexReader reader2 = IndexReader.open(dir, true);
    assertEquals("should be one document", reader2.numDocs(), 1);
    reader.close();
    reader2.close();
    dir.close();
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter writer = null;
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    // add 100 documents
    for (int i = 0; i < 100; i++) {
        addDoc(writer);
    }
    // close
    writer.close();
    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
    // Make the next segments file, with last byte
    // missing, to simulate a writer that crashed while
    // writing segments file:
    String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
    String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                               "",
                                                               1+gen);
    IndexInput in = dir.openInput(fileNameIn);
    IndexOutput out = dir.createOutput(fileNameOut);
    long length = in.length();
    // Copy all but the final byte, producing a truncated segments_N.
    for(int i=0;i<length-1;i++) {
        out.writeByte(in.readByte());
    }
    in.close();
    out.close();
    IndexReader reader = null;
    try {
        reader = IndexReader.open(dir, true);
    } catch (Exception e) {
        fail("reader failed to open on a crashed index");
    }
    reader.close();
    try {
        writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
    } catch (Exception e) {
        e.printStackTrace(System.out);
        fail("writer failed to open on a crashed index");
    }
    // add 100 documents
    for (int i = 0; i < 100; i++) {
        addDoc(writer);
    }
    // close
    writer.close();
    // FIX: release the directory like every other test in this class does.
    dir.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
// Verifies that using an IndexWriter after close() throws
// AlreadyClosedException rather than silently succeeding.
public void testChangesAfterClose() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = null;
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    addDoc(writer);
    // close
    writer.close();
    try {
        addDoc(writer);
        fail("did not hit AlreadyClosedException");
    } catch (AlreadyClosedException e) {
        // expected
    }
    dir.close();
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
/*
 * Simple test for "commit on close": open writer then
 * add a bunch of docs, making sure reader does not see
 * these docs until writer is closed.
 */
public void testCommitOnClose() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    for (int i = 0; i < 14; i++) {
        addDoc(writer);
    }
    writer.close();
    Term searchTerm = new Term("content", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir, false);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    searcher.close();
    IndexReader reader = IndexReader.open(dir, true);
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    // Add 33 more docs (3 x 11) without committing; fresh searchers must
    // still see only the original 14.
    for(int i=0;i<3;i++) {
        for(int j=0;j<11;j++) {
            addDoc(writer);
        }
        searcher = new IndexSearcher(dir, false);
        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
        searcher.close();
        assertTrue("reader should have still been current", reader.isCurrent());
    }
    // Now, close the writer:
    writer.close();
    assertFalse("reader should not be current now", reader.isCurrent());
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    // 14 original + 33 added = 47 visible after close commits.
    assertEquals("reader did not see changes after writer was closed", 47, hits.length);
    searcher.close();
    reader.close();
    dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
/*
 * Simple test for "commit on close": open writer, then
 * add a bunch of docs, making sure reader does not see
 * them until writer has closed. Then instead of
 * closing the writer, call abort and verify reader sees
 * nothing was added. Then verify we can open the index
 * and add docs to it.
 */
public void testCommitOnCloseAbort() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
    for (int i = 0; i < 14; i++) {
        addDoc(writer);
    }
    writer.close();
    Term searchTerm = new Term("content", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir, false);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    searcher.close();
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
    for(int j=0;j<17;j++) {
        addDoc(writer);
    }
    // Delete all docs:
    writer.deleteDocuments(searchTerm);
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    searcher.close();
    // Now, close the writer:
    writer.rollback();
    assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
    // After rollback the adds AND the deleteDocuments are discarded.
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("saw changes after writer.abort", 14, hits.length);
    searcher.close();
    // Now make sure we can re-open the index, add docs,
    // and all is good:
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
    // On abort, writer in fact may write to the same
    // segments_N file:
    dir.setPreventDoubleWrite(false);
    for(int i=0;i<12;i++) {
        for(int j=0;j<17;j++) {
            addDoc(writer);
        }
        searcher = new IndexSearcher(dir, false);
        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
        searcher.close();
    }
    writer.close();
    // 14 original + 12*17 added = 218 visible after close.
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("didn't see changes after close", 218, hits.length);
    searcher.close();
    dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
/*
 * Verify that a writer with "commit on close" indeed
 * cleans up the temp segments created after opening
 * that are not referenced by the starting segments
 * file. We check this by using MockDirectoryWrapper to
 * measure max temp disk space used.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
            setMaxBufferedDocs(10).
            setReaderPooling(false).
            setMergePolicy(newLogMergePolicy(10))
    );
    for(int j=0;j<30;j++) {
        addDocWithIndex(writer, j);
    }
    writer.close();
    dir.resetMaxUsedSizeInBytes();
    dir.setTrackDiskUsage(true);
    long startDiskUsage = dir.getMaxUsedSizeInBytes();
    writer = new IndexWriter(
        dir,
        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
            .setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(10).
            setMergeScheduler(new SerialMergeScheduler()).
            setReaderPooling(false).
            setMergePolicy(newLogMergePolicy(10))
    );
    for(int j=0;j<1470;j++) {
        addDocWithIndex(writer, j);
    }
    long midDiskUsage = dir.getMaxUsedSizeInBytes();
    dir.resetMaxUsedSizeInBytes();
    writer.optimize();
    writer.close();
    IndexReader.open(dir, true).close();
    long endDiskUsage = dir.getMaxUsedSizeInBytes();
    // Ending index is 50X as large as starting index; due
    // to 3X disk usage normally we allow 150X max
    // transient usage. If something is wrong w/ deleter
    // and it doesn't delete intermediate segments then it
    // will exceed this 150X:
    // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
    assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
        midDiskUsage < 150*startDiskUsage);
    assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
        endDiskUsage < 150*startDiskUsage);
    dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
/*
 * Verify that calling optimize when writer is open for
 * "commit on close" works correctly both for rollback()
 * and close().
 */
public void testCommitOnCloseOptimize() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    // Must disable throwing exc on double-write: this
    // test uses IW.rollback which easily results in
    // writing to same file more than once
    dir.setPreventDoubleWrite(false);
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setMaxBufferedDocs(10).
            setMergePolicy(newLogMergePolicy(10))
    );
    for(int j=0;j<17;j++) {
        addDocWithIndex(writer, j);
    }
    writer.close();
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
    writer.optimize();
    if (VERBOSE) {
        writer.setInfoStream(System.out);
    }
    // Open a reader before closing (commiting) the writer:
    IndexReader reader = IndexReader.open(dir, true);
    // Reader should see index as unoptimized at this
    // point:
    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
    reader.close();
    // Abort the writer:
    writer.rollback();
    assertNoUnreferencedFiles(dir, "aborted writer after optimize");
    // Open a reader after aborting writer:
    reader = IndexReader.open(dir, true);
    // Reader should still see index as unoptimized:
    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
    reader.close();
    if (VERBOSE) {
        System.out.println("TEST: do real optimize");
    }
    // Second pass: optimize and actually commit via close().
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
    if (VERBOSE) {
        writer.setInfoStream(System.out);
    }
    writer.optimize();
    writer.close();
    if (VERBOSE) {
        System.out.println("TEST: writer closed");
    }
    assertNoUnreferencedFiles(dir, "aborted writer after optimize");
    // Open a reader after aborting writer:
    reader = IndexReader.open(dir, true);
    // Reader should now see the index as optimized:
    assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
    reader.close();
    dir.close();
}
// Committing an empty index (both freshly created and re-opened for append)
// must produce a readable index with zero docs.
public void testIndexNoDocuments() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    writer.commit();
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(0, reader.maxDoc());
    assertEquals(0, reader.numDocs());
    reader.close();
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
    writer.commit();
    writer.close();
    reader = IndexReader.open(dir, true);
    assertEquals(0, reader.maxDoc());
    assertEquals(0, reader.numDocs());
    reader.close();
    dir.close();
}
// Indexes 100 docs, each with 6 uniquely named fields (600 distinct field
// names total), and verifies every term is findable with docFreq == 1.
public void testManyFields() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
    for(int j=0;j<100;j++) {
        Document doc = new Document();
        doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(100, reader.maxDoc());
    assertEquals(100, reader.numDocs());
    for(int j=0;j<100;j++) {
        assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
        assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
        assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
        assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
        assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
        assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
    }
    reader.close();
    dir.close();
}
// With a near-zero RAM buffer every added doc should trigger a flush,
// observable as the directory's file count growing after each add.
public void testSmallRAMBuffer() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
            setRAMBufferSizeMB(0.000001).
            setMergePolicy(newLogMergePolicy(10))
    );
    int lastNumFile = dir.listAll().length;
    for(int j=0;j<9;j++) {
        Document doc = new Document();
        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        int numFile = dir.listAll().length;
        // Verify that with a tiny RAM buffer we see new
        // segment after every doc
        assertTrue(numFile > lastNumFile);
        lastNumFile = numFile;
    }
    writer.close();
    dir.close();
}
/**
* Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
// Stresses the indexing buffers with three distinct document shapes
// (many unique terms, one repeated term, very long terms), then verifies
// the 300 single-term docs are all searchable.
public void testDiverseDocs() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
    for(int i=0;i<3;i++) {
        // First, docs where every term is unique (heavy on
        // Posting instances)
        for(int j=0;j<100;j++) {
            Document doc = new Document();
            for(int k=0;k<100;k++) {
                doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
            }
            writer.addDocument(doc);
        }
        // Next, many single term docs where only one term
        // occurs (heavy on byte blocks)
        for(int j=0;j<100;j++) {
            Document doc = new Document();
            doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
            writer.addDocument(doc);
        }
        // Next, many single term docs where only one term
        // occurs but the terms are very long (heavy on
        // char[] arrays)
        for(int j=0;j<100;j++) {
            StringBuilder b = new StringBuilder();
            String x = Integer.toString(j) + ".";
            for(int k=0;k<1000;k++)
                b.append(x);
            String longTerm = b.toString();
            Document doc = new Document();
            doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
            writer.addDocument(doc);
        }
    }
    writer.close();
    // 3 outer iterations x 100 "aaa ..." docs = 300 hits expected.
    IndexSearcher searcher = new IndexSearcher(dir, false);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
    assertEquals(300, hits.length);
    searcher.close();
    dir.close();
}
// Verifies that enabling norms on only a single doc (both before and after
// a flush boundary) doesn't break searching the field.
public void testEnablingNorms() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
    // Enable norms for only 1 doc, pre flush
    for(int j=0;j<10;j++) {
        Document doc = new Document();
        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
        if (j != 8) {
            f.setOmitNorms(true);
        }
        doc.add(f);
        writer.addDocument(doc);
    }
    writer.close();
    Term searchTerm = new Term("field", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir, false);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals(10, hits.length);
    searcher.close();
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
    // Enable norms for only 1 doc, post flush
    // (27 docs with maxBufferedDocs=10 guarantees the norms-enabled doc
    // lands after at least one flush).
    for(int j=0;j<27;j++) {
        Document doc = new Document();
        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
        if (j != 26) {
            f.setOmitNorms(true);
        }
        doc.add(f);
        writer.addDocument(doc);
    }
    writer.close();
    searcher = new IndexSearcher(dir, false);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals(27, hits.length);
    searcher.close();
    IndexReader reader = IndexReader.open(dir, true);
    reader.close();
    dir.close();
}
// Indexes one massive doc containing 128K occurrences of the term "a"
// and verifies the postings report the full within-doc frequency.
public void testHighFreqTerm() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
    // Massive doc that has 128 K a's
    // (4096 iterations x 32 "a" tokens per iteration = 131072 occurrences).
    StringBuilder b = new StringBuilder(1024*1024);
    for(int i=0;i<4096;i++) {
        b.append(" a a a a a a a a");
        b.append(" a a a a a a a a");
        b.append(" a a a a a a a a");
        b.append(" a a a a a a a a");
    }
    Document doc = new Document();
    doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(1, reader.maxDoc());
    assertEquals(1, reader.numDocs());
    Term t = new Term("field", "a");
    assertEquals(1, reader.docFreq(t));
    DocsEnum td = MultiFields.getTermDocsEnum(reader,
                                              MultiFields.getDeletedDocs(reader),
                                              "field",
                                              new BytesRef("a"));
    td.nextDoc();
    assertEquals(128*1024, td.freq());
    reader.close();
    dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
    // Directory whose public lockFactory is null; locking is done privately
    // via a SingleInstanceLockFactory behind makeLock().
    final class MyRAMDirectory extends MockDirectoryWrapper {
        private LockFactory myLockFactory;

        MyRAMDirectory(Directory delegate) {
            super(random, delegate);
            lockFactory = null;
            myLockFactory = new SingleInstanceLockFactory();
        }

        @Override
        public Lock makeLock(String name) {
            return myLockFactory.makeLock(name);
        }
    }
    Directory dir = new MyRAMDirectory(new RAMDirectory());
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()));
    for (int i = 0; i < 100; i++) {
        addDoc(writer);
    }
    writer.close();
    Term searchTerm = new Term("content", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir, false);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("did not get right number of hits", 100, hits.length);
    searcher.close();
    // Re-creating over the existing index must also acquire the private lock OK.
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setOpenMode(OpenMode.CREATE));
    writer.close();
    // FIX: removed a redundant second searcher.close() — the searcher was
    // already closed above.
    dir.close();
}
// Flushing with triggerMerge=false must leave all flushed segments unmerged:
// 19 docs at maxBufferedDocs=2 yields 10 segments.
public void testFlushWithNoMerging() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setMaxBufferedDocs(2).
            setMergePolicy(newLogMergePolicy(10))
    );
    Document doc = new Document();
    doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
    for(int i=0;i<19;i++)
        writer.addDocument(doc);
    // flush(triggerMerge=false, flushDeletes=true): flush but never merge.
    writer.flush(false, true);
    writer.close();
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    // Since we flushed w/o allowing merging we should now
    // have 10 segments
    assertEquals(10, sis.size());
    dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Flush the real doc so the segment has norms:
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
// Index must be consistent and both docs visible:
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close() does wait)
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off background optimize without waiting:
writer.optimize(false);
if (0 == pass) {
// close() waits for the running optimize to finish:
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// Optimized segment plus the late-flushed segment:
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
public void testBadSegment() throws IOException {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
// Empty field value + TermVector.YES is the trigger case:
document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
// Indexing while the current thread runs at MAX_PRIORITY must work;
// the original priority is always restored in the finally block.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
  @Override
  synchronized public void merge(IndexWriter writer)
    throws CorruptIndexException, IOException {
    // Drain all pending merges synchronously:
    while(true) {
      MergePolicy.OneMerge merge = writer.getNextMerge();
      if (merge == null)
        break;
      // Use assertTrue rather than the bare Java assert statement:
      // a bare assert silently does nothing unless -ea is passed,
      // so the original check could never fail in a normal run.
      for(int i=0;i<merge.segments.size();i++)
        assertTrue(merge.segments.info(i).docCount < 20);
      writer.merge(merge);
    }
  }
  @Override
  public void close() {}
}
// LUCENE-1013
// Uses MyMergeScheduler (above) to verify setMaxMergeDocs(20) is
// honored: no selected merge may include a segment with >= 20 docs.
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1072
// An analyzer whose TokenStream throws IOException mid-document must
// leave the writer usable, and the failed doc must end up deleted.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
// Throw while producing the 6th token:
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
// expected: the IOException thrown by the token stream above
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
// docFreq counts the deleted doc too, hence 3:
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
// Only the 2 live docs are enumerated:
assertEquals(2, count);
// "gg" comes after the exception point, so it was never indexed:
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
// Vary the field schema per iteration so segments being merged
// have differing field layouts:
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
// Periodically optimize down to a single segment:
if (0 == i % 4) {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Stresses close(false) (abort running merges, don't wait) while a
// background thread keeps adding docs; after each close the index
// must still be readable and re-openable.
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for(int pass=0;pass<2;pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setOpenMode(OpenMode.CREATE).
        setMaxBufferedDocs(2).
        // have to use compound file to prevent running out of
        // descriptors when newDirectory returns a file-system
        // backed directory:
        setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for(int iter=0;iter<10;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for(int j=0;j<199;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      // Adder thread: AlreadyClosedException/NPE mean the writer was
      // closed underneath us and are expected; anything else is a
      // real failure and is recorded:
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while(!done) {
            for(int i=0;i<100;i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Rethrow unexpected failures only after the thread finished.
      // (The original checked before t1.start(), when the list was
      // always still empty, so failures were silently dropped.)
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid use of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// IndexerThread is a helper defined elsewhere in this test class.
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
// NOTE(review): after an untimed join() returns, isAlive() can
// never be true, so this check is vacuous; a join with a timeout
// would make it meaningful -- confirm intent before changing.
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// 10,000 filler tokens followed by "x"; with no field-length
// limit the trailing term must still be indexed:
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// Point-in-time readers must not see uncommitted docs; reopening
// after commit() must see them.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
IndexReader reader2 = reader.reopen();
// Original reader stays at its commit point; reopened sees 23:
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
Document doc = new Document();
// 10,001 tokens total; the 100000 limit is not reached, so the
// final "x" token must be indexed:
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // (Removed a redundant second "document = new Document()"
  // re-assignment that immediately overwrote the first.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
                                Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Field.Store.NO, Field.Index.NOT_ANALYZED,
                                    Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();
  // Delete two docs via the reader:
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  // expungeDeletes must compact maxDoc down to numDocs:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  // (Removed a redundant second "document = new Document()"
  // re-assignment that immediately overwrote the first.)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
                               Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Store.NO, Index.NOT_ANALYZED,
                                    TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc:
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // expungeDeletes with a small mergeFactor forces many adjacent merges:
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(2).
      setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
      setMergePolicy(newLogMergePolicy(50))
  );
  // (Removed a redundant second "document = new Document()"
  // re-assignment that immediately overwrote the first.)
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
                                Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Field.Store.NO, Field.Index.NOT_ANALYZED,
                                    Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc:
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // expungeDeletes(doWait=false); writer.close() waits for it:
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMergePolicy(newLogMergePolicy(3))
  );
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179: indexing a field whose name is the empty string
// must not throw.
public void testEmptyFieldName() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document document = new Document();
  document.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(document);
  w.close();
  directory.close();
}
// IndexWriter subclass that records flush-hook invocations and can be
// made to throw from the "DocumentsWriterPerThread.init start" test point.
private static final class MockIndexWriter extends IndexWriter {
  public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }
  // Set by tests to make testPoint throw. Referenced below and by
  // testExceptionDocumentsWriterInit but was never declared -- the
  // missing field declaration is restored here:
  boolean doFail;
  boolean afterWasCalled;
  boolean beforeWasCalled;
  @Override
  public void doAfterFlush() {
    afterWasCalled = true;
  }
  @Override
  protected void doBeforeFlush() throws IOException {
    beforeWasCalled = true;
  }
  @Override
  boolean testPoint(String name) {
    if (doFail && name.equals("DocumentsWriterPerThread.init start"))
      throw new RuntimeException("intentionally failing");
    return true;
  }
}
// An exception thrown from MockIndexWriter.testPoint during
// DocumentsWriter init must abort addDocument cleanly and leave a
// consistent index.
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// The doBeforeFlush/doAfterFlush hooks must fire on commit, both for
// added docs and for deletes.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// Pairs of (raw input, expected value after indexing): invalid UTF-16
// surrogate sequences must come back with U+FFFD replacement chars.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Index the invalid-UTF-16 inputs from utf8Data and verify both the
// indexed terms and the stored values come back as the expected
// replacement-char forms.
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid code point through UTF16toUTF8/UTF8toUTF16
// and compares against the JDK's own UTF-8 encoding.
// NOTE(review): the bound ch<0x0010FFFF excludes U+10FFFF, the last
// valid code point -- confirm whether that is intentional.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode supplementary code point as a surrogate pair:
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Returns a random int in [0, lim), using the test's shared random.
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Returns a random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 content and
// writes the post-indexing expected value into expected[] (unpaired
// surrogates become U+FFFD). Returns true if an illegal (unpaired)
// surrogate was generated.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// Pick one of six content classes for this position:
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII range
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range below the surrogates
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte UTF-8 range above the surrogates
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// Unpaired surrogate is expected to decode as U+FFFD:
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Random strings: UTF16toUTF8 must match the JDK encoder for legal
// input, and UTF8toUTF16 must always recover the expected
// (replacement-char'd) form.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Incremental decoding: re-decode only the byte suffix that changed
// since the previous iteration and verify it matches a full decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
// Keep a random char prefix from the previous iteration, unless
// it contained illegal surrogates:
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that differs from the previous encoding:
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode from bytePrefix must yield the same chars...
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// ...as a full decode from 0:
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// Docs become visible to new readers only after commit(), not after
// prepareCommit().
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// prepareCommit alone must not publish the 23 docs:
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// rollback() after prepareCommit() must discard the pending commit;
// no prepared docs may ever become visible.
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// After rollback nothing was published:
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
// Only the 17 docs added after the rollback survive:
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: prepareCommit followed by commit on a writer with no
// pending changes must still leave a readable, empty index.
public void testPrepareCommitNoChanges() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.prepareCommit();
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  assertEquals(0, r.numDocs());
  r.close();
  directory.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
// LUCENE-1219: a binary Field created over an offset/length view must store
// only the [offset, offset+length) slice, and that slice must round-trip.
public void testBinaryFieldOffsetLength() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
// View of bytes 10..26 of b.
Field f = new Field("binary", b, 10, 17);
byte[] bx = f.getBinaryValue();
assertTrue(bx != null);
// getBinaryValue() returns the full backing array; offset/length describe the view.
assertEquals(50, bx.length);
assertEquals(10, f.getBinaryOffset());
assertEquals(17, f.getBinaryLength());
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
// Fixed: was assertEquals(17, b.length, 17), which resolves to the
// (double, double, delta) overload and would accept any length in [0, 34].
assertEquals(17, b.length);
// First stored byte is original b[10] = 10 + 77 = 87.
assertEquals(87, b[0]);
ir.close();
dir.close();
}
// LUCENE-1382: commit user data must round-trip, and must survive later
// commits that do not supply their own user data.
public void testCommitUserData() throws IOException {
Directory directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for (int i = 0; i < 17; i++) {
addDoc(writer);
}
writer.close();
// commit(Map) was never called for this index, so user data is empty.
assertEquals(0, IndexReader.getCommitUserData(directory).size());
IndexReader reader = IndexReader.open(directory, true);
assertEquals(0, reader.getCommitUserData().size());
reader.close();
writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for (int i = 0; i < 17; i++) {
addDoc(writer);
}
Map<String,String> userData = new HashMap<String,String>();
userData.put("label", "test1");
writer.commit(userData);
writer.close();
// The label is visible through both the static helper and an open reader.
assertEquals("test1", IndexReader.getCommitUserData(directory).get("label"));
reader = IndexReader.open(directory, true);
assertEquals("test1", reader.getCommitUserData().get("label"));
reader.close();
// An optimize + close without new user data must preserve the old label.
writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.optimize();
writer.close();
assertEquals("test1", IndexReader.getCommitUserData(directory).get("label"));
directory.close();
}
// LUCENE-2529
// An empty value for a field, followed by a non-empty value for the same
// field name, must still apply the analyzer's position-increment gap: the
// first real token lands at position 100 (the gap), the next at 101.
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// f is empty; f2 carries the tokens; both share the field name "field".
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
// Term 0 ("crunch") at position 100, term 1 ("man") at 101.
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1442
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
Directory directory = newDirectory();
try {
// Write a stray, non-index file into the directory:
IndexOutput output = directory.createOutput("myrandomfile");
output.writeByte((byte) 42);
output.close();
// Opening (and closing) a create=true writer must leave it untouched.
new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(directory.fileExists("myrandomfile"));
} finally {
directory.close();
}
}
// Verifies addIndexes(IndexReader...) with a reader and its clone completes
// (no deadlock) and imports both copies: 3 local docs + 2 imported = 5.
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
// Second index with a single doc; addIndexes pulls it in twice, once via
// r1 and once via its clone r2.
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Background thread that repeatedly builds a small index on a RAMDirectory
// while the driving test (testThreadInterruptDeadlock) fires interrupts at
// it. An interrupt must surface only as ThreadInterruptedException; any
// other exception, or a corrupted index, marks the run as failed.
private class IndexerThreadInterrupt extends Thread {
volatile boolean failed;
volatile boolean finish;
// Set true only after one full indexing pass, so interrupts cannot land
// during class loading (see the long comment inside the loop).
volatile boolean allowInterrupt = false;
@Override
public void run() {
// LUCENE-2239: won't work with NIOFS/MMAP
Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter w = null;
while(!finish) {
try {
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
// Frequent commits maximize the chance an interrupt lands mid-commit.
for(int i=0;i<100;i++) {
w.addDocument(doc);
if (i%10 == 0) {
w.commit();
}
}
w.close();
// The index must be intact no matter where the interrupt landed.
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
}
} catch (ThreadInterruptedException re) {
// Expected path: the interrupt from the driving test thread.
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
if (!failed) {
// clear interrupt state:
Thread.interrupted();
try {
w.rollback();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
try {
dir.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
// Fires up to 100 interrupts at an IndexerThreadInterrupt and verifies the
// thread neither deadlocks nor corrupts its index (t.failed stays false).
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// Only interrupt once the child signals it is past its first,
// interrupt-sensitive iteration (see allowInterrupt).
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// Fields must come back in the exact order they were added — including the
// duplicate "zzz" entries — not sorted or grouped by name.
public void testStoredFieldsOrder() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
// Fixed: assertEquals arguments were reversed (actual, expected), which
// produces misleading "expected X but was Y" failure messages.
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals("zzz", f.name());
assertEquals("a b c", f.stringValue());
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals("aaa", f.name());
assertEquals("a b c", f.stringValue());
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals("zzz", f.name());
assertEquals("1 2 3", f.stringValue());
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// Terms containing an embedded U+FFFF must index and search correctly.
public void testEmbeddedFFFF() throws Throwable {
Directory directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(document);
document = new Document();
document.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(document);
IndexReader reader = writer.getReader();
// Only the first document contains the term with the embedded U+FFFF.
assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
reader.close();
writer.close();
_TestUtil.checkIndex(directory);
directory.close();
}
// Indexes a single empty Document (no fields), with compound files disabled
// and the writer's info stream captured into a buffer, then verifies the
// resulting index passes CheckIndex.
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Swallow info-stream output; we only care that logging doesn't break.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
// Each thread adds docs whose term values carry its own thread index,
// commits, reopens its reader, and must immediately see each new doc.
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// reopen() must return a NEW reader that sees the commit.
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// Returns a uniformly random int in [start, end]; both bounds inclusive.
private final int getInt(Random r, int start, int end) {
// nextInt needs the size of the closed range [start, end].
final int rangeSize = end - start + 1;
return start + r.nextInt(rangeSize);
}
// Verifies the terms of field "f" in r:
//  - they enumerate in strictly increasing BytesRef (UTF-8 byte) order,
//  - every enumerated term is one we indexed (member of allTerms),
//  - when isTop, ALL indexed terms were seen (exact set equality),
//  - each seen term can afterwards be re-found via TermsEnum.seek().
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Strictly ascending: each term compares greater than its predecessor.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a UTF-16 code unit as "U+<hex>" (lowercase hex, no zero padding).
private final String asUnicodeChar(char c) {
final int codeUnit = c;
return "U+" + Integer.toHexString(codeUnit);
}
// Human-readable description of a 1- or 2-code-unit term, e.g. "U+d800,U+dc00".
private final String termDesc(String s) {
assertTrue(s.length() <= 2);
return s.length() == 1
? asUnicodeChar(s.charAt(0))
: asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// Indexes one doc whose field holds 300 distinct terms ("0".."299") with a
// term index interval of 2, then enumerates all terms and checks each one
// occurs exactly once, in doc 0.
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
// must be > 256
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 300; i++) {
sb.append(' ');
sb.append(i);
}
Document d = new Document();
d.add(new Field("field", sb.toString(), Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(d);
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
for (BytesRef term = t.next(); term != null; term = t.next()) {
final DocsEnum docs = t.docs(null, null);
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
// Exercises IndexWriter.deleteUnusedFiles(): a file still held open by a
// reader (_0.cfs) must survive deletion attempts, and must go away once the
// reader is closed — automatically for an NRT reader (iter==0), or via an
// explicit deleteUnusedFiles() call for a regular reader (iter==1).
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// Add a second doc and optimize so _0.cfs becomes obsolete.
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// (Method name typo "Unsed" kept for compatibility with existing test runs.)
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook;
// used by the testIndexingThenDeleting tests to detect when a flush occurs.
private static class FlushCountingIndexWriter extends IndexWriter {
// Number of flushes observed; bumped on each doAfterFlush() callback.
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
// Alternates between adding documents and deleting by a (non-matching) term
// until each path triggers a flush, asserting the 0.5 MB RAM buffer does not
// flush too eagerly: each cycle must absorb well over 2500 operations.
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
// nocommit - TODO: enable when flushing by RAM is implemented
// Disabled twin of testIndexingThenDeleting above (the leading underscore
// keeps the test runner from picking it up); retains the older
// newRandom()/MockRAMDirectory idioms pending the RAM-flush work.
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
try {
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
// (close() writes the initial, empty commit point.)
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory directory = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.close();
directory.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
// Randomized round-trip test of stored fields: index docs with random stored
// values (and random deletes), then verify every surviving doc's stored
// fields match what we remembered in the `docs` map.
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setInfoStream(System.out);
//w.w.setUseCompoundFile(false);
if (VERBOSE) {
w.w.setInfoStream(System.out);
}
final int docCount = 200*RANDOM_MULTIPLIER;
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
for(int field: fieldIDs) {
// ~3/4 of the time, store a random unicode string for this field.
// (Removed an unused local that held the string or null.)
if (rand.nextInt(4) != 3) {
doc.add(newField("f"+field, _TestUtil.randomUnicodeString(rand, 1000), Field.Store.YES, Field.Index.NO));
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc " + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
// Two passes: once on the multi-segment index, once after optimize().
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
// Fixed: the failure message printed fieldCount instead of the
// failing field's index i.
assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.optimize();
}
}
w.close();
dir.close();
}
// Verifies that indexing fields which do NOT request term vectors never
// produces any term-vector files (.tvf/.tvx/.tvd), even when a tiny RAM
// buffer forces many small flushes.
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
// Tiny RAM buffer => frequent flushes, many small segments.
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
// Disable compound files so each per-segment file is individually visible.
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
// Note: none of these Field constructors request a TermVector.
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
// No term-vector extensions may appear anywhere in the directory listing.
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
dir.close();
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
<<<<<<< MINE
=======
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInCloseDocStore(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("finishDocument".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("abort".equals(trace[i].getMethodName()) ||
"finishDocument".equals(trace[i].getMethodName())) {
if (onlyOnce)
doFail = false;
//System.out.println(Thread.currentThread().getName() + ": now fail");
//new Throwable().printStackTrace(System.out);
throw new IOException("now failing on purpose");
}
}
}
}
}
>>>>>>> YOURS
<<<<<<< MINE
=======
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail = false;
int count;
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
>>>>>>> YOURS
// Verifies maxDoc()/numDocs() bookkeeping across adds, reader-side deletes,
// optimize (which reclaims deleted docs), and re-creating the index.
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Save/restore the global default write-lock timeout so other tests are
// unaffected even if the assertions below throw.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
// Before optimize: deleted docs still count toward maxDoc, not numDocs.
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// After optimize the deleted docs are physically removed.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
/** Adds one unstored, analyzed "content":"aaa" document to {@code writer}. */
private void addDoc(IndexWriter writer) throws IOException {
  final Document d = new Document();
  d.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/**
 * Adds one stored, analyzed document whose "content" and "id" fields both
 * embed {@code index}, so individual docs are distinguishable in searches.
 */
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  final Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Asserts that running IndexFileDeleter over the directory deletes nothing:
// every file present is referenced by the current commit. Fails with a
// before/after listing if any unreferenced files were removed.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
String[] startFiles = dir.listAll();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// Constructing the deleter prunes any files not referenced by "infos".
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
String[] endFiles = dir.listAll();
// Sort both listings so the comparison is order-independent.
Arrays.sort(startFiles);
Arrays.sort(endFiles);
if (!Arrays.equals(startFiles, endFiles)) {
fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
}
}
// Verifies optimize(maxNumSegments=3): if the index already has fewer than
// 3 segments it is left alone; otherwise it is merged down to exactly 3.
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
// Try a range of index sizes so both the "< 3 segments" and ">= 3
// segments" branches below get exercised.
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Count segments before the bounded optimize.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
// Same contract as testOptimizeMaxNumSegments (optimize(7) leaves <= 7
// segments, or all of them when fewer exist), but with a concurrent merge
// scheduler and repeated optimize cycles on a growing index.
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Commit and wait so the segment count read below is stable.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
 * Make sure optimize doesn't use any more than 4X the
 * starting index size as its temporary free space
 * required (matching the assertion in the test body).
 */
// Builds a ~500 doc index, then measures the peak transient disk usage of a
// full optimize; asserts it stays within 4X the starting index size (see
// the assertion message below).
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Measure baseline: total bytes in the directory before optimizing.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Important to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
// Joins an array of file names into one string for failure messages, with
// each entry after the first preceded by the newline+indent literal below.
// (Used by assertNoUnreferencedFiles; test-only, so the O(n^2) string
// concatenation is acceptable for the small arrays involved.)
static String arrayToString(String[] l) {
String s = "";
for(int i=0;i<l.length;i++) {
if (i > 0) {
s += "\n ";
}
s += l[i];
}
return s;
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
// Opening an index for CREATE while a reader still holds the old commit
// must work (historically failed on Windows before lock-less commits); the
// old reader keeps seeing its point-in-time view.
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
// The old reader is pinned to the pre-CREATE commit: still one doc.
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
/**
 * Simulates a writer that crashed while writing the segments file (by
 * copying the latest segments_N minus its final byte under the next
 * generation's name) and verifies the index still opens -- i.e. readers and
 * writers gracefully fall back to the previous intact segments file -- and
 * that the index remains writable.
 */
public void testSimulatedCrashedWriter() throws IOException {
  Directory dir = new RAMDirectory();
  IndexWriter writer = null;
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();
  long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
  // Make the next segments file, with last byte
  // missing, to simulate a writer that crashed while
  // writing segments file:
  String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
  String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                             "",
                                                             1+gen);
  IndexInput in = dir.openInput(fileNameIn);
  IndexOutput out = dir.createOutput(fileNameOut);
  long length = in.length();
  // Copy all but the last byte, leaving a truncated segments_N+1.
  for(int i=0;i<length-1;i++) {
    out.writeByte(in.readByte());
  }
  in.close();
  out.close();
  IndexReader reader = null;
  try {
    reader = IndexReader.open(dir, true);
  } catch (Exception e) {
    fail("reader failed to open on a crashed index");
  }
  reader.close();
  try {
    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
  } catch (Exception e) {
    e.printStackTrace(System.out);
    fail("writer failed to open on a crashed index");
  }
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();
  // FIX(review): the directory was previously leaked; close it like every
  // other test in this file does.
  dir.close();
}
// NOTE(review): stale comment -- the "truncate the latest segments file and
// expect IOException" test it described no longer exists in this file; the
// next test instead verifies behavior when using a writer after close():
/** Using a writer after close() must throw {@link AlreadyClosedException}. */
public void testChangesAfterClose() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(w);
  w.close();
  // Any further mutation must be rejected by the closed writer.
  try {
    addDoc(w);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException e) {
    // expected
  }
  dir.close();
}
// NOTE(review): stale comment -- the "remove one of the cfs files and expect
// IOException" test it described no longer exists in this file.
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
// "Commit on close": docs added by an open writer are invisible to readers
// and searchers until the writer is closed; closing publishes them all.
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Add 33 more docs in 3 batches; after each batch, fresh searchers must
// still only see the original 14 because nothing has been committed.
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 33 newly committed = 47.
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
// "Commit on close" + rollback: uncommitted adds and deletes are invisible
// to readers, rollback() discards them entirely (leaving no stray files),
// and the index stays usable for subsequent writers.
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// Neither the 17 adds nor the delete-all are committed yet.
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 surviving docs + 12*17 = 204 new docs => 218 total after commit.
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
// Verifies the deleter cleans up temp segments created after open that are
// not referenced by the starting commit: peak transient disk usage while
// adding docs and after close must stay under 150X the starting index size.
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
// Seed a small (30 doc) starting index.
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
// Grow the index ~50X without committing.
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
/**
 * Verifies optimize under "commit on close": an uncommitted optimize is
 * invisible to readers, rollback() fully discards it (no stray files), and
 * a committed optimize is then visible.
 */
public void testCommitOnCloseOptimize() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Must disable throwing exc on double-write: this
  // test uses IW.rollback which easily results in
  // writing to same file more than once
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
      setMaxBufferedDocs(10).
      setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<17;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  // FIX(review): install the info stream BEFORE optimize runs; previously it
  // was set afterwards, so VERBOSE mode logged nothing for the optimize
  // under test (the second optimize below already had the correct order).
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  writer.optimize();
  // Open a reader before closing (commiting) the writer:
  IndexReader reader = IndexReader.open(dir, true);
  // Reader should see index as unoptimized at this
  // point:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  // Abort the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();
  if (VERBOSE) {
    System.out.println("TEST: do real optimize");
  }
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  writer.optimize();
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: writer closed");
  }
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after the writer committed the optimize:
  reader = IndexReader.open(dir, true);
  // Now the reader must see the index as optimized:
  assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
  reader.close();
  dir.close();
}
/**
 * Committing an empty index -- both on initial CREATE and on a subsequent
 * APPEND -- must yield a readable index with zero documents.
 */
public void testIndexNoDocuments() throws IOException {
  final MockDirectoryWrapper dir = newDirectory();
  // First pass: brand-new index, commit with no docs.
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();
  // Second pass: re-open for append, commit again with no docs.
  w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  w.commit();
  w.close();
  r = IndexReader.open(dir, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();
  dir.close();
}
// Indexes 100 docs each carrying 6 uniquely-named fields (600 distinct
// field names total) and verifies docFreq for every field/term pair.
public void testManyFields() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
// a/b/c carry per-doc terms ("aaa"+j); d/e/f carry the shared term "aaa".
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
for(int j=0;j<100;j++) {
// Each field name appears in exactly one doc, so every docFreq is 1.
assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
}
reader.close();
dir.close();
}
// With a near-zero RAM buffer, every single addDocument must trigger a
// flush, observable as the directory's file count growing after each doc.
public void testSmallRAMBuffer() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setRAMBufferSizeMB(0.000001).
setMergePolicy(newLogMergePolicy(10))
);
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.listAll().length;
// Verify that with a tiny RAM buffer we see new
// segment after every doc
assertTrue(numFile > lastNumFile);
lastNumFile = numFile;
}
writer.close();
dir.close();
}
/**
* Make sure it's OK to change RAM buffer size and maxBufferedDocs in a
* write session
*
* @deprecated after all the setters on IW go away (4.0), this test can be
* removed because changing ram buffer settings during a write
* session won't be possible.
*/
/**
* @deprecated after setters on IW go away, this test can be deleted because
* changing those settings on IW won't be possible.
*/
// Stresses the in-RAM indexing buffer with three very different document
// shapes (many unique terms, one repeated term, and very long terms) under
// a small 0.5 MB RAM buffer, then sanity-checks a search over the result.
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
// Only the 3*100 "aaa ..." docs match the term "aaa".
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Enabling norms on just one doc in a field (all others omitNorms) must not
// break searching, whether the norms-enabled doc arrives pre-flush or
// post-flush.
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
// Only doc j==8 keeps norms; the other nine omit them.
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
// Only doc j==26 (after the 10-doc flush boundary) keeps norms.
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
// Indexes one massive document containing 128K occurrences of the term "a"
// with a tiny RAM buffer (0.01 MB), then verifies docFreq and the in-doc
// term frequency are both correct.
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
// 4096 iterations x 32 a's per iteration = 131072 = 128K terms
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
// within-document frequency must equal the number of appended terms
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
// Verifies that a Directory which nulls out its LockFactory and implements
// its own private locking (by overriding makeLock) still works for both
// indexing and searching.
public void testNullLockFactory() throws IOException {
final class MyRAMDirectory extends MockDirectoryWrapper {
// private locking, independent of the (nulled) lockFactory field
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
// Re-create the index to exercise locking on OpenMode.CREATE as well.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
// FIX: removed a second searcher.close() here — the searcher was already
// closed above; the repeated call was a no-op dead statement.
dir.close();
}
// Flushes with triggerMerge=false and verifies no merging happened:
// 19 docs at maxBufferedDocs=2 plus the final flush yield exactly 10 segments.
public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
// triggerMerge=false, flushDeletes=true: flush buffered docs without merging
writer.flush(false, true);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
// Since we flushed w/o allowing merging we should now
// have 10 segments
assertEquals(10, sis.size());
dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Flushes a segment that has norms, then adds a completely empty doc
// (which has no norms) and verifies the index stays consistent.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// force the real doc into its own flushed segment
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
// both the real doc and the empty doc must be present
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
// Calls optimize(false) so the optimize is kicked off in the background;
// pass 0 checks that writer.close() waits for it to finish, pass 1 checks
// that segments flushed after the optimize call are NOT folded in.
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// doWait=false: returns immediately, optimize continues in background
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
// close() must have waited for the background optimize
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// the optimized segment plus the post-optimize flushed segment
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
// Adding a single document whose only field is empty, with term vectors
// enabled, must not raise a NullPointerException.
public void testBadSegment() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
Field emptyTvField = new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES);
doc.add(emptyTvField);
writer.addDocument(doc);
writer.close();
dir.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
// LUCENE-1036: indexing must work correctly when the calling thread runs
// at MAX_PRIORITY; the original priority is always restored in finally.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
// mergeFactor 2 + maxBufferedDocs 2 forces merges quickly
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
// restore whatever priority the test runner was using
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs.
// Runs merges synchronously on the calling thread; the assert relies on
// tests being run with -ea.
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// drain all pending merges, checking each segment's size first
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
// LUCENE-1013: setMaxMergeDocs(20) must prevent any merge from involving
// a segment of >= 20 docs; MyMergeScheduler asserts this for every merge.
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
// 177 docs at maxBufferedDocs=2 triggers many candidate merges
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1072
// LUCENE-1072: an IOException thrown from a TokenStream mid-document must
// leave the writer usable, and the failed doc must end up deleted.
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
// blow up on the 6th token of any stream
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
try {
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
// expected: the analyzer deliberately throws mid-stream
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
// "aa" was seen in all three docs (partially in the failed one)
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
// only the two good docs remain live
assertEquals(2, count);
// "gg" came after the exception point, so it was never indexed
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// LUCENE-1072: indexes docs whose field schema varies per iteration
// (stored vs not, empty vs full, different field names), interleaving
// deletes and periodic optimizes, to verify flushing stays consistent.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// delete one doc per iteration through a (read-write) reader
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
// every 4th iteration, optimize with compound files disabled
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Stresses writer.close(false) (abort merges, don't wait) while a
// background thread keeps adding documents, then verifies the index is
// still readable and can be reopened in APPEND mode.
public void testNoWaitClose() throws Throwable {
Directory directory = newDirectory();
final Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<2;pass++) {
if (VERBOSE) {
System.out.println("TEST: pass=" + pass);
}
IndexWriter writer = new IndexWriter(
directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
// have to use compound file to prevent running out of
// descripters when newDirectory returns a file-system
// backed directory:
setMergePolicy(newLogMergePolicy(false, 10))
);
writer.setInfoStream(VERBOSE ? System.out : null);
for(int iter=0;iter<10;iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
for(int j=0;j<199;j++) {
// NOTE(review): ids use iter*201 but the deletes below target
// iter*199 — confirm the multiplier mismatch is intended.
idField.setValue(Integer.toString(iter*201+j));
writer.addDocument(doc);
}
int delID = iter*199;
for(int j=0;j<20;j++) {
writer.deleteDocuments(new Term("id", Integer.toString(delID)));
delID += 5;
}
// Force a bunch of merge threads to kick off so we
// stress out aborting them on close:
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
final IndexWriter finalWriter = writer;
final ArrayList<Throwable> failure = new ArrayList<Throwable>();
Thread t1 = new Thread() {
@Override
public void run() {
boolean done = false;
while(!done) {
for(int i=0;i<100;i++) {
try {
finalWriter.addDocument(doc);
} catch (AlreadyClosedException e) {
// expected once close(false) lands
done = true;
break;
} catch (NullPointerException e) {
// tolerated consequence of racing close (see test header)
done = true;
break;
} catch (Throwable e) {
// anything else is a real failure; record it for the main thread
e.printStackTrace(System.out);
failure.add(e);
done = true;
break;
}
}
Thread.yield();
}
}
};
t1.start();
writer.close(false);
t1.join();
// FIX: check for background failures only after the thread has
// finished; previously this check ran before t1.start(), when the
// list was necessarily empty, so recorded failures were never thrown.
if (failure.size() > 0) {
throw failure.get(0);
}
// Make sure reader can read
IndexReader reader = IndexReader.open(directory, true);
reader.close();
// Reopen
writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.setInfoStream(VERBOSE ? System.out : null);
}
writer.close();
}
directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
// LUCENE-1130: close(false) while indexer threads are still adding docs
// must neither hang the threads nor corrupt the index. Repeats 7 times.
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// close without waiting for in-flight adds/merges
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
// at least one doc made it in before close
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1084: test unlimited field length
// LUCENE-1084: with the default (unlimited) field length, a term appearing
// after 10,000 other tokens must still be indexed.
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Build one long field value: 10,000 "a" tokens, then a single "x".
StringBuilder content = new StringBuilder();
int appended = 0;
while (appended < 10000) {
content.append(" a");
appended++;
}
content.append(" x");
Document doc = new Document();
doc.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
// The trailing "x" must not have been truncated away.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.docFreq(new Term("field", "x")));
reader.close();
dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// LUCENE-1044: with autocommit off, uncommitted docs must be invisible to
// readers until writer.commit() is called; reopened readers see each
// committed point-in-time snapshot.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
// nothing committed yet, so the reader sees an empty index
assertEquals(0, reader.numDocs());
writer.commit();
IndexReader reader2 = reader.reopen();
// the old reader keeps its snapshot; the reopened one sees the commit
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// still 23: the 17 new docs are uncommitted
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
// 23 + 17 docs visible after the second commit
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
// LUCENE-1084: a user-specified maxFieldLength (100000) must admit a term
// that appears after 10,000 other tokens.
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
Document doc = new Document();
StringBuilder b = new StringBuilder();
// 10,000 "a" tokens, then a single "x" at position 10,001
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
// "x" is within the 100000-token limit and must be searchable
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
// LUCENE-325: expungeDeletes() must rewrite segments so that deleted docs
// are physically removed (maxDoc shrinks to numDocs).
public void testExpungeDeletes() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
IndexWriterConfig.DISABLE_AUTO_FLUSH));
// FIX: removed a redundant "document = new Document()" that immediately
// overwrote the identical value just assigned (dead store).
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<10;i++)
writer.addDocument(document);
writer.close();
// delete two of the ten docs through a read-write reader
IndexReader ir = IndexReader.open(dir, false);
assertEquals(10, ir.maxDoc());
assertEquals(10, ir.numDocs());
ir.deleteDocument(0);
ir.deleteDocument(7);
assertEquals(8, ir.numDocs());
ir.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(8, writer.numDocs());
assertEquals(10, writer.maxDoc());
writer.expungeDeletes();
assertEquals(8, writer.numDocs());
writer.close();
ir = IndexReader.open(dir, true);
// after expunge, deleted slots are gone: maxDoc == numDocs == 8
assertEquals(8, ir.maxDoc());
assertEquals(8, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
// LUCENE-325: expungeDeletes when many adjacent merges are required
// (mergeFactor 3 over many small segments with half the docs deleted).
public void testExpungeDeletes2() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
setMergePolicy(newLogMergePolicy(50))
);
// FIX: removed a redundant "document = new Document()" that immediately
// overwrote the identical value just assigned (dead store).
Document document = new Document();
Field storedField = newField("stored", "stored", Store.YES,
Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Store.NO, Index.NOT_ANALYZED,
TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
writer.addDocument(document);
writer.close();
// delete every other doc: 49 of 98 remain
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
ir.deleteDocument(i);
assertEquals(49, ir.numDocs());
ir.close();
writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(newLogMergePolicy(3))
);
assertEquals(49, writer.numDocs());
writer.expungeDeletes();
writer.close();
ir = IndexReader.open(dir, true);
// all deleted slots must be physically removed
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
// LUCENE-325: expungeDeletes(false) — kicked off without waiting — must
// still have completed by the time the writer is closed.
public void testExpungeDeletes3() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
setMergePolicy(newLogMergePolicy(50))
);
// FIX: removed a redundant "document = new Document()" that immediately
// overwrote the identical value just assigned (dead store).
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
writer.addDocument(document);
writer.close();
// delete every other doc: 49 of 98 remain
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
ir.deleteDocument(i);
assertEquals(49, ir.numDocs());
ir.close();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(newLogMergePolicy(3))
);
// doWait=false: close() below must still wait for the expunge merges
writer.expungeDeletes(false);
writer.close();
ir = IndexReader.open(dir, true);
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1179
// LUCENE-1179: a field whose name is the empty string must be indexable
// without error.
public void testEmptyFieldName() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
Field unnamedField = new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED);
doc.add(unnamedField);
writer.addDocument(doc);
writer.close();
dir.close();
}
// IndexWriter subclass that records doBeforeFlush/doAfterFlush calls and
// can be told (via doFail) to throw from the DWPT-init test point.
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// FIX: declared the doFail field — it is read by testPoint below and set
// by testExceptionDocumentsWriterInit (w.doFail = true) but was missing
// from the class, which cannot compile.
boolean doFail;
// set true when the corresponding flush hook fires
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
@Override
boolean testPoint(String name) {
// simulate a failure at the start of DocumentsWriterPerThread init
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
}
// A RuntimeException thrown during DocumentsWriter init must not corrupt
// the index: the add fails, but the writer closes cleanly and checkIndex
// passes.
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
// arm the failure injected at "DocumentsWriterPerThread.init start"
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// Verifies doBeforeFlush/doAfterFlush hooks fire on commit, both for an
// add-triggered flush and for a delete-only flush.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// reset flags, then flush again with only a delete buffered
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
// the single doc exists but is deleted
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// LUCENE-1214: pairs of (input, expected-after-indexing) strings; invalid
// UTF-16 (unpaired/backwards surrogates) must be replaced by U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates (low before high; the inner high+low pair in the
// longer cases forms a VALID pair and is preserved)
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// LUCENE-510: indexes each invalid-UTF-16 input from utf8Data (even
// entries) and verifies both the indexed term and the stored value come
// back as the sanitized form (odd entries).
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// utf8Data is laid out as (input, expected) pairs
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// LUCENE-510: round-trips (almost) every Unicode code point through
// UnicodeUtil's UTF16->UTF8->UTF16 conversions and compares against the
// JDK's own UTF-8 encoding. Note: loop condition is ch < 0x0010FFFF, so
// the very last code point U+10FFFF itself is not exercised.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points (the surrogate range D800-DFFF)
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
// BMP code point: single UTF-16 code unit
chars[len++] = (char) ch;
} else {
// supplementary code point: encode as a surrogate pair
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// byte-for-byte agreement with the JDK encoder
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Returns a random int in [0, lim), drawn from the test's shared random.
private int nextInt(int lim) {
final int value = random.nextInt(lim);
return value;
}
// Returns a random int in [start, end).
private int nextInt(int start, int end) {
final int span = end - start;
return start + nextInt(span);
}
// Fills buffer[offset..offset+count) with random UTF-16 content — surrogate
// pairs, 1/2/3-byte-range chars, and occasionally an illegal unpaired
// surrogate — while writing into expected[] what a sanitizing UTF-8
// round-trip should produce (illegal units become U+FFFD).
// Returns true if any illegal sequence was generated.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
// NOTE: len was computed from the original offset, so stepping back here
// widens the filled region by one to re-cover the pair's high half.
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which kind of char to emit at position i
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII range (1-byte UTF-8)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range, below surrogates
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte UTF-8 range, above surrogates
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// sanitizer should replace the lone surrogate with U+FFFD
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
// fallback: plain space
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// LUCENE-510: random 20-char UTF-16 strings round-tripped through
// UnicodeUtil must match the JDK's UTF-8 encoding (for legal inputs) and
// decode back to the expected (sanitized) chars.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// only legal inputs can be compared byte-for-byte with the JDK
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// LUCENE-510: exercises UnicodeUtil's incremental UTF8->UTF16 decode by
// keeping a random shared prefix between iterations and decoding only the
// changed byte suffix; the result must match a full (non-incremental)
// decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
// after an illegal iteration, start from scratch (no shared prefix)
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// find the first byte that differs from last iteration's encoding
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// incremental decode of only the changed suffix
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// full decode must agree too
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
// Verifies two-phase commit: prepareCommit() must not make changes visible
// to new or reopened readers until commit() completes.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened before prepareCommit sees the empty committed state.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// prepareCommit alone must not publish the 23 added docs.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// Only after commit() does a reopened reader observe them; the old
// readers remain pinned to their original commit point.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
// Second prepare/commit cycle: same invisibility until commit().
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommitRollback() throws IOException {
// Verifies that rollback() after prepareCommit() discards the prepared
// (but uncommitted) changes entirely.
MockDirectoryWrapper dir = newDirectory();
// rollback + re-add rewrites the same files; disable double-write checks.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// Rollback must abandon the prepared commit: all readers stay at 0 docs.
writer.rollback();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
// A fresh writer over the rolled-back index starts from the empty commit.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
public void testPrepareCommitNoChanges() throws IOException {
  // prepareCommit()/commit() on a brand-new writer holding no documents
  // must still produce a valid, empty commit point that a reader can open.
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  iw.prepareCommit();
  iw.commit();
  iw.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(0, r.numDocs());
  r.close();
  dir.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
public void testBinaryFieldOffsetLength() throws IOException {
  // Stores a binary field built from an (offset, length) slice of a larger
  // array and verifies the slice survives a round trip through the index.
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // Slice: bytes [10, 10+17) of b; value 10+77 == 87 is the first byte.
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // Bug fix: the original assertEquals(17, b.length, 17) bound to the
  // double overload with delta=17, so any length in [0, 34] would pass.
  // Assert the exact stored length instead.
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
public void testCommitUserData() throws IOException {
// Verifies commit(Map) user data is persisted, readable via both the
// static IndexReader.getCommitUserData and an open reader, and survives
// later commits that do not pass user data.
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
// No commit(Map) yet: user data must be empty.
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// User data is visible both statically and from a reader.
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// An optimize + close without user data must not clobber it.
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
public void testPositionIncrementGapEmptyField() throws IOException {
// An empty instance of a multi-valued field must still contribute the
// analyzer's position-increment gap before the next instance's tokens.
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// First field instance is empty; second holds two tokens.
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
// Gap of 100 applied even though the first instance produced no tokens.
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1442
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
  Directory dir = newDirectory();
  try {
    // Plant a non-index file in the directory first.
    IndexOutput randomFile = dir.createOutput("myrandomfile");
    randomFile.writeByte((byte) 42);
    randomFile.close();
    // Opening (and immediately closing) a fresh writer must leave it alone.
    new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
  }
}
public void testDeadlock() throws Exception {
// Regression test: addIndexes(...) taking a reader and a clone of that
// same reader must complete (not deadlock) and merge all docs.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
// Build a second 1-doc index and add it twice via a reader + its clone.
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 original docs + 1 from each of the two (cloned) readers.
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
/**
 * Worker thread that repeatedly builds, checks and reopens an index while
 * the parent test fires interrupts at it; verifies interrupts surface as
 * ThreadInterruptedException and never corrupt the index.
 */
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;
  volatile boolean finish;
  // Set once the first full index/check cycle completes; the parent test
  // waits for this before interrupting (see the class-loading note below).
  volatile boolean allowInterrupt = false;
  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while(!finish) {
      try {
        while(true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = new IndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for(int i=0;i<100;i++) {
            w.addDocument(doc);
            if (i%10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        // Expected path: an interrupt from the parent test.
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      // Bug fix: w can still be null if the loop was interrupted before the
      // first IndexWriter was constructed; guard the rollback to avoid an
      // NPE that would mask the real test outcome.
      if (w != null) {
        try {
          w.rollback();
        } catch (IOException ioe) {
          throw new RuntimeException(ioe);
        }
      }
      // Whatever state the interrupts left behind must still be a
      // consistent, openable index.
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
public void testThreadInterruptDeadlock() throws Exception {
// Drives IndexerThreadInterrupt: fires up to 100 interrupts at the worker
// while it indexes, then checks it shut down without failures.
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// Only interrupt once the worker has signalled it is safe to do so.
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
// Adds three stored-only fields (two with the same name) and verifies the
// reader returns them in exactly the order they were added.
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
// Expected order is insertion order, not alphabetical: zzz, aaa, zzz.
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
public void testEmbeddedFFFF() throws Throwable {
// A term containing U+FFFF (a non-character code point) must index and
// be searchable without corrupting the index.
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
// Only the first doc contains the embedded-FFFF term.
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
public void testNoDocsIndex() throws Throwable {
// An index holding a single empty Document (no fields) must still pass
// CheckIndex; infoStream output is captured but not inspected.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
// Each thread adds a doc, commits, reopens its reader and immediately
// asserts the just-committed term is visible — i.e. commit() does not
// return before the change is searchable.
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// Unique term per thread and iteration: "<thread>_<count>".
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
// The committed term must be visible to the reopened reader.
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  // nextInt takes the size of the inclusive range [start, end].
  final int span = end - start + 1;
  return start + r.nextInt(span);
}
// Walks all terms of field "f" verifying strict ascending BytesRef order,
// that every term was one we added, and (for the top-level reader) that no
// added term is missing; then re-seeks each seen term to confirm FOUND.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Each term must sort strictly after the previous one.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
// Sub-readers see only a subset; only the top reader must see everything.
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as "U+<hex>" (lowercase, no zero padding) for messages.
private final String asUnicodeChar(char c) {
  return new StringBuilder("U+").append(Integer.toHexString(c)).toString();
}
// Describes a 1- or 2-char term (possibly a surrogate pair) as code points
// for assertion messages.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
public void testIndexDivisor() throws Exception {
// With a small term-index interval (2), all 300 terms of a single doc
// must still be enumerable, each matching exactly doc 0.
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
// Every term occurs in doc 0 and only doc 0.
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
public void testDeleteUnusedFiles() throws Exception {
// Verifies deleteUnusedFiles() honors files still held open by a reader,
// for both an NRT reader (iter 0) and a regular directory reader (iter 1).
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
// While r is open, _0.cfs must survive deleteUnusedFiles().
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE: method name typo ("Unsed") kept as-is for compatibility.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook;
// used by the testIndexingThenDeleting tests to detect flush triggers.
private static class FlushCountingIndexWriter extends IndexWriter {
// Number of flushes observed so far (read by the tests; not synchronized).
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
public void testIndexingThenDeleting() throws Exception {
// With a 0.5 MB RAM buffer and no doc-count limits, neither adds nor
// deletes should trigger a flush too early (must take > 2500 ops).
final Random r = random;
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
// Randomly alternate between an adding phase and a deleting phase.
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
// nocommit - TODO: enable when flushing by RAM is implemented
// Disabled variant (leading underscore keeps JUnit from running it) of
// testIndexingThenDeleting, using the older newRandom()/MockRAMDirectory
// idiom; kept until RAM-based flushing lands.
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
try {
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
/**
 * Randomized round-trip test for stored fields: index docs with randomly
 * present stored-only fields, randomly delete some, then verify that every
 * surviving doc's stored fields match what was indexed — both on the
 * multi-segment index (pass 0) and after optimize (pass 1).
 *
 * Fixes: the assertion message printed the constant {@code fieldCount}
 * instead of the failing field index {@code i}; the shuffle ignored the
 * seeded {@code rand}, making failures non-reproducible from the test seed.
 */
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  // Expected state: id -> the exact Document we indexed (minus deletions).
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      // ~75% of the time, add a random stored-only value for this field.
      if (rand.nextInt(4) != 3) {
        final String s = _TestUtil.randomUnicodeString(rand, 1000);
        doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often; use the
      // test's seeded Random so failures stay reproducible from the seed.
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    // Pass 0 checks the multi-segment index; pass 1 checks after optimize().
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // Bug fix: report the failing field index i, not the constant fieldCount.
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
// Indexes docs containing only non-term-vector fields and verifies that no
// term-vector files (.tvf/.tvx/.tvd) are created. A tiny RAM buffer forces
// flushes, and compound files are disabled so per-extension files are
// visible in the directory listing.
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
// Large filler value so the tiny RAM buffer fills quickly.
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
dir.close();
}
}
Safe
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// (merge resolution: obsolete FailOnlyInCloseDocStore MockRAMDirectory.Failure
// helper removed — the doc-store failure test it supported no longer exists)
// (merge resolution: obsolete FailOnlyOnAbortOrFlush MockRAMDirectory.Failure
// helper removed along with its test)
// (merge resolution: obsolete FailOnlyOnFlush MockRAMDirectory.Failure
// helper removed along with its test)
// Exercises maxDoc()/numDocs() bookkeeping across add, delete, optimize, and
// index re-create, and verifies IndexReader reports the same counts.
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily lower the default write-lock timeout; restored in finally so
// other tests are unaffected even if writer construction throws.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// optimize() merges away the 40 deleted docs, so maxDoc drops to 60.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
/** Adds one minimal document: a single analyzed, unstored "content" field. */
private void addDoc(IndexWriter writer) throws IOException
{
  final Document d = new Document();
  final Field content = new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED);
  d.add(content);
  writer.addDocument(d);
}
/** Adds a document whose stored "content" and "id" fields embed the given index. */
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  final Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Fails with `message` if the directory holds files not referenced by the
// last commit: constructing an IndexFileDeleter with the keep-only-last-commit
// policy deletes any unreferenced files, so the before/after listings must be
// identical.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
String[] startFiles = dir.listAll();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// Side effect of construction: unreferenced files are deleted here.
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
String[] endFiles = dir.listAll();
Arrays.sort(startFiles);
Arrays.sort(endFiles);
if (!Arrays.equals(startFiles, endFiles)) {
fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
}
}
// optimize(3) must leave at most 3 segments, or leave the count unchanged
// when the index already has fewer than 3. Repeated for growing doc counts
// to vary the starting segment structure.
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// Already <= 3 segments: optimize(3) must not change the segment count.
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
// Same contract as testOptimizeMaxNumSegments but with a concurrent merge
// scheduler and repeated optimize(7) calls against a growing index.
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// commit + waitForMerges + commit so the on-disk SegmentInfos is stable
// before we count segments.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Measure the starting index size so we can bound optimize's temp usage.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Import to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
// optimize must not use more than 4X the starting index size as peak
// transient disk space (per the assertion message below).
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
/**
 * Renders file names one per line (continuation lines indented by one space)
 * for readable assertion-failure messages.
 *
 * @param l names to render; may be empty
 * @return the joined string, or "" for an empty array
 */
static String arrayToString(String[] l) {
  // StringBuilder avoids the O(n^2) cost of repeated String concatenation.
  StringBuilder sb = new StringBuilder();
  for(int i=0;i<l.length;i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
// The original point-in-time reader must still see its own commit...
assertEquals("should be one document", reader.numDocs(), 1);
// ...while a freshly opened reader sees the re-created index.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
<<<<<<< MINE
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
=======
>>>>>>> YOURS
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
/** After close(), any further mutation must throw AlreadyClosedException. */
public void testChangesAfterClose() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(writer);
  writer.close();
  boolean hitException = false;
  try {
    addDoc(writer);
  } catch (AlreadyClosedException ace) {
    // expected
    hitException = true;
  }
  assertTrue("did not hit AlreadyClosedException", hitException);
  dir.close();
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Add 33 more docs in 3 batches; new searchers opened while the writer is
// still open must NOT see them (commit only happens at close()).
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 3*11 added = 47 once close() has committed.
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
// rollback() must discard both the 17 adds and the delete-all: a new
// searcher still sees the original 14 docs.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
// 14 original + 12*17 new docs = 218 visible after the commit-on-close.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// From here on, track the peak ("max used") disk size the directory hits.
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
public void testCommitOnCloseOptimize() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
// optimize() runs but is never committed (writer is rolled back below).
writer.optimize();
if (VERBOSE) {
writer.setInfoStream(System.out);
}
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
if (VERBOSE) {
System.out.println("TEST: do real optimize");
}
// This time close() commits the optimize, so readers must see it.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
if (VERBOSE) {
writer.setInfoStream(System.out);
}
writer.optimize();
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
dir.close();
}
/**
 * A writer that commits without ever adding a document must still produce a
 * valid, empty index — both on a fresh directory and when re-opened APPEND.
 */
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Pass 0: default open mode on a fresh dir. Pass 1: explicit APPEND.
  for (int pass = 0; pass < 2; pass++) {
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
    if (pass == 1) {
      conf.setOpenMode(OpenMode.APPEND);
    }
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.commit();
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(0, reader.maxDoc());
    assertEquals(0, reader.numDocs());
    reader.close();
  }
  dir.close();
}
// Indexes 100 docs, each with 6 per-doc-unique field NAMES (600 distinct
// fields total), then verifies every (field, term) pair has docFreq 1.
public void testManyFields() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
for(int j=0;j<100;j++) {
assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
}
reader.close();
dir.close();
}
// With an absurdly tiny RAM buffer every added doc must trigger a
// flush, so the directory's file count should grow after each add.
public void testSmallRAMBuffer() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter w = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
        setRAMBufferSizeMB(0.000001).
        setMergePolicy(newLogMergePolicy(10)));
  int prevFileCount = dir.listAll().length;
  for (int i = 0; i < 9; i++) {
    Document d = new Document();
    d.add(new Field("field", "aaa" + i, Field.Store.YES, Field.Index.ANALYZED));
    w.addDocument(d);
    // Verify that with a tiny RAM buffer we see a new segment
    // after every doc.
    int fileCount = dir.listAll().length;
    assertTrue(fileCount > prevFileCount);
    prevFileCount = fileCount;
  }
  w.close();
  dir.close();
}
/**
 * Make sure it's OK to change RAM buffer size and maxBufferedDocs in a
 * write session.
 *
 * @deprecated after all the setters on IW go away (4.0), this test can be
 *             removed because changing ram buffer settings during a write
 *             session won't be possible.
 */
// Stresses the in-memory indexing structures with three diverse doc
// shapes under a small (0.5 MB) RAM buffer so that flushes happen
// repeatedly during the run.
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
// 3 outer passes x 100 docs contain the repeated term "aaa".
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Norms are kept for a field if at least one doc in the segment has
// them enabled; exercise that both pre-flush and post-flush.
public void testEnablingNorms() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  // Enable norms for only one doc, before any flush happens.
  for (int i = 0; i < 10; i++) {
    Document d = new Document();
    Field field = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (i != 8) {
      field.setOmitNorms(true);
    }
    d.add(field);
    w.addDocument(d);
  }
  w.close();
  Term searchTerm = new Term("field", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(10, hits.length);
  searcher.close();
  w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
  // Enable norms for only one doc, after a flush has occurred.
  for (int i = 0; i < 27; i++) {
    Document d = new Document();
    Field field = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    if (i != 26) {
      field.setOmitNorms(true);
    }
    d.add(field);
    w.addDocument(d);
  }
  w.close();
  searcher = new IndexSearcher(dir, false);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals(27, hits.length);
  searcher.close();
  IndexReader reader = IndexReader.open(dir, true);
  reader.close();
  dir.close();
}
// One massive doc with 128K occurrences of "a"; verify the very high
// term frequency survives flushing under a tiny RAM buffer.
public void testHighFreqTerm() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
  // Build a doc holding 4096 * 32 = 128K a's.
  StringBuilder sb = new StringBuilder(1024*1024);
  for (int i = 0; i < 4096; i++) {
    sb.append(" a a a a a a a a");
    sb.append(" a a a a a a a a");
    sb.append(" a a a a a a a a");
    sb.append(" a a a a a a a a");
  }
  Document d = new Document();
  d.add(new Field("field", sb.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(1, r.maxDoc());
  assertEquals(1, r.numDocs());
  Term t = new Term("field", "a");
  assertEquals(1, r.docFreq(t));
  DocsEnum td = MultiFields.getTermDocsEnum(r,
                                            MultiFields.getDeletedDocs(r),
                                            "field",
                                            new BytesRef("a"));
  td.nextDoc();
  assertEquals(128*1024, td.freq());
  r.close();
  dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
// Null out the inherited factory so all locking must go through
// the private one below.
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
// Reopen with CREATE to verify the private lock factory also works
// when wiping the index.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
searcher.close();
dir.close();
}
// Flushing with merging disallowed must leave every flushed segment
// in place: 19 docs at maxBufferedDocs=2 yields 10 segments.
public void testFlushWithNoMerging() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMaxBufferedDocs(2).
        setMergePolicy(newLogMergePolicy(10)));
  Document d = new Document();
  d.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  for (int i = 0; i < 19; i++) {
    w.addDocument(d);
  }
  // triggerMerge=false, flushDeletes=true: flush but do not merge.
  w.flush(false, true);
  w.close();
  SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  // Since we flushed w/o allowing merging we should now have 10 segments.
  assertEquals(10, infos.size());
  dir.close();
}
// Make sure we can flush a segment that carries norms, then add an
// empty doc (which has none) and flush again.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.setInfoStream(VERBOSE ? System.out : null);
  Document d = new Document();
  d.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  w.addDocument(d);
  w.commit();
  if (VERBOSE) {
    System.out.println("\nTEST: now add empty doc");
  }
  w.addDocument(new Document());
  w.close();
  _TestUtil.checkIndex(dir);
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(2, r.numDocs());
  r.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off the optimize but do not wait for it here.
writer.optimize(false);
if (0 == pass) {
// close() waits for the background optimize, so the index must
// end up optimized.
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// One optimized segment plus the late two-doc flush.
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
 * Test that no NullPointerException is raised when a document with a
 * single, empty field and term vectors enabled is added.
 *
 * @throws IOException on index failure
 */
public void testBadSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document d = new Document();
  d.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  w.addDocument(d);
  w.close();
  dir.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
public void testMaxThreadPriority() throws IOException {
  int savedPriority = Thread.currentThread().getPriority();
  try {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMaxBufferedDocs(2);
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter w = new IndexWriter(dir, conf);
    Document d = new Document();
    d.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
                    Field.TermVector.YES));
    // Index while running at MAX_PRIORITY.
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    for (int i = 0; i < 4; i++) {
      w.addDocument(d);
    }
    w.close();
    dir.close();
  } finally {
    // Always restore the thread's original priority.
    Thread.currentThread().setPriority(savedPriority);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain every pending merge; Java asserts (require -ea) catch any
// over-sized segment slipping into a merge.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
public void testSetMaxMergeDocs() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy mp = (LogMergePolicy) conf.getMergePolicy();
  mp.setMaxMergeDocs(20);
  mp.setMergeFactor(2);
  IndexWriter w = new IndexWriter(dir, conf);
  Document d = new Document();
  d.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
                  Field.TermVector.YES));
  // MyMergeScheduler asserts no merged segment ever reaches 20 docs.
  for (int i = 0; i < 177; i++) {
    w.addDocument(d);
  }
  w.close();
  dir.close();
}
// LUCENE-1072
<<<<<<< MINE
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
=======
>>>>>>> YOURS
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// NOTE(review): the body actually varies which fields each doc carries
// across iterations (plus periodic deletes/optimizes) — confirm the
// comment above still matches the intent.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per iteration through the reader API.
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
// Periodically collapse everything into a single segment.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Verifies close(false) aborts running merges and concurrent adds
// without corrupting the index.
// BUGFIX: the `failure` check previously ran BEFORE t1.start(), so a
// Throwable recorded by the indexing thread could never be reported;
// the check now runs after t1.join().
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setOpenMode(OpenMode.CREATE).
          setMaxBufferedDocs(2).
          // have to use compound file to prevent running out of
          // descripters when newDirectory returns a file-system
          // backed directory:
          setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for (int iter = 0; iter < 10; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // Expected once the main thread closes the writer.
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                // Anything else is a real failure; record it for the
                // main thread to rethrow.
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Rethrow any unexpected Throwable the indexing thread hit
      // (must happen after join, once the thread has actually run).
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
<<<<<<< MINE
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
=======
>>>>>>> YOURS
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK, with one thread or with multiple threads:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK, also with multiple threads:
// LUCENE-1130: test IOException in closeDocStore, also with threads
// LUCENE-1130: test IOException in writeSegment, also with threads
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // 10000 "a" tokens followed by a single "x"; with no field-length
  // cap the trailing "x" must still be indexed.
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    sb.append(" a");
  }
  sb.append(" x");
  Document d = new Document();
  d.add(new Field("field", sb.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(1, r.docFreq(new Term("field", "x")));
  r.close();
  dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// Readers only see committed state: docs added after the last commit
// stay invisible until writer.commit() is called again.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened now sees only the empty initial commit.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopen() after the commit picks up the 23 docs; the old reader
// instance remains unchanged.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// The 17 uncommitted docs are invisible to every reader.
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
  // 10001 tokens fit under the user-specified 100000-token limit, so
  // the trailing "x" must be indexed.
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    sb.append(" a");
  }
  sb.append(" x");
  Document d = new Document();
  d.add(new Field("field", sb.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(1, r.docFreq(new Term("field", "x")));
  r.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
// FIX: removed a redundant dead store (`Document document = new
// Document();` immediately followed by `document = new Document();`).
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // One stored-only field plus one term-vector field per doc.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
                                Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Field.Store.NO, Field.Index.NOT_ANALYZED,
                                    Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  // Delete two docs: numDocs shrinks but maxDoc does not.
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  // expungeDeletes must physically drop the deleted docs.
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
// FIX: removed a redundant dead store (`Document document = new
// Document();` immediately followed by `document = new Document();`).
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMaxBufferedDocs(2).
        setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
        setMergePolicy(newLogMergePolicy(50))
  );
  // One stored-only field plus one term-vector field per doc.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
                               Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Store.NO, Index.NOT_ANALYZED,
                                    TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // Delete every other doc.
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
// FIX: removed a redundant dead store (`Document document = new
// Document();` immediately followed by `document = new Document();`).
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMaxBufferedDocs(2).
        setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
        setMergePolicy(newLogMergePolicy(50))
  );
  // One stored-only field plus one term-vector field per doc.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
                                Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
                                    Field.Store.NO, Field.Index.NOT_ANALYZED,
                                    Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  // Delete every other doc.
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
        setMergePolicy(newLogMergePolicy(3))
  );
  // doWait=false: kick off the expunge but close() will wait for it.
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
public void testEmptyFieldName() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // A zero-length field name must be accepted without error.
  Document d = new Document();
  d.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  dir.close();
}
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
<<<<<<< MINE
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
=======
>>>>>>> YOURS
}
<<<<<<< MINE
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
=======
>>>>>>> YOURS
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// Verifies the doBeforeFlush/doAfterFlush hooks fire on commits
// triggered both by added docs and by deletes.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset the flags, then trigger another flush via a delete.
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// Pairs of (input, expected) strings: each input contains invalid
// UTF-16 (unpaired or backwards surrogates) and the expected value is
// the same string with illegal units replaced by U+FFFD. Consumed by
// testInvalidUTF16 below.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
public void testInvalidUTF16() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // utf8Data holds (input, expected-after-replacement) pairs.
  final int numPairs = utf8Data.length / 2;
  Document d = new Document();
  for (int i = 0; i < numPairs; i++) {
    d.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
  }
  w.addDocument(d);
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  Document stored = r.document(0);
  for (int i = 0; i < numPairs; i++) {
    // Invalid surrogates must have been replaced by U+FFFD both in
    // the inverted index and in the stored fields.
    assertEquals("field " + i + " was not indexed correctly", 1, r.docFreq(new Term("f"+i, utf8Data[2*i+1])));
    assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], stored.getField("f"+i).stringValue());
  }
  r.close();
  dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point (skipping the surrogate
// range) through UnicodeUtil's UTF-16 <-> UTF-8 conversions and
// compares against the JDK's own converters.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
// BMP code point: single UTF-16 unit.
chars[len++] = (char) ch;
} else {
// Supplementary code point: encode as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// The UTF-8 bytes must match the JDK encoder exactly.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Returns a random int in [0, lim) drawn from the test's shared RNG.
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Returns a random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 data and
// mirrors it into `expected`, except that deliberately-written lone
// surrogates are mirrored as U+FFFD. Returns true if any illegal
// sequence was produced. The RNG-call sequence is order-sensitive;
// do not restructure.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// A lone surrogate decodes to the replacement character.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
/**
 * LUCENE-510: random 20-char UTF-16 strings round-tripped through
 * UTF16toUTF8 / UTF8toUTF16; legal strings are also checked byte-for-byte
 * against the JDK's UTF-8 encoder.
 */
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Only legal strings can be compared to the JDK encoder (it may
// handle illegal surrogates differently)
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
// Decoding must restore exactly the expected chars (U+FFFD where the
// input had unpaired surrogates)
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
/**
 * LUCENE-510: exercises incremental UTF-8 -> UTF-16 decoding, where only
 * the byte suffix that changed since the previous iteration is re-decoded
 * into a reused UTF16Result. A full (non-incremental) decode is checked
 * alongside as a control.
 */
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
// After an illegal string the incremental state is unusable; restart
// from a zero-length shared prefix
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that differs from the previous iteration's
// encoding; only bytes from there on need to be re-decoded
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode of just the changed suffix
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// Control: full decode must produce the same chars
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * LUCENE-1274: prepareCommit() must not make changes visible to readers
 * until the matching commit() completes; after commit(), reopened readers
 * see exactly the committed docs.
 */
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened before prepareCommit sees nothing
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// prepareCommit alone must NOT publish the 23 docs
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// Only after commit() does a reopen observe the docs
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// Point-in-time: reader3 still sees only the first commit
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
// Again, prepared-but-uncommitted docs stay invisible
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
/**
 * LUCENE-1274: rollback() after prepareCommit() must discard the prepared
 * changes; no reader ever observes them, and a fresh writer starts from
 * the last real commit.
 */
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// rollback rewrites files the prepared commit already created
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// The prepared 23 docs must have been discarded entirely
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
// Only the 17 post-rollback docs survive
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
/**
 * LUCENE-1274: a prepareCommit()/commit() pair with no pending changes
 * must still leave behind a readable, empty index.
 */
public void testPrepareCommitNoChanges() throws IOException {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.prepareCommit();
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(d, true);
  assertEquals(0, r.numDocs());
  r.close();
  d.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
/**
 * LUCENE-1219: a binary field created with an offset/length window stores
 * only that window; on retrieval the stored value starts at the windowed
 * byte (b[10] == 87) and is 17 bytes long.
 *
 * Fix: the retrieved-length check was assertEquals(17, b.length, 17),
 * which resolves to the (double,double,double) delta overload and accepts
 * any length from 0 to 34 — now an exact 2-arg comparison.
 */
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // Window of b: offset 10, length 17
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  // Before indexing the field still exposes the full backing array
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // Stored value is exactly the 17-byte window
  assertEquals(17, b.length);
  // First stored byte is the original b[10] == 10 + 77
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
/**
 * LUCENE-1382: user data passed to commit(Map) must be persisted with the
 * commit, retrievable via IndexReader, and preserved across later
 * commits that don't supply new user data.
 */
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// User data is visible both from the directory and from a reader
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
// optimize + plain close must carry the previous user data forward
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
/**
 * LUCENE-2529: the analyzer's positionIncrementGap must be applied even
 * when the first instance of a multi-valued field produces no tokens, so
 * the second instance's tokens start at position 100/101.
 */
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// First field value is empty: yields no tokens but must still add the gap
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
// "crunch" lands at the gap, not at 0
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1442
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
/**
 * LUCENE-1468: opening an IndexWriter with create=true must not remove
 * files in the directory that are not part of the index.
 */
public void testOtherFiles() throws Throwable {
  Directory dir = newDirectory();
  try {
    // Plant a non-index file before the writer ever touches the directory
    IndexOutput out = dir.createOutput("myrandomfile");
    out.writeByte((byte) 42);
    out.close();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    w.close();
    // The stray file must survive index creation
    assertTrue(dir.fileExists("myrandomfile"));
  } finally {
    dir.close();
  }
}
/**
 * LUCENE-1347: addIndexes with two readers cloned from the same source
 * index must not deadlock; the resulting index contains 3 original docs
 * plus 1 doc from each of the two readers.
 */
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
// Add the same single-doc index twice, once via a cloned reader
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 original + 1 from r1 + 1 from r2
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
/**
 * Helper thread for testThreadInterruptDeadlock: repeatedly builds a small
 * index while the main thread fires interrupts at it. Interrupts must
 * surface only as ThreadInterruptedException; any other failure, or a
 * corrupt/unreadable index afterwards, sets {@link #failed}.
 *
 * Fix: {@code w.rollback()} in the cleanup path could NPE when the very
 * first IndexWriter construction never completed; now guarded by a null
 * check.
 */
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;
  volatile boolean finish;
  // Main thread only sends interrupts once this goes true (see run())
  volatile boolean allowInterrupt = false;
  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while(!finish) {
      try {
        while(true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = new IndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for(int i=0;i<100;i++) {
            w.addDocument(doc);
            if (i%10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        // Expected: an interrupt arrived mid-operation
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        // Anything other than the wrapped interrupt is a real failure
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      try {
        // w may be null if the first writer construction never completed
        if (w != null) {
          w.rollback();
        }
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
      // The index must be intact and openable despite all the interrupts
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
/**
 * LUCENE-2239: interrupting an indexing thread ~100 times must neither
 * deadlock it nor corrupt its index (verified inside
 * IndexerThreadInterrupt.run()).
 */
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// Only interrupt once the child signals it is past class loading
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
/**
 * LUCENE-1727: stored fields must come back from the reader in exactly
 * the order they were added to the document, including duplicate field
 * names interleaved with others.
 */
public void testStoredFieldsOrder() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// Deliberately: zzz, aaa, zzz — retrieval must preserve this order,
// not sort or group by field name
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
/**
 * A term containing an embedded U+FFFF char must index correctly and be
 * found by docFreq, and the index must pass CheckIndex.
 */
public void testEmbeddedFFFF() throws Throwable {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document first = new Document();
  first.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(first);
  Document second = new Document();
  second.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(second);
  IndexReader reader = writer.getReader();
  // Exactly one document contains the embedded-FFFF term
  assertEquals(1, reader.docFreq(new Term("field", "a\uffffb")));
  reader.close();
  writer.close();
  _TestUtil.checkIndex(dir);
  dir.close();
}
/**
 * An index built from a single empty Document (no fields), non-compound,
 * with an info stream attached, must still pass CheckIndex.
 */
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Capture (and discard) the writer's diagnostic output
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
/**
 * LUCENE-2095: commit() must not return before all changes are durable.
 * Each of 5 threads adds a doc, commits, reopens its reader, and asserts
 * its own doc is immediately visible.
 */
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// Terms are "<threadId>_<counter>": unique per thread, so
// each thread can verify exactly its own doc landed
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
// After commit + reopen the just-added term must be visible
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
// Uniformly random int in [start, end]; both endpoints inclusive.
private final int getInt(Random r, int start, int end) {
  return r.nextInt(end - start + 1) + start;
}
/**
 * Walks field "f"'s TermsEnum, asserting the terms are strictly
 * increasing in BytesRef (UTF-8 byte) order and that each was one of the
 * terms added. For the top-level reader (isTop) also asserts every added
 * term was seen, then re-seeks each seen term to verify seeking works.
 */
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Terms must enumerate in strictly increasing byte order
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
// Sub-readers may each see a subset; only the top reader sees all
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as "U+<lowercase hex>" for readable assertion messages.
private final String asUnicodeChar(char c) {
  return String.format("U+%x", (int) c);
}
// Describes a 1- or 2-char term as its code-unit escape(s), e.g.
// "U+d800,U+dc00" for a surrogate pair.
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  return s.length() == 1
      ? asUnicodeChar(s.charAt(0))
      : asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
/**
 * With termIndexInterval=2 and >256 distinct terms in one doc, every term
 * must still enumerate and resolve to exactly that one doc.
 */
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
// Each term occurs in exactly one doc: doc 0
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
/**
 * deleteUnusedFiles() must not delete segment files a live reader still
 * holds open; once the reader closes (NRT case) or deleteUnusedFiles is
 * called again (non-NRT case), the stale files go away. Runs both the
 * NRT (iter 0) and the commit/open (iter 1) variants.
 */
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed" for "Unused"); kept as-is
// since the public name is the test's identity.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook,
// used by testIndexingThenDeleting to detect when a flush was triggered.
private static class FlushCountingIndexWriter extends IndexWriter {
// Number of flushes observed so far; read and compared by the test loop
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
<<<<<<< MINE
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
=======
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
>>>>>>> YOURS
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// Before close there must be no commit point at all
try {
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
/**
 * LUCENE-2386 regression: constructing an IndexWriter over an empty
 * FSDirectory that uses NoLockFactory must succeed (it used to fail when
 * listAll() ran inside IndexFileDeleter).
 */
public void testEmptyFSDirWithNoLock() throws Exception {
  Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.close();
  dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
// Randomized round-trip test for stored fields: builds docs with randomly
// bound field names, randomly deletes some, then verifies every surviving
// doc's stored values match the in-memory model (before and after optimize).
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  //w.w.setInfoStream(System.out);
  //w.w.setUseCompoundFile(false);
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  // In-memory model: id -> expected Document.
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      final String s;
      if (rand.nextInt(4) != 3) {
        s = _TestUtil.randomUnicodeString(rand, 1000);
        doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
      } else {
        s = null;
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often
      // FIX: shuffle with the seeded test Random so failures reproduce
      // from the reported seed (plain shuffle() uses its own Random).
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // FIX: the failure message previously printed fieldCount instead
          // of the failing field index i.
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      // Second pass verifies the same contents survive optimize().
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
// Indexing documents that request no term vectors must not leave any term
// vector files (.tvx/.tvd/.tvf equivalents) behind, even with a tiny RAM
// buffer that forces frequent flushes and with compound files disabled.
public void testNoUnwantedTVFiles() throws Exception {
  final Directory dir = newDirectory();
  final IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
  ((LogMergePolicy) iw.getConfig().getMergePolicy()).setUseCompoundFile(false);
  String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
  BIG=BIG+BIG+BIG+BIG;
  for (int i=0; i<2; i++) {
    final Document d = new Document();
    d.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    d.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
    d.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
    d.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
    iw.addDocument(d);
  }
  iw.close();
  _TestUtil.checkIndex(dir);
  assertNoUnreferencedFiles(dir, "no tv files");
  // No file in the directory may carry a term-vector extension.
  for (String file : dir.listAll()) {
    assertFalse(file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
    assertFalse(file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
    assertFalse(file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
  }
  dir.close();
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// NOTE(merge): resolved three conflicts by removing the MockRAMDirectory.Failure
// subclasses (FailOnlyInCloseDocStore, FailOnlyOnAbortOrFlush, FailOnlyOnFlush)
// that one branch tried to keep: this file imports MockDirectoryWrapper, not
// MockRAMDirectory, so those classes would no longer compile here.
// Verifies maxDoc()/numDocs() bookkeeping across adds, reader-side deletes,
// re-opening, optimize(), and re-creating the index with OpenMode.CREATE.
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily shrink the global default write-lock timeout; restored in
// the finally block so other tests are unaffected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// After optimize the deleted docs are reclaimed: maxDoc drops to numDocs.
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
// Adds one unstored, analyzed document with the single field "content: aaa".
private void addDoc(IndexWriter writer) throws IOException {
  final Document d = new Document();
  d.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Adds one stored, analyzed document whose "content" and "id" fields embed
// the given index so specific docs can be located later.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  final Document d = new Document();
  d.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
/*
* Make sure IndexWriter cleans up on hitting a disk
* full exception in addDocument.
*/
// Asserts that constructing an IndexFileDeleter over the current commit
// deletes nothing, i.e. every file in the directory is referenced by the
// index. Fails with a before/after listing when files were removed.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  final String[] startFiles = dir.listAll();
  final SegmentInfos infos = new SegmentInfos();
  infos.read(dir);
  // Side effect under test: the deleter prunes any unreferenced files.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
  final String[] endFiles = dir.listAll();
  Arrays.sort(startFiles);
  Arrays.sort(endFiles);
  if (Arrays.equals(startFiles, endFiles)) {
    return;
  }
  fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
}
// optimize(3) must leave at most 3 segments — or leave the count unchanged
// when the index already has fewer than 3 — across a range of index sizes.
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
// Recreate the index from scratch for each size (OpenMode.CREATE).
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Read segment count directly from the segments file.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
// Same contract as testOptimizeMaxNumSegments but with a ConcurrentMergeScheduler
// running in the background: optimize(7) leaves at most 7 segments.
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Commit, wait out background merges, commit again so the segments
// file reflects a quiesced index before we count segments.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
// Measures peak disk usage during optimize() and asserts it stays within 4X
// the starting index size (see the method comment above about temp space).
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Baseline: total size of all files before optimizing.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Import to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
/**
 * Joins the given file names with a newline-plus-indent separator, for use
 * in readable assertion-failure messages.
 *
 * @param l file names to join; an empty array yields the empty string
 * @return the joined listing
 */
static String arrayToString(String[] l) {
  // StringBuilder avoids the accidental O(n^2) cost of String += in a loop.
  final StringBuilder sb = new StringBuilder();
  for (int i = 0; i < l.length; i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
}
// NOTE(merge): resolved conflict by removing testSimulatedCrashedWriter, as
// the deleting branch intended: the kept branch's version used IndexInput
// (no longer imported by this file) and the raw "new RAMDirectory()" idiom
// that this class has otherwise migrated away from to newDirectory().
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
// Any mutating call on a closed IndexWriter must throw AlreadyClosedException.
public void testChangesAfterClose() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(w);
  w.close();
  try {
    addDoc(w);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException ace) {
    // expected: the writer was already closed
  }
  dir.close();
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
// "Commit on close": a reader must not see docs added by an open writer
// until that writer is closed.
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
// A fresh searcher opened mid-write still sees only the committed 14 docs.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 3*11 added = 47 docs visible after close.
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
// "Commit on close" with rollback(): uncommitted adds and deletes must be
// discarded, no unreferenced files may remain, and the index stays usable.
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
// Uncommitted adds/deletes must be invisible to a fresh searcher.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 12*17 added after rollback = 218 docs after close.
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
// Asserts the deleter cleans up intermediate segments: transient disk usage
// while adding and after closing stays under 150X the starting index size.
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Start tracking peak disk usage only after the baseline index exists.
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
// optimize() followed by rollback() must leave the index unoptimized with no
// stray files; optimize() followed by close() must leave it optimized.
public void testCommitOnCloseOptimize() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
if (VERBOSE) {
writer.setInfoStream(System.out);
}
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
if (VERBOSE) {
System.out.println("TEST: do real optimize");
}
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
if (VERBOSE) {
writer.setInfoStream(System.out);
}
writer.optimize();
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// This time the optimize was committed by close(), so it must stick.
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
dir.close();
}
// An index that has been committed with zero documents must open cleanly
// and report empty counts, both after initial creation and after an
// APPEND-mode reopen/commit cycle.
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();

  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.commit();
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();

  // Reopen for append, commit again without adding anything:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.commit();
  writer.close();

  reader = IndexReader.open(dir, true);
  assertEquals(0, reader.maxDoc());
  assertEquals(0, reader.numDocs());
  reader.close();

  dir.close();
}
// Index 100 documents, each carrying six fields whose names vary per
// document, and verify every term lands in the right field.
public void testManyFields() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for (int docNum = 0; docNum < 100; docNum++) {
    Document doc = new Document();
    // Three fields whose value also varies per doc, three with a fixed value:
    doc.add(new Field("a" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("b" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("c" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("d" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("e" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("f" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for (int docNum = 0; docNum < 100; docNum++) {
    // Each per-doc field name should contain exactly its own term:
    assertEquals(1, reader.docFreq(new Term("a" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("b" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("c" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("d" + docNum, "aaa")));
    assertEquals(1, reader.docFreq(new Term("e" + docNum, "aaa")));
    assertEquals(1, reader.docFreq(new Term("f" + docNum, "aaa")));
  }
  reader.close();
  dir.close();
}
// With an absurdly tiny RAM buffer, every added document should trigger
// a flush, so the directory's file count must grow after each add.
public void testSmallRAMBuffer() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setRAMBufferSizeMB(0.000001).
          setMergePolicy(newLogMergePolicy(10))
  );
  int previousFileCount = dir.listAll().length;
  for (int docNum = 0; docNum < 9; docNum++) {
    Document doc = new Document();
    doc.add(new Field("field", "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    int currentFileCount = dir.listAll().length;
    // Verify that with a tiny RAM buffer we see new
    // segment after every doc
    assertTrue(currentFileCount > previousFileCount);
    previousFileCount = currentFileCount;
  }
  writer.close();
  dir.close();
}
/**
 * Make sure it's OK to change RAM buffer size and maxBufferedDocs during a
 * write session.
 *
 * @deprecated after all the setters on IW go away (4.0), this test can be
 *             removed because changing ram buffer settings during a write
 *             session won't be possible.
 */
// Stress the indexing buffer with three very different document shapes
// (many unique terms, one repeated term, very long terms) under a small
// 0.5 MB RAM buffer, then verify all 300 docs containing "aaa" are found.
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// Only the 100 "aaa..." docs per outer iteration match: 3 * 100 = 300 hits.
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Verify that enabling norms on only a single document (all others omit
// norms) works both before a flush (in-RAM segment) and after flushes
// have already happened, and that searching still finds all docs.
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
// Only doc 8 keeps norms; every other doc omits them:
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
// Recreate the index from scratch for the post-flush case:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
// With maxBufferedDocs=10, doc 26 (the only one with norms) is added
// after two flushes have already occurred:
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
// Finally make sure the index opens cleanly:
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
// Index one massive document where a single term occurs 128K times and
// verify the postings record the full within-document frequency.
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
// 4096 iterations * 32 "a" tokens per iteration = 131072 = 128 * 1024:
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
// Document frequency is 1 (one doc contains the term)...
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
// ...but the within-document term frequency is all 128K occurrences:
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
  // Directory that bypasses the LockFactory machinery entirely
  // (lockFactory == null) and supplies its own private locking
  // through makeLock():
  final class MyRAMDirectory extends MockDirectoryWrapper {
    private LockFactory myLockFactory;
    MyRAMDirectory(Directory delegate) {
      super(random, delegate);
      lockFactory = null;
      myLockFactory = new SingleInstanceLockFactory();
    }
    @Override
    public Lock makeLock(String name) {
      return myLockFactory.makeLock(name);
    }
  }
  Directory dir = new MyRAMDirectory(new RAMDirectory());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  writer.close();
  Term searchTerm = new Term("content", "aaa");
  IndexSearcher searcher = new IndexSearcher(dir, false);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("did not get right number of hits", 100, hits.length);
  searcher.close();
  // Also verify a CREATE-mode writer works against the private locking:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
      .setOpenMode(OpenMode.CREATE));
  writer.close();
  // (fix) removed a redundant second searcher.close(): the searcher was
  // already closed above, before the second writer was opened.
  dir.close();
}
// Flushing with merging disallowed must leave every flushed segment in
// place: 19 docs with maxBufferedDocs=2 yields 10 segments.
public void testFlushWithNoMerging() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setMergePolicy(newLogMergePolicy(10))
  );
  Document doc = new Document();
  doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
  for (int docNum = 0; docNum < 19; docNum++) {
    writer.addDocument(doc);
  }
  writer.flush(false, true);
  writer.close();

  SegmentInfos sis = new SegmentInfos();
  sis.read(dir);
  // Since we flushed w/o allowing merging we should now
  // have 10 segments
  assertEquals(10, sis.size());
  dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Make sure we can flush a segment with norms, then add a completely
// empty doc (no norms) and flush again without corrupting the index.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  writer.setInfoStream(VERBOSE ? System.out : null);

  Document realDoc = new Document();
  realDoc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
  writer.addDocument(realDoc);
  writer.commit();

  if (VERBOSE) {
    System.out.println("\nTEST: now add empty doc");
  }
  writer.addDocument(new Document());
  writer.close();

  _TestUtil.checkIndex(dir);

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(2, reader.numDocs());
  reader.close();
  dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
// Test calling optimize(false): the optimize is kicked off in the
// background, but close() waits for it. Pass 0 checks the index ends up
// optimized; pass 1 checks that docs added AFTER optimize(false) form a
// second segment not folded into the optimize.
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off optimize without waiting for it to finish:
writer.optimize(false);
if (0 == pass) {
// close() waits for the running optimize to complete:
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
// Exactly two segments: the optimized one plus the late flush:
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
// A single document with one empty, analyzed, term-vector-enabled field
// must index without throwing (guards against a historical NPE).
public void testBadSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));

  Document doc = new Document();
  doc.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  writer.addDocument(doc);

  writer.close();
  dir.close();
}
// LUCENE-1008
// LUCENE-1010
// LUCENE-1036
// Indexing must work correctly even when the calling thread runs at
// MAX_PRIORITY; the original priority is restored in the finally block
// so other tests are unaffected.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
// Raise priority before adding docs so flushes/merges happen at max:
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
// Always restore the original priority:
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs; merges are run
// serially on the calling thread.
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain and execute every pending merge, checking each segment's size:
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// Enforced only when assertions (-ea) are enabled:
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
// LUCENE-1013: with maxMergeDocs=20 and mergeFactor=2, MyMergeScheduler
// asserts that no merge ever involves a segment of 20+ docs.
public void testSetMaxMergeDocs() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
  LogMergePolicy mergePolicy = (LogMergePolicy) conf.getMergePolicy();
  mergePolicy.setMaxMergeDocs(20);
  mergePolicy.setMergeFactor(2);

  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
      Field.TermVector.YES));
  for (int docNum = 0; docNum < 177; docNum++) {
    writer.addDocument(doc);
  }
  writer.close();
  dir.close();
}
// LUCENE-1072
<<<<<<< MINE
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
dir.close();
}
=======
>>>>>>> YOURS
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
// Index docs whose field schema varies across 20 iterations (different
// field names, store settings, and an all-empty-field iteration at i==7),
// interleaved with deletes and periodic optimizes, to make sure flushing
// segments with differing schemas works.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
// Alternate the store setting between even and odd iterations:
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per iteration via the reader:
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
// Every 4th iteration, optimize (non-compound) to force cross-schema merging:
if (0 == i % 4) {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Verify close(false) — abort running merges, don't wait — leaves a
// consistent index even while another thread keeps adding documents.
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            // have to use compound file to prevent running out of
            // descripters when newDirectory returns a file-system
            // backed directory:
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for (int iter = 0; iter < 10; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for (int j = 0; j < 199; j++) {
        idField.setValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      // Background thread that keeps adding docs until the writer is
      // closed out from under it; unexpected Throwables are recorded.
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                // expected once close(false) completes
                done = true;
                break;
              } catch (NullPointerException e) {
                // also possible when the writer is torn down mid-add
                done = true;
                break;
              } catch (Throwable e) {
                // anything else is a real failure
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // (fix) Check for failures only after the thread has finished;
      // previously this check ran before t1.start(), when the list was
      // necessarily empty, so failures could never be reported.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
<<<<<<< MINE
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
=======
>>>>>>> YOURS
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
// Runs test, with one thread, using the specific failure
// to trigger an IOException
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in closeDocStore, with threads
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1130: test IOException in writeSegment, with threads
// LUCENE-1084: test unlimited field length
// LUCENE-1084: with the default (unlimited) max field length, a term
// sitting past 10000 tokens must still be indexed.
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  // Build " a a a ... a x": 10000 "a" tokens followed by a single "x".
  StringBuilder content = new StringBuilder();
  for (int tokenNum = 0; tokenNum < 10000; tokenNum++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  Term trailingTerm = new Term("field", "x");
  assertEquals(1, reader.docFreq(trailingTerm));
  reader.close();
  dir.close();
}
// LUCENE-1044: Simulate checksum error in segments_N
// LUCENE-1044: test writer.commit() when ac=false
// LUCENE-1044: test writer.commit() when autocommit is off: readers must
// see exactly the state of the last explicit commit, nothing buffered.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Reader opened before the commit sees the empty initial commit:
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// Reopened reader sees the 23 committed docs; the old one is unchanged:
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
// Uncommitted adds stay invisible to the existing reader:
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// After the second commit all 23 + 17 = 40 docs are visible:
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1044: test exception during sync
// LUCENE-1168
// LUCENE-1168
// LUCENE-1168
// LUCENE-1084: test user-specified field length
// LUCENE-1084: a user-specified max field length of 100000 is large
// enough that the "x" token at position 10001 is still indexed.
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));

  // Build " a a a ... a x": 10000 "a" tokens followed by a single "x".
  StringBuilder content = new StringBuilder();
  for (int tokenNum = 0; tokenNum < 10000; tokenNum++) {
    content.append(" a");
  }
  content.append(" x");

  Document doc = new Document();
  doc.add(new Field("field", content.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = IndexReader.open(dir, true);
  Term trailingTerm = new Term("field", "x");
  assertEquals(1, reader.docFreq(trailingTerm));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
// LUCENE-325: test expungeDeletes when 2 singular merges are required.
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // (fix) removed a dead duplicate "document = new Document()" assignment.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 10; i++)
    writer.addDocument(document);
  writer.close();
  // Delete two docs via the reader:
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  // expungeDeletes should physically drop the deleted docs:
  writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // After expunge, maxDoc shrinks to match numDocs:
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
// LUCENE-325: test expungeDeletes when many adjacent merges are required.
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // (fix) removed a dead duplicate "document = new Document()" assignment.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc (49 of 98):
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // Small merge factor forces many adjacent merges during the expunge:
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
// LUCENE-325: test expungeDeletes without waiting (expungeDeletes(false)),
// when many adjacent merges are required; close() waits for completion.
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // (fix) removed a dead duplicate "document = new Document()" assignment.
  Document document = new Document();
  Field storedField = new Field("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = new Field("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc (49 of 98):
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // Small merge factor forces many adjacent merges during the expunge:
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  // Kick off the expunge without waiting; close() waits for it:
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
// LUCENE-1179: a field whose name is the empty string must be accepted.
public void testEmptyFieldName() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  Document doc = new Document();
  doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);

  writer.close();
  dir.close();
}
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
<<<<<<< MINE
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
=======
>>>>>>> YOURS
}
<<<<<<< MINE
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
=======
>>>>>>> YOURS
// LUCENE-1208
// LUCENE-1210
// LUCENE-1222
// Verify doBeforeFlush/doAfterFlush hooks fire on commit, both for a
// commit that flushes an added doc and for one that flushes a delete.
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
// Reset the flags, then check the hooks also fire for a delete-only commit:
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
// The doc was deleted but not expunged: maxDoc 1, numDocs 0.
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// LUCENE-1214
// Pairs of strings: even index = input containing invalid UTF-16
// (unpaired or backwards surrogates), odd index = the form expected
// after indexing replaces the invalid units with U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Indexes every malformed string from utf8Data and checks that both the
// inverted terms and the stored values come back with U+FFFD substitutions.
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// utf8Data holds (input, expected) pairs, hence length/2 fields.
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point through UTF16toUTF8 and
// UTF8toUTF16, comparing against the JDK's own UTF-8 encoder/decoder.
// NOTE(review): the loop bound ch<0x0010FFFF excludes U+10FFFF itself —
// confirm whether the last code point is deliberately skipped.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode supplementary code point as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte comparison with the JDK encoder.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Returns a uniformly random int in [0, lim), from the shared test random.
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Returns a uniformly random int in [start, end) (end exclusive).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 code units spanning
// the ASCII, 2-byte, 3-byte and surrogate-pair encoding ranges, occasionally
// injecting an illegal unpaired surrogate.  "expected" gets the same content
// except that illegal units are replaced with U+FFFD.  Returns true if any
// illegal unit was written.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// The lone surrogate must decode to the replacement character.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Round-trips random (sometimes deliberately malformed) UTF-16 buffers
// through UTF16toUTF8/UTF8toUTF16, comparing the legal cases against the
// JDK encoder and the malformed cases against the U+FFFD expectations.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: must match the JDK's UTF-8 encoding byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Exercises the incremental UTF8toUTF16 API: decodes only the suffix of the
// byte buffer that changed since the previous iteration (bytePrefix) and
// verifies the result matches a full decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
// Keep the first `prefix` chars, randomize the rest.
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that differs from the previous iteration; only
// the bytes after it need re-decoding.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// Full (non-incremental) decode must agree as well.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// A prepared-but-not-committed commit must remain invisible to newly opened
// readers; only commit() publishes the changes.
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// Readers opened after prepareCommit still must not see the pending docs.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// Only after commit() does a reopen expose the 23 docs.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// Rolling back after prepareCommit() must discard the prepared changes and
// leave the index in its last committed state.
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Rollback rewrites files the wrapper would otherwise flag as double-writes.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// The 23 prepared docs must be gone after rollback.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
// prepareCommit()/commit() on a writer with no pending changes must succeed
// and leave a valid empty index.
public void testPrepareCommitNoChanges() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.prepareCommit();
writer.commit();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
dir.close();
}
// LUCENE-1335: test simultaneous addIndexes & commits
// from multiple threads
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1335: test simultaneous addIndexes & close
// LUCENE-1347
// LUCENE-1219
// Indexes a binary field built from a sub-range (offset 10, length 17) of a
// 50-byte array and verifies the stored value round-trips as just that slice.
public void testBinaryFieldOffsetLength() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
// Before indexing, the field exposes the full backing array plus the
// offset/length pair describing the slice.
byte[] bx = f.getBinaryValue();
assertTrue(bx != null);
assertEquals(50, bx.length);
assertEquals(10, f.getBinaryOffset());
assertEquals(17, f.getBinaryLength());
doc.add(f);
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
// Bug fix: the original assertEquals(17, b.length, 17) bound to the
// (expected, actual, delta) double overload and accepted any length within
// +/-17 of 17, i.e. it could never fail. Assert the exact length.
assertEquals(17, b.length);
// b[0] is the byte at original offset 10, i.e. 10+77 = 87.
assertEquals(87, b[0]);
ir.close();
dir.close();
}
// LUCENE-1382
// User data passed to commit(Map) must be readable from both the directory
// and an open reader, and must survive subsequent commits and an optimize.
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// The user data must survive an optimize-only session too.
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
// An empty instance of a multi-valued field must still contribute the
// analyzer's position increment gap, shifting the next value's positions.
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
// Positions 100 and 101: the empty field consumed the 100-position gap.
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1429
// LUCENE-1442
// LUCENE-1442
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1448
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
// Verifies that opening an IndexWriter (create mode) does not delete files
// in the directory that are not part of the index.
public void testOtherFiles() throws Throwable {
Directory dir = newDirectory();
try {
// Create my own random file:
IndexOutput out = dir.createOutput("myrandomfile");
out.writeByte((byte) 42);
out.close();
new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(dir.fileExists("myrandomfile"));
} finally {
dir.close();
}
}
// Verifies addIndexes of two reader clones over the same directory does not
// deadlock and yields the expected combined doc count.
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 original docs + 1 from each of the two (cloned) readers.
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Worker thread for testThreadInterruptDeadlock: repeatedly builds an index
// while the main thread interrupts it, verifying that interrupts surface as
// ThreadInterruptedException and never corrupt the index.
private class IndexerThreadInterrupt extends Thread {
volatile boolean failed;
volatile boolean finish;
// Main thread only interrupts once this is true (see comment below).
volatile boolean allowInterrupt = false;
@Override
public void run() {
// LUCENE-2239: won't work with NIOFS/MMAP
Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter w = null;
while(!finish) {
try {
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<100;i++) {
w.addDocument(doc);
if (i%10 == 0) {
w.commit();
}
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
}
} catch (ThreadInterruptedException re) {
// An interrupt is the expected way out; anything else is a failure.
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
if (!failed) {
// clear interrupt state:
Thread.interrupted();
// NOTE(review): if an interrupt arrived before the first IndexWriter
// was constructed, w could still be null here and rollback() would
// NPE — confirm whether that window is reachable in practice.
try {
w.rollback();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
try {
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
try {
dir.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
// Fires up IndexerThreadInterrupt and delivers up to 100 interrupts while it
// indexes, asserting the worker never deadlocks or corrupts the index.
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// Adds three stored fields (with a duplicate name) and asserts they come
// back from the reader in exactly the order they were added.
public void testStoredFieldsOrder() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// Verifies a term containing an embedded U+FFFF character indexes and is
// searchable (U+FFFF was historically used internally as a sentinel).
public void testEmbeddedFFFF() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
// Builds a non-compound index with infoStream enabled and runs CheckIndex on
// it.  NOTE(review): despite the name, one (empty) document is added —
// confirm whether the name or the addDocument call is the intent.
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Capture the writer's diagnostic output rather than spamming stdout.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
// Each thread adds docs with thread-unique terms, commits, then reopens
// its reader to verify its own doc is immediately visible.
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
// The doc committed above must be visible after reopen.
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// Returns a uniformly random int drawn from [start, end]; both endpoints
// are inclusive.
private final int getInt(Random r, int start, int end) {
final int span = end - start + 1;
return start + r.nextInt(span);
}
// Iterates field "f"'s terms and asserts they arrive in strictly increasing
// (unsigned byte / UTF-8 codepoint) order and that each was in allTerms;
// when isTop, also asserts the enum saw every term.  Finally re-seeks every
// seen term to verify seek finds it.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Each term must sort strictly after the previous one.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Formats a UTF-16 code unit as "U+<hex>" for assertion messages.
private final String asUnicodeChar(char c) {
final StringBuilder label = new StringBuilder("U+");
label.append(Integer.toHexString(c));
return label.toString();
}
// Renders a 1- or 2-char term as comma-separated "U+<hex>" code units for
// use in assertion messages.
private final String termDesc(String s) {
assertTrue(s.length() <= 2);
return s.length() == 1
? asUnicodeChar(s.charAt(0))
: asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// Indexes > 256 distinct terms with termIndexInterval=2 and verifies every
// term is enumerable and matches exactly the one document.
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
// Exactly one doc (id 0) per term, then exhaustion.
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
// Verifies deleteUnusedFiles(): a superseded CFS segment must be kept while
// an open reader still references it and removed once that reader closes
// (NRT case closes it implicitly; non-NRT needs an explicit call).
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed"); left as-is since test
// runners discover tests by name.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush hook; used
// to detect when adds/deletes trigger a flush.
private static class FlushCountingIndexWriter extends IndexWriter {
// Incremented once per completed flush.
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
<<<<<<< MINE
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
Directory dir = new MockRAMDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
=======
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
>>>>>>> YOURS
// Since LUCENE-2386, opening a writer on a fresh directory does not create
// an initial commit; the first commit happens when the writer closes.
public void testNoCommits() throws Exception {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Before any commit, listing commits must fail.
  try {
    IndexReader.listCommits(d);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException expected) {
    // no commits exist yet - this is the expected outcome
  }
  // Closing a writer on a brand-new index produces the first commit even
  // though nothing was indexed.
  w.close();
  assertEquals("expected 1 commits!", 1, IndexReader.listCommits(d).size());
  d.close();
}
// LUCENE-2386 regression test: constructing an IndexWriter over an empty
// FSDirectory that uses NoLockFactory (or SingleInstanceLockFactory) must
// succeed; listAll() in IndexFileDeleter used to fail here.
public void testEmptyFSDirWithNoLock() throws Exception {
  Directory fsDir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
  IndexWriter w = new IndexWriter(fsDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.close();
  fsDir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
>>>>>>> YOURS
}
public void testFutureCommit() throws Exception {
<<<<<<< MINE
Directory dir = newDirectory();
=======
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
// Randomized round-trip test for stored fields: builds an index of docs
// with randomly-valued stored fields (interleaved with random deletes),
// then verifies every surviving document's stored values match what was
// indexed, both before and after optimize().
public void testRandomStoredFields() throws IOException {
  Directory dir = newDirectory();
  Random rand = random;
  RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
  //w.w.setInfoStream(System.out);
  //w.w.setUseCompoundFile(false);
  if (VERBOSE) {
    w.w.setInfoStream(System.out);
  }
  final int docCount = 200*RANDOM_MULTIPLIER;
  final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
  final List<Integer> fieldIDs = new ArrayList<Integer>();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  for(int i=0;i<fieldCount;i++) {
    fieldIDs.add(i);
  }
  // expected state: id -> the Document we indexed under that id
  final Map<String,Document> docs = new HashMap<String,Document>();
  if (VERBOSE) {
    System.out.println("TEST: build index docCount=" + docCount);
  }
  for(int i=0;i<docCount;i++) {
    Document doc = new Document();
    doc.add(idField);
    final String id = ""+i;
    idField.setValue(id);
    docs.put(id, doc);
    for(int field: fieldIDs) {
      final String s;
      if (rand.nextInt(4) != 3) {
        s = _TestUtil.randomUnicodeString(rand, 1000);
        doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
      } else {
        s = null;
      }
    }
    w.addDocument(doc);
    if (rand.nextInt(50) == 17) {
      // mixup binding of field name -> Number every so often
      // FIX: shuffle with the test's seeded Random; the no-arg overload
      // uses its own unseeded source, breaking reproducibility from the
      // test seed.
      Collections.shuffle(fieldIDs, rand);
    }
    if (rand.nextInt(5) == 3 && i > 0) {
      final String delID = ""+rand.nextInt(i);
      if (VERBOSE) {
        System.out.println("TEST: delete doc " + delID);
      }
      w.deleteDocuments(new Term("id", delID));
      docs.remove(delID);
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
  }
  if (docs.size() > 0) {
    String[] idsList = docs.keySet().toArray(new String[docs.size()]);
    // pass 0 checks the multi-segment index, pass 1 checks post-optimize
    for(int x=0;x<2;x++) {
      IndexReader r = w.getReader();
      IndexSearcher s = new IndexSearcher(r);
      if (VERBOSE) {
        System.out.println("TEST: cycle x=" + x + " r=" + r);
      }
      for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
        String testID = idsList[rand.nextInt(idsList.length)];
        TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
        assertEquals(1, hits.totalHits);
        Document doc = r.document(hits.scoreDocs[0].doc);
        Document docExp = docs.get(testID);
        for(int i=0;i<fieldCount;i++) {
          // FIX: report the failing field index i (the message previously
          // printed the constant fieldCount for every field).
          assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.get("f"+i), doc.get("f"+i));
        }
      }
      r.close();
      w.optimize();
    }
  }
  w.close();
  dir.close();
}
// Indexing documents that request no term vectors must not leave any term
// vector files (.tvx/.tvd/.tvf extensions) in the directory.
public void testNoUnwantedTVFiles() throws Exception {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
  // Disable compound files so each index file is individually visible.
  ((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
  String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
  BIG=BIG+BIG+BIG+BIG;
  for (int docNum = 0; docNum < 2; docNum++) {
    Document doc = new Document();
    doc.add(new Field("id", Integer.toString(docNum)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    doc.add(new Field("str", Integer.toString(docNum)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("str2", Integer.toString(docNum)+BIG, Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("str3", Integer.toString(docNum)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
    w.addDocument(doc);
  }
  w.close();
  _TestUtil.checkIndex(d);
  assertNoUnreferencedFiles(d, "no tv files");
  for (String fileName : d.listAll()) {
    assertFalse(fileName.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
    assertFalse(fileName.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
    assertFalse(fileName.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
  }
  d.close();
}
}
Unstructured
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
// Verifies maxDoc()/numDocs() bookkeeping across the writer/reader
// lifecycle: after adds, after deletes (which reduce numDocs but not
// maxDoc until optimize), after optimize (which reclaims deleted docs),
// and after re-creating the index with OpenMode.CREATE.
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Temporarily change the global default write-lock timeout; restored in
// the finally block so other tests are unaffected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// after optimize, deleted docs are physically removed: maxDoc == numDocs
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
// Adds one trivial, unstored single-field document to the given writer.
private void addDoc(IndexWriter writer) throws IOException
{
  Document d = new Document();
  d.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Adds a stored document whose "content" and "id" fields embed the given
// index, so individual docs are distinguishable in later assertions.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  Document d = new Document();
  d.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Fails with the given message if running an IndexFileDeleter (which
// deletes any file not referenced by the current commit) changes the set
// of files in the directory - i.e. if unreferenced files were present.
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
  String[] before = dir.listAll();
  SegmentInfos segmentInfos = new SegmentInfos();
  segmentInfos.read(dir);
  // Constructing the deleter prunes unreferenced files as a side effect.
  new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), segmentInfos, null, CodecProvider.getDefault());
  String[] after = dir.listAll();
  Arrays.sort(before);
  Arrays.sort(after);
  if (!Arrays.equals(before, after)) {
    fail(message + ": before delete:\n    " + arrayToString(before) + "\n  after delete:\n    " + arrayToString(after));
  }
}
// Verifies optimize(maxNumSegments): for a range of index sizes, optimizing
// down to at most 3 segments leaves exactly min(3, original segment count)
// segments.
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
// Re-create the index from scratch each iteration (OpenMode.CREATE) with
// tiny buffers, to produce many small segments.
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Count segments before optimize.
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
// Count segments after optimize(3).
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// optimize(3) never increases the count; with fewer than 3 segments to
// start, the count is unchanged.
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
// Like testOptimizeMaxNumSegments, but keeps one writer open across
// repeated optimize(7) cycles, with a ConcurrentMergeScheduler running
// merges in the background.
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Commit and wait for background merges so the segment count is stable
// before reading SegmentInfos from the directory.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
// optimize(7) leaves min(segCount, 7) segments.
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
* Make sure optimize doesn't use any more than 1X
* starting index size as its temporary free space
* required.
*/
// Measures transient disk usage of optimize() via MockDirectoryWrapper and
// asserts it stays within 4X the starting index size.
// NOTE(review): the comment block above this method says "1X" but the
// assertion below allows up to 4X - confirm which bound is intended.
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Sum the on-disk size of the starting index for the 4X bound below.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
// Start tracking peak usage only for the optimize phase.
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Import to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
// Renders the given file names one per line (separated by "\n "), for use
// in assertion-failure messages.
// FIX: repeated String "+=" in a loop is O(n^2); use StringBuilder instead.
// Output is byte-identical to the previous implementation.
static String arrayToString(String[] l) {
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < l.length; i++) {
    if (i > 0) {
      sb.append("\n    ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
<<<<<<< MINE
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
=======
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
rmDir(indexDir);
}
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
}
if (reader != null) {
reader.close();
}
>>>>>>> YOURS
}
// Any mutation attempted on a closed writer must throw
// AlreadyClosedException.
public void testChangesAfterClose() throws IOException {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  addDoc(w);
  w.close();
  try {
    addDoc(w);
    fail("did not hit AlreadyClosedException");
  } catch (AlreadyClosedException expectedException) {
    // expected
  }
  d.close();
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
// "Commit on close": a reader opened against the index must not see docs
// added by a concurrently open writer until that writer closes.
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Open a reader at the 14-doc commit point, then add 33 more docs in
// batches with a new writer; the reader and fresh searchers should keep
// seeing only 14 until the writer closes.
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
// Closing committed, so the old reader is stale and a new searcher sees
// all 14 + 33 = 47 docs.
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
// "Commit on close" with abort: uncommitted adds/deletes must be invisible
// to readers, fully discarded by rollback(), and the index must remain
// usable for further writers afterwards.
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
// Add 17 docs and delete everything, all uncommitted.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
// A fresh searcher still sees only the committed 14 docs.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
// Rollback discarded both the adds and the deletes: still 14 docs.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
// Add 12 * 17 = 204 more docs; readers keep seeing 14 until close.
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
// After close (commit): 14 + 204 = 218 docs visible.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
// Verifies the deleter cleans up temporary (uncommitted) segments: peak
// transient disk usage while adding docs and while optimizing must stay
// under 150X the starting index size.
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
// Build a small committed baseline index (30 docs).
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
// Track peak usage from this point on.
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
// Add ~50X more docs without committing; peak usage during this phase is
// checked against the 150X bound below.
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
/*
 * Verify optimize with "commit on close": an optimize that is rolled back
 * leaves the index unoptimized and leaves no unreferenced files, while an
 * optimize followed by close() commits the optimized index.
 */
public void testCommitOnCloseOptimize() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Must disable throwing exc on double-write: this
  // test uses IW.rollback which easily results in
  // writing to same file more than once
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(10).
          setMergePolicy(newLogMergePolicy(10))
  );
  for(int j=0;j<17;j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();

  writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  writer.optimize();
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  // Open a reader before closing (committing) the writer:
  IndexReader reader = IndexReader.open(dir, true);
  // Reader should see index as unoptimized at this
  // point:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();

  // Abort the writer:
  writer.rollback();
  assertNoUnreferencedFiles(dir, "aborted writer after optimize");
  // Open a reader after aborting writer:
  reader = IndexReader.open(dir, true);
  // Reader should still see index as unoptimized:
  assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
  reader.close();

  if (VERBOSE) {
    System.out.println("TEST: do real optimize");
  }
  writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  if (VERBOSE) {
    writer.setInfoStream(System.out);
  }
  writer.optimize();
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: writer closed");
  }
  // Fixed copy-paste failure message: this checkpoint follows a *committed*
  // optimize, not an aborted one.
  assertNoUnreferencedFiles(dir, "closed writer after optimize");
  // Open a reader after closing (committing) the writer:
  reader = IndexReader.open(dir, true);
  // Reader should now see the index as optimized:
  assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
  reader.close();
  dir.close();
}
/** An index committed with zero documents must open as an empty reader, in both CREATE and APPEND modes. */
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper dir = newDirectory();

  // First pass: create a brand-new empty index and commit it.
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.commit();
  w.close();

  IndexReader r = IndexReader.open(dir, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();

  // Second pass: reopen in APPEND mode, commit again without adding docs.
  w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  w.commit();
  w.close();

  r = IndexReader.open(dir, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();
  dir.close();
}
// Index 100 docs that together declare 600 distinct field names (a0..f99)
// and verify every (field, term) pair is searchable afterwards.
public void testManyFields() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
// Each unique field name holds its term in exactly one document.
for(int j=0;j<100;j++) {
assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
}
reader.close();
dir.close();
}
// A (near-)zero RAM buffer must force a flush -- and therefore new files in
// the directory -- after every single added document.
public void testSmallRAMBuffer() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setRAMBufferSizeMB(0.000001).
setMergePolicy(newLogMergePolicy(10))
);
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.listAll().length;
// Verify that with a tiny RAM buffer we see new
// segment after every doc
assertTrue(numFile > lastNumFile);
lastNumFile = numFile;
}
writer.close();
dir.close();
}
// Stress the indexer's three internal allocation patterns (Posting objects,
// byte blocks, char blocks) under a small 0.5 MB RAM buffer.
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// "aaa" occurs only in the middle batch: 100 docs per outer iteration.
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
// Verify that enabling norms on just one document (all others omit norms)
// works both before the first flush and after flushes have happened.
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
// Recreate the index; 27 docs spans several flushes at maxBufferedDocs=10.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
// A single doc containing the same term 128K times must index correctly even
// with a tiny (0.01 MB) RAM buffer forcing frequent internal flushing.
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
// Within-document frequency must equal the full 128K occurrences.
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
// Directory with a null LockFactory field that supplies locks solely by
// overriding makeLock() with its own private SingleInstanceLockFactory.
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
// Re-creating the index also goes through the custom lock path.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
// NOTE(review): searcher was already closed above; this second close()
// looks redundant -- verify it is intentional/harmless.
searcher.close();
dir.close();
}
public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// 19 docs at maxBufferedDocs=2 -> 9 auto-flushed segments plus one final
// partial segment from the explicit flush below.
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Flush without triggering a merge.
writer.flush(false, true);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
// Since we flushed w/o allowing merging we should now
// have 10 segments
assertEquals(10, sis.size());
dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
// Flush a segment containing norms, then add a completely empty doc (which
// has no norms) and make sure the resulting index is still consistent.
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
// Pass 0: close() waits for the background optimize, so the reader sees an
// optimized index. Pass 1: extra docs flushed after optimize(false) must
// NOT be folded into the optimize, leaving 2 segments.
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
writer.optimize(false);
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException
*
*/
/** Adding a document whose only field is empty, with term vectors on, must not throw NPE. */
public void testBadSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()));

  // One doc, one empty analyzed field with term vectors enabled.
  Document doc = new Document();
  doc.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
  iw.addDocument(doc);

  iw.close();
  dir.close();
}
// LUCENE-1036
// Indexing (and the merges it triggers) must work while the calling thread
// runs at MAX_PRIORITY; the original priority is always restored.
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
// Small buffers + merge factor 2 force flushes/merges during these adds.
for(int i=0;i<4;i++)
iw.addDocument(document);
iw.close();
dir.close();
} finally {
// Restore the caller's priority even if the test body throws.
Thread.currentThread().setPriority(pri);
}
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain every pending merge serially on the calling thread.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// Plain Java assert: only enforced when tests run with -ea.
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
<<<<<<< MINE
=======
}
// LUCENE-1072
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
>>>>>>> YOURS
dir.close();
}
<<<<<<< MINE
=======
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyOnFlush failure = new FailOnlyOnFlush();
failure.setDoFail();
dir.failOn(failure);
// maxBufferedDocs=2 -> a flush every two docs; the injected failure fires
// during exactly one of those flushes.
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
writer.addDocument(doc);
} catch (IOException ioe) {
// only one flush should fail:
assertFalse(hitError);
hitError = true;
}
}
assertTrue(hitError);
writer.close();
// 200 docs added, minus the 2 lost in the single aborted flush:
IndexReader reader = IndexReader.open(dir, true);
assertEquals(198, reader.docFreq(new Term("content", "aa")));
reader.close();
// NOTE(review): dir is not closed here, unlike sibling tests -- verify intentional.
}
// TokenFilter that throws after emitting four tokens, but only when the
// wrapped stream belongs to the field named "crash"; reset() restarts the
// token counter so the filter can be reused across documents.
private class CrashingFilter extends TokenFilter {
  String fieldName;
  int count;

  public CrashingFilter(String fieldName, TokenStream input) {
    super(input);
    this.fieldName = fieldName;
  }

  @Override
  public boolean incrementToken() throws IOException {
    if (this.fieldName.equals("crash")) {
      if (count++ >= 4) {
        throw new IOException("I'm experiencing problems");
      }
    }
    return input.incrementToken();
  }

  @Override
  public void reset() throws IOException {
    super.reset();
    count = 0;
  }
}
// A document whose "crash" field throws mid-tokenization must be marked
// deleted, while all other docs (before and after) survive intact.
// Iteration i=0 adds two extra docs after the crash; i=1 does not.
public void testDocumentsWriterExceptions() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
writer.close();
// 2 docs before the crash + the crashed doc (kept but deleted) + 2 more on i=0.
IndexReader reader = IndexReader.open(dir, true);
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(1, numDel);
// Add more docs and optimize; optimize expunges the deleted doc.
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
// Multi-threaded variant of testDocumentsWriterExceptions: several threads
// concurrently add good docs plus one doc whose "crash" field throws; only
// the crashed docs may end up deleted.
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
// Per-thread doc count mirrors the single-threaded test: 3+(1-i)*2.
IndexReader reader = IndexReader.open(dir, true);
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
// Exactly one deleted (crashed) doc per thread per iteration.
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
>>>>>>> YOURS
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(newField("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(newField("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(newField("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(newField("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(newField("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Stress close(false) ("no wait" close) while a background thread keeps
// adding documents and merge threads are running; the index must remain
// readable afterwards.
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for(int pass=0;pass<2;pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            // have to use compound file to prevent running out of
            // descripters when newDirectory returns a file-system
            // backed directory:
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for(int iter=0;iter<10;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for(int j=0;j<199;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while(!done) {
            for(int i=0;i<100;i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // Bug fix: check for indexer-thread failures *after* the thread has
      // finished. Previously this check ran before t1.start(), when the
      // failure list was necessarily empty, so failures were never reported.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
<<<<<<< MINE
=======
// Used by test cases below
private class IndexerThread extends Thread {
boolean diskFull;
Throwable error;
AlreadyClosedException ace;
IndexWriter writer;
boolean noErrors;
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
if (fullCount++ >= 5)
break;
} else {
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
// Close without waiting, while indexer threads are still running.
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
// Cap the directory at (roughly) its current size so the very next
// write hits a simulated "disk full".
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: simulated disk full
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: still out of space
}
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
// expected: close needs disk space too
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
// Grow the simulated disk slightly each iteration so the failure
// point shifts around.
dir.setMaxSizeInBytes(4*1024+20*iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {

  // When true, the failure fires at most once and then disarms itself.
  private final boolean onlyOnce;

  public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /**
   * Throws IOException when the current stack shows we were called
   * from an abort() or finishDocument() method.
   */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (doFail) {
      // Inspect the call stack to decide whether this directory access
      // originates from one of the targeted methods.
      for (StackTraceElement frame : new Exception().getStackTrace()) {
        if ("abort".equals(frame.getMethodName()) ||
            "finishDocument".equals(frame.getMethodName())) {
          if (onlyOnce)
            doFail = false;
          throw new IOException("now failing on purpose");
        }
      }
    }
  }
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
// Seed the index before arming the injected failure.
for(int i=0;i<6;i++)
writer.addDocument(doc);
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
// expected: the injected failure fired
}
failure.clearDoFail();
// Writer must remain usable after the failure is disarmed.
writer.addDocument(doc);
writer.close(false);
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
// Let the indexer threads get going before arming the failure.
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable ", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
// close() itself hit the injected failure; disarm and retry.
failure.clearDoFail();
writer.close(false);
}
if (success) {
// The close succeeded: the index must be fully readable.
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbort() throws IOException {
// single thread; failure fires on every matching call (onlyOnce=false)
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
// single thread; failure fires at most once (onlyOnce=true)
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreads() throws Exception {
// multiple threads; failure fires on every matching call (onlyOnce=false)
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
// multiple threads; failure fires at most once (onlyOnce=true)
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
}
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {

  // When true, the failure fires at most once and then disarms itself.
  private final boolean onlyOnce;

  public FailOnlyInCloseDocStore(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /**
   * Throws IOException when the current stack shows we are inside
   * org.apache.lucene.index.DocFieldProcessor.finishDocument.
   */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (doFail) {
      for (StackTraceElement frame : new Exception().getStackTrace()) {
        if ("finishDocument".equals(frame.getMethodName())
            && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName())) {
          if (onlyOnce)
            doFail = false;
          throw new IOException("now failing on purpose");
        }
      }
    }
  }
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStore() throws IOException {
// single thread; failure fires on every matching call (onlyOnce=false)
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
// single thread; failure fires at most once (onlyOnce=true)
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
// multiple threads; failure fires on every matching call (onlyOnce=false)
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
// multiple threads; failure fires at most once (onlyOnce=true)
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
}
// Throws IOException during DocumentsWriter.writeSegment
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {

  // When true, the failure fires at most once and then disarms itself.
  private final boolean onlyOnce;

  public FailOnlyInWriteSegment(boolean onlyOnce) {
    this.onlyOnce = onlyOnce;
  }

  /**
   * Throws IOException when the current stack shows we are inside
   * org.apache.lucene.index.DocFieldProcessor.flush.
   */
  @Override
  public void eval(MockRAMDirectory dir) throws IOException {
    if (doFail) {
      for (StackTraceElement frame : new Exception().getStackTrace()) {
        if ("flush".equals(frame.getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(frame.getClassName())) {
          if (onlyOnce)
            doFail = false;
          throw new IOException("now failing on purpose");
        }
      }
    }
  }
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegment() throws IOException {
// single thread; failure fires on every matching call (onlyOnce=false)
_testSingleThreadFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
// single thread; failure fires at most once (onlyOnce=true)
_testSingleThreadFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
// multiple threads; failure fires on every matching call (onlyOnce=false)
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
}
>>>>>>> YOURS
// LUCENE-1084: test unlimited field length
/**
 * LUCENE-1084: a very large field (10,000 "a" terms followed by one "x")
 * must be indexed in full when no field-length limit is configured; the
 * trailing "x" proves nothing was truncated.
 */
public void testUnlimitedMaxFieldLength() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));

  // Build " a a a ... a x" with 10,000 "a" terms.
  StringBuilder contents = new StringBuilder();
  int count = 0;
  while (count < 10000) {
    contents.append(" a");
    count++;
  }
  contents.append(" x");

  Document d = new Document();
  d.add(newField("field", contents.toString(), Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();

  IndexReader r = IndexReader.open(directory, true);
  assertEquals(1, r.docFreq(new Term("field", "x")));
  r.close();
  directory.close();
}
// LUCENE-1044: test writer.commit() when ac=false
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// A reader opened before commit() sees none of the 23 docs.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// Reopening after commit() picks up the docs; the old reader stays at 0.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// Uncommitted adds remain invisible to the open reader.
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
// After the second commit all 40 docs are visible.
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
Document doc = new Document();
StringBuilder b = new StringBuilder();
// 10,000 "a" terms plus a trailing "x" -- below the configured
// 100,000-term limit, so nothing should be truncated.
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
// The trailing "x" proves the field was indexed in full.
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // (removed a dead "new Document()" that was immediately overwritten)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();
  // Delete two of the ten docs via an IndexReader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  // expungeDeletes() must drop the deleted docs but keep the live ones.
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  // After expunging, maxDoc shrinks to the live-doc count.
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // (removed a dead "new Document()" that was immediately overwritten)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Store.YES,
      Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Store.NO, Index.NOT_ANALYZED,
      TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // Small merge factor (3) forces multiple adjacent merges during expunge.
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // (removed a dead "new Document()" that was immediately overwritten)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<98;i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for(int i=0;i<98;i+=2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  // false => do not wait for the background merges before returning;
  // close() below still completes them.
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
/** LUCENE-1179: indexing a field whose name is the empty string must not fail. */
public void testEmptyFieldName() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document d = new Document();
  d.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  directory.close();
}
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
<<<<<<< MINE
=======
super(dir, conf);
}
boolean doFail;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
}
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
// Arm the test point so DocumentsWriterPerThread init throws.
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
// Index must remain consistent despite the injected failure.
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
// Analyzer whose token stream crashes mid-document (see CrashingFilter),
// so the second add fails right before the maxBufferedDocs=2 flush.
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected
}
// Writer must still accept documents after the failed add.
w.addDocument(doc);
w.close();
dir.close();
}
private static final class MockIndexWriter2 extends IndexWriter {
public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
// doFail arms the failure; failed records that it actually fired.
boolean doFail;
boolean failed;
@Override
boolean testPoint(String name) {
// Throw when merge initialization starts ("startMergeInit").
if (doFail && name.equals("startMergeInit")) {
failed = true;
throw new RuntimeException("intentionally failing");
}
return true;
}
}
// LUCENE-1210
public void testExceptionOnMergeInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
w.doFail = true;
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
// Keep adding until a merge kicks off and hits the injected failure.
for(int i=0;i<10;i++)
try {
w.addDocument(doc);
} catch (RuntimeException re) {
break;
}
// Wait for background merge threads so the failure has surfaced.
((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
assertTrue(w.failed);
w.close();
dir.close();
}
private static final class MockIndexWriter3 extends IndexWriter {
public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
>>>>>>> YOURS
super(dir, conf);
}
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
}
// LUCENE-1222
public void testDoBeforeAfterFlush() throws IOException {
  Directory dir = newDirectory();
  // Must use MockIndexWriter3: it is the mock that records
  // beforeWasCalled/afterWasCalled.  The original used MockIndexWriter,
  // which has neither field, so this test did not compile.
  MockIndexWriter3 w = new MockIndexWriter3(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(newField("field", "a field", Field.Store.YES,
      Field.Index.ANALYZED));
  w.addDocument(doc);
  // commit() flushes, so both hooks must fire.
  w.commit();
  assertTrue(w.beforeWasCalled);
  assertTrue(w.afterWasCalled);
  w.beforeWasCalled = false;
  w.afterWasCalled = false;
  // Deletes alone must also trigger the flush hooks on commit.
  w.deleteDocuments(new Term("field", "field"));
  w.commit();
  assertTrue(w.beforeWasCalled);
  assertTrue(w.afterWasCalled);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  assertEquals(1, ir.maxDoc());
  assertEquals(0, ir.numDocs());
  ir.close();
  dir.close();
}
// Pairs of (input, expected) strings: utf8Data[2*i] is what we index,
// utf8Data[2*i+1] is what must come back after invalid UTF-16 units are
// replaced by U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// utf8Data holds (input, expected) pairs; index each input in its own field.
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
// Both the inverted index and the stored value must contain the
// U+FFFD-sanitized form of the input.
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
// NOTE(review): the bound excludes U+10FFFF itself -- confirm whether
// the final code point should also be covered.
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode supplementary code points as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
// Round-trip UTF-16 -> UTF-8 -> UTF-16 and compare against the JDK.
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte agreement with the JDK encoder.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Convenience wrapper around the test's shared Random: uniform int in [0, lim).
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Uniform random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 units (valid
// surrogate pairs, BMP chars, and occasionally an illegal unpaired
// surrogate), mirroring the chars a correct decoder should produce into
// expected[] (illegal units become U+FFFD).  Returns true if any illegal
// sequence was generated.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which category of char to generate.
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// ASCII range
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range below the surrogate block
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte UTF-8 range above the surrogate block
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// A correct decoder replaces the lone surrogate with U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// For fully-valid input, our UTF-8 must match the JDK's byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Decoding must reproduce the expected chars (U+FFFD for illegal units).
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
final int prefix;
// Keep a random prefix of the previous string to exercise the
// incremental decode path (reset after illegal sequences).
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// For fully-valid input, our UTF-8 must match the JDK's.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first encoded byte that differs from the previous iteration.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode starting at the first changed byte...
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// ...must agree with a full decode from scratch.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
// prepareCommit() alone must not make the docs visible...
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// ...only the subsequent commit() does.
writer.commit();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
// A second prepareCommit() likewise keeps the new docs invisible.
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// rollback() may rewrite files, so allow double writes in the mock dir.
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
// Rolling back a prepared commit must discard all 23 docs.
writer.rollback();
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
// Only the 17 docs added after the rollback survive the final commit.
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
/** LUCENE-1274: prepareCommit/commit with no buffered changes must leave an empty, readable index. */
public void testPrepareCommitNoChanges() throws IOException {
  Directory directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
  // Two-phase commit with nothing to commit.
  w.prepareCommit();
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  assertEquals(0, r.numDocs());
  r.close();
  directory.close();
}
// LUCENE-1219
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);
  Document doc = new Document();
  // Store a 17-byte slice starting at offset 10 of the 50-byte array.
  Field f = new Field("binary", b, 10, 17);
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  // The field keeps a reference to the whole backing array; the
  // offset/length describe the slice that will actually be stored.
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // FIXED: was assertEquals(17, b.length, 17) -- the 3-arg call resolves
  // to the double overload with delta=17, making the assertion vacuous.
  assertEquals(17, b.length);
  // First stored byte is original b[10] == 10 + 77.
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
// No user data yet: commit(Map) has never been called.
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
// The user data is visible both via the static helper and via a reader.
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// optimize() commits again; the user data must survive it.
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
/**
 * Verifies that the analyzer's position increment gap (100) is applied even
 * when the first instance of a field produces no tokens: the empty field
 * contributes nothing, but the second field instance's tokens start at
 * positions 100 and 101.
 */
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
// First instance is empty — yields no tokens but still triggers the gap.
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
// "crunch" lands at position 100 (the gap), "man" at 101.
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
/**
 * Writes a non-index file into the directory, then creates a fresh
 * IndexWriter over it and asserts the foreign file survived.
 */
public void testOtherFiles() throws Throwable {
Directory dir = newDirectory();
try {
// Create my own random file:
IndexOutput out = dir.createOutput("myrandomfile");
out.writeByte((byte) 42);
out.close();
// Creating a new index must only delete index files, not this one.
new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(dir.fileExists("myrandomfile"));
} finally {
dir.close();
}
}
/**
 * Regression test: addIndexes with two readers that are clones of each
 * other (over a second directory) must not deadlock, and the merged index
 * must contain all 3 + 1 + 1 = 5 documents.
 */
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
// r2 is a clone of r1; adding both exercises concurrent reader access.
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
IndexReader r3 = IndexReader.open(dir, true);
// 3 original docs + 1 doc from each of the two added readers.
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
/**
 * Worker thread for testThreadInterruptDeadlock: repeatedly builds a small
 * index while the main test thread fires Thread.interrupt() at it, and
 * verifies that interrupts surface as ThreadInterruptedException wrapping
 * an InterruptedException, and never corrupt the index.
 */
private class IndexerThreadInterrupt extends Thread {
// Set when an unexpected exception or a CheckIndex failure occurs.
volatile boolean failed;
// Set by the test thread to ask this thread to stop.
volatile boolean finish;
// Becomes true only after class loading has settled; the test thread
// must not interrupt before then (see comment below).
volatile boolean allowInterrupt = false;
@Override
public void run() {
// LUCENE-2239: won't work with NIOFS/MMAP
Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter w = null;
while(!finish) {
try {
while(true) {
if (w != null) {
w.close();
}
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
w = new IndexWriter(dir, conf);
Document doc = new Document();
doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
// Small buffers + frequent commits maximize the windows in
// which an interrupt can land mid-flush/mid-merge.
for(int i=0;i<100;i++) {
w.addDocument(doc);
if (i%10 == 0) {
w.commit();
}
}
w.close();
_TestUtil.checkIndex(dir);
IndexReader.open(dir, true).close();
// Strangely, if we interrupt a thread before
// all classes are loaded, the class loader
// seems to do scary things with the interrupt
// status. In java 1.5, it'll throw an
// incorrect ClassNotFoundException. In java
// 1.6, it'll silently clear the interrupt.
// So, on first iteration through here we
// don't open ourselves up for interrupts
// until we've done the above loop.
allowInterrupt = true;
}
} catch (ThreadInterruptedException re) {
// Expected path: the interrupt must be wrapped with its cause intact.
Throwable e = re.getCause();
assertTrue(e instanceof InterruptedException);
if (finish) {
break;
}
} catch (Throwable t) {
System.out.println("FAILED; unexpected exception");
t.printStackTrace(System.out);
failed = true;
break;
}
}
if (!failed) {
// clear interrupt state:
Thread.interrupted();
try {
// NOTE(review): if the very first IndexWriter ctor was interrupted,
// w could still be null here and this would NPE — relies on
// allowInterrupt gating interrupts until after w is assigned.
w.rollback();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
try {
// Index must be consistent despite all the interrupts.
_TestUtil.checkIndex(dir);
} catch (Exception e) {
failed = true;
System.out.println("CheckIndex FAILED: unexpected exception");
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
failed = true;
System.out.println("IndexReader.open FAILED: unexpected exception");
e.printStackTrace(System.out);
}
}
try {
dir.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
 * Fires up to 100 interrupts at an indexing thread and asserts it neither
 * deadlocks nor corrupts its index (see IndexerThreadInterrupt).
 */
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// Only interrupt once the worker says it is safe (classes loaded).
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
/**
 * Adds three stored-only fields (two sharing the name "zzz") and asserts
 * the reader returns them in exactly the order they were added.
 */
public void testStoredFieldsOrder() throws Throwable {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document doc = new Document();
  doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
  doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  doc = r.document(0);
  Iterator<Fieldable> it = doc.getFields().iterator();
  // Fix: assertEquals arguments were (actual, expected) throughout; swapped
  // to the JUnit convention (expected, actual) so failure messages read
  // correctly. Behavior of passing tests is unchanged.
  assertTrue(it.hasNext());
  Field f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("aaa", f.name());
  assertEquals("a b c", f.stringValue());
  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals("zzz", f.name());
  assertEquals("1 2 3", f.stringValue());
  assertFalse(it.hasNext());
  r.close();
  w.close();
  d.close();
}
/**
 * Verifies that a term containing the embedded char U+FFFF is indexed and
 * searchable, and that the index passes CheckIndex.
 */
public void testEmbeddedFFFF() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// Second token of this field embeds U+FFFF mid-term.
doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
// Exactly one doc contains the U+FFFF term.
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
/**
 * Indexes a single empty Document (no fields, no compound files) with the
 * info stream captured, and asserts the resulting index passes CheckIndex.
 */
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Capture the info stream to a buffer; output itself is not asserted.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
/**
 * Runs NUM_THREADS writers for ~0.5s; each thread adds a uniquely-valued
 * doc, commits, reopens its reader, and asserts the just-committed term is
 * visible — i.e. commit() does not return before changes are searchable.
 */
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
// Unique term per thread+iteration, e.g. "3_17".
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
// After commit, a reopened reader MUST see the new term.
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
// Flag other threads to stop, then fail this one loudly.
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
/** Uniformly picks an int in [start, end]; both endpoints are inclusive. */
private final int getInt(Random r, int start, int end) {
  final int span = 1 + end - start;
  return r.nextInt(span) + start;
}
/**
 * Walks all terms of field "f" in {@code r}, asserting they come back in
 * strictly increasing BytesRef order and were all actually added; when
 * {@code isTop} (a top-level reader), also asserts no term is missing.
 * Finally re-seeks every seen term and asserts an exact FOUND.
 */
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Enum must be strictly increasing in byte order.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
// A sub-reader may legitimately hold only a subset of the terms; only a
// top-level reader must contain them all.
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
/** Renders a char as its hex code point, e.g. 'A' becomes "U+41". */
private final String asUnicodeChar(char c) {
  return String.format("U+%x", (int) c);
}
/**
 * Describes a 1- or 2-char term as its Unicode code units for failure
 * messages, e.g. "U+d800,U+dc00" for a surrogate pair.
 */
private final String termDesc(String s) {
  assertTrue(s.length() <= 2);
  if (s.length() == 1) {
    return asUnicodeChar(s.charAt(0));
  }
  return asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
/**
 * With a small term index interval (2), indexes >256 distinct terms in one
 * doc and verifies every term enumerates with exactly that one doc.
 */
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
// Single segment: grab the one sub-reader directly.
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
// Each term occurs in exactly doc 0 and nowhere else.
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
/**
 * Verifies deleteUnusedFiles(): a CFS segment still referenced by an open
 * reader must not be deleted; once the reader is closed (NRT case) or
 * deleteUnusedFiles is called again (non-NRT case), it is removed.
 * Iteration 0 uses an NRT reader, iteration 1 a committed directory reader.
 */
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed"); left as-is since tests
// may be referenced by name externally.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
/**
 * IndexWriter subclass that counts flushes via the doAfterFlush() hook;
 * used by testIndexingThenDeleting to detect when a flush was triggered.
 */
private static class FlushCountingIndexWriter extends IndexWriter {
// Incremented once per flush; read (not reset) by the test.
int flushCount;
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
<<<<<<< MINE
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
=======
// nocommit - TODO: enable when flushing by RAM is implemented
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
>>>>>>> YOURS
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
<<<<<<< MINE
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
=======
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
>>>>>>> YOURS
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
try {
// Listing commits on a never-committed index must fail, not return empty.
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
public void testEmptyFSDirWithNoLock() throws Exception {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
// Constructing and closing must not throw; nothing else is asserted.
new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();
dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
}
public void testFutureCommit() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setInfoStream(System.out);
//w.w.setUseCompoundFile(false);
if (VERBOSE) {
w.w.setInfoStream(System.out);
}
final int docCount = 200*RANDOM_MULTIPLIER;
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc " + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.optimize();
}
}
w.close();
dir.close();
}
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
}
public void testFutureCommit() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
>>>>>>> YOURS
dir.close();
}
}package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
/**
 * Verifies maxDoc/numDocs bookkeeping across adds, deletes, optimize and
 * OpenMode.CREATE, and that the global default write-lock timeout can be
 * changed and restored without leaking into other tests.
 */
public void testDocCount() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
// Change the process-wide default timeout only around writer construction;
// restore it in the finally block so other tests are unaffected.
long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
try {
IndexWriterConfig.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
} finally {
IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
}
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
writer.close();
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
// optimize reclaims the deleted docs, so maxDoc drops to numDocs
assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// make sure opening a new index for create over
// this existing one works correctly:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
dir.close();
}
// Helper: adds one minimal document with a single unstored, analyzed
// "content" field ("aaa") to the given writer.
private void addDoc(IndexWriter writer) throws IOException
{
  Document d = new Document();
  d.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
}
// Helper: adds a document whose stored, analyzed "content" and "id"
// fields embed the given index number, so individual docs can be found.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
  Document d = new Document();
  d.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
  d.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(d);
}
/**
 * Asserts that no unreferenced files exist in the directory: running an
 * IndexFileDeleter over the current commit must leave the directory
 * listing unchanged.
 */
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
String[] startFiles = dir.listAll();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
// Constructing the deleter performs its deletion pass as a side effect.
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, CodecProvider.getDefault());
String[] endFiles = dir.listAll();
// Sort both listings so the comparison is order-independent.
Arrays.sort(startFiles);
Arrays.sort(endFiles);
if (!Arrays.equals(startFiles, endFiles)) {
fail(message + ": before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
}
}
/**
 * For a range of index sizes, optimize(3) must leave exactly 3 segments —
 * or leave the index untouched when it already had fewer than 3.
 */
public void testOptimizeMaxNumSegments() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(5);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
ldmp));
for(int j=0;j<numDocs;j++)
writer.addDocument(doc);
writer.close();
// Record the segment count before the bounded optimize...
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
new MockAnalyzer()).setMergePolicy(ldmp));
writer.optimize(3);
writer.close();
// ...and after: it must equal min(segCount, 3).
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 3)
assertEquals(segCount, optSegCount);
else
assertEquals(3, optSegCount);
}
dir.close();
}
/**
 * Same contract as testOptimizeMaxNumSegments but with a
 * ConcurrentMergeScheduler and repeated incremental adds: after
 * optimize(7) the index must hold at most 7 segments.
 */
public void testOptimizeMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
ldmp.setMergeFactor(4);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
for(int iter=0;iter<10;iter++) {
for(int i=0;i<19;i++)
writer.addDocument(doc);
// Commit and wait for background merges so the on-disk segment count is
// stable before we read SegmentInfos.
writer.commit();
writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
final int segCount = sis.size();
writer.optimize(7);
writer.commit();
writer.waitForMerges();
sis = new SegmentInfos();
sis.read(dir);
final int optSegCount = sis.size();
if (segCount < 7)
assertEquals(segCount, optSegCount);
else
assertEquals(7, optSegCount);
}
writer.close();
dir.close();
}
/**
 * Make sure optimize doesn't use more temporary free space
 * than the allowed multiple of the starting index size
 * (the assertion below permits up to 4X).
 */
public void testOptimizeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
if (VERBOSE) {
System.out.println("TEST: config1=" + writer.getConfig());
}
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
// force one extra segment w/ different doc store so
// we see the doc stores get merged
writer.commit();
addDocWithIndex(writer, 500);
writer.close();
if (VERBOSE) {
System.out.println("TEST: start disk usage");
}
// Measure the starting index size as the sum of all file lengths.
long startDiskUsage = 0;
String[] files = dir.listAll();
for(int i=0;i<files.length;i++) {
startDiskUsage += dir.fileLength(files[i]);
if (VERBOSE) {
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
}
}
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
// Import to use same term index interval else a
// smaller one here could increase the disk usage and
// cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
/**
 * Joins the given file names with "\n " separators, for use in readable
 * assertion-failure messages.
 *
 * Fix: the original built the result with {@code s += ...} in a loop,
 * which is O(n^2) in total string length; a StringBuilder makes it O(n)
 * while producing a byte-identical result.
 *
 * @param l file names to join; may be empty (returns "")
 * @return the names separated by a newline plus one space
 */
static String arrayToString(String[] l) {
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < l.length; i++) {
    if (i > 0) {
      sb.append("\n ");
    }
    sb.append(l[i]);
  }
  return sb.toString();
}
// Make sure we can open an index for create even when a
// reader holds it open (this fails pre lock-less
// commits on windows):
public void testCreateWithReader() throws IOException {
Directory dir = newDirectory();
// add one document & close writer
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
<<<<<<< MINE
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
dir.close();
=======
// now open index for create:
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
rmDir(indexDir);
}
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
// and that we can add to the index:
public void testSimulatedCrashedWriter() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
// Make the next segments file, with last byte
// missing, to simulate a writer that crashed while
// writing segments file:
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
reader.close();
try {
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
} catch (Exception e) {
e.printStackTrace(System.out);
fail("writer failed to open on a crashed index");
}
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
}
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
// add 100 documents
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
// close
writer.close();
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
1+gen);
IndexInput in = dir.openInput(fileNameIn);
IndexOutput out = dir.createOutput(fileNameOut);
long length = in.length();
for(int i=0;i<length-1;i++) {
out.writeByte(in.readByte());
}
in.close();
out.close();
dir.deleteFile(fileNameIn);
IndexReader reader = null;
try {
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
}
if (reader != null) {
reader.close();
}
>>>>>>> YOURS
}
/**
 * Adding a document after the writer has been closed must throw
 * AlreadyClosedException.
 */
public void testChangesAfterClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc(writer);
// close
writer.close();
try {
addDoc(writer);
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException e) {
// expected
}
dir.close();
}
/*
* Simple test for "commit on close": open writer then
* add a bunch of docs, making sure reader does not see
* these docs until writer is closed.
*/
public void testCommitOnClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
// While the second writer holds uncommitted adds, a fresh searcher must
// still see only the 14 originally committed docs.
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
// Now, close the writer:
writer.close();
// close() commits, so the old reader is stale and a new searcher sees
// all 14 + 3*11 = 47 docs.
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
reader.close();
dir.close();
}
/*
* Simple test for "commit on close": open writer, then
* add a bunch of docs, making sure reader does not see
* them until writer has closed. Then instead of
* closing the writer, call abort and verify reader sees
* nothing was added. Then verify we can open the index
* and add docs to it.
*/
public void testCommitOnCloseAbort() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
for(int j=0;j<17;j++) {
addDoc(writer);
}
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
// Now, close the writer:
// rollback() must discard both the 17 uncommitted adds and the delete.
writer.rollback();
assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
dir.setPreventDoubleWrite(false);
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
// 14 original + 12*17 new docs = 218
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
dir.close();
}
/*
* Verify that a writer with "commit on close" indeed
* cleans up the temp segments created after opening
* that are not referenced by the starting segments
* file. We check this by using MockDirectoryWrapper to
* measure max temp disk space used.
*/
public void testCommitOnCloseDiskUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Phase 1: build a small (30 doc) baseline index.
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
writer.close();
dir.resetMaxUsedSizeInBytes();
dir.setTrackDiskUsage(true);
long startDiskUsage = dir.getMaxUsedSizeInBytes();
// Phase 2: append 1470 docs and measure peak transient usage.
writer = new IndexWriter(
dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergeScheduler(new SerialMergeScheduler()).
setReaderPooling(false).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<1470;j++) {
addDocWithIndex(writer, j);
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
writer.optimize();
writer.close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
// to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
// will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
midDiskUsage < 150*startDiskUsage);
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
endDiskUsage < 150*startDiskUsage);
dir.close();
}
/*
* Verify that calling optimize when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
public void testCommitOnCloseOptimize() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(10))
);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer.optimize();
if (VERBOSE) {
writer.setInfoStream(System.out);
}
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
// Abort the writer:
// rollback() must discard the (uncommitted) optimize entirely.
writer.rollback();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
if (VERBOSE) {
System.out.println("TEST: do real optimize");
}
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
if (VERBOSE) {
writer.setInfoStream(System.out);
}
writer.optimize();
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
// (this time close() committed, so the optimize is visible)
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
reader.close();
dir.close();
}
// An empty commit — both on a fresh index and on an APPEND re-open — must
// yield a readable index containing zero documents.
public void testIndexNoDocuments() throws IOException {
  MockDirectoryWrapper directory = newDirectory();
  IndexWriter w = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(directory, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();
  // Re-open for append and commit nothing again:
  w = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
  w.commit();
  w.close();
  r = IndexReader.open(directory, true);
  assertEquals(0, r.maxDoc());
  assertEquals(0, r.numDocs());
  r.close();
  directory.close();
}
// Indexes 100 docs, each carrying six uniquely-named fields ("a<n>".."f<n>"),
// then verifies every field/term pair has docFreq == 1.
public void testManyFields() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
  for (int docNum = 0; docNum < 100; docNum++) {
    Document d = new Document();
    // a/b/c embed the doc number in the term; d/e/f share the term "aaa"
    // but still have per-doc field names.
    d.add(newField("a" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    d.add(newField("b" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    d.add(newField("c" + docNum, "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    d.add(newField("d" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    d.add(newField("e" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    d.add(newField("f" + docNum, "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(d);
  }
  writer.close();
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(100, reader.maxDoc());
  assertEquals(100, reader.numDocs());
  for (int docNum = 0; docNum < 100; docNum++) {
    assertEquals(1, reader.docFreq(new Term("a" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("b" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("c" + docNum, "aaa" + docNum)));
    assertEquals(1, reader.docFreq(new Term("d" + docNum, "aaa")));
    assertEquals(1, reader.docFreq(new Term("e" + docNum, "aaa")));
    assertEquals(1, reader.docFreq(new Term("f" + docNum, "aaa")));
  }
  reader.close();
  dir.close();
}
// With an absurdly tiny RAM buffer, every added document must trigger a
// flush, so the directory's file count grows after each add.
public void testSmallRAMBuffer() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
      setRAMBufferSizeMB(0.000001).
      setMergePolicy(newLogMergePolicy(10))
  );
  int prevFileCount = dir.listAll().length;
  for (int docNum = 0; docNum < 9; docNum++) {
    Document d = new Document();
    d.add(newField("field", "aaa" + docNum, Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(d);
    // Verify that with a tiny RAM buffer we see a new segment after every doc.
    int fileCount = dir.listAll().length;
    assertTrue(fileCount > prevFileCount);
    prevFileCount = fileCount;
  }
  writer.close();
  dir.close();
}
/**
 * Stresses the indexing chain under a small (0.5 MB) RAM buffer with three
 * document shapes — many unique terms, many repeats of one term, and very
 * long terms — then checks the repeated term matches all 300 such docs.
 */
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
for(int i=0;i<3;i++) {
// First, docs where every term is unique (heavy on
// Posting instances)
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// Next, many single term docs where only one term
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
String longTerm = b.toString();
Document doc = new Document();
doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
writer.close();
// 3 outer iterations x 100 docs containing "aaa" = 300 hits expected.
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
dir.close();
}
/**
 * Enables norms for only one document per batch — once before flush and
 * once after — and verifies all documents remain searchable either way.
 */
public void testEnablingNorms() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
// Enable norms for only 1 doc, post flush
// (27 docs with maxBufferedDocs=10 guarantees the norms-enabled doc
// lands after at least one flush)
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
doc.add(f);
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
}
/**
 * Indexes one massive document containing 128K occurrences of the term
 * "a" and verifies docFreq is 1 while the within-doc frequency is 128*1024.
 */
public void testHighFreqTerm() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01));
// Massive doc that has 128 K a's
// (4096 iterations x 32 "a"s appended per iteration)
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();
assertEquals(128*1024, td.freq());
reader.close();
dir.close();
}
// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {
// Local directory subclass that nulls out the inherited lockFactory and
// routes all makeLock calls through its own private factory instead.
final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random, delegate);
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}
Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
// Re-opening for CREATE must also work through the private lock factory.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE));
writer.close();
searcher.close();
dir.close();
}
/**
 * With maxBufferedDocs=2 and mergeFactor 10, flushing 19 docs without
 * allowing merges must leave exactly 10 segments on disk.
 */
public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
// flush without triggering a merge
writer.flush(false, true);
writer.close();
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
// Since we flushed w/o allowing merging we should now
// have 10 segments
assertEquals(10, sis.size());
dir.close();
}
// Make sure we can flush segment w/ norms, then add
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// commit forces the first (real) doc's segment to be flushed
writer.commit();
if (VERBOSE) {
System.out.println("\nTEST: now add empty doc");
}
writer.addDocument(new Document());
writer.close();
// Sanity-check the resulting index structure before reading it.
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
dir.close();
}
// Test calling optimize(false) whereby optimize is kicked
// off but we don't wait for it to finish (but
// writer.close()) does wait
public void testBackgroundOptimize() throws IOException {
Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(101))
);
Document doc = new Document();
doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
// Kick off optimize in the background; do not wait for it here.
writer.optimize(false);
if (0 == pass) {
// Pass 0: close() waits for the background optimize, so the
// reader must see a fully optimized index.
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
// Get another segment to flush so we can verify it is
// NOT included in the optimization
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
// Exactly 2 segments: the optimized one plus the late flush.
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
assertEquals(2, infos.size());
}
}
dir.close();
}
/**
* Test that no NullPointerException will be raised,
* when adding one document with a single, empty field
* and term vectors enabled.
* @throws IOException if the index cannot be written or closed
*
*/
public void testBadSegment() throws IOException {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
// Empty analyzed value with TermVector.YES — the regression case.
Document document = new Document();
document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
iw.addDocument(document);
iw.close();
dir.close();
}
// LUCENE-1036: indexing must work while the calling thread
// runs at MAX_PRIORITY; the original priority is always restored.
public void testMaxThreadPriority() throws IOException {
  final int savedPriority = Thread.currentThread().getPriority();
  try {
    Directory dir = newDirectory();
    IndexWriterConfig conf =
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
    IndexWriter iw = new IndexWriter(dir, conf);
    Document d = new Document();
    d.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
        Field.TermVector.YES));
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    for (int i = 0; i < 4; i++) {
      iw.addDocument(d);
    }
    iw.close();
    dir.close();
  } finally {
    // Restore whatever priority the test runner started with.
    Thread.currentThread().setPriority(savedPriority);
  }
}
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs
private class MyMergeScheduler extends MergeScheduler {
@Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
// Drain every merge the writer has queued, running each inline.
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null)
break;
// Checked only when assertions (-ea) are enabled: each segment
// selected for merging must respect the test's maxMergeDocs=20.
for(int i=0;i<merge.segments.size();i++)
assert merge.segments.info(i).docCount < 20;
writer.merge(merge);
}
}
@Override
public void close() {}
}
// LUCENE-1013
public void testSetMaxMergeDocs() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMaxMergeDocs(20);
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
iw.close();
<<<<<<< MINE
=======
}
// LUCENE-1072
public void testExceptionFromTokenStream() throws IOException {
RAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
private int count = 0;
@Override
public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
return input.incrementToken();
}
};
}
});
IndexWriter writer = new IndexWriter(dir, conf);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (Exception e) {
}
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
// Make sure the doc that hit the exception was marked
// as deleted:
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
t.field(),
new BytesRef(t.text()));
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
assertEquals(2, count);
assertEquals(reader.docFreq(new Term("content", "gg")), 0);
reader.close();
>>>>>>> YOURS
dir.close();
}
<<<<<<< MINE
=======
@Override
public void setDoFail() {
this.doFail = true;
}
@Override
public void clearDoFail() {
this.doFail = false;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean sawAppend = false;
boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
if ("org.apache.lucene.index.FreqProxTermsWriterPerField".equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
sawAppend = true;
if ("flushSegment".equals(trace[i].getMethodName()))
sawFlush = true;
}
if (sawAppend && sawFlush && count++ >= 30) {
doFail = false;
throw new IOException("now failing during flush");
}
}
}
}
// LUCENE-1072: make sure an errant exception on flushing
// one segment only takes out those docs in that one flush
public void testDocumentsWriterAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyOnFlush failure = new FailOnlyOnFlush();
failure.setDoFail();
dir.failOn(failure);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
writer.addDocument(doc);
} catch (IOException ioe) {
// only one flush should fail:
assertFalse(hitError);
hitError = true;
}
}
assertTrue(hitError);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
assertEquals(198, reader.docFreq(new Term("content", "aa")));
reader.close();
}
private class CrashingFilter extends TokenFilter {
String fieldName;
int count;
public CrashingFilter(String fieldName, TokenStream input) {
super(input);
this.fieldName = fieldName;
}
@Override
public boolean incrementToken() throws IOException {
if (this.fieldName.equals("crash") && count++ >= 4)
throw new IOException("I'm experiencing problems");
return input.incrementToken();
}
@Override
public void reset() throws IOException {
super.reset();
count = 0;
}
}
public void testDocumentsWriterExceptions() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir, true);
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(1, numDel);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
public void testDocumentsWriterExceptionThreads() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
final int NUM_THREAD = 3;
final int NUM_ITER = 100;
for(int i=0;i<2;i++) {
MockRAMDirectory dir = new MockRAMDirectory();
{
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
final int finalI = i;
Thread[] threads = new Thread[NUM_THREAD];
for(int t=0;t<NUM_THREAD;t++) {
threads[t] = new Thread() {
@Override
public void run() {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
} catch (IOException ioe) {
}
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
}
} catch (Throwable t) {
synchronized(this) {
System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
t.printStackTrace(System.out);
}
fail();
}
}
};
threads[t].start();
}
for(int t=0;t<NUM_THREAD;t++)
threads[t].join();
writer.close();
}
IndexReader reader = IndexReader.open(dir, true);
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(NUM_THREAD*NUM_ITER, numDel);
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
assertEquals(0, numDel);
dir.close();
}
}
>>>>>>> YOURS
// Indexes 20 rounds of docs whose field schema varies per round
// (empty fields, stored vs. unstored), deleting one doc per round
// and periodically optimizing, to stress merging across segments
// with differing field layouts.
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
}
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setMergeFactor(2);
lmp.setUseCompoundFile(false);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
doc.add(newField("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
// Alternate the schema: even rounds add content4 and store
// content1/content5; odd rounds leave them unstored.
Field.Store storeVal;
if (i%2 == 0) {
doc.add(newField("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(newField("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(newField("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(newField("content5", "", storeVal,
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
writer.addDocument(doc);
writer.close();
// Delete one doc per round so merges must cope with deletions.
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
// Every 4th round, force a full optimize (also non-compound).
if (0 == i % 4) {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp2.setUseCompoundFile(false);
writer.optimize();
writer.close();
}
}
dir.close();
}
// Stresses close(false) — abort pending merges and in-flight adds —
// while a background thread keeps adding documents. Afterwards the
// index must still open cleanly for both reading and appending.
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
  doc.add(idField);
  for(int pass=0;pass<2;pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(2).
            // have to use compound file to prevent running out of
            // descriptors when newDirectory returns a file-system
            // backed directory:
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    for(int iter=0;iter<10;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for(int j=0;j<199;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }
      int delID = iter*199;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      // Collects any unexpected Throwable the indexing thread hits
      // (AlreadyClosedException/NPE are the expected shutdown signals).
      final ArrayList<Throwable> failure = new ArrayList<Throwable>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while(!done) {
            for(int i=0;i<100;i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                e.printStackTrace(System.out);
                failure.add(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close(false);
      t1.join();
      // BUGFIX: this check used to run BEFORE t1.start(), where the
      // list was necessarily empty so failures were never reported.
      // It must run after join(), once the thread has finished.
      if (failure.size() > 0) {
        throw failure.get(0);
      }
      // Make sure reader can read
      IndexReader reader = IndexReader.open(directory, true);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }
    writer.close();
  }
  directory.close();
}
<<<<<<< MINE
=======
// Used by test cases below
private class IndexerThread extends Thread {
boolean diskFull;
Throwable error;
AlreadyClosedException ace;
IndexWriter writer;
boolean noErrors;
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
this.writer = writer;
this.noErrors = noErrors;
}
@Override
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
addCount++;
} catch (IOException ioe) {
//System.out.println(Thread.currentThread().getName() + ": hit exc");
//ioe.printStackTrace(System.out);
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
if (fullCount++ >= 5)
break;
} else {
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
ioe.printStackTrace(System.out);
error = ioe;
}
break;
}
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
error = t;
}
break;
}
} while(System.currentTimeMillis() < stopTime);
}
}
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid us of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
// We expect AlreadyClosedException
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
boolean done = false;
while(!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
if (threads[i].addCount > 0) {
done = true;
break;
}
}
writer.close(false);
// Make sure threads that are adding docs are not hung:
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
if (threads[i].isAlive())
fail("thread seems to be hung");
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir, true);
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
"field",
new BytesRef("aaa"));
int count = 0;
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
count++;
}
assertTrue(count > 0);
reader.close();
dir.close();
}
}
// LUCENE-1130: make sure immeidate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Without fix for LUCENE-1130: this call will hang:
try {
writer.addDocument(doc);
fail("did not hit disk full");
} catch (IOException ioe) {
}
try {
writer.close(false);
fail("did not hit disk full");
} catch (IOException ioe) {
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
dir.setMaxSizeInBytes(4*1024+20*iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
for(int i=0;i<NUM_THREADS;i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
// Make sure once disk space is avail again, we can
// cleanly close:
dir.setMaxSizeInBytes(0);
writer.close(false);
dir.close();
}
}
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("abort".equals(trace[i].getMethodName()) ||
"finishDocument".equals(trace[i].getMethodName())) {
if (onlyOnce)
doFail = false;
//System.out.println(Thread.currentThread().getName() + ": now fail");
//new Throwable().printStackTrace(System.out);
throw new IOException("now failing on purpose");
}
}
}
}
}
// Runs test, with one thread, using the specific failure
// to trigger an IOException
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<6;i++)
writer.addDocument(doc);
dir.failOn(failure);
failure.setDoFail();
try {
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
fail("did not hit exception");
} catch (IOException ioe) {
}
failure.clearDoFail();
writer.addDocument(doc);
writer.close(false);
}
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
public void _testMultipleThreadsFailure(MockRAMDirectory.Failure failure) throws Exception {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setMaxBufferedDocs(2);
// We expect disk full exceptions in the merge threads
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
IndexWriter writer = new IndexWriter(dir, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
threads[i].start();
Thread.sleep(10);
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
assertTrue("hit unexpected Throwable ", threads[i].error == null);
}
boolean success = false;
try {
writer.close(false);
success = true;
} catch (IOException ioe) {
failure.clearDoFail();
writer.close(false);
}
if (success) {
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
}
reader.close();
}
dir.close();
}
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbort() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), is OK:
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
}
// LUCENE-1130: make sure initial IOException, and then 2nd
// IOException during rollback(), with multiple threads, is OK:
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
}
// Throws IOException during DocumentsWriter.closeDocStore
private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInCloseDocStore(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("finishDocument".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStore() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
}
// LUCENE-1130: test IOException in closeDocStore, with threads
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
}
// Throws IOException during DocumentsWriter.writeSegment
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
private boolean onlyOnce;
public FailOnlyInWriteSegment(boolean onlyOnce) {
this.onlyOnce = onlyOnce;
}
@Override
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
if (onlyOnce)
doFail = false;
throw new IOException("now failing on purpose");
}
}
}
}
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegment() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
_testSingleThreadFailure(new FailOnlyInWriteSegment(true));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
}
// LUCENE-1130: test IOException in writeSegment, with threads
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
}
>>>>>>> YOURS
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
// LUCENE-1044: test writer.commit() when ac=false
// Readers must only ever see explicitly committed state, and a
// reopened reader must pick up exactly the docs committed so far.
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
// Uncommitted adds must be invisible to a fresh reader.
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
// reopen() sees the commit; the original reader stays at its point-in-time view.
IndexReader reader2 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(23, reader2.numDocs());
reader.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
// reader2 is likewise pinned to its snapshot of 23 docs.
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1084: test user-specified field length
// With maxFieldLength=100000, a 10,001-token field fits entirely,
// so its last token remains searchable.
public void testUserSpecifiedMaxFieldLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxFieldLength(100000));
  StringBuilder value = new StringBuilder();
  for (int i = 0; i < 10000; i++) {
    value.append(" a");
  }
  value.append(" x");
  Document doc = new Document();
  doc.add(newField("field", value.toString(), Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(doc);
  writer.close();
  // The trailing "x" token must have been indexed.
  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(1, reader.docFreq(new Term("field", "x")));
  reader.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when 2 singular merges
// are required
public void testExpungeDeletes() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(
          IndexWriterConfig.DISABLE_AUTO_FLUSH));
  // BUGFIX(cleanup): the original created a Document and then
  // immediately reassigned it ("document = new Document();"),
  // a dead store; a single construction is equivalent.
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();
  // Delete 2 of the 10 docs through a writable reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(10, ir.maxDoc());
  assertEquals(10, ir.numDocs());
  ir.deleteDocument(0);
  ir.deleteDocument(7);
  assertEquals(8, ir.numDocs());
  ir.close();
  // expungeDeletes must drop the deleted docs: maxDoc shrinks to 8.
  writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  assertEquals(8, writer.numDocs());
  assertEquals(10, writer.maxDoc());
  writer.expungeDeletes();
  assertEquals(8, writer.numDocs());
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(8, ir.maxDoc());
  assertEquals(8, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
public void testExpungeDeletes2() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // One stored-only field plus one term-vector field per document.
  // (Removed a dead `new Document()` that was immediately overwritten;
  // qualified Store/Index/TermVector as Field.* to match the sibling tests.)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc via a read/write reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  // Small merge factor forces many adjacent merges during expungeDeletes.
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  assertEquals(49, writer.numDocs());
  writer.expungeDeletes();
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-325: test expungeDeletes without waiting, when
// many adjacent merges are required
public void testExpungeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMaxBufferedDocs(2).
          setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
          setMergePolicy(newLogMergePolicy(50))
  );
  // One stored-only field plus one term-vector field per document.
  // (Removed a dead `new Document()` that was immediately overwritten.)
  Document document = new Document();
  Field storedField = newField("stored", "stored", Field.Store.YES,
      Field.Index.NO);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector",
      Field.Store.NO, Field.Index.NOT_ANALYZED,
      Field.TermVector.WITH_POSITIONS_OFFSETS);
  document.add(termVectorField);
  for (int i = 0; i < 98; i++)
    writer.addDocument(document);
  writer.close();
  // Delete every other doc via a read/write reader.
  IndexReader ir = IndexReader.open(dir, false);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  for (int i = 0; i < 98; i += 2)
    ir.deleteDocument(i);
  assertEquals(49, ir.numDocs());
  ir.close();
  writer = new IndexWriter(
      dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
          setMergePolicy(newLogMergePolicy(3))
  );
  // false => do not wait for the background merges to finish; close()
  // below is what guarantees completion.
  writer.expungeDeletes(false);
  writer.close();
  ir = IndexReader.open(dir, true);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// LUCENE-1179
// A field whose name is the empty string must be accepted by the indexer.
public void testEmptyFieldName() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  Document d = new Document();
  d.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
  w.addDocument(d);
  w.close();
  dir.close();
}
private static final class MockIndexWriter extends IndexWriter {
public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
<<<<<<< MINE
=======
super(dir, conf);
}
boolean doFail;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("DocumentsWriterPerThread.init start"))
throw new RuntimeException("intentionally failing");
return true;
}
}
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.doFail = true;
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (RuntimeException re) {
// expected
}
w.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
}
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
} catch (IOException ioe) {
// expected
}
w.addDocument(doc);
w.close();
dir.close();
}
private static final class MockIndexWriter2 extends IndexWriter {
public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
boolean doFail;
boolean failed;
@Override
boolean testPoint(String name) {
if (doFail && name.equals("startMergeInit")) {
failed = true;
throw new RuntimeException("intentionally failing");
}
return true;
}
}
// LUCENE-1210
public void testExceptionOnMergeInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
w.doFail = true;
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
for(int i=0;i<10;i++)
try {
w.addDocument(doc);
} catch (RuntimeException re) {
break;
}
((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
assertTrue(w.failed);
w.close();
dir.close();
}
private static final class MockIndexWriter3 extends IndexWriter {
public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
>>>>>>> YOURS
super(dir, conf);
}
boolean afterWasCalled;
boolean beforeWasCalled;
@Override
public void doAfterFlush() {
afterWasCalled = true;
}
@Override
protected void doBeforeFlush() throws IOException {
beforeWasCalled = true;
}
}
// LUCENE-1222
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.beforeWasCalled = false;
w.afterWasCalled = false;
w.deleteDocuments(new Term("field", "field"));
w.commit();
assertTrue(w.beforeWasCalled);
assertTrue(w.afterWasCalled);
w.close();
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
dir.close();
}
// Pairs of (raw input, expected value after round-tripping through UTF-8):
// even indexes are inputs containing invalid UTF-16, odd indexes are the
// same strings with each invalid sequence replaced by U+FFFD.
final String[] utf8Data = new String[] {
// unpaired low surrogate
"ab\udc17cd", "ab\ufffdcd",
"\udc17abcd", "\ufffdabcd",
"\udc17", "\ufffd",
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
"\udc17\udc17", "\ufffd\ufffd",
// unpaired high surrogate
"ab\ud917cd", "ab\ufffdcd",
"\ud917abcd", "\ufffdabcd",
"\ud917", "\ufffd",
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
"\ud917\ud917", "\ufffd\ufffd",
// backwards surrogates
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
"\udc17\ud917", "\ufffd\ufffd",
// low+high followed by a valid high+low pair: only the outer two are replaced
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
};
// LUCENE-510
// Indexes the malformed strings from utf8Data and verifies both the
// indexed terms and the stored values come back with invalid UTF-16
// replaced by U+FFFD.
public void testInvalidUTF16() throws Throwable {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
// utf8Data[2*i] was indexed; utf8Data[2*i+1] is its sanitized form.
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
}
ir.close();
dir.close();
}
// LUCENE-510
// Round-trips every valid Unicode code point through
// UnicodeUtil.UTF16toUTF8/UTF8toUTF16 and cross-checks against the JDK's
// own UTF-8 conversion.
public void testAllUnicodeChars() throws Throwable {
BytesRef utf8 = new BytesRef(10);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
char[] chars = new char[2];
for(int ch=0;ch<0x0010FFFF;ch++) {
if (ch == 0xd800)
// Skip invalid code points
ch = 0xe000;
int len = 0;
if (ch <= 0xffff) {
chars[len++] = (char) ch;
} else {
// Encode the supplementary code point as a surrogate pair.
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
}
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
String s1 = new String(chars, 0, len);
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
assertEquals("codepoint " + ch, s1, s2);
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
// Byte-for-byte agreement with the JDK encoder.
byte[] b = s1.getBytes("UTF-8");
assertEquals(utf8.length, b.length);
for(int j=0;j<utf8.length;j++)
assertEquals(utf8.bytes[j], b[j]);
}
}
// Returns a uniform random int in [0, lim) from the test's shared RNG.
private int nextInt(int lim) {
return random.nextInt(lim);
}
// Returns a uniform random int in [start, end).
private int nextInt(int start, int end) {
return start + nextInt(end-start);
}
// Fills buffer[offset..offset+count) with random UTF-16 data and records in
// `expected` what a UTF-16 -> UTF-8 -> UTF-16 round trip should yield
// (unpaired surrogates become U+FFFD). Returns true if any illegal
// (unpaired) surrogate was written.
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
final int len = offset + count;
boolean hasIllegal = false;
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
// Don't start in the middle of a valid surrogate pair
offset--;
for(int i=offset;i<len;i++) {
// t selects which character class to emit at position i.
int t = nextInt(6);
if (0 == t && i < len-1) {
// Make a surrogate pair
// High surrogate
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
// Low surrogate
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
} else if (t <= 1)
// 1-byte UTF-8 (ASCII) range
expected[i] = buffer[i] = (char) nextInt(0x80);
else if (2 == t)
// 2-byte UTF-8 range
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
else if (3 == t)
// 3-byte UTF-8 range below the surrogate block
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
else if (4 == t)
// 3-byte UTF-8 range above the surrogate block
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
else if (5 == t && i < len-1) {
// Illegal unpaired surrogate
if (nextInt(10) == 7) {
if (random.nextBoolean())
buffer[i] = (char) nextInt(0xd800, 0xdc00);
else
buffer[i] = (char) nextInt(0xdc00, 0xe000);
// Round-tripping replaces the unpaired surrogate with U+FFFD.
expected[i++] = 0xfffd;
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
hasIllegal = true;
} else
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
} else {
expected[i] = buffer[i] = ' ';
}
}
return hasIllegal;
}
// LUCENE-510
// Round-trips random 20-char UTF-16 strings (possibly containing illegal
// surrogates) through UnicodeUtil and checks against fillUnicode's
// expected output; legal strings are also checked against the JDK encoder.
public void testRandomUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(20);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
// Legal input: must match the JDK's UTF-8 encoding byte-for-byte.
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
assertEquals(utf16.length, 20);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
}
}
// LUCENE-510
// Exercises the incremental form of UTF8toUTF16: decodes only the byte
// suffix that changed since the previous iteration and verifies the result
// matches both the expected chars and a full from-scratch decode.
public void testIncrementalUnicodeStrings() throws Throwable {
char[] buffer = new char[20];
char[] expected = new char[20];
BytesRef utf8 = new BytesRef(new byte[20]);
UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
boolean hasIllegal = false;
byte[] last = new byte[60];
int num = 100000 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
// Keep a random char prefix from the previous iteration (none on the
// first iteration or after illegal content).
final int prefix;
if (iter == 0 || hasIllegal)
prefix = 0;
else
prefix = nextInt(20);
hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
if (!hasIllegal) {
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
assertEquals(b.length, utf8.length);
for(int i=0;i<b.length;i++)
assertEquals(b[i], utf8.bytes[i]);
}
// Find the first byte that differs from last iteration's encoding.
int bytePrefix = 20;
if (iter == 0 || hasIllegal)
bytePrefix = 0;
else
for(int i=0;i<20;i++)
if (last[i] != utf8.bytes[i]) {
bytePrefix = i;
break;
}
System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
// Incremental decode of only the changed suffix.
UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
assertEquals(20, utf16.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16.result[i]);
// Full decode must agree.
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
assertEquals(20, utf16a.length);
for(int i=0;i<20;i++)
assertEquals(expected[i], utf16a.result[i]);
}
}
// LUCENE-1255
public void testNegativePositions() throws Throwable {
final TokenStream tokens = new TokenStream() {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!terms.hasNext()) return false;
clearAttributes();
termAtt.append(terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
}
};
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
pq.add(new Term("field", "c"));
ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Query q = new SpanTermQuery(new Term("field", "a"));
hits = s.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
<<<<<<< MINE
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
assertEquals(1, tps.freq());
assertEquals(0, tps.nextPosition());
w.close();
_TestUtil.checkIndex(dir);
s.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// prepareCommit() must not make changes visible; only the following
// commit() does.
public void testPrepareCommit() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
// A reader opened after prepareCommit but before commit sees nothing.
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
// Only now does a reopen observe the 23 docs.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(23, reader3.numDocs());
reader.close();
reader2.close();
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
// rollback() after prepareCommit() must discard the pending commit
// entirely; no reader may ever observe the 23 prepared docs.
public void testPrepareCommitRollback() throws IOException {
MockDirectoryWrapper dir = newDirectory();
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(5))
);
writer.commit();
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
// After rollback the index is still empty from every vantage point.
IndexReader reader3 = reader.reopen();
assertEquals(0, reader.numDocs());
assertEquals(0, reader2.numDocs());
assertEquals(0, reader3.numDocs());
reader.close();
reader2.close();
// A fresh writer can continue from the rolled-back state.
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 17; i++)
addDoc(writer);
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
dir.close();
}
// LUCENE-1274
// prepareCommit()/commit() on a writer with no pending changes must still
// produce a valid, empty commit point.
public void testPrepareCommitNoChanges() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  w.prepareCommit();
  w.commit();
  w.close();
  IndexReader r = IndexReader.open(dir, true);
  assertEquals(0, r.numDocs());
  r.close();
  dir.close();
}
// LUCENE-1219
// A binary field constructed with (bytes, offset=10, length=17) must store
// exactly bytes[10..27); the retrieved value starts at b[10] == 87.
public void testBinaryFieldOffsetLength() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
  byte[] b = new byte[50];
  for (int i = 0; i < 50; i++)
    b[i] = (byte) (i + 77);
  Document doc = new Document();
  Field f = new Field("binary", b, 10, 17);
  // Before indexing, the field still exposes the full backing array plus
  // the offset/length window.
  byte[] bx = f.getBinaryValue();
  assertTrue(bx != null);
  assertEquals(50, bx.length);
  assertEquals(10, f.getBinaryOffset());
  assertEquals(17, f.getBinaryLength());
  doc.add(f);
  w.addDocument(doc);
  w.close();
  IndexReader ir = IndexReader.open(dir, true);
  doc = ir.document(0);
  f = doc.getField("binary");
  b = f.getBinaryValue();
  assertTrue(b != null);
  // BUGFIX: was assertEquals(17, b.length, 17) — the (expected, actual,
  // delta) overload with delta 17 accepted any length in [0, 34].
  assertEquals(17, b.length);
  assertEquals(87, b[0]);
  ir.close();
  dir.close();
}
// LUCENE-1382
// commit(Map) user data must be readable via IndexReader and must survive
// subsequent commits (e.g. an optimize) that supply no user data.
public void testCommitUserData() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
w.close();
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
for(int j=0;j<17;j++)
addDoc(w);
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
// An optimize-only session must carry the user data forward.
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
w.optimize();
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
dir.close();
}
// LUCENE-2529
// An empty instance of a multi-valued field must still contribute the
// analyzer's positionIncrementGap, shifting the next value's positions.
public void testPositionIncrementGapEmptyField() throws Exception {
Directory dir = newDirectory();
MockAnalyzer analyzer = new MockAnalyzer();
analyzer.setPositionIncrementGap( 100 );
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
doc.add(f);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
// First term of the second value lands at 100 (gap after the empty value).
int[] poss = tpv.getTermPositions(0);
assertEquals(1, poss.length);
assertEquals(100, poss[0]);
poss = tpv.getTermPositions(1);
assertEquals(1, poss.length);
assertEquals(101, poss[0]);
r.close();
dir.close();
}
// LUCENE-1468 -- make sure opening an IndexWriter with
// create=true does not remove non-index files
public void testOtherFiles() throws Throwable {
Directory dir = newDirectory();
try {
// Create my own random file:
IndexOutput out = dir.createOutput("myrandomfile");
out.writeByte((byte) 42);
out.close();
// Creating (and immediately closing) a writer must leave foreign files alone.
new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();
assertTrue(dir.fileExists("myrandomfile"));
} finally {
dir.close();
}
}
// addIndexes with two reader clones of the same underlying index must not
// deadlock and must import both copies.
public void testDeadlock() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
// index has 2 segments
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(r1, r2);
writer.close();
// 3 original docs + 1 doc imported twice (via r1 and its clone r2).
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
r1.close();
r2.close();
dir2.close();
dir.close();
}
// Worker thread for testThreadInterruptDeadlock: repeatedly builds a small
// index while the main thread fires interrupts at it. Any exception other
// than ThreadInterruptedException, or a corrupt index afterwards, sets
// `failed`.
private class IndexerThreadInterrupt extends Thread {
  volatile boolean failed;
  volatile boolean finish;
  // The main thread waits for this before sending the first interrupt.
  volatile boolean allowInterrupt = false;

  @Override
  public void run() {
    // LUCENE-2239: won't work with NIOFS/MMAP
    Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = null;
    while (!finish) {
      try {
        while (true) {
          if (w != null) {
            w.close();
          }
          IndexWriterConfig conf = newIndexWriterConfig(
              TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
          ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
          w = new IndexWriter(dir, conf);
          Document doc = new Document();
          doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
          for (int i = 0; i < 100; i++) {
            w.addDocument(doc);
            if (i % 10 == 0) {
              w.commit();
            }
          }
          w.close();
          _TestUtil.checkIndex(dir);
          IndexReader.open(dir, true).close();
          // Strangely, if we interrupt a thread before
          // all classes are loaded, the class loader
          // seems to do scary things with the interrupt
          // status. In java 1.5, it'll throw an
          // incorrect ClassNotFoundException. In java
          // 1.6, it'll silently clear the interrupt.
          // So, on first iteration through here we
          // don't open ourselves up for interrupts
          // until we've done the above loop.
          allowInterrupt = true;
        }
      } catch (ThreadInterruptedException re) {
        Throwable e = re.getCause();
        assertTrue(e instanceof InterruptedException);
        if (finish) {
          break;
        }
      } catch (Throwable t) {
        System.out.println("FAILED; unexpected exception");
        t.printStackTrace(System.out);
        failed = true;
        break;
      }
    }
    if (!failed) {
      // clear interrupt state:
      Thread.interrupted();
      // BUGFIX: guard against w == null — an interrupt arriving before the
      // first IndexWriter is assigned would otherwise NPE here.
      if (w != null) {
        try {
          w.rollback();
        } catch (IOException ioe) {
          throw new RuntimeException(ioe);
        }
      }
      try {
        _TestUtil.checkIndex(dir);
      } catch (Exception e) {
        failed = true;
        System.out.println("CheckIndex FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
      try {
        IndexReader r = IndexReader.open(dir, true);
        //System.out.println("doc count=" + r.numDocs());
        r.close();
      } catch (Exception e) {
        failed = true;
        System.out.println("IndexReader.open FAILED: unexpected exception");
        e.printStackTrace(System.out);
      }
    }
    try {
      dir.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
// Fires up to 100 interrupts at an indexing thread and asserts it neither
// deadlocks nor corrupts the index.
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 100 interrupts to child thread
int i = 0;
while(i < 100) {
Thread.sleep(10);
// Only interrupt once the worker has finished its warm-up iteration.
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
public void testIndexStoreCombos() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
f.setTokenStream(new MockTokenizer(new StringReader("doc2field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc2field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
f.setTokenStream(new MockTokenizer(new StringReader("doc3field1"), MockTokenizer.WHITESPACE, false));
f2.setTokenStream(new MockTokenizer(new StringReader("doc3field2"), MockTokenizer.WHITESPACE, false));
w.addDocument(doc);
w.commit();
w.optimize(); // force segment merge.
w.close();
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);
assertTrue(ir.document(0).getFieldable("binary").isBinary());
assertTrue(ir.document(1).getFieldable("binary").isBinary());
assertTrue(ir.document(2).getFieldable("binary").isBinary());
assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
// test that the terms were indexed.
<<<<<<< MINE
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
=======
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS);
>>>>>>> YOURS
ir.close();
dir.close();
}
// LUCENE-1727: make sure doc fields are stored in order
// Stored fields must come back in the exact order they were added, even
// with duplicate field names interleaved.
public void testStoredFieldsOrder() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator<Fieldable> it = doc.getFields().iterator();
// Expect zzz, aaa, zzz — insertion order, not name order.
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
r.close();
w.close();
d.close();
}
// A term containing U+FFFF must be indexed and searchable verbatim
// (U+FFFF was historically used as an internal sentinel).
public void testEmbeddedFFFF() throws Throwable {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
r.close();
w.close();
_TestUtil.checkIndex(d);
d.close();
}
// An index containing a single empty document (no compound file, with an
// infoStream attached) must be written and pass CheckIndex.
public void testNoDocsIndex() throws Throwable {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
// Capture infoStream output in memory (exercises the logging path).
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
dir.close();
}
// LUCENE-2095: make sure with multiple threads commit
// doesn't return until all changes are in fact in the
// index
public void testCommitThreadSafety() throws Throwable {
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
_TestUtil.reduceOpenFiles(w.w);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
for(int i=0;i<NUM_THREADS;i++) {
final int finalI = i;
// Each thread adds uniquely-named docs; after every commit a reopened
// reader must already see the doc just committed.
threads[i] = new Thread() {
@Override
public void run() {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
IndexReader r2 = r.reopen();
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
// both start & end are inclusive
private final int getInt(Random r, int start, int end) {
  // Inclusive range [start, end]: the number of candidates is end - start + 1.
  final int width = end - start + 1;
  return start + r.nextInt(width);
}
// Walks all terms of field "f" and verifies: (1) enumeration order is
// strictly increasing by BytesRef comparison, (2) every enumerated term was
// one we indexed, (3) for the top-level reader the full expected set was
// seen, and (4) each seen term can be seek'd back to with an exact match.
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
BytesRef last = new BytesRef();
Set<String> seenTerms = new HashSet<String>();
while(true) {
final BytesRef term = terms.next();
if (term == null) {
break;
}
// Enumeration must be strictly increasing relative to the previous term.
assertTrue(last.compareTo(term) < 0);
last.copy(term);
final String s = term.utf8ToString();
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
seenTerms.add(s);
}
// Sub-readers may each hold only a subset; only the top reader must
// contain every indexed term.
if (isTop) {
assertTrue(allTerms.equals(seenTerms));
}
// Test seeking:
Iterator<String> it = seenTerms.iterator();
while(it.hasNext()) {
BytesRef tr = new BytesRef(it.next());
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
TermsEnum.SeekStatus.FOUND,
terms.seek(tr));
}
}
// Renders a char as a "U+<hex>" label (lowercase hex, no zero padding),
// e.g. 'A' -> "U+41".
private final String asUnicodeChar(char c) {
  return String.format("U+%x", (int) c);
}
// Human-readable description of a 1- or 2-char term as its code units,
// e.g. "ab" -> "U+61,U+62".
private final String termDesc(String s) {
  // Only 1- or 2-char terms are expected here.
  assertTrue(s.length() <= 2);
  final StringBuilder desc = new StringBuilder(asUnicodeChar(s.charAt(0)));
  if (s.length() != 1) {
    desc.append(',').append(asUnicodeChar(s.charAt(1)));
  }
  return desc.toString();
}
// Make sure terms, including ones with surrogate pairs,
// sort in codepoint sort order by default
public void testTermUTF16SortOrder() throws Throwable {
<<<<<<< MINE
Random rnd = random;
Directory dir = newDirectory();
=======
Random rnd = newRandom();
Directory dir = new MockRAMDirectory();
>>>>>>> YOURS
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = 200 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
final IndexReader[] subs = r.getSequentialSubReaders();
for(int i=0;i<subs.length;i++) {
checkTermsOrder(subs[i], allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.optimize();
// Test optimized single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
// Indexes a single doc containing >256 unique terms with
// termIndexInterval=2, then verifies every term enumerates exactly one
// posting (doc 0) and that all 300 terms are present.
public void testIndexDivisor() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
config.setTermIndexInterval(2);
IndexWriter w = new IndexWriter(dir, config);
StringBuilder s = new StringBuilder();
// must be > 256
for(int i=0;i<300;i++) {
s.append(' ').append(i);
}
Document d = new Document();
Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
IndexReader r = w.getReader().getSequentialSubReaders()[0];
TermsEnum t = r.fields().terms("field").iterator();
int count = 0;
while(t.next() != null) {
final DocsEnum docs = t.docs(null, null);
// Each term occurs only in doc 0, and nowhere else.
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;
}
assertEquals(300, count);
r.close();
w.close();
dir.close();
}
// Verifies deleteUnusedFiles(): a CFS segment still held open by a reader
// must not be deleted; once the reader is closed (NRT case, iter==0) or
// deleteUnusedFiles() is called again (non-NRT case, iter==1), it goes away.
public void testDeleteUnusedFiles() throws Exception {
for(int iter=0;iter<2;iter++) {
Directory dir = newDirectory();
LogMergePolicy mergePolicy = newLogMergePolicy(true);
mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMergePolicy(mergePolicy)
);
Document doc = new Document();
doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
// use NRT
r = w.getReader();
} else {
// don't use NRT
w.commit();
r = IndexReader.open(dir);
}
List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
w.optimize();
if (iter == 1) {
w.commit();
}
IndexReader r2 = r.reopen();
assertTrue(r != r2);
files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
// optimize created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
// r still holds this file open
assertTrue(files.contains("_0.cfs"));
//assertTrue(files.contains("_2.cfs"));
r.close();
if (iter == 0) {
// on closing NRT reader, it calls writer.deleteUnusedFiles
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
} else {
// now writer can remove it
w.deleteUnusedFiles();
files = Arrays.asList(dir.listAll());
assertFalse(files.contains("_0.cfs"));
}
//assertTrue(files.contains("_2.cfs"));
w.close();
r2.close();
dir.close();
}
}
// NOTE(review): method name has a typo ("Unsed"); kept as-is since renaming
// would change the test's identity for runners and reports.
public void testDeleteUnsedFiles2() throws Exception {
// Validates that iw.deleteUnusedFiles() also deletes unused index commits
// in case a deletion policy which holds onto commits is used.
Directory dir = newDirectory();
SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(sdp));
// First commit
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
// Keep that commit
sdp.snapshot("id");
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
// Should delete the unreferenced commit
sdp.release("id");
writer.deleteUnusedFiles();
assertEquals(1, IndexReader.listCommits(dir).size());
writer.close();
dir.close();
}
// IndexWriter subclass that counts flushes via the doAfterFlush() hook;
// used below to detect when a flush has been triggered.
private static class FlushCountingIndexWriter extends IndexWriter {
int flushCount; // incremented once per flush
public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
super(dir, iwc);
}
@Override
public void doAfterFlush() {
flushCount++;
}
}
<<<<<<< MINE
public void testIndexingThenDeleting() throws Exception {
final Random r = random;
=======
// nocommit - TODO: enable when flushing by RAM is implemented
public void _testIndexingThenDeleting() throws Exception {
final Random r = newRandom();
>>>>>>> YOURS
Directory dir = newDirectory();
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
<<<<<<< MINE
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
=======
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
>>>>>>> YOURS
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
final boolean doIndexing = r.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: iter doIndexing=" + doIndexing);
}
if (doIndexing) {
// Add docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.addDocument(doc);
count++;
}
} else {
// Delete docs until a flush is triggered
final int startFlushCount = w.flushCount;
while(w.flushCount == startFlushCount) {
w.deleteDocuments(new Term("foo", ""+count));
count++;
}
}
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
}
w.close();
dir.close();
}
// Since LUCENE-2386, a fresh IndexWriter does not commit on open: before
// close() there are zero commits (listCommits throws), and close() on the
// new index produces exactly one.
public void testNoCommits() throws Exception {
// Tests that if we don't call commit(), the directory has 0 commits. This has
// changed since LUCENE-2386, where before IW would always commit on a fresh
// new index.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
try {
IndexReader.listCommits(dir);
fail("listCommits should have thrown an exception over empty index");
} catch (IndexNotFoundException e) {
// that's expected !
}
// No changes still should generate a commit, because it's a new index.
writer.close();
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
dir.close();
}
// LUCENE-2386 regression: opening an IndexWriter over an empty FSDirectory
// configured with NoLockFactory must succeed (listAll() in IndexFileDeleter
// used to fail).
public void testEmptyFSDirWithNoLock() throws Exception {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory dir = newFSDirectory(new File(TEMP_DIR, "emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())).close();
dir.close();
}
public void testEmptyDirRollback() throws Exception {
// Tests that if IW is created over an empty Directory, some documents are
// indexed, flushed (but not committed) and then IW rolls back, then no
// files are left in the Directory.
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2));
String[] files = dir.listAll();
// Creating over empty dir should not create any files,
// or, at most the write.lock file
final int extraFileCount;
if (files.length == 1) {
assertEquals("write.lock", files[0]);
extraFileCount = 1;
} else {
assertEquals(0, files.length);
extraFileCount = 0;
}
Document doc = new Document();
// create as many files as possible
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
<<<<<<< MINE
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
=======
assertTrue("flush should have occurred and files created", dir.listAll().length > 0);
>>>>>>> YOURS
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
// Since we rolled-back above, that close should be a no-op
writer.close();
assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
dir.close();
}
public void testNoSegmentFile() throws IOException {
<<<<<<< MINE
Directory dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
}
public void testFutureCommit() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setInfoStream(System.out);
//w.w.setUseCompoundFile(false);
if (VERBOSE) {
w.w.setInfoStream(System.out);
}
final int docCount = 200*RANDOM_MULTIPLIER;
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc " + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.optimize();
}
}
w.close();
dir.close();
}
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
BIG=BIG+BIG+BIG+BIG;
for (int i=0; i<2; i++) {
Document doc = new Document();
doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
indexWriter.close();
_TestUtil.checkIndex(dir);
assertNoUnreferencedFiles(dir, "no tv files");
String[] files = dir.listAll();
for(String file : files) {
assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
}
=======
File tempDir = _TestUtil.getTempDir("noSegmentFile");
try {
Directory dir = FSDirectory.open(tempDir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2)
.setOpenMode(OpenMode.CREATE));
w2.close();
// If we don't do that, the test fails on Windows
w.rollback();
dir.close();
} finally {
_TestUtil.rmDir(tempDir);
}
}
public void testFutureCommit() throws Exception {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
Document doc = new Document();
w.addDocument(doc);
// commit to "first"
Map<String,String> commitData = new HashMap<String,String>();
commitData.put("tag", "first");
w.commit(commitData);
// commit to "second"
w.addDocument(doc);
commitData.put("tag", "second");
w.commit(commitData);
w.close();
// open "first" with IndexWriter
IndexCommit commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("first")) {
commit = c;
break;
}
}
assertNotNull(commit);
w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
assertEquals(1, w.numDocs());
// commit IndexWriter to "third"
w.addDocument(doc);
commitData.put("tag", "third");
w.commit(commitData);
w.close();
// make sure "second" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("second")) {
commit = c;
break;
}
}
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
>>>>>>> YOURS
dir.close();
}
}
Diff Result
No diff
Case 31 - java_lucenesolr.rev_2ede7_249fd..TestMultiTermConstantScore.java
Base
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
// Named-test constructor; forwards the test name to the base class.
public TestMultiTermConstantScore(String name) {
super(name);
}
// No-arg constructor used by test runners.
public TestMultiTermConstantScore() {
super();
}
Directory small;
// Float-score comparison using the SCORE_COMP_THRESH tolerance.
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
}
// Exact int comparison, delegated to JUnit's Assert.
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
@Override
protected void setUp() throws Exception {
super.setUp();
// Builds the small 8-doc index: every doc gets "id" and "all" fields; null
// entries get no "data" field. Optimized down to a single segment.
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
IndexWriter writer = new IndexWriter(small, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
writer.optimize();
writer.close();
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  // Constant-score term-range query on field f over [l, h], with
  // inclusive/exclusive endpoints controlled by il/ih.
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  // Term-range query with a caller-chosen rewrite method.
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih);
  q.setRewriteMethod(method);
  return q;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  // Collating variant: range endpoints are compared via the given Collator.
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih, c);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/** macro for readability */
public static Query cspq(Term prefix) {
  // Constant-score prefix query.
  final PrefixQuery q = new PrefixQuery(prefix);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/** macro for readability */
public static Query cswcq(Term wild) {
  // Constant-score wildcard query.
  final WildcardQuery q = new WildcardQuery(wild);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
// Sanity-checks equals/hashCode and the QueryUtils invariants for the
// constant-score range/prefix/wildcard helper queries.
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
// Same sanity checks as testBasics, but for the Collator-based range query.
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
// With constant-score rewrite, every hit in the range must receive the same
// score, no matter how many terms of the range each doc matches; the
// boolean-query rewrite must produce those same scores too.
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
// Verifies boost handling for constant-score queries: (1) query
// normalization makes a boost-100 single-clause query still score 1.0 per
// doc, and (2) relative boosts across clauses reorder hits as expected.
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// Normalization cancels the boost: each matching doc scores exactly 1.0.
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// With the boost flipped to favor q1, the order reverses.
assertEquals(0, hits[0].doc);
assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
// Combining a scoring TermRangeQuery with a constant-score range query in a
// BooleanQuery must not change the hit order produced by the scoring clause.
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index build in *this* setUp
  IndexReader reader = IndexReader.open(small, true);
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query rq = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
  int numHits = expected.length;
  // now do a boolean query which also contains a
  // ConstantScoreRangeQuery and make sure the order is the same
  BooleanQuery q = new BooleanQuery();
  q.add(rq, BooleanClause.Occur.MUST);// T, F);
  q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
  // Fixed typo in the failure message ("numebr" -> "number").
  assertEquals("wrong number of hits", numHits, actual.length);
  for (int i = 0; i < numHits; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
        actual[i].doc);
  }
}
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Range queries on the "id" field using an English collator: bounded on
 * both ends, half-open, and degenerate (single-term and empty) ranges.
 */
public void testRangeQueryIdCollating() throws IOException {
  // NOTE: uses index built in *super* setUp
  IndexReader reader = IndexReader.open(signedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  int medId = ((maxId - minId) / 2);
  String minIP = pad(minId);
  String maxIP = pad(maxId);
  String medIP = pad(medId);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  Collator c = Collator.getInstance(Locale.ENGLISH);
  // test id, bounded on both ends
  result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("all but last", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("all but first", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("all but ends", numDocs - 2, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("med and up", 1 + maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("up to med", 1 + medId - minId, result.length);
  // unbounded id
  result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("min and up", numDocs, result.length);
  result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("max and down", numDocs, result.length);
  result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not min, but up", numDocs - 1, result.length);
  result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not max, but down", numDocs - 1, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("med and up, not max", maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("not min, up to med", medId - minId, result.length);
  // very small sets
  result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F,c", 0, result.length);
  result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("med,med,F,F,c", 0, result.length);
  result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F,c", 0, result.length);
  result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T,c", 1, result.length);
  result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T,c", 1, result.length);
  result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T,c", 1, result.length);
  result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T,c", 1, result.length);
  result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("med,med,T,T,c", 1, result.length);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Range queries on the random-string "rand" field: extremes bounded on
 * both ends, half-open ranges, and degenerate single-term/empty ranges.
 */
public void testRangeQueryRand() throws IOException {
  // NOTE: uses index built in *super* setUp
  IndexReader reader = IndexReader.open(signedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  String minRP = pad(signedIndex.minR);
  String maxRP = pad(signedIndex.maxR);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  // test extremes, bounded on both ends
  result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
  assertEquals("all but biggest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
  assertEquals("all but smallest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("all but extremes", numDocs - 2, result.length);
  // unbounded
  result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("smallest and up", numDocs, result.length);
  result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
  assertEquals("biggest and down", numDocs, result.length);
  result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
  assertEquals("not smallest, but up", numDocs - 1, result.length);
  result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("not biggest, but down", numDocs - 1, result.length);
  // very small sets
  result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F", 0, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F", 0, result.length);
  result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T", 1, result.length);
  result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T", 1, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T", 1, result.length);
  result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T", 1, result.length);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Collated range queries on the "rand" field against the unsigned index,
 * covering bounded, half-open, and degenerate ranges.
 */
public void testRangeQueryRandCollating() throws IOException {
  // NOTE: uses index built in *super* setUp
  // using the unsigned index because collation seems to ignore hyphens
  IndexReader reader = IndexReader.open(unsignedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  String minRP = pad(unsignedIndex.minR);
  String maxRP = pad(unsignedIndex.maxR);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  Collator c = Collator.getInstance(Locale.ENGLISH);
  // test extremes, bounded on both ends
  result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("all but biggest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("all but smallest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("all but extremes", numDocs - 2, result.length);
  // unbounded
  result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("smallest and up", numDocs, result.length);
  result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("biggest and down", numDocs, result.length);
  result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not smallest, but up", numDocs - 1, result.length);
  result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not biggest, but down", numDocs - 1, result.length);
  // very small sets
  result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F,c", 0, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F,c", 0, result.length);
  result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T,c", 1, result.length);
  result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T,c", 1, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T,c", 1, result.length);
  result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T,c", 1, result.length);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Verifies that a collated range query respects Farsi/Arabic collation
 * order rather than raw Unicode code-point order.
 */
public void testFarsi() throws Exception {
  /* build an index */
  RAMDirectory farsiIndex = new RAMDirectory();
  IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
  Document doc = new Document();
  doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
      Field.Index.NOT_ANALYZED));
  doc.add(new Field("body", "body", Field.Store.YES,
      Field.Index.NOT_ANALYZED));
  writer.addDocument(doc);
  writer.optimize();
  writer.close();
  IndexReader reader = IndexReader.open(farsiIndex, true);
  IndexSearcher search = new IndexSearcher(reader);
  // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
  // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
  // characters properly.
  Collator c = Collator.getInstance(new Locale("ar"));
  // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
  // orders the U+0698 character before the U+0633 character, so the single
  // index Term below should NOT be returned by a ConstantScoreRangeQuery
  // with a Farsi Collator (or an Arabic one for the case when Farsi is
  // not supported).
  ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
      c), null, 1000).scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);
  result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
      1000).scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);
  search.close();
  // Fix: also close the reader; closing the searcher does not close a
  // reader that was supplied by the caller.
  reader.close();
}
/**
 * Verifies that a collated range query respects Danish collation order
 * (which places "H\u00C5T" between "H\u00D8T" and "MAND", unlike raw
 * Unicode order).
 */
public void testDanish() throws Exception {
  /* build an index */
  RAMDirectory danishIndex = new RAMDirectory();
  IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
  // Danish collation orders the words below in the given order
  // (example taken from TestSort.testInternationalSort() ).
  String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
  for (int docnum = 0 ; docnum < words.length ; ++docnum) {
    Document doc = new Document();
    doc.add(new Field("content", words[docnum],
        Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("body", "body",
        Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
  }
  writer.optimize();
  writer.close();
  IndexReader reader = IndexReader.open(danishIndex, true);
  IndexSearcher search = new IndexSearcher(reader);
  Collator c = Collator.getInstance(new Locale("da", "dk"));
  // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
  // but Danish collation does.
  ScoreDoc[] result = search.search
      (csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);
  result = search.search
      (csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);
  search.close();
  // Fix: also close the reader; closing the searcher does not close a
  // reader that was supplied by the caller.
  reader.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** Tolerance used when comparing float scores for equality. */
public static final float SCORE_COMP_THRESH = 1e-6f;
/** Creates a test case with the given name (passed through to the superclass). */
public TestMultiTermConstantScore(String name) {
super(name);
}
/** Creates an unnamed test case. */
public TestMultiTermConstantScore() {
super();
}
// Small in-memory index built in this class's setUp(); used by the tests
// that say "uses index build in *this* setUp".
Directory small;
/** Asserts two floats are equal within {@link #SCORE_COMP_THRESH}. */
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact-equality assert for ints, delegating to JUnit's Assert. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
@Override
protected void setUp() throws Exception {
  super.setUp();
  // Eight documents: "id" and "all" are always present; "data" is added
  // only for the non-null entries below.
  String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
                                 "B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
                                 "X 4 5 6" };
  small = new RAMDirectory();
  IndexWriter w = new IndexWriter(small, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
  for (int docId = 0; docId < data.length; docId++) {
    Document d = new Document();
    // Field.Keyword("id",String.valueOf(docId))
    d.add(new Field("id", String.valueOf(docId), Field.Store.YES,
        Field.Index.NOT_ANALYZED));
    // Field.Keyword("all","all")
    d.add(new Field("all", "all", Field.Store.YES,
        Field.Index.NOT_ANALYZED));
    if (data[docId] != null) {
      // Field.Text("data",data[docId])
      d.add(new Field("data", data[docId], Field.Store.YES,
          Field.Index.ANALYZED));
    }
    w.addDocument(d);
  }
  w.optimize();
  w.close();
}
/** Macro for readability: range query using the constant-score filter rewrite. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Macro for readability: range query using the caller-supplied rewrite method. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(method);
  return rangeQuery;
}
/** Macro for readability: collated range query using the constant-score filter rewrite. */
public static Query csrq(String f, String l, String h, boolean il,
                         boolean ih, Collator c) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih, c);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Macro for readability: prefix query using the constant-score filter rewrite. */
public static Query cspq(Term prefix) {
  PrefixQuery prefixQuery = new PrefixQuery(prefix);
  prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return prefixQuery;
}
/** Macro for readability: wildcard query using the constant-score filter rewrite. */
public static Query cswcq(Term wild) {
  WildcardQuery wildcardQuery = new WildcardQuery(wild);
  wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wildcardQuery;
}
/**
 * Sanity checks the csrq/cspq/cswcq helpers: Query contract checks via
 * QueryUtils, plus equality/inequality between differing queries.
 */
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
/**
 * Same contract sanity checks as testBasics, but for the collated csrq
 * overload.
 */
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
/**
 * Constant-score rewrites must give every hit the same score, even though
 * some documents match more terms in the range than others.
 */
public void testEqualScores() throws IOException {
  // NOTE: uses index built in *this* setUp
  IndexReader reader = IndexReader.open(small, true);
  IndexSearcher search = new IndexSearcher(reader);
  ScoreDoc[] result;
  // some hits match more terms then others, score should be the same
  result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
  int numHits = result.length;
  assertEquals("wrong number of results", 6, numHits);
  float score = result[0].score;
  for (int i = 1; i < numHits; i++) {
    assertEquals("score for " + i + " was not the same", score,
        result[i].score);
  }
  result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
  numHits = result.length;
  assertEquals("wrong number of results", 6, numHits);
  for (int i = 0; i < numHits; i++) {
    assertEquals("score for " + i + " was not the same", score,
        result[i].score);
  }
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Checks boost handling: a boost on a constant-score query must not leak
 * into the per-document score (stays 1.0 under a raw Collector), and
 * relative boosts between clauses of a BooleanQuery must reorder hits.
 */
public void testBoost() throws IOException {
  // NOTE: uses index built in *this* setUp
  IndexReader reader = IndexReader.open(small, true);
  IndexSearcher search = new IndexSearcher(reader);
  // test for correct application of query normalization
  // must use a non score normalizing method for this.
  Query q = csrq("data", "1", "6", T, T);
  q.setBoost(100);
  search.search(q, null, new Collector() {
    private int base = 0;
    private Scorer scorer;
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
    }
    @Override
    public void collect(int doc) throws IOException {
      assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
    }
    @Override
    public void setNextReader(IndexReader reader, int docBase) {
      base = docBase;
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  });
  //
  // Ensure that boosting works to score one clause of a query higher
  // than another.
  //
  Query q1 = csrq("data", "A", "A", T, T); // matches document #0
  q1.setBoost(.1f);
  Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
  BooleanQuery bq = new BooleanQuery(true);
  bq.add(q1, BooleanClause.Occur.SHOULD);
  bq.add(q2, BooleanClause.Occur.SHOULD);
  ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
  assertEquals(1, hits[0].doc);
  assertEquals(0, hits[1].doc);
  assertTrue(hits[0].score > hits[1].score);
  q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
  q1.setBoost(.1f);
  q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
  bq = new BooleanQuery(true);
  bq.add(q1, BooleanClause.Occur.SHOULD);
  bq.add(q2, BooleanClause.Occur.SHOULD);
  hits = search.search(bq, null, 1000).scoreDocs;
  assertEquals(1, hits[0].doc);
  assertEquals(0, hits[1].doc);
  assertTrue(hits[0].score > hits[1].score);
  q1 = csrq("data", "A", "A", T, T); // matches document #0
  q1.setBoost(10f);
  q2 = csrq("data", "Z", "Z", T, T); // matches document #1
  bq = new BooleanQuery(true);
  bq.add(q1, BooleanClause.Occur.SHOULD);
  bq.add(q2, BooleanClause.Occur.SHOULD);
  hits = search.search(bq, null, 1000).scoreDocs;
  assertEquals(0, hits[0].doc);
  assertEquals(1, hits[1].doc);
  assertTrue(hits[0].score > hits[1].score);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * A constant-score clause added to a BooleanQuery must not change the
 * relative ordering produced by the scoring TermRangeQuery clause.
 */
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index built in *this* setUp
  IndexReader reader = IndexReader.open(small, true);
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query rq = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
  int numHits = expected.length;
  // now do a boolean where which also contains a
  // ConstantScoreRangeQuery and make sure the order is the same
  BooleanQuery q = new BooleanQuery();
  q.add(rq, BooleanClause.Occur.MUST);// T, F);
  q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
  // Fix: corrected typo in the assertion message ("numebr" -> "number").
  assertEquals("wrong number of hits", numHits, actual.length);
  for (int i = 0; i < numHits; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
        actual[i].doc);
  }
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Range queries on the sequential "id" field, exercising both the
 * filter rewrite (5-arg csrq) and CONSTANT_SCORE_AUTO_REWRITE_DEFAULT:
 * bounded, half-open, and degenerate (single-term/empty) ranges.
 */
public void testRangeQueryId() throws IOException {
  // NOTE: uses index built in *super* setUp
  IndexReader reader = IndexReader.open(signedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  int medId = ((maxId - minId) / 2);
  String minIP = pad(minId);
  String maxIP = pad(maxId);
  String medIP = pad(medId);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  // test id, bounded on both ends
  result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
  assertEquals("all but last", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("all but last", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
  assertEquals("all but first", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("all but first", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
  assertEquals("all but ends", numDocs - 2, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("all but ends", numDocs - 2, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
  assertEquals("med and up", 1 + maxId - medId, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("med and up", 1 + maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
  assertEquals("up to med", 1 + medId - minId, result.length);
  result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("up to med", 1 + medId - minId, result.length);
  // unbounded id
  result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("min and up", numDocs, result.length);
  result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
  assertEquals("max and down", numDocs, result.length);
  result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
  assertEquals("not min, but up", numDocs - 1, result.length);
  result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
  assertEquals("not max, but down", numDocs - 1, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
  assertEquals("med and up, not max", maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
  assertEquals("not min, up to med", medId - minId, result.length);
  // very small sets
  result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F", 0, result.length);
  result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F", 0, result.length);
  result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
  assertEquals("med,med,F,F", 0, result.length);
  result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("med,med,F,F", 0, result.length);
  result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F", 0, result.length);
  result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F", 0, result.length);
  result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T", 1, result.length);
  result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T", 1, result.length);
  result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T", 1, result.length);
  result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T", 1, result.length);
  result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T", 1, result.length);
  result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T", 1, result.length);
  result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T", 1, result.length);
  result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T", 1, result.length);
  result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
  assertEquals("med,med,T,T", 1, result.length);
  result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
  assertEquals("med,med,T,T", 1, result.length);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Range queries on the "id" field using an English collator: bounded on
 * both ends, half-open, and degenerate (single-term and empty) ranges.
 */
public void testRangeQueryIdCollating() throws IOException {
  // NOTE: uses index built in *super* setUp
  IndexReader reader = IndexReader.open(signedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  int medId = ((maxId - minId) / 2);
  String minIP = pad(minId);
  String maxIP = pad(maxId);
  String medIP = pad(medId);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  Collator c = Collator.getInstance(Locale.ENGLISH);
  // test id, bounded on both ends
  result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("all but last", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("all but first", numDocs - 1, result.length);
  result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("all but ends", numDocs - 2, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("med and up", 1 + maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("up to med", 1 + medId - minId, result.length);
  // unbounded id
  result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("min and up", numDocs, result.length);
  result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("max and down", numDocs, result.length);
  result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not min, but up", numDocs - 1, result.length);
  result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not max, but down", numDocs - 1, result.length);
  result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("med and up, not max", maxId - medId, result.length);
  result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("not min, up to med", medId - minId, result.length);
  // very small sets
  result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F,c", 0, result.length);
  result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("med,med,F,F,c", 0, result.length);
  result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F,c", 0, result.length);
  result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T,c", 1, result.length);
  result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T,c", 1, result.length);
  result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T,c", 1, result.length);
  result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T,c", 1, result.length);
  result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("med,med,T,T,c", 1, result.length);
  // Fix: release resources; IndexSearcher.close() does not close a reader
  // that was supplied by the caller, so both must be closed explicitly.
  search.close();
  reader.close();
}
/**
 * Exercises constant-score {@code TermRangeQuery} over the random "rand" field
 * of the signed index: fully bounded, half-bounded, and degenerate
 * (single-term / empty) ranges must each match the expected number of
 * documents. Uses the index built in the *super* setUp.
 *
 * Fix: the locally-opened IndexSearcher and IndexReader are now closed at the
 * end (they were previously leaked).
 */
public void testRangeQueryRand() throws IOException {
  // NOTE: uses index build in *super* setUp
  IndexReader reader = IndexReader.open(signedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  String minRP = pad(signedIndex.minR);
  String maxRP = pad(signedIndex.maxR);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  // test extremes, bounded on both ends
  result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
  assertEquals("all but biggest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
  assertEquals("all but smallest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("all but extremes", numDocs - 2, result.length);
  // unbounded on one end (null endpoint)
  result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("smallest and up", numDocs, result.length);
  result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
  assertEquals("biggest and down", numDocs, result.length);
  result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
  assertEquals("not smallest, but up", numDocs - 1, result.length);
  result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("not biggest, but down", numDocs - 1, result.length);
  // very small sets: single-term ranges, inclusive vs. exclusive
  result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F", 0, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F", 0, result.length);
  result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T", 1, result.length);
  result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T", 1, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T", 1, result.length);
  result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T", 1, result.length);
  // Fix: release the searcher and reader opened above.
  search.close();
  reader.close();
}
/**
 * Same range matrix as {@code testRangeQueryRand}, but with an English
 * {@link Collator} so term comparison is locale-aware rather than raw
 * code-point order. Uses the index built in the *super* setUp.
 *
 * Fix: the locally-opened IndexSearcher and IndexReader are now closed at the
 * end (they were previously leaked).
 */
public void testRangeQueryRandCollating() throws IOException {
  // NOTE: uses index build in *super* setUp
  // using the unsigned index because collation seems to ignore hyphens
  IndexReader reader = IndexReader.open(unsignedIndex.index, true);
  IndexSearcher search = new IndexSearcher(reader);
  String minRP = pad(unsignedIndex.minR);
  String maxRP = pad(unsignedIndex.maxR);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  Collator c = Collator.getInstance(Locale.ENGLISH);
  // test extremes, bounded on both ends
  result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("all but biggest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("all but smallest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("all but extremes", numDocs - 2, result.length);
  // unbounded on one end (null endpoint)
  result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("smallest and up", numDocs, result.length);
  result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("biggest and down", numDocs, result.length);
  result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not smallest, but up", numDocs - 1, result.length);
  result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not biggest, but down", numDocs - 1, result.length);
  // very small sets: single-term ranges, inclusive vs. exclusive
  result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F,c", 0, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F,c", 0, result.length);
  result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T,c", 1, result.length);
  result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T,c", 1, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T,c", 1, result.length);
  result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,T,c", 1, result.length);
  // Fix: release the searcher and reader opened above.
  search.close();
  reader.close();
}
/**
 * Collated range semantics for Farsi: under an Arabic/Farsi collator the
 * indexed term must fall outside one range and inside another, contrary to
 * raw Unicode code-point order.
 *
 * Fix: the IndexReader and RAMDirectory are now closed at the end (they were
 * previously leaked; only the searcher was closed).
 */
public void testFarsi() throws Exception {
  /* build an index with a single Farsi document */
  RAMDirectory farsiIndex = new RAMDirectory();
  IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
  Document doc = new Document();
  doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
      Field.Index.NOT_ANALYZED));
  doc.add(new Field("body", "body", Field.Store.YES,
      Field.Index.NOT_ANALYZED));
  writer.addDocument(doc);
  writer.optimize();
  writer.close();
  IndexReader reader = IndexReader.open(farsiIndex, true);
  IndexSearcher search = new IndexSearcher(reader);
  // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
  // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
  // characters properly.
  Collator c = Collator.getInstance(new Locale("ar"));
  // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
  // orders the U+0698 character before the U+0633 character, so the single
  // index Term below should NOT be returned by a ConstantScoreRangeQuery
  // with a Farsi Collator (or an Arabic one for the case when Farsi is
  // not supported).
  ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
      c), null, 1000).scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);
  result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
      1000).scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);
  search.close();
  // Fix: release the reader and the in-memory directory as well.
  reader.close();
  farsiIndex.close();
}
/**
 * Collated range semantics for Danish: Danish collation places "H\u00C5T"
 * inside [ "H\u00D8T", "MAND" ] even though raw Unicode order would not.
 *
 * Fix: the IndexReader and RAMDirectory are now closed at the end (they were
 * previously leaked; only the searcher was closed).
 */
public void testDanish() throws Exception {
  /* build an index */
  RAMDirectory danishIndex = new RAMDirectory();
  IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
  // Danish collation orders the words below in the given order
  // (example taken from TestSort.testInternationalSort() ).
  String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
  for (int docnum = 0 ; docnum < words.length ; ++docnum) {
    Document doc = new Document();
    doc.add(new Field("content", words[docnum],
        Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("body", "body",
        Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
  }
  writer.optimize();
  writer.close();
  IndexReader reader = IndexReader.open(danishIndex, true);
  IndexSearcher search = new IndexSearcher(reader);
  Collator c = Collator.getInstance(new Locale("da", "dk"));
  // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
  // but Danish collation does.
  ScoreDoc[] result = search.search
      (csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);
  result = search.search
      (csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);
  search.close();
  // Fix: release the reader and the in-memory directory as well.
  reader.close();
  danishIndex.close();
}
}
Left
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
static IndexReader reader;
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure hte order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
assertEquals("wrong numebr of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
// Verifies collated range matching for Farsi text: with an Arabic collator the
// single indexed term must fall OUTSIDE [U+062F, U+0698] even though plain
// Unicode code-point order would place it inside that range.
@Test
public void testFarsi() throws Exception {
/* build an index */
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(newField("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
// Sanity check: a collated range that does contain the term must match it.
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
// Verifies collated range matching for Danish: Danish collation pulls
// "H\u00C5T" into the range [ "H\u00D8T", "MAND" ], where plain Unicode
// code-point ordering would leave it outside.
@Test
public void testDanish() throws Exception {
/* build a three-document index, one collated term per document */
Directory danishIndex = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (String word : words) {
Document d = new Document();
d.add(newField("content", word,
Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}
IndexReader ir = iw.getReader();
iw.close();
IndexSearcher searcher = new IndexSearcher(ir);
Collator collator = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] hits =
searcher.search(csrq("content", "H\u00D8T", "MAND", F, F, collator), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, hits.length);
hits =
searcher.search(csrq("content", "H\u00C5T", "MAND", F, F, collator), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, hits.length);
searcher.close();
ir.close();
danishIndex.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
static IndexReader reader;
/** Asserts float equality within {@link #SCORE_COMP_THRESH}. */
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact int equality; delegates to JUnit so the float overload above is not picked. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
// Builds the small shared index used by the tests that say
// "uses index build in *this* setUp". Each doc gets an "id" and an "all"
// field; null entries in data[] produce docs with no "data" field.
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
// Releases the shared reader/directory built in beforeClass and nulls the
// statics so the fixture cannot leak across test classes.
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
}
/** macro for readability: constant-score (filter-rewrite) term range query */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** variant of {@code csrq} that lets the caller choose the rewrite method */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability: collated constant-score term range query */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability: constant-score prefix query */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability: constant-score wildcard query */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
// Sanity-checks equals/hashCode/rewrite contracts of the constant-score
// range, prefix and wildcard query macros via QueryUtils.
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
// Same contract checks as testBasics, but for the collating csrq overload.
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
// A constant-score query must give every hit the same score, regardless of
// how many terms of the range each document matches, and regardless of the
// rewrite method used (filter vs. boolean-query rewrite).
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// the boolean-query rewrite must produce the same constant score as above
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
// Checks boost handling: (1) query normalization leaves per-doc scores at
// 1.0 even with a large boost, and (2) relative boosts between two clauses
// of a BooleanQuery reorder the hits as expected.
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
// q1 was down-boosted, so doc #1 (from q2) must rank first
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// same expectation with the boolean-query rewrite method
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// boosting q1 up instead must flip the order
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
// Verifies that adding a constant-score range clause to a BooleanQuery does
// not change the ranking produced by the scoring TermRangeQuery alone.
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure the order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
// fixed typo in the failure message ("numebr" -> "number")
assertEquals("wrong number of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
// Exhaustively exercises the padded-id field over every inclusive/exclusive
// bound combination, comparing the filter rewrite against
// CONSTANT_SCORE_AUTO_REWRITE_DEFAULT for each case.
// Fix: the two "max,nul" assertions were labeled "T,T" although the query
// bounds are (T, F); messages now match the actual arguments.
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,F", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,F", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
// Same bound-combination coverage as testRangeQueryId, but through the
// collating csrq overload with an English collator.
// Fix: the "max,nul" assertion was labeled "T,T,c" although the query
// bounds are (T, F); the message now matches the actual arguments.
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,F,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
// Exercises bound combinations against the random-value "rand" field of the
// signed index, where min/max are known extremes rather than ids.
// Fix: the "max,nul" assertion was labeled "T,T" although the query bounds
// are (T, F); the message now matches the actual arguments.
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,F", 1, result.length);
}
// Collated variant of testRangeQueryRand, run against the unsigned index
// because collation ignores the hyphens of negative padded values.
// Fix: the "max,nul" assertion was labeled "T,T,c" although the query
// bounds are (T, F); the message now matches the actual arguments.
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,F,c", 1, result.length);
}
// Verifies collated range matching for Farsi text: with an Arabic collator the
// single indexed term must fall OUTSIDE [U+062F, U+0698] even though plain
// Unicode code-point order would place it inside that range.
@Test
public void testFarsi() throws Exception {
/* build an index */
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(newField("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
// Sanity check: a collated range that does contain the term must match it.
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
// Verifies collated range matching for Danish: Danish collation pulls
// "H\u00C5T" into [ "H\u00D8T", "MAND" ], which Unicode ordering would not.
@Test
public void testDanish() throws Exception {
/* build an index */
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(newField("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
Right
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
Directory small;
IndexReader reader;
/** Asserts float equality within {@link #SCORE_COMP_THRESH}. */
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact int equality; delegates to JUnit so the float overload above is not picked. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
// Builds the per-test RAMDirectory index used by the tests that say
// "uses index build in *this* setUp"; null entries in data[] produce
// documents with no "data" field.
@Override
protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
// Releases the per-test reader and directory before the superclass teardown.
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
}
/** macro for readability: constant-score (filter-rewrite) term range query */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** variant of {@code csrq} that lets the caller choose the rewrite method */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability: collated constant-score term range query */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability: constant-score prefix query */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability: constant-score wildcard query */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
// Sanity-checks equals/hashCode/rewrite contracts of the constant-score
// range, prefix and wildcard query macros via QueryUtils.
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
// Same contract checks as testBasics, but for the collating csrq overload.
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
/**
 * All hits of a constant-score range query must receive the same score,
 * even though some documents match more terms in the range than others.
 */
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// same expectation under the boolean-query rewrite: every hit must still
// score the same as the first query's hits
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
/**
 * Boost handling: after query normalization a boosted constant-score query
 * must still yield 1.0 per-hit scores, and clause boosts inside a
 * BooleanQuery must control the relative ranking of its clauses.
 */
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// normalization should cancel the boost: every hit scores exactly 1.0
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
// q2 (unboosted) must outrank the down-boosted q1
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// same expectation under the boolean-query rewrite method
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// with q1 boosted up instead, the ranking flips
assertEquals(0, hits[0].doc);
assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Adding a constant-score range clause to a BooleanQuery must not change
 * the relative ordering produced by the scoring TermRangeQuery alone:
 * the constant clause contributes equally to every hit.
 */
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index build in *this* setUp
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query rq = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
  int numHits = expected.length;
  // now do a boolean query which also contains a
  // ConstantScoreRangeQuery and make sure the order is the same
  BooleanQuery q = new BooleanQuery();
  q.add(rq, BooleanClause.Occur.MUST);// T, F);
  q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
  // fixed typo in the failure message ("numebr" -> "number")
  assertEquals("wrong number of hits", numHits, actual.length);
  for (int i = 0; i < numHits; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
        actual[i].doc);
  }
}
/**
 * Exhaustive boundary checks for constant-score range queries on the
 * sequential "id" field of the signed index: both inclusive/exclusive
 * endpoint combinations and both the filter rewrite (default macro) and
 * CONSTANT_SCORE_AUTO_REWRITE_DEFAULT must return the expected hit counts.
 */
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
// NOTE(review): the next two labels say "max,nul,T,T" but the query uses
// ih=F ("T,F") — the label looks stale; behavior is unaffected.
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Same boundary checks as {@code testRangeQueryId}, but with an English
 * Collator driving term comparison (filter rewrite only).
 */
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
/**
 * Boundary checks for constant-score range queries on the random "rand"
 * field of the signed index; only the extreme (min/max) terms have known
 * positions, so those are what get exercised.
 */
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
// NOTE(review): label says "max,nul,T,T" but the query uses ih=F — stale label.
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
/**
 * Same "rand"-field boundary checks as {@code testRangeQueryRand}, but with
 * an English Collator; runs on the unsigned index because collation appears
 * to ignore the hyphens used by signed padding.
 */
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
/**
 * Collation vs. Unicode order: with an Arabic collator (standing in for
 * Farsi), the single indexed Farsi term must fall outside a range that
 * would contain it under plain Unicode ordering, and inside a range chosen
 * to match under collation.
 */
public void testFarsi() throws Exception {
/* build an index */
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
// close in reverse order of acquisition
search.close();
reader.close();
farsiIndex.close();
}
/**
 * Collation vs. Unicode order for Danish: "H\u00C5T" sorts inside
 * [ "H\u00D8T", "MAND" ] under Danish collation but outside it under plain
 * Unicode ordering, so the collating range query must find exactly one term.
 */
public void testDanish() throws Exception {
/* build an index */
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
// close in reverse order of acquisition
search.close();
reader.close();
danishIndex.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
Directory small;
IndexReader reader;
/** Float comparison that tolerates differences up to {@code SCORE_COMP_THRESH}. */
void assertEquals(String msg, float expected, float actual) {
  assertEquals(msg, expected, actual, SCORE_COMP_THRESH);
}
/** Int comparison; delegates straight to JUnit's Assert. */
static public void assertEquals(String msg, int expected, int actual) {
  Assert.assertEquals(msg, expected, actual);
}
/**
 * Builds the small in-memory index used by the tests that say
 * "uses index build in *this* setUp": 8 docs, each with "id" and "all"
 * fields; "data" is only added for the non-null entries below.
 */
@Override
protected void setUp() throws Exception {
super.setUp();
// null entries produce documents with no "data" field at all
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
/** Releases the reader before closing the directory it reads from. */
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
}
/** Macro for readability: term-range query forced to the constant-score filter rewrite. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  final TermRangeQuery trq = new TermRangeQuery(f, l, h, il, ih);
  trq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return trq;
}
/** Macro for readability: term-range query with a caller-supplied rewrite method. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  final TermRangeQuery trq = new TermRangeQuery(f, l, h, il, ih);
  trq.setRewriteMethod(method);
  return trq;
}
/** Macro for readability: collating term-range query, constant-score filter rewrite. */
public static Query csrq(String f, String l, String h, boolean il,
                         boolean ih, Collator c) {
  final TermRangeQuery trq = new TermRangeQuery(f, l, h, il, ih, c);
  trq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return trq;
}
/** Macro for readability: prefix query forced to the constant-score filter rewrite. */
public static Query cspq(Term prefix) {
  final PrefixQuery pq = new PrefixQuery(prefix);
  pq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return pq;
}
/** Macro for readability: wildcard query forced to the constant-score filter rewrite. */
public static Query cswcq(Term wild) {
  final WildcardQuery wq = new WildcardQuery(wild);
  wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wq;
}
/**
 * Sanity-checks the Query contract (equals/hashCode, rewrite behavior via
 * QueryUtils) for the constant-score range, prefix and wildcard macros,
 * including that distinct parameter sets produce unequal queries.
 */
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
/** Same contract checks as {@code testBasics}, but for collating range queries. */
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
/**
 * All hits of a constant-score range query must receive the same score,
 * even though some documents match more terms in the range than others.
 */
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// same expectation under the boolean-query rewrite: every hit must still
// score the same as the first query's hits
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
/**
 * Boost handling: after query normalization a boosted constant-score query
 * must still yield 1.0 per-hit scores, and clause boosts inside a
 * BooleanQuery must control the relative ranking of its clauses.
 */
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// normalization should cancel the boost: every hit scores exactly 1.0
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
// q2 (unboosted) must outrank the down-boosted q1
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// same expectation under the boolean-query rewrite method
assertEquals(1, hits[0].doc);
assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// with q1 boosted up instead, the ranking flips
assertEquals(0, hits[0].doc);
assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Adding a constant-score range clause to a BooleanQuery must not change
 * the relative ordering produced by the scoring TermRangeQuery alone:
 * the constant clause contributes equally to every hit.
 */
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index build in *this* setUp
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query rq = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
  int numHits = expected.length;
  // now do a boolean query which also contains a
  // ConstantScoreRangeQuery and make sure the order is the same
  BooleanQuery q = new BooleanQuery();
  q.add(rq, BooleanClause.Occur.MUST);// T, F);
  q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
  // fixed typo in the failure message ("numebr" -> "number")
  assertEquals("wrong number of hits", numHits, actual.length);
  for (int i = 0; i < numHits; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
        actual[i].doc);
  }
}
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
// Exhaustive hit-count checks for collating (English Collator) constant-score
// range queries over the unique "id" field: bounded, half-open, unbounded and
// single-term ranges. Uses the signed index built by the superclass.
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
// sanity check: ids are dense, one doc per id in [minId, maxId]
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
// Same range-query hit-count checks but over the non-unique "rand" field,
// bounded by the known smallest/largest values of the signed index. Only the
// extremes have guaranteed unique counts, so "very small sets" uses them.
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
// Collating variant of testRangeQueryRand: English Collator drives term
// comparison over the "rand" field of the *unsigned* index (see comment below
// on why the unsigned index is used).
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
// Verifies that a Collator-backed range query follows collation order rather
// than Unicode code-point order: with an Arabic collator the single Farsi
// term must fall outside [U+062F, U+0698] but inside [U+0633, U+0638].
public void testFarsi() throws Exception {
/* build an index */
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
// Verifies Danish collation in range queries: Danish sorts H-slashed-O before
// H-ring-A, so "H\u00C5T" lies inside ["H\u00D8T", "MAND"] under collation
// even though Unicode code-point order would exclude it.
public void testDanish() throws Exception {
/* build an index */
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
MergeMethods
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
// Float overload: delegates to JUnit with SCORE_COMP_THRESH as the allowed
// delta, so score comparisons in this class tolerate rounding error.
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
// Int overload: exact equality; keeps int asserts from silently widening to
// the float overload above.
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
// Releases the class-level index resources created in beforeClass(); nulling
// the statics allows them to be garbage collected between test classes.
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
}
/**
 * Builds the small shared test index once for the whole class: 8 documents,
 * each with a unique "id" keyword, a common "all" keyword, and (where data[i]
 * is non-null) an analyzed "data" field.
 *
 * Bug fix: the merged code called {@code super.setUp()} here, which does not
 * compile -- {@code super} cannot be referenced from a static context.
 * Per-test setup is driven by the test framework, not by this static method.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
      "B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
      "X 4 5 6" };
  small = new RAMDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(rand, small,
      new MockAnalyzer(MockTokenizer.WHITESPACE, false));
  for (int i = 0; i < data.length; i++) {
    Document doc = new Document();
    // unique keyword id per document
    doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
        Field.Index.NOT_ANALYZED));
    // identical keyword in every document, matches everything
    doc.add(new Field("all", "all", Field.Store.YES,
        Field.Index.NOT_ANALYZED));
    if (null != data[i]) {
      doc.add(new Field("data", data[i], Field.Store.YES,
          Field.Index.ANALYZED));
    }
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();
}
/**
 * Per-test teardown.
 *
 * Bug fix (merge residue): the previous body closed {@code reader} and
 * {@code small}, but those are now static resources created once in
 * {@code beforeClass()} and closed once in {@code afterClass()}. Closing
 * them here left every test after the first running against closed
 * resources, and caused a double close in {@code afterClass()}.
 */
@Override
protected void tearDown() throws Exception {
  super.tearDown();
}
/**
 * Macro for readability: builds a TermRangeQuery over field {@code f} with
 * bounds [{@code l}, {@code h}] (inclusivity per {@code il}/{@code ih}),
 * forced to use the constant-score filter rewrite.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/**
 * Macro for readability: like {@link #csrq(String, String, String, boolean, boolean)}
 * but with an explicit rewrite method supplied by the caller.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(method);
  return rangeQuery;
}
/**
 * Macro for readability: collating range query -- term comparison is driven
 * by the supplied {@link Collator}; uses constant-score filter rewrite.
 */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  final TermRangeQuery collatingQuery = new TermRangeQuery(f, l, h, il, ih, c);
  collatingQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return collatingQuery;
}
/**
 * Macro for readability: prefix query on {@code prefix}, forced to the
 * constant-score filter rewrite.
 */
public static Query cspq(Term prefix) {
  final PrefixQuery prefixQuery = new PrefixQuery(prefix);
  prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return prefixQuery;
}
/**
 * Macro for readability: wildcard query on {@code wild}, forced to the
 * constant-score filter rewrite.
 */
public static Query cswcq(Term wild) {
  final WildcardQuery wildcardQuery = new WildcardQuery(wild);
  wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wildcardQuery;
}
// Sanity checks on the constant-score query macros: QueryUtils.check verifies
// the Query contract (equals/hashCode/clone etc.), checkUnequal verifies that
// distinct ranges/prefixes/wildcards compare unequal.
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
// Same Query-contract sanity checks for the collating range-query macro.
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
// Verifies the defining property of constant-score rewrites: every hit gets
// the same score regardless of how many range terms it matches, for both the
// filter rewrite (default of csrq) and the constant-score boolean rewrite.
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms than others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
// Verifies boost handling on constant-score queries:
//  1) a boosted single-clause query still normalizes to score 1.0 per doc;
//  2) relative boosts across two clauses of a BooleanQuery reorder hits as
//     expected, for both the filter rewrite and the boolean rewrite.
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
// q2 (boost 1.0) must outrank down-boosted q1
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
// with q1 up-boosted the order flips
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Verifies that adding a constant-score range clause to a BooleanQuery does
 * not perturb the ranking produced by a plain (scoring) TermRangeQuery: the
 * doc order of the combined query must match the scoring query's order.
 *
 * Bug fix: corrected the misspelled assertion message "wrong numebr of hits".
 */
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure the order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
assertEquals("wrong number of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
// Exhaustive hit-count checks over the unique "id" field of the signed index:
// each bounded/half-open/unbounded/single-term range is run twice, once with
// the default constant-score filter rewrite and once with
// CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, and must yield identical counts.
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
// sanity check: ids are dense, one doc per id in [minId, maxId]
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
// Collating (English Collator) variant of testRangeQueryId: same hit-count
// expectations over the unique "id" field, with term comparison driven by
// the Collator instead of raw byte order.
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
/**
 * Checks hit counts for bounded, half-bounded, and degenerate ("very small
 * set") ranges over the random "rand" field of the signed super-class index.
 */
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
/**
 * Same range/hit-count checks as testRangeQueryRand, but with an English
 * Collator supplied so term comparison is collation-based rather than binary.
 */
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;

import junit.framework.Assert;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import static org.junit.Assert.*;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
/** Float comparison using the class-wide {@link #SCORE_COMP_THRESH} tolerance. */
public static void assertEquals(String m, float e, float a) {
  Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact int comparison, routed through junit.framework.Assert for symmetry. */
public static void assertEquals(String m, int e, int a) {
  Assert.assertEquals(m, e, a);
}
/** Releases the class-level index resources created by beforeClass(). */
@AfterClass
public static void afterClass() throws Exception {
  // Close the reader before its underlying directory, then drop the
  // references so the next test class starts clean.
  reader.close();
  reader = null;
  small.close();
  small = null;
}
/**
 * Builds the small index shared by the tests in *this* class: eight docs
 * with an "id" keyword field, an "all" marker field, and (for non-null rows)
 * an analyzed "data" field.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  // NOTE(review): the original called super.setUp() here, which does not
  // compile from a static context; the super-class indexes must be built by
  // BaseTestRangeFilter's own lifecycle hooks instead.
  String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
      "B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
      "X 4 5 6" };
  small = new RAMDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
  for (int i = 0; i < data.length; i++) {
    Document doc = new Document();
    doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
        Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
    doc
        .add(new Field("all", "all", Field.Store.YES,
            Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
    if (null != data[i]) {
      doc.add(new Field("data", data[i], Field.Store.YES,
          Field.Index.ANALYZED));// Field.Text("data",data[i]));
    }
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();
}
// NOTE(review): this looks like a leftover from the JUnit-3 per-test
// lifecycle. With reader/small now created once in @BeforeClass and closed in
// @AfterClass, closing them after every test would break subsequent tests and
// double-close in afterClass(). Confirm against BaseTestRangeFilter whether
// this override (and the super.tearDown() chain) is still needed.
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
}
/**
 * Readability macro: builds a TermRangeQuery over field {@code f} with the
 * given bounds/inclusivity, forced to the constant-score filter rewrite.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/**
 * Readability macro: builds a TermRangeQuery using the caller-supplied
 * rewrite method instead of the constant-score filter default.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(method);
  return rangeQuery;
}
/**
 * Readability macro: collated variant — term comparison uses the given
 * Collator; rewrite is forced to the constant-score filter.
 */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih, c);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Readability macro: PrefixQuery forced to the constant-score filter rewrite. */
public static Query cspq(Term prefix) {
  PrefixQuery prefixQuery = new PrefixQuery(prefix);
  prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return prefixQuery;
}
/** Readability macro: WildcardQuery forced to the constant-score filter rewrite. */
public static Query cswcq(Term wild) {
  WildcardQuery wildcardQuery = new WildcardQuery(wild);
  wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wildcardQuery;
}
/**
 * Sanity checks (via QueryUtils) that the constant-score range/prefix/wildcard
 * macros produce well-formed queries and correct equals/unequals semantics.
 */
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
/** Same well-formedness/equality sanity checks for the collated range macro. */
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
/**
 * Constant-score rewrites must give every hit the same score regardless of
 * how many range terms each document matches; checks both the filter rewrite
 * and the constant-score boolean rewrite.
 */
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
/**
 * Verifies boost handling for constant-score queries: (1) query normalization
 * still yields a 1.0 constant score under a large boost, and (2) relative
 * boosts between two clauses of a BooleanQuery reorder the hits as expected,
 * for both the filter rewrite and the constant-score boolean rewrite.
 */
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Adding a constant-score range as a MUST clause alongside a scoring
 * TermRangeQuery must not change the result order produced by the scoring
 * query alone.
 */
@Test
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index build in *this* setUp
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query rq = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
  int numHits = expected.length;
  // now do a boolean where which also contains a
  // ConstantScoreRangeQuery and make sure the order is the same
  BooleanQuery q = new BooleanQuery();
  q.add(rq, BooleanClause.Occur.MUST);// T, F);
  q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
  // fixed typo in the failure message: "numebr" -> "number"
  assertEquals("wrong number of hits", numHits, actual.length);
  for (int i = 0; i < numHits; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
        actual[i].doc);
  }
}
/**
 * Exhaustive hit-count checks over the dense "id" field of the signed
 * super-class index: bounded, half-bounded, unbounded, and single-term
 * ranges, each run with both the default constant-score rewrite and
 * CONSTANT_SCORE_AUTO_REWRITE_DEFAULT.
 */
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Collated (English Collator) variant of testRangeQueryId: same bounded,
 * half-bounded, unbounded, and single-term range hit-count checks.
 */
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
/**
 * Checks hit counts for bounded, half-bounded, and degenerate ("very small
 * set") ranges over the random "rand" field of the signed super-class index.
 */
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
/**
 * Same range/hit-count checks as testRangeQueryRand, but with an English
 * Collator supplied so term comparison is collation-based rather than binary.
 */
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
// KeepBothMethods -- merge-tool artifact marker (not Java source); kept as a comment so the file stays parseable
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
/** Float comparison with the class-wide tolerance {@link #SCORE_COMP_THRESH}. */
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact int comparison; delegates to JUnit's Assert for a uniform call site. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
/**
 * Builds the small shared test index once per class: eight documents, each
 * with "id" (its ordinal) and "all" keyword fields, plus an analyzed "data"
 * field for the non-null entries. Assigns the static {@code small} directory
 * and {@code reader}, which {@link #afterClass()} closes.
 */
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
// reader stays open for all tests in the class; closed in afterClass().
reader = writer.getReader();
writer.close();
}
/** Releases the class-wide index resources opened in {@link #beforeClass()}. */
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
// Null the statics so the test framework does not see lingering references.
reader = null;
small = null;
}
/**
 * Per-test setup. The index construction that used to live here was moved to
 * the static {@link #beforeClass()} method; rebuilding {@code small}/{@code
 * reader} per test would clobber those static fields and leak the previously
 * opened directory and reader. The old body also referenced {@code
 * RAMDirectory} and {@code rand}, which no longer resolve after the merge.
 */
@Override
protected void setUp() throws Exception {
super.setUp();
}
/**
 * Per-test teardown. {@code reader} and {@code small} are now opened once in
 * {@link #beforeClass()} and closed once in {@link #afterClass()}; closing
 * them here would break every test after the first in the class.
 */
@Override
protected void tearDown() throws Exception {
super.tearDown();
}
/** Shorthand: builds a TermRangeQuery rewritten with the constant-score filter method. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Shorthand: builds a TermRangeQuery rewritten with the caller-supplied method. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(method);
  return rangeQuery;
}
/** Shorthand: collating TermRangeQuery rewritten with the constant-score filter method. */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih, c);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Shorthand: builds a PrefixQuery rewritten with the constant-score filter method. */
public static Query cspq(Term prefix) {
  final PrefixQuery prefixQuery = new PrefixQuery(prefix);
  prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return prefixQuery;
}
/** Shorthand: builds a WildcardQuery rewritten with the constant-score filter method. */
public static Query cswcq(Term wild) {
  final WildcardQuery wildcardQuery = new WildcardQuery(wild);
  wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wildcardQuery;
}
/**
 * Sanity checks on the constant-score query helpers: each query passes
 * QueryUtils.check (hashCode/equals/clone contracts) and queries built from
 * different terms compare unequal.
 */
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
/** Same contract sanity checks as testBasics, for the collating csrq variant. */
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
/**
 * Constant-score rewrites must give every matching document the same score,
 * regardless of how many terms in the range each document matches.
 */
@Test
public void testEqualScores() throws IOException {
// NOTE(review): comment said "*this* setUp", but the small index is built in
// the static beforeClass() after the merge — confirm against the lifecycle.
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// Same expectation under the boolean-query rewrite method.
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
/**
 * Boost handling for constant-score queries: (1) a boosted single query still
 * yields normalized scores of 1.0 per hit; (2) relative boosts between two
 * clauses of a BooleanQuery control the hit ordering, for both the filter and
 * boolean-query rewrite methods.
 */
@Test
public void testBoost() throws IOException {
// NOTE(review): comment said "*this* setUp", but the small index is built in
// the static beforeClass() after the merge — confirm against the lifecycle.
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// Same expectation under the boolean-query rewrite method.
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// Flip the boost the other way; ordering must flip accordingly.
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Adding a constant-score range clause to a BooleanQuery must not perturb the
 * relative ordering produced by the scoring TermRangeQuery clause.
 */
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE(review): comment said "*this* setUp", but the small index is built in
// the static beforeClass() after the merge — confirm against the lifecycle.
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure hte order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
assertEquals("wrong numebr of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
/**
 * Exhaustive hit-count checks for constant-score range queries over the
 * sequential "id" field of the superclass's signed index: bounded on both
 * ends, half-open, unbounded, and degenerate single-point ranges, each also
 * verified under the CONSTANT_SCORE_AUTO_REWRITE_DEFAULT method.
 */
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Same hit-count matrix as testRangeQueryId, but with an English Collator
 * supplied to csrq so the range comparison is collation-based.
 */
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
/**
 * Hit-count checks over the random-valued "rand" field of the signed index:
 * only the extremes (min/max) have known counts, so the matrix is limited to
 * bounded, unbounded, and single-point ranges at those extremes.
 */
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
/**
 * Same extreme-value checks as testRangeQueryRand, with an English Collator.
 * Uses the unsigned index because collation seems to ignore hyphens.
 */
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
/** Float comparison with the class-wide tolerance {@link #SCORE_COMP_THRESH}. */
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
/** Exact int comparison; delegates to JUnit's Assert for a uniform call site. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
/**
 * Builds the small shared test index once per class: eight documents, each
 * with "id" (its ordinal) and "all" keyword fields, plus an analyzed "data"
 * field for the non-null entries. Assigns the static {@code small} directory
 * and {@code reader}, which {@link #afterClass()} closes.
 */
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
// reader stays open for all tests in the class; closed in afterClass().
reader = writer.getReader();
writer.close();
}
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
}
@Override
protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure hte order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
assertEquals("wrong numebr of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
Safe
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
}
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
}
<<<<<<< MINE
@Override
protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
=======
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
>>>>>>> YOURS
// tearDown override removed: reader and small are class-scoped (built once in
// beforeClass, closed once in afterClass). Closing them after every test would
// leave every test after the first with a closed reader, and afterClass would
// then close them a second time.
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
@Test
// Verifies boost handling for constant-score multi-term queries: a boost
// must not leak into the normalized per-document score, but relative boosts
// between clauses of a BooleanQuery must still reorder the hits.
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// after normalization every hit must score exactly 1.0 despite the boost
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// same check with the scoring boolean-query rewrite
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// boost the first clause instead; the hit order must flip
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
@Test
// ANDing a constant-score range clause into a BooleanQuery must not change
// the ranking produced by the scoring TermRangeQuery alone.
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean query which also contains a
// ConstantScoreRangeQuery and make sure the order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
// fixed typo in the assertion message ("numebr" -> "number")
assertEquals("wrong number of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
@Test
// Exhaustive hit-count checks for constant-score range queries over the
// dense "id" field: bounded/unbounded endpoints, inclusive/exclusive
// combinations, and single-document ranges. Each case is also repeated
// with the CONSTANT_SCORE_AUTO_REWRITE_DEFAULT rewrite method, which must
// return the same counts.
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
@Test
// Same hit-count matrix as testRangeQueryId, but every range is evaluated
// through an English Collator; counts must be identical since the padded
// ids sort the same way under collation.
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
@Test
// Hit-count checks over the random "rand" field: only the extreme values
// (min/max) are known, so assertions cover the extremes, unbounded ranges,
// and the degenerate single-value/empty ranges.
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
@Test
// Collator variant of testRangeQueryRand. Uses the unsigned index because
// collation appears to ignore the hyphens of negative numbers, which would
// break the expected ordering on the signed index.
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
static Directory small;
<<<<<<< MINE
static IndexReader reader;
=======
IndexReader reader;
>>>>>>> YOURS
/** Float comparison delegating to JUnit with the shared score tolerance. */
static public void assertEquals(String m, float e, float a) {
  final float tolerance = SCORE_COMP_THRESH;
  Assert.assertEquals(m, e, a, tolerance);
}
/** Exact int comparison; shadows the float overload above for readability. */
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
/**
 * One-time teardown: closes the shared reader and directory created by the
 * class-level setup, then clears both references so the objects can be
 * garbage collected between test classes.
 */
@AfterClass
public static void afterClass() throws Exception {
  reader.close();
  reader = null;
  small.close();
  small = null;
}
<<<<<<< MINE
@Override
protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(new Field("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
=======
@BeforeClass
public static void beforeClass() throws Exception {
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
>>>>>>> YOURS
@Override
protected void tearDown() throws Exception {
// NOTE(review): this per-test teardown closes the same 'reader' and 'small'
// that the static @AfterClass hook also closes; with a one-time class-level
// setup this double-close would break subsequent tests. This method is a
// leftover of the merge and should probably be removed — confirm against
// the resolution of the class-level setup conflict.
reader.close();
small.close();
super.tearDown();
}
/**
 * Readability macro: builds a TermRangeQuery over field {@code f} for the
 * range [l, h] (inclusivity per {@code il}/{@code ih}) and forces the
 * constant-score filter rewrite so every match scores identically.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/**
 * Readability macro: like the five-argument {@code csrq} but with an
 * explicitly chosen rewrite method instead of the constant-score filter.
 */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih);
  rangeQuery.setRewriteMethod(method);
  return rangeQuery;
}
/**
 * Readability macro: Collator-based variant — range endpoints are compared
 * with {@code c} rather than Unicode order; constant-score filter rewrite.
 */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  final TermRangeQuery rangeQuery = new TermRangeQuery(f, l, h, il, ih, c);
  rangeQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return rangeQuery;
}
/** Readability macro: PrefixQuery with the constant-score filter rewrite. */
public static Query cspq(Term prefix) {
  final PrefixQuery prefixQuery = new PrefixQuery(prefix);
  prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return prefixQuery;
}
/** Readability macro: WildcardQuery with the constant-score filter rewrite. */
public static Query cswcq(Term wild) {
  final WildcardQuery wildcardQuery = new WildcardQuery(wild);
  wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return wildcardQuery;
}
@Test
// Basic Query-contract checks (via QueryUtils) for the three constant-score
// macros: range, prefix and wildcard queries; distinct patterns must be
// unequal.
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
@Test
// Sanity-checks the Collator-based constant-score range query: QueryUtils
// verifies the Query contract, and two different ranges must be unequal.
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
@Test
// All documents matching a constant-score range query must receive the same
// score, regardless of how many terms in the range each document contains.
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms than others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// repeat with the boolean-query rewrite; constant-score semantics must
// still yield the identical score for every hit
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
@Test
// Verifies boost handling for constant-score multi-term queries: a boost
// must not leak into the normalized per-document score, but relative boosts
// between clauses of a BooleanQuery must still reorder the hits.
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// after normalization every hit must score exactly 1.0 despite the boost
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// same check with the scoring boolean-query rewrite
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// boost the first clause instead; the hit order must flip
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
@Test
// ANDing a constant-score range clause into a BooleanQuery must not change
// the ranking produced by the scoring TermRangeQuery alone.
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean query which also contains a
// ConstantScoreRangeQuery and make sure the order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
// fixed typo in the assertion message ("numebr" -> "number")
assertEquals("wrong number of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Same bounded/unbounded/degenerate-range coverage as testRangeQueryId, but every
 * csrq() call is given an English Collator, exercising the collating
 * TermRangeQuery code path. Uses the signed index built in the superclass setUp.
 */
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id (null endpoint means open-ended range)
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets (single-term and empty ranges)
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
/**
 * Range-query coverage over the "rand" field (random values, so only the
 * extremes minR/maxR are known): bounded, unbounded, and degenerate ranges.
 * Uses the signed index built in the superclass setUp.
 */
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded (null endpoint means open-ended range)
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets (single-term and empty ranges)
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
/**
 * Same "rand"-field coverage as testRangeQueryRand, but with an English
 * Collator supplied to csrq(). Uses the *unsigned* index because collation
 * appears to ignore the hyphens in signed (negative) values.
 */
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded (null endpoint means open-ended range)
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets (single-term and empty ranges)
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(new Field("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(new Field("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
Unstructured
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** Tolerance used when comparing float scores for equality (see the float assertEquals overload). */
public static final float SCORE_COMP_THRESH = 1e-6f;
<<<<<<< MINE
static Directory small;
static IndexReader reader;
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
=======
Directory small;
IndexReader reader;
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
>>>>>>> YOURS
}
/** Exact int equality assertion; delegates to JUnit's Assert. */
public static void assertEquals(String m, int e, int a) {
  Assert.assertEquals(m, e, a);
}
<<<<<<< MINE
@BeforeClass
public static void beforeClass() throws Exception {
=======
@Override
protected void setUp() throws Exception {
super.setUp();
>>>>>>> YOURS
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
<<<<<<< MINE
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
=======
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
>>>>>>> YOURS
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
<<<<<<< MINE
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
=======
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
>>>>>>> YOURS
}
/** Readability macro: constant-score (filter-rewrite) range query over field f, bounds [l, h]. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/** Readability macro: range query over field f with an explicit rewrite method. */
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih);
  q.setRewriteMethod(method);
  return q;
}
/** Readability macro: constant-score (filter-rewrite) range query whose term order is defined by Collator c. */
public static Query csrq(String f, String l, String h, boolean il,
    boolean ih, Collator c) {
  final TermRangeQuery q = new TermRangeQuery(f, l, h, il, ih, c);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/** Readability macro: constant-score (filter-rewrite) prefix query. */
public static Query cspq(Term prefix) {
  final PrefixQuery q = new PrefixQuery(prefix);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/** Readability macro: constant-score (filter-rewrite) wildcard query. */
public static Query cswcq(Term wild) {
  final WildcardQuery q = new WildcardQuery(wild);
  q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
  return q;
}
/**
 * Sanity checks on the constant-score query macros: each query passes
 * QueryUtils.check(), and queries built from different terms are unequal.
 */
@Test
public void testBasics() throws IOException {
  Query range1 = csrq("data", "1", "6", T, T);
  Query range2 = csrq("data", "A", "Z", T, T);
  QueryUtils.check(range1);
  QueryUtils.check(range2);
  QueryUtils.checkUnequal(range1, range2);

  Query prefix1 = cspq(new Term("data", "p*u?"));
  QueryUtils.check(prefix1);
  QueryUtils.checkUnequal(cspq(new Term("data", "pre*")),
      cspq(new Term("data", "pres*")));

  Query wild1 = cswcq(new Term("data", "p"));
  QueryUtils.check(wild1);
  QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")),
      cswcq(new Term("data", "pr*t?j")));
}
/**
 * Sanity checks on the collating range-query macro: queries pass
 * QueryUtils.check(), and different ranges compare unequal.
 */
@Test
public void testBasicsRngCollating() throws IOException {
  Collator english = Collator.getInstance(Locale.ENGLISH);
  Query numeric = csrq("data", "1", "6", T, T, english);
  Query alpha = csrq("data", "A", "Z", T, T, english);
  QueryUtils.check(numeric);
  QueryUtils.check(alpha);
  QueryUtils.checkUnequal(numeric, alpha);
}
/**
 * All hits of a constant-score query must receive the same score, regardless
 * of how many range terms each document matches — checked for both the default
 * (filter) rewrite and CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE. Uses the small
 * index built in this class's beforeClass.
 */
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
// every subsequent hit must score identically to the first
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
// same invariant under the boolean-query rewrite method
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
/**
 * Boost handling for constant-score queries:
 * 1) a boosted query still scores each hit 1.0 under the non-normalizing
 *    Collector path (query normalization is not applied there);
 * 2) relative boosts across two clauses of a BooleanQuery reorder hits as
 *    expected, for both the filter rewrite and the boolean-query rewrite.
 * Uses the small index built in this class's beforeClass.
 */
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
// constant-score: every collected doc must score exactly 1.0
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
// q2 (unboosted) should outrank q1 (boost 0.1)
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// same expectation under the boolean-query rewrite method
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
// flip the boost (10x on q1): document #0 should now rank first
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
/**
 * Adding a constant-score range clause to a BooleanQuery must not change the
 * ranking produced by the term-expanding TermRangeQuery alone: both searches
 * must return the same docs in the same order. Uses the small index built in
 * this class's beforeClass.
 */
@Test
public void testBooleanOrderUnAffected() throws IOException {
  // NOTE: uses index build in *this* setUp
  IndexSearcher search = new IndexSearcher(reader);
  // first do a regular TermRangeQuery which uses term expansion so
  // docs with more terms in range get higher scores
  Query plainRange = new TermRangeQuery("data", "1", "4", T, T);
  ScoreDoc[] expected = search.search(plainRange, null, 1000).scoreDocs;
  int hitCount = expected.length;
  // now do a boolean where which also contains a
  // ConstantScoreRangeQuery and make sure hte order is the same
  BooleanQuery combined = new BooleanQuery();
  combined.add(plainRange, BooleanClause.Occur.MUST);// T, F);
  combined.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
  ScoreDoc[] actual = search.search(combined, null, 1000).scoreDocs;
  assertEquals("wrong numebr of hits", hitCount, actual.length);
  for (int i = 0; i < hitCount; i++) {
    assertEquals("mismatch in docid for hit#" + i, expected[i].doc, actual[i].doc);
  }
}
/**
 * Exhaustive range-query coverage over the sequential "id" field: bounded,
 * unbounded (null endpoint), and degenerate (single-term / empty) ranges,
 * each also repeated with CONSTANT_SCORE_AUTO_REWRITE_DEFAULT. Uses the
 * signed index built in the superclass setUp.
 */
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id (null endpoint means open-ended range)
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets (single-term and empty ranges)
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Same bounded/unbounded/degenerate-range coverage as testRangeQueryId, but
 * every csrq() call is given an English Collator, exercising the collating
 * TermRangeQuery code path. Uses the signed index built in the superclass setUp.
 */
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id (null endpoint means open-ended range)
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets (single-term and empty ranges)
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
@Test
public void testRangeQueryRandCollating() throws IOException {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(newField("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(newField("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import junit.framework.Assert;
public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
<<<<<<< MINE
static Directory small;
static IndexReader reader;
static public void assertEquals(String m, float e, float a) {
Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
=======
Directory small;
IndexReader reader;
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
>>>>>>> YOURS
}
static public void assertEquals(String m, int e, int a) {
Assert.assertEquals(m, e, a);
}
<<<<<<< MINE
@BeforeClass
public static void beforeClass() throws Exception {
=======
@Override
protected void setUp() throws Exception {
super.setUp();
>>>>>>> YOURS
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
<<<<<<< MINE
small = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
=======
small = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
>>>>>>> YOURS
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(newField("id", String.valueOf(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
doc
.add(newField("all", "all", Field.Store.YES,
Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(newField("data", data[i], Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
<<<<<<< MINE
@AfterClass
public static void afterClass() throws Exception {
reader.close();
small.close();
reader = null;
small = null;
=======
@Override
protected void tearDown() throws Exception {
reader.close();
small.close();
super.tearDown();
>>>>>>> YOURS
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
query.setRewriteMethod(method);
return query;
}
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il,
boolean ih, Collator c) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
/** macro for readability */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
return query;
}
@Test
public void testBasics() throws IOException {
QueryUtils.check(csrq("data", "1", "6", T, T));
QueryUtils.check(csrq("data", "A", "Z", T, T));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
"Z", T, T));
QueryUtils.check(cspq(new Term("data", "p*u?")));
QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
"data", "pres*")));
QueryUtils.check(cswcq(new Term("data", "p")));
QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
"data", "pr*t?j")));
}
@Test
public void testBasicsRngCollating() throws IOException {
Collator c = Collator.getInstance(Locale.ENGLISH);
QueryUtils.check(csrq("data", "1", "6", T, T, c));
QueryUtils.check(csrq("data", "A", "Z", T, T, c));
QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
"Z", T, T, c));
}
@Test
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
// some hits match more terms then others, score should be the same
result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
int numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
float score = result[0].score;
for (int i = 1; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
assertEquals("score for " + i + " was not the same", score,
result[i].score);
}
}
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
Query q = csrq("data", "1", "6", T, T);
q.setBoost(100);
search.search(q, null, new Collector() {
private int base = 0;
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
}
@Override
public void setNextReader(IndexReader reader, int docBase) {
base = docBase;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
});
//
// Ensure that boosting works to score one clause of a query higher
// than another.
//
Query q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(.1f);
Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
BooleanQuery bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(1, hits[0].doc);
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T); // matches document #0
q1.setBoost(10f);
q2 = csrq("data", "Z", "Z", T, T); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
hits = search.search(bq, null, 1000).scoreDocs;
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
}
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
Query rq = new TermRangeQuery("data", "1", "4", T, T);
ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
int numHits = expected.length;
// now do a boolean where which also contains a
// ConstantScoreRangeQuery and make sure hte order is the same
BooleanQuery q = new BooleanQuery();
q.add(rq, BooleanClause.Occur.MUST);// T, F);
q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
assertEquals("wrong numebr of hits", numHits, actual.length);
for (int i = 0; i < numHits; i++) {
assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
actual[i].doc);
}
}
@Test
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
@Test
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Collator c = Collator.getInstance(Locale.ENGLISH);
// test id, bounded on both ends
result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets
result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("min,min,F,F,c", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("med,med,F,F,c", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
assertEquals("max,max,F,F,c", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("min,min,T,T,c", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
}
@Test
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// test extremes, bounded on both ends
result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded
result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets
result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
@Test
public void testRangeQueryRandCollating() throws IOException {
  // Exercises collated range queries over the random-term index built in the
  // *super* setUp. Uses the unsigned index because collation seems to ignore
  // hyphens, which would otherwise perturb term ordering.
  IndexReader reader = unsignedIndexReader;
  IndexSearcher search = new IndexSearcher(reader);
  String minRP = pad(unsignedIndexDir.minR);
  String maxRP = pad(unsignedIndexDir.maxR);
  int numDocs = reader.numDocs();
  assertEquals("num of docs", numDocs, 1 + maxId - minId);
  ScoreDoc[] result;
  Collator c = Collator.getInstance(Locale.ENGLISH);
  // test extremes, bounded on both ends
  result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("find all", numDocs, result.length);
  result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
  assertEquals("all but biggest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("all but smallest", numDocs - 1, result.length);
  result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("all but extremes", numDocs - 2, result.length);
  // unbounded on one end
  result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("smallest and up", numDocs, result.length);
  result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("biggest and down", numDocs, result.length);
  result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not smallest, but up", numDocs - 1, result.length);
  result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("not biggest, but down", numDocs - 1, result.length);
  // very small sets
  result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("min,min,F,F,c", 0, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
  assertEquals("max,max,F,F,c", 0, result.length);
  result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("min,min,T,T,c", 1, result.length);
  result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
  assertEquals("nul,min,F,T,c", 1, result.length);
  result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
  assertEquals("max,max,T,T,c", 1, result.length);
  // label fixed: the query bounds here are (T, F), not (T, T)
  result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
  assertEquals("max,nul,T,F,c", 1, result.length);
}
@Test
public void testFarsi() throws Exception {
/* build an index */
<<<<<<< MINE
Directory farsiIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory farsiIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
Document doc = new Document();
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc
.add(newField("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
Collator c = Collator.getInstance(new Locale("ar"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a ConstantScoreRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
reader.close();
farsiIndex.close();
}
@Test
public void testDanish() throws Exception {
/* build an index */
<<<<<<< MINE
Directory danishIndex = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
=======
RAMDirectory danishIndex = new RAMDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, new MockAnalyzer(MockTokenizer.SIMPLE, true));
>>>>>>> YOURS
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
for (int docnum = 0 ; docnum < words.length ; ++docnum) {
Document doc = new Document();
doc.add(newField("content", words[docnum],
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("body", "body",
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
ScoreDoc[] result = search.search
(csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
result = search.search
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
reader.close();
danishIndex.close();
}
}
Diff Result
No diff
Case 32 - java_lucenesolr.rev_2ede7_249fd.TestReplicationHandler.java
Base
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import java.io.*;
import java.net.URL;
import junit.framework.TestCase;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends TestCase {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
JettySolrRunner masterJetty, slaveJetty;
SolrServer masterClient, slaveClient;
SolrInstance master = null, slave = null;
String context = "/solr";
public void setUp() throws Exception {
super.setUp();
// Order matters: the master must be created AND started first, because the
// slave's config substitutes the master's live port (see SolrInstance.setUp,
// which replaces "TEST_PORT" via copyFile).
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// Slave is wired to the master's actual port, then started the same way.
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Override
public void tearDown() throws Exception {
// Stop both Jetty containers, then let each SolrInstance delete its home dir.
// NOTE(review): if setUp failed partway, these fields may be null and this
// would NPE, masking the original failure — confirm whether that matters here.
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
private JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  // Point the Solr webapp at this instance's home/data dirs before startup.
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  // Port 0 => Jetty picks a free port (read back via getLocalPort()).
  // Consistency fix: use the shared 'context' field rather than a duplicated
  // "/solr" literal, so the servlet context always matches the URLs built in
  // createNewSolrServer().
  JettySolrRunner jetty = new JettySolrRunner(context, 0);
  jetty.start();
  return jetty;
}
protected SolrServer createNewSolrServer(int port) {
  // Build an HTTP SolrJ client for the given local port, with generous
  // connection limits so the replication tests never starve on connections.
  try {
    final String baseUrl = "http://localhost:" + port + context;
    final CommonsHttpSolrServer server = new CommonsHttpSolrServer(baseUrl);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  } catch (Exception e) {
    // Test-setup failure: surface as unchecked so the test aborts loudly.
    throw new RuntimeException(e);
  }
}
/**
 * Adds one document built from (field-name, value) pairs to the given server.
 * Returns the status code of the add request.
 */
int index(SolrServer s, Object... fields) throws Exception {
  // Robustness: an odd number of varargs previously died with an
  // ArrayIndexOutOfBoundsException inside the loop; fail with a clear message.
  if (fields.length % 2 != 0)
    throw new IllegalArgumentException(
        "fields must be (name, value) pairs; got " + fields.length + " arguments");
  SolrInputDocument doc = new SolrInputDocument();
  for (int i = 0; i < fields.length; i += 2) {
    doc.addField((String) (fields[i]), fields[i + 1]);
  }
  return s.add(doc).getStatus();
}
/**
 * Runs the given query string against the server and returns the raw
 * response NamedList (callers pull "response" out of it).
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  // Dead store removed: 'res' was initialized to a fresh SimpleOrderedMap
  // and then immediately overwritten by the real response.
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
// Verifies index replication master->slave, then config-file (schema)
// replication after a schema change and master restart. Relies on the slave's
// 3s poll interval, hence the fixed sleeps.
public void testIndexAndConfigReplication() throws Exception {
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
if (slaveQueryResult.getNumFound() == 0) {
//try sleeping again in case of slower comp
Thread.sleep(5000);
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
}
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
// Restart master so it picks up the new schema; the slave config is then
// rewritten with the master's NEW port before restarting the slave.
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 3s for replication to happen.
Thread.sleep(3000);
// The slave can only hold "newname" if it replicated the new schema too.
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
// Like testIndexAndConfigReplication, but replicates a config file under an
// alias (schema-replication2.xml copied alongside the active schema.xml) and
// finally verifies a doc committed directly on the slave wins locally.
public void testIndexAndConfigAliasReplication() throws Exception {
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
if (slaveQueryResult.getNumFound() == 0) {
//try sleeping again in case of slower comp
Thread.sleep(5000);
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
}
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
// Restart master with the new config, rewrite the slave config with the
// master's new port, then restart the slave.
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 3s for replication to happen.
Thread.sleep(3000);
// Overwrite the same id directly on the slave; its local commit should win.
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
public void testStopPoll() throws Exception {
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
// NOTE(review): despite its name, 'masterUrl' targets the SLAVE's replication
// handler here — disablepoll is a slave-side command.
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(masterUrl);
// Opening the stream fires the HTTP command; the response body is ignored.
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
// Slave must still be at 500 since polling was disabled before doc 501.
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
// Comment fixed: this queries the MASTER to confirm it did index doc 501
// (the variables are merely reused).
slaveQueryRsp = query("*:*", masterClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(501, slaveQueryResult.getNumFound());
}
// Verifies an explicit fetchindex command with a masterUrl parameter works when
// the slave config has no polling interval (solrconfig-slave1.xml).
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"));
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
// snappull: issue fetchindex to the SLAVE, pointing it at the master's
// replication handler via the masterUrl request parameter.
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
// Opening the stream fires the command; the response body is ignored.
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
// Verifies the 'replicateAfter startup' master option: the slave is down while
// docs are indexed, the master restarts with solrconfig-master2.xml, and the
// freshly started slave must then pull the full index.
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// Rewrite the slave config with the master's new port before restarting it.
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
// Verifies that when replication is re-enabled on the master, the slave does a
// FULL index copy that discards docs written directly to the slave — even
// though the slave's version/generation were pushed higher than the master's.
public void testReplicateAfterWrite2Slave() throws Exception {
//add 50 docs to master
int nDocs = 50;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
// Disable replication on the master before committing, so the slave cannot
// pull these docs yet. Opening the URL fires the command; body is ignored.
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
Thread.sleep(100);
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
NamedList slaveQueryRsp = query("id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
// Re-enable replication on the master; slave should pull on its next poll.
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = query("id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
}
// Issues a backup command against the master, polls the details command until
// it reports success, then opens the snapshot directory directly and checks
// all 500 docs are present.
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// Fires the backup command asynchronously; any exception is captured in
// 'fail' for the test thread to check later.
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
// Polls the details command and records whether the response reports a
// successful backup.
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
// NOTE(review): run() is invoked directly (synchronously) even though
// CheckStatus extends Thread — it is never start()ed. Presumably
// intentional, but extending Thread is misleading; confirm.
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
// Give up after ~11 polls (~2.2s of sleeping).
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
// NOTE(review): backupThread.fail is read without join()ing the thread, so a
// late failure could be missed — confirm whether this race matters.
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
// NOTE(review): neither the searcher nor its directory is closed before the
// test ends — confirm whether tearDown's directory delete tolerates this.
IndexSearcher searcher = new IndexSearcher(new SimpleFSDirectory(snapDir.getAbsoluteFile(), null), true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(500, hits.totalHits);
}
/* Character copy of a file with no TEST_PORT substitution. NOTE(review): the
original comment claimed UTF-8, but the 3-arg overload uses FileReader /
FileWriter, which read and write in the platform default charset — confirm
UTF-8 is actually intended. */
void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 */
private void copyFile(File src, File dst, Integer port) throws IOException {
  // Fix: FileReader/FileWriter use the platform default charset, contradicting
  // the documented UTF-8 contract; read and write explicitly as UTF-8.
  // Also fixed: streams are now closed even if the copy fails midway.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  try {
    Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
    try {
      // Note: readLine() strips line terminators and none are re-written,
      // matching the original behavior (the copied XML tolerates this).
      for (String line = in.readLine(); null != line; line = in.readLine()) {
        if (null != port)
          line = line.replace("TEST_PORT", port.toString());
        out.write(line);
      }
    } finally {
      out.close();
    }
  } finally {
    in.close();
  }
}
// On-disk Solr home for one node (master or slave) under java.io.tmpdir.
// NOTE(review): 'dataDir' is assigned below but not declared here — presumably
// inherited from AbstractSolrTestCase; confirm.
private class SolrInstance extends AbstractSolrTestCase {
String name;
Integer masterPort;
File homeDir;
File confDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
@Override
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Picks the master or slave solrconfig template based on masterPort.
@Override
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates home/data/conf dirs and copies in solrconfig + schema. The copy of
// solrconfig substitutes TEST_PORT with the master's port for slaves
// (masterPort is null for masters, so no substitution happens there).
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
String home = System.getProperty("java.io.tmpdir")
+ File.separator
+ getClass().getName() + "-" + System.currentTimeMillis();
if (null == masterPort) {
homeDir = new File(home + "master");
dataDir = new File(home + "master", "data");
confDir = new File(home + "master", "conf");
} else {
homeDir = new File(home + "slave");
dataDir = new File(home + "slave", "data");
confDir = new File(home + "slave", "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
// Recursively deletes this instance's home directory.
public void tearDown() throws Exception {
super.tearDown();
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import java.io.*;
import java.net.URL;
import junit.framework.TestCase;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends TestCase {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
JettySolrRunner masterJetty, slaveJetty;
SolrServer masterClient, slaveClient;
SolrInstance master = null, slave = null;
String context = "/solr";
public void setUp() throws Exception {
super.setUp();
// Order matters: the master must be created AND started first, because the
// slave's config substitutes the master's live port (see SolrInstance.setUp,
// which replaces "TEST_PORT" via copyFile).
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// Slave is wired to the master's actual port, then started the same way.
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Override
public void tearDown() throws Exception {
// Stop both Jetty containers, then let each SolrInstance delete its home dir.
// NOTE(review): if setUp failed partway, these fields may be null and this
// would NPE, masking the original failure — confirm whether that matters here.
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
private JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  // Point the Solr webapp at this instance's home/data dirs before startup.
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  // Port 0 => Jetty picks a free port (read back via getLocalPort()).
  // Consistency fix: use the shared 'context' field rather than a duplicated
  // "/solr" literal, so the servlet context always matches the URLs built in
  // createNewSolrServer().
  JettySolrRunner jetty = new JettySolrRunner(context, 0);
  jetty.start();
  return jetty;
}
protected SolrServer createNewSolrServer(int port) {
  // Build an HTTP SolrJ client for the given local port, with generous
  // connection limits so the replication tests never starve on connections.
  try {
    final String baseUrl = "http://localhost:" + port + context;
    final CommonsHttpSolrServer server = new CommonsHttpSolrServer(baseUrl);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  } catch (Exception e) {
    // Test-setup failure: surface as unchecked so the test aborts loudly.
    throw new RuntimeException(e);
  }
}
/**
 * Adds one document built from (field-name, value) pairs to the given server.
 * Returns the status code of the add request.
 */
int index(SolrServer s, Object... fields) throws Exception {
  // Robustness: an odd number of varargs previously died with an
  // ArrayIndexOutOfBoundsException inside the loop; fail with a clear message.
  if (fields.length % 2 != 0)
    throw new IllegalArgumentException(
        "fields must be (name, value) pairs; got " + fields.length + " arguments");
  SolrInputDocument doc = new SolrInputDocument();
  for (int i = 0; i < fields.length; i += 2) {
    doc.addField((String) (fields[i]), fields[i + 1]);
  }
  return s.add(doc).getStatus();
}
/**
 * Runs the given query string against the server and returns the raw
 * response NamedList (callers pull "response" out of it).
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  // Dead store removed: 'res' was initialized to a fresh SimpleOrderedMap
  // and then immediately overwritten by the real response.
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
public void testIndexAndConfigReplication() throws Exception {
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
if (slaveQueryResult.getNumFound() == 0) {
//try sleeping again in case of slower comp
Thread.sleep(5000);
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
}
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 3s for replication to happen.
Thread.sleep(3000);
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
/**
 * Verifies that index data AND config files (solrconfig.xml + schema.xml) are
 * replicated master -> slave, then that a local write on the slave sticks.
 * Base variant: relies on fixed Thread.sleep() waits for the poll interval,
 * so it is inherently timing-sensitive on slow machines.
 */
public void testIndexAndConfigAliasReplication() throws Exception {
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
if (slaveQueryResult.getNumFound() == 0) {
//try sleeping again in case of slower comp
Thread.sleep(5000);
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
}
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
// restart both nodes so the new configs take effect; clients must be
// recreated because the jetty ports change on restart
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 3s for replication to happen.
Thread.sleep(3000);
// overwrite the same doc locally on the slave; the slave's newer commit
// should win, proving the slave index is writable after replication
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
/**
 * Verifies that disabling polling on the slave stops replication: after
 * {@code command=disablepoll} a new master doc must NOT appear on the slave.
 */
public void testStopPoll() throws Exception {
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
// NOTE: despite its name, this URL targets the SLAVE jetty --
// disablepoll is a slave-side command
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(masterUrl);
// opening the stream fires the request; the response body is irrelevant
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
slaveQueryRsp = query("*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
// slave must still be at 500 since polling was disabled
assertEquals(500, slaveQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
slaveQueryRsp = query("*:*", masterClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(501, slaveQueryResult.getNumFound());
}
/**
 * Verifies an explicit one-shot fetchindex with a masterUrl parameter works
 * when the slave config has no polling interval configured.
 */
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"));
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
// snappull
// build the slave-side fetchindex URL, pointing masterUrl at the master
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
// opening the stream fires the request; the response body is irrelevant
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
/**
 * Verifies the 'replicateAfter startup' master option: the slave, started
 * after the master is reconfigured and restarted, must pull the full index.
 */
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// rewrite the slave config so it points at the master's new port
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
/**
 * Verifies that re-enabling replication forces a full index copy that wipes
 * out local writes made on the slave while replication was disabled.
 */
public void testReplicateAfterWrite2Slave() throws Exception {
//add 50 docs to master
int nDocs = 50;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
// disable replication on the master before committing, so the slave
// cannot pick up the new docs yet
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
Thread.sleep(100);
// five separate commits bump the slave's generation well past the master's
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
NamedList slaveQueryRsp = query("id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = query("id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
}
/**
 * Verifies the replication handler's backup command: triggers a backup,
 * polls the details command until it reports success, then opens the
 * snapshot directory with Lucene and checks the doc count.
 */
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// fires the backup command in the background; any exception message is
// captured in 'fail' for the main thread to assert on
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
// polls the details command and scans the raw XML for the success marker
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
// NOTE: run() is called directly (synchronously) -- CheckStatus extends
// Thread only to reuse the structure, it is never start()ed
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
// give up after ~2 seconds (10 x 200ms) of polling
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
// the backup command writes a "snapshot.<timestamp>" dir under dataDir
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
IndexSearcher searcher = new IndexSearcher(new SimpleFSDirectory(snapDir.getAbsoluteFile(), null), true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(500, hits.totalHits);
}
/* character copy of file using UTF-8 */
// NOTE(review): the delegate uses FileReader/FileWriter, which read and write
// in the platform default charset, not UTF-8 as documented -- confirm intent.
void copyFile(File src, File dst) throws IOException {
// delegate with no port substitution
copyFile(src, dst, null);
}
/**
 * Character copy of {@code src} to {@code dst} using UTF-8. If {@code port}
 * is non-null, every occurrence of "TEST_PORT" in a line is replaced with
 * the port value.
 *
 * @param src  source file to read
 * @param dst  destination file (overwritten)
 * @param port port to substitute for "TEST_PORT", or null for a plain copy
 * @throws IOException on any read/write failure
 */
private void copyFile(File src, File dst, Integer port) throws IOException {
// FileReader/FileWriter used the platform default charset, contradicting
// the documented UTF-8 contract; use explicit UTF-8 streams instead.
BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
try {
Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
try {
for (String line = in.readLine(); null != line; line = in.readLine()) {
if (null != port)
line = line.replace("TEST_PORT", port.toString());
out.write(line);
// readLine() strips line terminators; restore one so the copy keeps
// the source's line structure instead of collapsing to a single line
out.write('\n');
}
} finally {
// nested finally blocks guarantee both streams close even on error,
// fixing the leak when an exception occurred mid-copy
out.close();
}
} finally {
in.close();
}
}
/**
 * Helper that materializes a standalone Solr home (home/conf/data dirs plus
 * solrconfig.xml and schema.xml) for either a master or a slave node under
 * the system temp dir. Base variant: extends AbstractSolrTestCase.
 */
private class SolrInstance extends AbstractSolrTestCase {
String name;
// null => this instance is a master; non-null => slave of localhost:masterPort
Integer masterPort;
File homeDir;
File confDir;
/**
 * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
 * on localhost at the specified port.
 */
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
@Override
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
@Override
public String getSolrConfigFile() {
// masters and slaves start from different solrconfig templates
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates the home/data/conf directory tree and copies in config files,
// substituting the master port into the slave's solrconfig.
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
// unique per-run home under the temp dir to avoid cross-test reuse
String home = System.getProperty("java.io.tmpdir")
+ File.separator
+ getClass().getName() + "-" + System.currentTimeMillis();
if (null == masterPort) {
homeDir = new File(home + "master");
dataDir = new File(home + "master", "data");
confDir = new File(home + "master", "conf");
} else {
homeDir = new File(home + "slave");
dataDir = new File(home + "slave", "data");
confDir = new File(home + "slave", "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
public void tearDown() throws Exception {
super.tearDown();
// remove the whole per-instance home tree
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
Left
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
// Location of the replication test config files shipped with the test.
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
// One master/slave pair shared across all test methods (see beforeClass).
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
// Servlet context path shared by both jetty instances.
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
// Starts one master and one slave jetty (slave configured against the
// master's actual port) once for the whole test class.
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// the slave can only be configured after the master's port is known
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
// Empties the master index (if non-empty) and waits for the delete to
// replicate to the slave, so each test starts from a clean pair.
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
}
// Stops both jetty instances and deletes the per-instance solr homes.
@AfterClass
public static void afterClass() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
/**
 * Boots a jetty on an ephemeral port (0) with its solr home and data dir
 * taken from {@code instance}.
 *
 * @param instance the prepared master or slave home
 * @return the started runner; query {@code getLocalPort()} for the bound port
 * @throws Exception if jetty fails to start
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
// use the shared 'context' constant instead of a duplicated "/solr"
// literal so the context path is defined in exactly one place
JettySolrRunner jetty = new JettySolrRunner(context, 0);
jetty.start();
return jetty;
}
// Builds an HTTP SolrJ client for localhost:<port> under the shared context,
// with generous connection limits; wraps checked setup failures unchecked.
private static SolrServer createNewSolrServer(int port) {
try {
// setup the server...
String url = "http://localhost:" + port + context;
CommonsHttpSolrServer s = new CommonsHttpSolrServer(url);
s.setDefaultMaxConnectionsPerHost(100);
s.setMaxTotalConnections(100);
return s;
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
 * Adds one document to {@code s}, built from alternating field-name /
 * field-value pairs in {@code fields}, and returns the add-request status.
 */
int index(SolrServer s, Object... fields) throws Exception {
SolrInputDocument document = new SolrInputDocument();
int pos = 0;
while (pos < fields.length) {
// even slots are field names, odd slots the corresponding values
document.addField((String) fields[pos], fields[pos + 1]);
pos += 2;
}
return s.add(document).getStatus();
}
/**
 * Runs {@code query} (as the q parameter) against {@code s} and returns the
 * raw response NamedList.
 *
 * @throws SolrServerException if the request fails
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", query);
QueryResponse qres = s.query(params);
// the original allocated a throwaway SimpleOrderedMap that was
// immediately overwritten; return the response directly instead
return qres.getResponse();
}
/**
 * Repeatedly runs {@code query} against {@code server}, sleeping 100ms
 * between attempts, until the response holds {@code expectedDocCount} docs
 * or ~30 seconds have elapsed; returns the last response either way.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
NamedList response;
SolrDocumentList docs;
int elapsed = 0;
while (true) {
response = query(query, server);
docs = (SolrDocumentList) response.get("response");
elapsed += 100;
// matches the original pacing: sleep even after a successful match
Thread.sleep(100);
if (docs.getNumFound() == expectedDocCount || elapsed >= 30000) {
break;
}
}
return response;
}
/**
 * Verifies that re-enabling replication forces a full index copy that wipes
 * out local writes made on the slave while replication was disabled.
 * Left variant: uses rQuery() polling instead of fixed sleeps.
 */
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
// disable replication on the master before committing, so the slave
// cannot pick up the new docs yet
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
/**
 * Verifies index replication plus schema.xml replication: after the master's
 * schema changes and both nodes restart, a doc using the new field must
 * replicate to the slave with the new field intact.
 */
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
// restart both nodes; clients must be recreated because ports change
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
/**
 * Verifies that disabling polling on the slave stops replication, and that
 * enabling it again resumes replication. Left variant: re-enables polling
 * at the end so later tests are unaffected.
 */
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
// opening the stream fires the request; the response body is irrelevant
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
}
/**
 * Verifies an explicit one-shot fetchindex with a masterUrl parameter when
 * the slave config has no polling interval. Left variant: restores the
 * polling slave config afterwards so later tests are unaffected.
 */
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
// build the slave-side fetchindex URL, pointing masterUrl at the master
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
// opening the stream fires the request; the response body is irrelevant
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Verifies the 'replicateAfter startup' master option: the slave, started
 * after the master is reconfigured and restarted, must pull the full index.
 * Left variant: reverts the master to its normal config afterwards.
 */
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// rewrite the slave config so it points at the master's new port
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Verifies that both index data and config files (solrconfig + schema) are
 * replicated master -> slave, and that a subsequent local write on the
 * slave wins. Left variant: uses rQuery() polling instead of fixed sleeps.
 */
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
// restart both nodes; clients must be recreated because ports change
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
// overwrite the same doc locally on the slave; the slave's newer commit
// should win, proving the slave index is writable after replication
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
/**
 * Verifies the replication handler's backup command: triggers a backup,
 * polls the details command until it reports success, then opens the
 * snapshot directory with Lucene and checks the doc count. Left variant:
 * closes the searcher and directory when done.
 */
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// fires the backup command in the background; any exception message is
// captured in 'fail' for the main thread to assert on
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
// polls the details command and scans the raw XML for the success marker
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
// NOTE: run() is called directly (synchronously) -- CheckStatus extends
// Thread only to reuse the structure, it is never start()ed
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
// give up after ~2 seconds (10 x 200ms) of polling
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
// the backup command writes a "snapshot.<timestamp>" dir under dataDir
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(nDocs, hits.totalHits);
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
private static void copyFile(File src, File dst) throws IOException {
  // Convenience overload: no TEST_PORT substitution.
  copyFile(src, dst, null);
}
/**
 * Character copy of {@code src} to {@code dst} using UTF-8. If {@code port} is
 * non-null, every occurrence of the literal "TEST_PORT" is replaced with it.
 * <p>
 * Fixes over the previous version: explicit UTF-8 (FileReader/FileWriter used
 * the platform default charset despite the documented contract), line
 * terminators are preserved (readLine() strips them, so the old loop collapsed
 * the whole file onto one line), and both streams are closed on failure.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
      out.write('\n'); // restore the terminator readLine() removed
    }
  } finally {
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
/**
 * Creates and owns an on-disk Solr home (conf + data dirs) for one test node,
 * and knows which solrconfig template to install for a master vs. a slave.
 */
private static class SolrInstance {
  String name;
  // Null for a master; for a slave, the master port substituted into its config.
  Integer masterPort;
  File homeDir;
  File confDir;
  File dataDir;
  /**
   * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
   * on localhost at the specified port.
   */
  public SolrInstance(String name, Integer port) {
    this.name = name;
    this.masterPort = port;
  }
  public String getHomeDir() {
    return homeDir.toString();
  }
  public String getSchemaFile() {
    return CONF_DIR + "schema-replication1.xml";
  }
  public String getConfDir() {
    return confDir.toString();
  }
  public String getDataDir() {
    return dataDir.toString();
  }
  /** Master and slave use different solrconfig templates. */
  public String getSolrConfigFile() {
    String fname = "";
    if (null == masterPort)
      fname = CONF_DIR + "solrconfig-master.xml";
    else
      fname = SLAVE_CONFIG;
    return fname;
  }
  /** Creates home/conf/data dirs and installs solrconfig.xml and schema.xml. */
  public void setUp() throws Exception {
    System.setProperty("solr.test.sys.prop1", "propone");
    System.setProperty("solr.test.sys.prop2", "proptwo");
    File home = new File(TEMP_DIR,
        getClass().getName() + "-" + System.currentTimeMillis());
    if (null == masterPort) {
      homeDir = new File(home, "master");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    } else {
      homeDir = new File(home, "slave");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    }
    homeDir.mkdirs();
    dataDir.mkdirs();
    confDir.mkdirs();
    File f = new File(confDir, "solrconfig.xml");
    // masterPort is forwarded so a slave config gets TEST_PORT replaced.
    copyFile(new File(getSolrConfigFile()), f, masterPort);
    f = new File(confDir, "schema.xml");
    copyFile(new File(getSchemaFile()), f);
  }
  /** Recursively deletes this instance's home directory. */
  public void tearDown() throws Exception {
    AbstractSolrTestCase.recurseDelete(homeDir);
  }
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
/** Directory holding the replication test config/schema files. */
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
/** Slave solrconfig template; its TEST_PORT placeholder is rewritten with the live master port. */
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
/**
 * Brings up a master, then a slave whose config points at the master's
 * (ephemeral) port via TEST_PORT substitution in copyFile.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  master = new SolrInstance("master", null);
  master.setUp();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  slave = new SolrInstance("slave", masterJetty.getLocalPort());
  slave.setUp();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Deletes all docs on the master and waits until the delete has replicated to
 * the slave, so a test starts from an empty, in-sync master/slave pair.
 */
public void clearIndexWithReplication() throws Exception {
  NamedList res = query("*:*", masterClient);
  SolrDocumentList docs = (SolrDocumentList)res.get("response");
  if (docs.getNumFound() != 0) {
    masterClient.deleteByQuery("*:*");
    masterClient.commit();
    // wait for replication to sync
    res = rQuery(0, "*:*", slaveClient);
    assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
  }
}
@AfterClass
public static void afterClass() throws Exception {
  // Stop both Jetty instances before deleting their home directories.
  masterJetty.stop();
  slaveJetty.stop();
  master.tearDown();
  slave.tearDown();
}
/**
 * Starts a Jetty-hosted Solr for the given instance on an ephemeral port
 * (port 0); callers read the bound port via {@code getLocalPort()}.
 * NOTE(review): communicates via global system properties, so concurrent
 * calls would race -- fine for this single-threaded test setup.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
  jetty.start();
  return jetty;
}
/**
 * Builds a SolrJ HTTP client pointed at the local test core on the given
 * port. Checked failures are rethrown unchecked so static setup code can
 * call this without declaring exceptions.
 */
private static SolrServer createNewSolrServer(int port) {
  String url = "http://localhost:" + port + context;
  try {
    CommonsHttpSolrServer server = new CommonsHttpSolrServer(url);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  }
  catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Adds one document built from alternating (field name, value) pairs to the
 * given server and returns the update response status.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  int i = 0;
  while (i < fields.length) {
    doc.addField((String) fields[i], fields[i + 1]);
    i += 2;
  }
  return s.add(doc).getStatus();
}
/**
 * Runs {@code query} against {@code s} and returns the raw response NamedList.
 *
 * @throws SolrServerException if the request fails
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  // The previous version pre-allocated a SimpleOrderedMap that was
  // immediately overwritten -- a dead store; return the response directly.
  return qres.getResponse();
}
/**
 * Polls {@code server} with {@code query} until the response holds exactly
 * {@code expectedDocCount} docs, sleeping 100ms between attempts for up to
 * 30 seconds. Returns the last response either way; callers assert on it.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  // Check before sleeping: the old do/while always slept 100ms even when the
  // very first query already returned the expected count.
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
/**
 * Writes docs directly to the slave while master replication is disabled,
 * then re-enables replication and verifies the slave's divergent index is
 * completely replaced by the master's.
 */
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
  clearIndexWithReplication();
  nDocs--;
  for (int i = 0; i < nDocs; i++) {
    index(masterClient, "id", i, "name", "name = " + i);
  }
  // Turn replication off on the master while we deliberately dirty the slave.
  String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
  URL url = new URL(masterUrl);
  InputStream stream = url.openStream();
  try {
    stream.close();
  } catch (IOException e) {
    //e.printStackTrace();
  }
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  // Make sure that both the index version and index generation on the slave is
  // higher than that of the master, just to make the test harder.
  index(slaveClient, "id", 551, "name", "name = " + 551);
  slaveClient.commit(true, true);
  index(slaveClient, "id", 552, "name", "name = " + 552);
  slaveClient.commit(true, true);
  index(slaveClient, "id", 553, "name", "name = " + 553);
  slaveClient.commit(true, true);
  index(slaveClient, "id", 554, "name", "name = " + 554);
  slaveClient.commit(true, true);
  index(slaveClient, "id", 555, "name", "name = " + 555);
  slaveClient.commit(true, true);
  //this doc is added to slave so it should show an item w/ that result
  SolrDocumentList slaveQueryResult = null;
  NamedList slaveQueryRsp;
  slaveQueryRsp = rQuery(1, "id:555", slaveClient);
  slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(1, slaveQueryResult.getNumFound());
  // Re-enable replication; the slave's divergent index forces a full copy.
  masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
  url = new URL(masterUrl);
  stream = url.openStream();
  try {
    stream.close();
  } catch (IOException e) {
    //e.printStackTrace();
  }
  //the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
  slaveQueryRsp = rQuery(0, "id:555", slaveClient);
  slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(0, slaveQueryResult.getNumFound());
  // make sure we replicated the correct index from the master
  slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
}
/**
 * Verifies normal index replication master->slave, then replaces the schema
 * on the master and checks that config-file replication ships it to the
 * slave (a doc using the new "newname" field becomes searchable there).
 */
@Test
public void testIndexAndConfigReplication() throws Exception {
  clearIndexWithReplication();
  nDocs--;
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  //start config files replication test
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  //change the schema on master
  copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
  masterJetty.stop();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  // Master restarted on a new port, so rewrite the slave config and restart it too.
  copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
  //add a doc with new field and commit on master to trigger snappull from slave.
  index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
  masterClient.commit();
  NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
  SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
  assertEquals(1, masterQueryResult2.getNumFound());
  slaveQueryRsp = rQuery(1, "*:*", slaveClient);
  SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
  assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
/**
 * Disables polling on the slave, adds a doc to the master, and verifies the
 * slave does NOT pick it up; then re-enables polling and verifies it does.
 */
@Test
public void testStopPoll() throws Exception {
  clearIndexWithReplication();
  // Test:
  // setup master/slave.
  // stop polling on slave, add a doc to master and verify slave hasn't picked it.
  nDocs--;
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  // start stop polling test
  String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
  URL url = new URL(slaveURL);
  InputStream stream = url.openStream();
  try {
    stream.close();
  } catch (IOException e) {
    //e.printStackTrace();
  }
  index(masterClient, "id", 501, "name", "name = " + 501);
  masterClient.commit();
  //get docs from master and check if number is equal to master
  masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
  masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs+1, masterQueryResult.getNumFound());
  // NOTE: this test is wierd, we want to verify it DOESNT replicate...
  // for now, add a sleep for this.., but the logic is wierd.
  Thread.sleep(3000);
  //get docs from slave and check if number is not equal to master; polling is disabled
  slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  // re-enable replication
  slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
  url = new URL(slaveURL);
  stream = url.openStream();
  try {
    stream.close();
  } catch (IOException e) {
    //e.printStackTrace();
  }
  slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
  slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs+1, slaveQueryResult.getNumFound());
}
/**
 * Uses an explicit fetchindex&amp;masterUrl=... request (the slave config here
 * has no polling interval) to pull the master's index once, then restores
 * the normal polling slave config.
 */
@Test
public void testSnapPullWithMasterUrl() throws Exception {
  //change solrconfig on slave
  //this has no entry for pollinginterval
  copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
  masterClient.deleteByQuery("*:*");
  nDocs--;
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  // snappull
  String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
  masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
  URL url = new URL(masterUrl);
  InputStream stream = url.openStream();
  try {
    stream.close();
  } catch (IOException e) {
    //e.printStackTrace();
  }
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  // NOTE: at this point, the slave is not polling any more
  // restore it.
  copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Runs the master with a 'replicateAfter startup' config
 * (solrconfig-master2.xml), verifies a freshly started slave replicates the
 * index, then restores the default master config.
 */
@Test
public void testReplicateAfterStartup() throws Exception {
  //stop slave
  slaveJetty.stop();
  nDocs--;
  masterClient.deleteByQuery("*:*");
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  //change solrconfig having 'replicateAfter startup' option on master
  copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
      new File(master.getConfDir(), "solrconfig.xml"));
  masterJetty.stop();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  // Point the slave at the master's new port before starting it.
  copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  //start slave
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  // NOTE: the master only replicates after startup now!
  // revert that change.
  copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
  masterJetty.stop();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  //start slave
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Variant of testIndexAndConfigReplication that runs the master on
 * solrconfig-master1.xml, replicates both solrconfig and schema changes, and
 * finally verifies the slave index remains independently writable.
 */
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
  clearIndexWithReplication();
  nDocs--;
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(nDocs, masterQueryResult.getNumFound());
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(nDocs, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  //start config files replication test
  //clear master index
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  //change solrconfig on master
  copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
  //change schema on master
  copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
  //keep a copy of the new schema
  copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
  masterJetty.stop();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
  //add a doc with new field and commit on master to trigger snappull from slave.
  index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
  masterClient.commit();
  NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
  SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
  assertEquals(1, masterQueryResult2.getNumFound());
  NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
  assertEquals(1, slaveQueryResult2.getNumFound());
  // Overwrite the same doc directly on the slave to prove it accepts updates.
  index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
  slaveClient.commit();
  slaveQueryRsp = rQuery(1, "*:*", slaveClient);
  SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
  assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
/**
 * Triggers a backup via the master's replication handler, polls the details
 * command until it reports success, then opens the resulting "snapshot*"
 * directory with a plain Lucene IndexSearcher and checks the doc count.
 */
@Test
public void testBackup() throws Exception {
  masterJetty.stop();
  copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  nDocs--;
  masterClient.deleteByQuery("*:*");
  for (int i = 0; i < nDocs; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  // Fires the backup request on a separate thread; any exception message is
  // captured in the volatile 'fail' field for the main thread to assert on.
  class BackupThread extends Thread {
    volatile String fail = null;
    public void run() {
      String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
      URL url;
      InputStream stream = null;
      try {
        url = new URL(masterUrl);
        stream = url.openStream();
        stream.close();
      } catch (Exception e) {
        fail = e.getMessage();
      } finally {
        IOUtils.closeQuietly(stream);
      }
    };
  };
  BackupThread backupThread = new BackupThread();
  backupThread.start();
  File dataDir = new File(master.getDataDir());
  // Queries the details command and scans the raw XML for a success status.
  class CheckStatus extends Thread {
    volatile String fail = null;
    volatile String response = null;
    volatile boolean success = false;
    public void run() {
      String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
      URL url;
      InputStream stream = null;
      try {
        url = new URL(masterUrl);
        stream = url.openStream();
        response = IOUtils.toString(stream);
        if(response.contains("<str name=\"status\">success</str>")) {
          success = true;
        }
        stream.close();
      } catch (Exception e) {
        fail = e.getMessage();
      } finally {
        IOUtils.closeQuietly(stream);
      }
    };
  };
  int waitCnt = 0;
  CheckStatus checkStatus = new CheckStatus();
  while(true) {
    // NOTE: run() is invoked directly (not start()) -- the status poll is
    // deliberately synchronous on this thread.
    checkStatus.run();
    if(checkStatus.fail != null) {
      fail(checkStatus.fail);
    }
    if(checkStatus.success) {
      break;
    }
    Thread.sleep(200);
    // give up after ~2 seconds (10 polls x 200ms) of not seeing success
    if(waitCnt == 10) {
      fail("Backup success not detected:" + checkStatus.response);
    }
    waitCnt++;
  }
  if(backupThread.fail != null) {
    fail(backupThread.fail);
  }
  // The backup appears as a directory whose name starts with "snapshot".
  File[] files = dataDir.listFiles(new FilenameFilter() {
    public boolean accept(File dir, String name) {
      if(name.startsWith("snapshot")) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  File snapDir = files[0];
  // Open the snapshot directly with Lucene to verify it is a complete index.
  Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
  IndexSearcher searcher = new IndexSearcher(dir, true);
  TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
  assertEquals(nDocs, hits.totalHits);
  searcher.close();
  dir.close();
}
/* character copy of file using UTF-8 */
private static void copyFile(File src, File dst) throws IOException {
  // Convenience overload: no TEST_PORT substitution.
  copyFile(src, dst, null);
}
/**
 * Character copy of {@code src} to {@code dst} using UTF-8. If {@code port} is
 * non-null, every occurrence of the literal "TEST_PORT" is replaced with it.
 * <p>
 * Fixes over the previous version: explicit UTF-8 (FileReader/FileWriter used
 * the platform default charset despite the documented contract), line
 * terminators are preserved (readLine() strips them, so the old loop collapsed
 * the whole file onto one line), and both streams are closed on failure.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
      out.write('\n'); // restore the terminator readLine() removed
    }
  } finally {
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
/**
 * Creates and owns an on-disk Solr home (conf + data dirs) for one test node,
 * and knows which solrconfig template to install for a master vs. a slave.
 */
private static class SolrInstance {
  String name;
  // Null for a master; for a slave, the master port substituted into its config.
  Integer masterPort;
  File homeDir;
  File confDir;
  File dataDir;
  /**
   * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
   * on localhost at the specified port.
   */
  public SolrInstance(String name, Integer port) {
    this.name = name;
    this.masterPort = port;
  }
  public String getHomeDir() {
    return homeDir.toString();
  }
  public String getSchemaFile() {
    return CONF_DIR + "schema-replication1.xml";
  }
  public String getConfDir() {
    return confDir.toString();
  }
  public String getDataDir() {
    return dataDir.toString();
  }
  /** Master and slave use different solrconfig templates. */
  public String getSolrConfigFile() {
    String fname = "";
    if (null == masterPort)
      fname = CONF_DIR + "solrconfig-master.xml";
    else
      fname = SLAVE_CONFIG;
    return fname;
  }
  /** Creates home/conf/data dirs and installs solrconfig.xml and schema.xml. */
  public void setUp() throws Exception {
    System.setProperty("solr.test.sys.prop1", "propone");
    System.setProperty("solr.test.sys.prop2", "proptwo");
    File home = new File(TEMP_DIR,
        getClass().getName() + "-" + System.currentTimeMillis());
    if (null == masterPort) {
      homeDir = new File(home, "master");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    } else {
      homeDir = new File(home, "slave");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    }
    homeDir.mkdirs();
    dataDir.mkdirs();
    confDir.mkdirs();
    File f = new File(confDir, "solrconfig.xml");
    // masterPort is forwarded so a slave config gets TEST_PORT replaced.
    copyFile(new File(getSolrConfigFile()), f, masterPort);
    f = new File(confDir, "schema.xml");
    copyFile(new File(getSchemaFile()), f);
  }
  /** Recursively deletes this instance's home directory. */
  public void tearDown() throws Exception {
    AbstractSolrTestCase.recurseDelete(homeDir);
  }
}
}
/* Right -- revision marker from the dataset concatenation (separates file versions); not part of the Java source */
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
/** Directory holding the replication test config/schema files. */
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
/** Slave solrconfig template; its TEST_PORT placeholder is rewritten with the live master port. */
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
/**
 * Brings up a master, then a slave whose config points at the master's
 * (ephemeral) port via TEST_PORT substitution in copyFile.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  master = new SolrInstance("master", null);
  master.setUp();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  slave = new SolrInstance("slave", masterJetty.getLocalPort());
  slave.setUp();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/** Empties both master and slave and waits for each to report zero docs before every test. */
@Before
public void setUp() throws Exception {
  super.setUp();
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  // wait until the master reports an empty index
  rQuery(0, "*:*", masterClient);
  slaveClient.deleteByQuery("*:*");
  slaveClient.commit();
  rQuery(0, "*:*", slaveClient);
}
@AfterClass
public static void afterClass() throws Exception {
  // Stop both Jetty instances before deleting their home directories.
  masterJetty.stop();
  slaveJetty.stop();
  master.tearDown();
  slave.tearDown();
}
/**
 * Starts a Jetty-hosted Solr for the given instance on an ephemeral port
 * (port 0); callers read the bound port via {@code getLocalPort()}.
 * NOTE(review): communicates via global system properties, so concurrent
 * calls would race -- fine for this single-threaded test setup.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
  jetty.start();
  return jetty;
}
/**
 * Builds a SolrJ HTTP client pointed at the local test core on the given
 * port. Checked failures are rethrown unchecked so static setup code can
 * call this without declaring exceptions.
 */
private static SolrServer createNewSolrServer(int port) {
  String url = "http://localhost:" + port + context;
  try {
    CommonsHttpSolrServer server = new CommonsHttpSolrServer(url);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  }
  catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Adds one document built from alternating (field name, value) pairs to the
 * given server and returns the update response status.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  int i = 0;
  while (i < fields.length) {
    doc.addField((String) fields[i], fields[i + 1]);
    i += 2;
  }
  return s.add(doc).getStatus();
}
/**
 * Runs {@code query} against {@code s} and returns the raw response NamedList.
 *
 * @throws SolrServerException if the request fails
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  // The previous version pre-allocated a SimpleOrderedMap that was
  // immediately overwritten -- a dead store; return the response directly.
  return qres.getResponse();
}
/**
 * Polls {@code server} with {@code query} until the response holds exactly
 * {@code expectedDocCount} docs, sleeping 100ms between attempts for up to
 * 30 seconds. Returns the last response either way; callers assert on it.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  // Check before sleeping: the old do/while always slept 100ms even when the
  // very first query already returned the expected count.
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
/**
 * Verifies normal index replication master->slave, then replaces the schema
 * on the master and checks that config-file replication ships it to the
 * slave (a doc using the new "newname" field becomes searchable there).
 */
@Test
public void testIndexAndConfigReplication() throws Exception {
  //add 500 docs to master
  for (int i = 0; i < 500; i++)
    index(masterClient, "id", i, "name", "name = " + i);
  masterClient.commit();
  NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
  SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
  assertEquals(500, masterQueryResult.getNumFound());
  //get docs from slave and check if number is equal to master
  NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
  SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
  assertEquals(500, slaveQueryResult.getNumFound());
  //compare results
  String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
  assertEquals(null, cmp);
  //start config files replication test
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  //change the schema on master
  copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
  masterJetty.stop();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  // Master restarted on a new port, so rewrite the slave config and restart it too.
  copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
  slaveJetty.stop();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
  //add a doc with new field and commit on master to trigger snappull from slave.
  index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
  masterClient.commit();
  NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
  SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
  assertEquals(1, masterQueryResult2.getNumFound());
  slaveQueryRsp = rQuery(1, "*:*", slaveClient);
  SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
  assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
// Like testIndexAndConfigReplication, but also replicates an aliased
// config file (schema-replication2.xml) alongside solrconfig/schema.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema under its own name so it can be replicated as an alias
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
// restart master so it loads the new config
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// regenerate the slave config because the master's port changed on restart
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
// index directly on the slave using the replicated schema's new field
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it up.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test: hit the slave's replication handler over HTTP
// to disable polling (the URL var name "masterUrl" is historical — it
// actually targets the slave jetty)
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
// NOTE: this test is weird — we want to verify the slave DOESN'T replicate,
// so for now just sleep longer than a poll interval and re-check.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
// Verifies an explicit fetchindex request with a masterUrl parameter works
// even when the slave config has no polling configured.
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"));
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
// snappull: tell the slave (via HTTP) to fetch the index from the given master
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterStartup() throws Exception {
// Verifies the 'replicateAfter startup' option: docs committed while the
// slave is down must replicate once the slave comes back up.
//stop slave
slaveJetty.stop();
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
// restart master so it loads the new config
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// regenerate the slave config because the master's port changed on restart
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
// Verifies that when the slave's index version/generation are AHEAD of the
// master's, re-enabling replication still forces a full index copy that
// discards docs written directly to the slave.
int nDocs = 50;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
// turn replication off on the master before committing, so the slave
// cannot pick up the new docs yet
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave
// are higher than on the master, just to make the test harder: each
// add + hard commit bumps the slave's generation.
for (int id = 551; id <= 555; id++) {
index(slaveClient, "id", id, "name", "name = " + id);
slaveClient.commit(true, true);
}
// the doc added directly to the slave must be visible there
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
// re-enable replication on the master
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
// the slave should do a full copy of the master's index, so the doc with
// id:555 must no longer be present on the slave
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
// Issues a backup command against the master, polls the details command
// until the backup reports success, then opens the snapshot directory as
// a Lucene index and verifies the doc count.
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// fires the backup command over HTTP; any failure message lands in 'fail'
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
// queries the details command and scans the raw XML for a success status
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
// poll for backup success; run() is invoked synchronously on purpose —
// CheckStatus only extends Thread for the volatile result fields
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
// give up after ~2s (10 * 200ms) of polling
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
// the backup is written to a snapshot.* directory under the data dir
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
// open the snapshot directly as a Lucene index and verify all docs made it
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(500, hits.totalHits);
searcher.close();
dir.close();
}
/* character copy of file using UTF-8; convenience overload with no port substitution */
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of a file using UTF-8. If port is non-null, every occurrence of
 * "TEST_PORT" in a line is replaced with the port number, so config templates can
 * point at the running master jetty.
 *
 * @param src  source file, read as UTF-8
 * @param dst  destination file, overwritten as UTF-8
 * @param port port substituted for "TEST_PORT", or null for a plain copy
 * @throws IOException if either file cannot be read or written
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
// use explicit UTF-8 streams — FileReader/FileWriter would use the
// platform default charset, contradicting this method's contract
BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
try {
Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
try {
for (String line = in.readLine(); null != line; line = in.readLine()) {
if (null != port)
line = line.replace("TEST_PORT", port.toString());
out.write(line);
// readLine() strips the terminator; write one back so the copy
// keeps its line structure instead of collapsing to one line
out.write('\n');
}
} finally {
out.close();
}
} finally {
// close in a finally block so an exception mid-copy does not leak streams
in.close();
}
}
/**
 * Filesystem scaffolding for one test Solr core (a master or a slave).
 * Creates a throwaway home/conf/data directory tree under java.io.tmpdir
 * and populates it with the appropriate solrconfig.xml and schema.xml.
 */
private static class SolrInstance {
String name;
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
 * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
 * on localhost at the specified port.
 */
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
public String getSolrConfigFile() {
// masters and slaves start from different solrconfig templates
return (null == masterPort) ? CONF_DIR + "solrconfig-master.xml" : SLAVE_CONFIG;
}
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
// unique per-run base path so parallel/repeated runs do not collide
String home = System.getProperty("java.io.tmpdir")
+ File.separator
+ getClass().getName() + "-" + System.currentTimeMillis();
String role = (null == masterPort) ? "master" : "slave";
homeDir = new File(home + role);
dataDir = new File(home + role, "data");
confDir = new File(home + role, "conf");
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
// copy config with TEST_PORT substitution (masterPort is null for masters)
copyFile(new File(getSolrConfigFile()), new File(confDir, "solrconfig.xml"), masterPort);
copyFile(new File(getSchemaFile()), new File(confDir, "schema.xml"));
}
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
// location of the replication test config templates on the classpath-relative path
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
// solrconfig template used for slaves; contains a TEST_PORT placeholder
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
// one master and one slave jetty/client pair shared by every test method;
// individual tests may stop/restart these, reassigning the fields
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
// servlet context path for both jetty instances
static String context = "/solr";
@BeforeClass
public static void beforeClass() throws Exception {
// Boot the master first: the slave's config needs the master's live port
// for its TEST_PORT substitution, so the order here matters.
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Before
public void setUp() throws Exception {
super.setUp();
// Empty both cores before each test; rQuery(0, ...) polls until the
// delete is actually visible so tests start from a known-empty index.
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
}
@AfterClass
public static void afterClass() throws Exception {
// Stop both jetties, then delete each instance's temp home directory.
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
/**
 * Starts a jetty instance serving the given SolrInstance on an
 * OS-assigned free port (port 0) and returns the running server.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
// point the Solr core at this instance's directories before startup
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
JettySolrRunner runner = new JettySolrRunner("/solr", 0);
runner.start();
return runner;
}
/**
 * Builds an HTTP SolrJ client pointed at localhost on the given port,
 * with generous connection limits for the test's repeated polling.
 */
private static SolrServer createNewSolrServer(int port) {
String url = "http://localhost:" + port + context;
try {
CommonsHttpSolrServer server = new CommonsHttpSolrServer(url);
server.setDefaultMaxConnectionsPerHost(100);
server.setMaxTotalConnections(100);
return server;
} catch (Exception ex) {
// a bad URL is a test-setup bug, not a recoverable condition
throw new RuntimeException(ex);
}
}
/**
 * Adds one document built from alternating field-name / field-value pairs
 * to the given server and returns the update response status code.
 */
int index(SolrServer s, Object... fields) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
int i = 0;
while (i < fields.length) {
doc.addField((String) fields[i], fields[i + 1]);
i += 2;
}
return s.add(doc).getStatus();
}
/**
 * Runs {@code query} against the given server and returns the raw
 * "response" section of the query response as a NamedList.
 *
 * @throws SolrServerException if the query fails
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
// (removed a dead `new SimpleOrderedMap()` initialization that was
// immediately overwritten by the real response)
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", query);
QueryResponse qres = s.query(params);
return qres.getResponse();
}
/**
 * Repeats the query until it reports exactly {@code expectedDocCount} docs,
 * sleeping 100ms between attempts for up to ~30 seconds, then returns the
 * last response (matching or not — callers re-assert the count).
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
int timeSlept = 0;
NamedList res = query(query, server);
SolrDocumentList docList = (SolrDocumentList) res.get("response");
// check BEFORE sleeping so an already-correct count returns immediately
// (the old do/while always paid a 100ms sleep even on the first match)
while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
Thread.sleep(100);
timeSlept += 100;
res = query(query, server);
docList = (SolrDocumentList) res.get("response");
}
return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
// Verifies both index content and config files (schema.xml) replicate
// from master to slave after a master restart.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
// (rQuery polls for up to 30s, giving the slave time to pull the index)
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master; the master must be restarted to load it
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// regenerate the slave config because the master's port changed on restart
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
// the slave can only index "newname" if it replicated the new schema
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
// Like testIndexAndConfigReplication, but also replicates an aliased
// config file (schema-replication2.xml) alongside solrconfig/schema.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema under its own name so it can be replicated as an alias
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
// restart master so it loads the new config
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// regenerate the slave config because the master's port changed on restart
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
// index directly on the slave using the replicated schema's new field
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it up.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test: hit the slave's replication handler over HTTP
// to disable polling (the URL var name "masterUrl" is historical — it
// actually targets the slave jetty)
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
// NOTE: this test is weird — we want to verify the slave DOESN'T replicate,
// so for now just sleep longer than a poll interval and re-check.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
// Verifies an explicit fetchindex request with a masterUrl parameter works
// even when the slave config has no polling configured.
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"));
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
// snappull: tell the slave (via HTTP) to fetch the index from the given master
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterStartup() throws Exception {
// Verifies the 'replicateAfter startup' option: docs committed while the
// slave is down must replicate once the slave comes back up.
//stop slave
slaveJetty.stop();
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
// restart master so it loads the new config
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// regenerate the slave config because the master's port changed on restart
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results; compare() returns null when the two doc lists match
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
// Verifies that when the slave's index version/generation are AHEAD of the
// master's, re-enabling replication still forces a full index copy that
// discards docs written directly to the slave.
int nDocs = 50;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
// turn replication off on the master before committing, so the slave
// cannot pick up the new docs yet
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave
// are higher than on the master, just to make the test harder: each
// add + hard commit bumps the slave's generation.
for (int id = 551; id <= 555; id++) {
index(slaveClient, "id", id, "name", "name = " + id);
slaveClient.commit(true, true);
}
// the doc added directly to the slave must be visible there
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
// re-enable replication on the master
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// ignore close failure — the request itself already went through
//e.printStackTrace();
}
// the slave should do a full copy of the master's index, so the doc with
// id:555 must no longer be present on the slave
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
// Issues a backup command against the master, polls the details command
// until the backup reports success, then opens the snapshot directory as
// a Lucene index and verifies the doc count.
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// fires the backup command over HTTP; any failure message lands in 'fail'
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
// queries the details command and scans the raw XML for a success status
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
// poll for backup success; run() is invoked synchronously on purpose —
// CheckStatus only extends Thread for the volatile result fields
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
// give up after ~2s (10 * 200ms) of polling
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
// the backup is written to a snapshot.* directory under the data dir
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
// open the snapshot directly as a Lucene index and verify all docs made it
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(500, hits.totalHits);
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copy with no TEST_PORT substitution (null port).
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of file using UTF-8. If port is non-null, will be substituted
 * any time "TEST_PORT" is found.
 *
 * NOTE: line terminators are intentionally NOT copied (readLine strips them),
 * preserving the original behavior of this helper.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // FileReader/FileWriter use the platform default charset, contradicting the
  // documented "copy using UTF-8" contract; use explicit UTF-8 streams instead.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
    }
  } finally {
    // Close both streams even if the copy (or in.close()) fails part-way.
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
// Filesystem fixture describing one Solr core (master or slave): creates the
// home/data/conf directories under java.io.tmpdir and copies the appropriate
// solrconfig/schema into place.
private static class SolrInstance {
// Human-readable label; stored but not read anywhere visible in this file.
String name;
// null => this instance is a master; non-null => slave pointing at this port.
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Master and slave use different solrconfig templates.
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
String home = System.getProperty("java.io.tmpdir")
+ File.separator
+ getClass().getName() + "-" + System.currentTimeMillis();
// NOTE(review): "master"/"slave" is appended with no separator, producing a
// directory like "...-<millis>master". Unique per run, but presumably a
// File(home, "master") was intended -- confirm.
if (null == masterPort) {
homeDir = new File(home + "master");
dataDir = new File(home + "master", "data");
confDir = new File(home + "master", "conf");
} else {
homeDir = new File(home + "slave");
dataDir = new File(home + "slave", "data");
confDir = new File(home + "slave", "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
// Install the solrconfig (with TEST_PORT substituted for slaves) and schema.
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/* MergeMethods -- stray merge-tool marker separating two concatenated copies of this file; kept as a comment so it no longer leaves a bare identifier in the source. */
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
// Location of the test config templates copied into each instance's conf dir.
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
// Shared fixtures: one master and one slave Solr core, each with its own
// embedded Jetty and SolrJ client.
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
// Webapp context path used when building client URLs.
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
// Delete everything on the master, then wait (via rQuery's polling) until the
// slave has replicated the empty index, so each test starts from a clean state.
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
}
@BeforeClass
public static void beforeClass() throws Exception {
  // The merged version of this method did not compile: it called
  // super.setUp() and the instance method rQuery() from a static context,
  // and used masterClient/slaveClient before anything created them.
  // Restore the fixture creation that afterClass() tears down: one master
  // and one slave instance, each with an embedded Jetty and a SolrJ client.
  master = new SolrInstance("master", null);
  master.setUp();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());

  slave = new SolrInstance("slave", masterJetty.getLocalPort());
  slave.setUp();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());

  // Start from empty indexes so per-test doc counts are predictable.
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  slaveClient.deleteByQuery("*:*");
  slaveClient.commit();
}
@Override
// NOTE(review): this per-test tearDown stops both jetties and deletes both
// instance homes, but nothing restarts them before the next test method, and
// afterClass() then repeats the same shutdown. This looks like merge damage
// between a per-test and a per-class lifecycle -- confirm which is intended.
public void tearDown() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
@AfterClass
public static void afterClass() throws Exception {
  // Null-guard each fixture: if beforeClass() failed part-way, an NPE here
  // would mask the original failure. Stop what exists, skip what doesn't.
  if (masterJetty != null) masterJetty.stop();
  if (slaveJetty != null) slaveJetty.stop();
  if (master != null) master.tearDown();
  if (slave != null) slave.tearDown();
}
/**
 * Starts an embedded Jetty serving the given instance's home/data dirs on an
 * ephemeral port (0) and returns the running server.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  // Use the shared 'context' field instead of repeating the "/solr" literal,
  // keeping this consistent with createNewSolrServer(), which already does.
  JettySolrRunner jetty = new JettySolrRunner(context, 0);
  jetty.start();
  return jetty;
}
/**
 * Builds a SolrJ client pointed at localhost on the given port, configured
 * with generous connection limits. Any setup failure is rethrown unchecked.
 */
private static SolrServer createNewSolrServer(int port) {
  final String baseUrl = "http://localhost:" + port + context;
  try {
    CommonsHttpSolrServer server = new CommonsHttpSolrServer(baseUrl);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Adds a single document built from alternating (field name, value) pairs
 * and returns the HTTP status of the add request.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument document = new SolrInputDocument();
  int pos = 0;
  while (pos < fields.length) {
    document.addField((String) fields[pos], fields[pos + 1]);
    pos += 2;
  }
  return s.add(document).getStatus();
}
/**
 * Runs {@code query} against server {@code s} and returns the raw response.
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  // The original pre-allocated a SimpleOrderedMap that was immediately
  // overwritten -- dead initialization removed.
  return qres.getResponse();
}
/**
 * Polls {@code server} with {@code query} for up to 30 seconds, until the
 * result count equals {@code expectedDocCount}; returns the last response
 * (which may still have the wrong count if the timeout elapsed -- callers
 * assert on it).
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  // Check before sleeping: the original do/while always paid a 100 ms sleep
  // even when the very first query already had the expected count.
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copy with no TEST_PORT substitution (null port).
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 *
 * NOTE: line terminators are intentionally NOT copied (readLine strips them),
 * preserving the original behavior of this helper.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // FileReader/FileWriter use the platform default charset, contradicting the
  // documented "copy using UTF-8" contract; use explicit UTF-8 streams instead.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
    }
  } finally {
    // Close both streams even if the copy (or in.close()) fails part-way.
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
// Filesystem fixture describing one Solr core (master or slave): creates the
// home/data/conf directories under TEMP_DIR and copies the right
// solrconfig/schema into place.
// NOTE(review): extends AbstractSolrTestCase apparently only to inherit
// TEMP_DIR and recurseDelete, and its setUp/tearDown override the JUnit
// lifecycle without calling super -- confirm that is intentional.
private static class SolrInstance extends AbstractSolrTestCase {
// Human-readable label; stored but not read anywhere visible in this file.
String name;
// null => this instance is a master; non-null => slave pointing at this port.
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Master and slave use different solrconfig templates.
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
if (null == masterPort) {
homeDir = new File(home, "master");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
} else {
homeDir = new File(home, "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
// Install the solrconfig (with TEST_PORT substituted for slaves) and schema.
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
// Location of the test config templates copied into each instance's conf dir.
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
// Shared fixtures: one master and one slave Solr core, each with its own
// embedded Jetty and SolrJ client.
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
// Webapp context path used when building client URLs.
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
// Delete everything on the master, then wait (via rQuery's polling) until the
// slave has replicated the empty index, so each test starts from a clean state.
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
}
@BeforeClass
public static void beforeClass() throws Exception {
  // The merged version of this method did not compile: it called
  // super.setUp() and the instance method rQuery() from a static context,
  // and used masterClient/slaveClient before anything created them.
  // Restore the fixture creation that afterClass() tears down: one master
  // and one slave instance, each with an embedded Jetty and a SolrJ client.
  master = new SolrInstance("master", null);
  master.setUp();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());

  slave = new SolrInstance("slave", masterJetty.getLocalPort());
  slave.setUp();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());

  // Start from empty indexes so per-test doc counts are predictable.
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  slaveClient.deleteByQuery("*:*");
  slaveClient.commit();
}
@Override
// NOTE(review): this per-test tearDown stops both jetties and deletes both
// instance homes, but nothing restarts them before the next test method, and
// afterClass() then repeats the same shutdown. This looks like merge damage
// between a per-test and a per-class lifecycle -- confirm which is intended.
public void tearDown() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
@AfterClass
public static void afterClass() throws Exception {
  // Null-guard each fixture: if beforeClass() failed part-way, an NPE here
  // would mask the original failure. Stop what exists, skip what doesn't.
  if (masterJetty != null) masterJetty.stop();
  if (slaveJetty != null) slaveJetty.stop();
  if (master != null) master.tearDown();
  if (slave != null) slave.tearDown();
}
/**
 * Starts an embedded Jetty serving the given instance's home/data dirs on an
 * ephemeral port (0) and returns the running server.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  // Use the shared 'context' field instead of repeating the "/solr" literal,
  // keeping this consistent with createNewSolrServer(), which already does.
  JettySolrRunner jetty = new JettySolrRunner(context, 0);
  jetty.start();
  return jetty;
}
/**
 * Builds a SolrJ client pointed at localhost on the given port, configured
 * with generous connection limits. Any setup failure is rethrown unchecked.
 */
private static SolrServer createNewSolrServer(int port) {
  final String baseUrl = "http://localhost:" + port + context;
  try {
    CommonsHttpSolrServer server = new CommonsHttpSolrServer(baseUrl);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Adds a single document built from alternating (field name, value) pairs
 * and returns the HTTP status of the add request.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument document = new SolrInputDocument();
  int pos = 0;
  while (pos < fields.length) {
    document.addField((String) fields[pos], fields[pos + 1]);
    pos += 2;
  }
  return s.add(document).getStatus();
}
/**
 * Runs {@code query} against server {@code s} and returns the raw response.
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  // The original pre-allocated a SimpleOrderedMap that was immediately
  // overwritten -- dead initialization removed.
  return qres.getResponse();
}
/**
 * Polls {@code server} with {@code query} for up to 30 seconds, until the
 * result count equals {@code expectedDocCount}; returns the last response
 * (which may still have the wrong count if the timeout elapsed -- callers
 * assert on it).
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  // Check before sleeping: the original do/while always paid a 100 ms sleep
  // even when the very first query already had the expected count.
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* Convenience overload: copy src to dst with no "TEST_PORT" substitution.
   NOTE(review): despite the original "UTF-8" comment, the 3-arg overload
   below uses FileReader/FileWriter, i.e. the platform default charset. */
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of a file using UTF-8. If port is non-null, every occurrence
 * of "TEST_PORT" is replaced with the port number.
 *
 * FIX: (1) BufferedReader.readLine() strips line terminators and the original
 * never wrote them back, so the copy collapsed to one long line; a '\n' is
 * now emitted per line. (2) FileReader/FileWriter used the platform default
 * charset despite the documented UTF-8; explicit UTF-8 streams are used now.
 * (3) streams are closed in finally so an I/O error no longer leaks them.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
      out.write('\n');
    }
  } finally {
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
// Encapsulates one on-disk Solr home (home/data/conf dirs plus config and
// schema files) for either the master or a slave instance.
private static class SolrInstance extends AbstractSolrTestCase {
String name;
// null => this instance is a master; non-null => slave of localhost:masterPort
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* If masterPort is null, this instance is a master -- otherwise this instance
* is a slave, and assumes the master is on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// master and slave get different solrconfig variants
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates a fresh timestamped home dir tree and copies config + schema in,
// substituting TEST_PORT for slaves.
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
if (null == masterPort) {
homeDir = new File(home, "master");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
} else {
homeDir = new File(home, "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
// Deletes the whole home dir tree created by setUp().
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
// FIXME(merge): literal "KeepBothMethods" merge-tool artifact. The file below
// this point is a second, duplicated copy of TestReplicationHandler; two
// top-level classes with the same name cannot compile — delete one copy.
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
// Path to the test config directory shipped with the test resources.
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
// One master and one slave, each with its own Jetty container and SolrJ client.
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
// One-time suite setup: build the master first (its Jetty port is needed to
// configure the slave), then the slave pointing at that port.
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
// Empties the master index (if it holds anything) and blocks until the slave
// has replicated the now-empty index.
public void clearIndexWithReplication() throws Exception {
  NamedList masterRsp = query("*:*", masterClient);
  SolrDocumentList found = (SolrDocumentList) masterRsp.get("response");
  if (found.getNumFound() != 0) {
    masterClient.deleteByQuery("*:*");
    masterClient.commit();
    // wait for replication to propagate the deletion to the slave
    NamedList slaveRsp = rQuery(0, "*:*", slaveClient);
    assertEquals(0, ((SolrDocumentList) slaveRsp.get("response")).getNumFound());
  }
}
// Per-test setup: wipe both indexes and wait until each reports zero docs,
// so every test method starts from a clean state.
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
}
// Per-test teardown: stop both containers before removing the home dirs.
@Override
public void tearDown() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
// Suite-level teardown; runs once after all tests in the class.
@AfterClass
public static void afterClass() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
// Boots a Jetty container for the given Solr home on an OS-assigned port (0).
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
jetty.start();
return jetty;
}
// Builds an HTTP SolrJ client for localhost:<port>; setup failures are
// rethrown unchecked so static callers need no throws clause.
private static SolrServer createNewSolrServer(int port) {
try {
// setup the server...
String url = "http://localhost:" + port + context;
CommonsHttpSolrServer s = new CommonsHttpSolrServer(url);
s.setDefaultMaxConnectionsPerHost(100);
s.setMaxTotalConnections(100);
return s;
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
}
// Adds one document built from alternating (field name, value) pairs and
// returns the HTTP status of the add request.
int index(SolrServer s, Object... fields) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
for (int i = 0; i < fields.length; i += 2) {
doc.addField((String) (fields[i]), fields[i + 1]);
}
return s.add(doc).getStatus();
}
// Runs a single query against the given server and returns the raw response.
// FIX: dropped the dead "new SimpleOrderedMap()" initialization that was
// overwritten unconditionally before ever being read.
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/**
 * Repeats the query until the result holds exactly {@code expectedDocCount}
 * docs, sleeping 100ms between attempts for up to 30 seconds. Returns the
 * last response either way (callers re-assert the count).
 *
 * FIX: the original slept 100ms unconditionally even when the very first
 * query already matched; now the count is checked before sleeping.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copies src to dst with no TEST_PORT substitution.
// Delegates to the 3-arg copyFile with a null port.
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 * <p>
 * Fix: the previous implementation used FileReader/FileWriter, which decode
 * with the platform default charset, contradicting the documented UTF-8
 * contract; explicit UTF-8 streams are used now.
 * NOTE(review): readLine() strips line separators, so the output is the
 * input's lines concatenated without newlines — preserved as-is since the
 * copied XML configs remain well-formed either way.
 *
 * @param src  source file to read
 * @param dst  destination file (overwritten)
 * @param port if non-null, replaces every "TEST_PORT" token
 * @throws IOException on any read/write failure
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  for (String line = in.readLine(); null != line; line = in.readLine()) {
    if (null != port)
      line = line.replace("TEST_PORT", port.toString());
    out.write(line);
  }
  in.close();
  out.close();
}
// Self-contained Solr home (homeDir/conf/data) for one master or slave node.
// A null masterPort means this instance is configured as a master.
private static class SolrInstance extends AbstractSolrTestCase {
String name;
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Master uses solrconfig-master.xml; slave uses the shared SLAVE_CONFIG.
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates a fresh timestamped home dir tree and copies solrconfig/schema
// into it, substituting the master port into the slave config.
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
if (null == masterPort) {
homeDir = new File(home, "master");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
} else {
homeDir = new File(home, "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
// Recursively removes this instance's home directory.
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
// Starts one master and one slave Jetty instance (slave points at the
// master's live port) and builds a SolrJ client for each.
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
// Deletes everything on the master (if non-empty) and waits until the
// slave has replicated the empty index before returning.
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
}
// Per-test reset: empties both indexes directly (no replication involved)
// and waits for each to report zero docs.
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
}
// Stops both Jetty instances and removes both Solr homes after each test.
@Override
public void tearDown() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
// Final cleanup mirror of tearDown for the class-scoped static instances.
@AfterClass
public static void afterClass() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
// Boots a Jetty runner for the given instance's home/data dirs on an
// ephemeral port (0 = pick any free port).
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
jetty.start();
return jetty;
}
// Builds an HTTP SolrJ client for localhost:port under the /solr context.
// Checked exceptions are wrapped as RuntimeException for use in setup code.
private static SolrServer createNewSolrServer(int port) {
try {
// setup the server...
String url = "http://localhost:" + port + context;
CommonsHttpSolrServer s = new CommonsHttpSolrServer(url);
s.setDefaultMaxConnectionsPerHost(100);
s.setMaxTotalConnections(100);
return s;
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
}
// Adds one document built from alternating (field name, value) pairs and
// returns the add response status. No commit is performed here.
int index(SolrServer s, Object... fields) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
for (int i = 0; i < fields.length; i += 2) {
doc.addField((String) (fields[i]), fields[i + 1]);
}
return s.add(doc).getStatus();
}
/**
 * Runs a single query against the given server and returns its raw
 * NamedList response.
 * Fix: dropped a dead {@code new SimpleOrderedMap()} allocation that was
 * immediately overwritten by the real response.
 *
 * @param query Solr query string (q parameter)
 * @param s     server to query
 * @return the raw response NamedList (contains the "response" doc list)
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/** will sleep up to 30 seconds, looking for expectedDocCount */
// Retry-query: polls every 100ms until numFound matches expectedDocCount
// or ~30s elapse; returns the last response either way (callers assert on
// the count themselves, so a timeout surfaces as an assertion failure).
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
int timeSlept = 0;
NamedList res = null;
SolrDocumentList docList = null;
do {
res = query(query, server);
docList = (SolrDocumentList) res.get("response");
timeSlept += 100;
Thread.sleep(100);
} while(docList.getNumFound() != expectedDocCount && timeSlept < 30000);
return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copy with no "TEST_PORT" substitution (port == null).
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // Explicit UTF-8: FileReader/FileWriter use the platform default charset,
  // contradicting the "using UTF-8" contract documented above.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
      // readLine() strips the terminator; restore it so the copied config
      // keeps its line structure instead of collapsing onto one line.
      out.write('\n');
    }
  } finally {
    // Close streams even if the copy fails mid-way.
    IOUtils.closeQuietly(in);
    IOUtils.closeQuietly(out);
  }
}
// Minimal on-disk Solr core (home/conf/data directories plus config files) used
// to run one master or slave node under Jetty for these tests.
private static class SolrInstance extends AbstractSolrTestCase {
String name;
// null => this instance is a master; non-null => slave pointed at this port.
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Master and slave get different solrconfig templates.
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates the home/data/conf directory tree under TEMP_DIR and copies in the
// role-appropriate solrconfig.xml (with TEST_PORT substituted) and schema.xml.
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
if (null == masterPort) {
homeDir = new File(home, "master");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
} else {
homeDir = new File(home, "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
// Removes the whole instance directory tree.
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
Safe
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
/**
 * Deletes every document from the master (if any exist) and waits for
 * replication to propagate the empty index to the slave.
 */
public void clearIndexWithReplication() throws Exception {
  NamedList rsp = query("*:*", masterClient);
  SolrDocumentList found = (SolrDocumentList) rsp.get("response");
  if (found.getNumFound() == 0) {
    return; // already empty — nothing to clear or replicate
  }
  masterClient.deleteByQuery("*:*");
  masterClient.commit();
  // wait for replication to sync the now-empty index to the slave
  NamedList slaveRsp = rQuery(0, "*:*", slaveClient);
  assertEquals(0, ((SolrDocumentList) slaveRsp.get("response")).getNumFound());
}
<<<<<<< MINE
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
}
=======
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
>>>>>>> YOURS
@Override
public void tearDown() throws Exception {
// NOTE(review): this runs after EVERY test, yet the jetties and instances are
// only created once in @BeforeClass — stopping them here looks like it would
// break any subsequent test. Confirm whether this should move to @AfterClass.
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
super.tearDown();
}
@AfterClass
public static void afterClass() throws Exception {
// Final cleanup: stop both Jetty servers and delete the instance directories.
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
// Boots a Jetty server for the given instance on an ephemeral port (0), after
// pointing solr.solr.home / solr.data.dir at the instance's directories.
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
jetty.start();
return jetty;
}
/**
 * Builds an HTTP SolrJ client for the local test server on the given port.
 * Connection limits are raised so concurrent replication traffic is not starved.
 */
private static SolrServer createNewSolrServer(int port) {
  final String url = "http://localhost:" + port + context;
  try {
    CommonsHttpSolrServer server = new CommonsHttpSolrServer(url);
    server.setDefaultMaxConnectionsPerHost(100);
    server.setMaxTotalConnections(100);
    return server;
  } catch (Exception ex) {
    // client construction failure is fatal for the whole test class
    throw new RuntimeException(ex);
  }
}
/**
 * Adds one document built from alternating field-name/value pairs and returns
 * the status code of the add request.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  // fields come in (name, value) pairs
  for (int i = 0; i < fields.length; i += 2) {
    String fieldName = (String) fields[i];
    doc.addField(fieldName, fields[i + 1]);
  }
  return s.add(doc).getStatus();
}
/**
 * Runs {@code query} against server {@code s} and returns the raw response.
 * (The original pre-allocated a SimpleOrderedMap that was immediately
 * overwritten; the response NamedList is now returned directly.)
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/** will sleep up to 30 seconds, looking for expectedDocCount */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  // Poll until the expected count appears or ~30s elapses. Checking BEFORE
  // sleeping avoids the original's unconditional 100ms pause when the count
  // is already correct on the first query.
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copy with no "TEST_PORT" substitution (port == null).
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // Explicit UTF-8: FileReader/FileWriter use the platform default charset,
  // contradicting the "using UTF-8" contract documented above.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
      // readLine() strips the terminator; restore it so the copied config
      // keeps its line structure instead of collapsing onto one line.
      out.write('\n');
    }
  } finally {
    // Close streams even if the copy fails mid-way.
    IOUtils.closeQuietly(in);
    IOUtils.closeQuietly(out);
  }
}
// Minimal on-disk Solr core (home/conf/data directories plus config files) used
// to run one master or slave node under Jetty for these tests.
private static class SolrInstance extends AbstractSolrTestCase {
String name;
// null => this instance is a master; non-null => slave pointed at this port.
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
* if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
* on localhost at the specified port.
*/
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
// Master and slave get different solrconfig templates.
public String getSolrConfigFile() {
String fname = "";
if (null == masterPort)
fname = CONF_DIR + "solrconfig-master.xml";
else
fname = SLAVE_CONFIG;
return fname;
}
// Creates the home/data/conf directory tree under TEMP_DIR and copies in the
// role-appropriate solrconfig.xml (with TEST_PORT substituted) and schema.xml.
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
if (null == masterPort) {
homeDir = new File(home, "master");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
} else {
homeDir = new File(home, "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
}
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
File f = new File(confDir, "solrconfig.xml");
copyFile(new File(getSolrConfigFile()), f, masterPort);
f = new File(confDir, "schema.xml");
copyFile(new File(getSchemaFile()), f);
}
// Removes the whole instance directory tree.
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
/**
 * Empties the master index (if non-empty) and waits until the deletion has
 * replicated to the slave, so tests start from a clean pair of indexes.
 */
public void clearIndexWithReplication() throws Exception {
  NamedList res = query("*:*", masterClient);
  SolrDocumentList docs = (SolrDocumentList)res.get("response");
  if (docs.getNumFound() != 0) {
    masterClient.deleteByQuery("*:*");
    masterClient.commit();
    // wait for replication to sync
    res = rQuery(0, "*:*", slaveClient);
    assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
  }
}
<<<<<<< MINE
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
}
=======
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
>>>>>>> YOURS
/**
 * Per-test teardown. Server shutdown is intentionally NOT done here:
 * the jetties and instances are class-level fixtures created once in
 * beforeClass() and torn down once in afterClass(); stopping them per test
 * would leave subsequent test methods without running servers (and duplicate
 * the @AfterClass shutdown).
 */
@Override
public void tearDown() throws Exception {
  super.tearDown();
}
// One-time shutdown of the static master/slave jetties and removal of their
// on-disk instance directories.
@AfterClass
public static void afterClass() throws Exception {
  masterJetty.stop();
  slaveJetty.stop();
  master.tearDown();
  slave.tearDown();
}
// Starts a Jetty on an ephemeral port (0) for the given instance. The system
// properties steer the Solr core at that instance's home/data dirs; they are
// read at core load, so they must be set before jetty.start().
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
  jetty.start();
  return jetty;
}
// Builds an HTTP SolrJ client against localhost:port + context. Checked
// exceptions are wrapped as RuntimeException since callers are fixtures.
private static SolrServer createNewSolrServer(int port) {
  try {
    // setup the server...
    String url = "http://localhost:" + port + context;
    CommonsHttpSolrServer s = new CommonsHttpSolrServer(url);
    s.setDefaultMaxConnectionsPerHost(100);
    s.setMaxTotalConnections(100);
    return s;
  }
  catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Adds one document built from (name, value) pairs in {@code fields} and
 * returns the add request's status code.
 */
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument document = new SolrInputDocument();
  for (int f = 0; f < fields.length; f += 2) {
    document.addField((String) fields[f], fields[f + 1]);
  }
  return s.add(document).getStatus();
}
/**
 * Runs {@code query} against server {@code s} and returns the raw response
 * NamedList. (Fixed: the original allocated a throwaway SimpleOrderedMap
 * that was immediately overwritten — a dead store.)
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/**
 * Polls the server with {@code query} until the result count equals
 * {@code expectedDocCount} or ~30 seconds elapse, returning the last
 * response either way (callers assert on it). Fixed: the original slept
 * 100ms even when the very first query already matched; we now query first
 * and only sleep between retries.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
@Test
public void testStopPoll() throws Exception {
clearIndexWithReplication();
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
<<<<<<< MINE
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
=======
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
>>>>>>> YOURS
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
<<<<<<< MINE
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
<<<<<<< MINE
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
=======
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
>>>>>>> YOURS
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Convenience overload: copy without TEST_PORT substitution.
private static void copyFile(File src, File dst) throws IOException {
  copyFile(src, dst, null);
}
/**
 * Character copy of src to dst using UTF-8. If port is non-null, every
 * occurrence of "TEST_PORT" in a line is replaced with the port number.
 *
 * NOTE(review): line terminators are intentionally not rewritten (matching
 * the original behavior), so dst is the source's lines concatenated; that is
 * harmless for the XML configs copied here, but confirm before reusing this
 * helper for other file types.
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // Fixed: FileReader/FileWriter use the platform default charset, which
  // contradicted the documented UTF-8 contract; use explicit UTF-8 streams.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      out.write(line);
    }
  } finally {
    // Close both streams even if the copy throws (original leaked on error).
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
/**
 * Manages the on-disk Solr home (home/data/conf) for one master or slave
 * instance used by this test.
 */
private static class SolrInstance extends AbstractSolrTestCase {
  String name;
  // null => this instance is a master; non-null => slave of localhost:masterPort.
  Integer masterPort;
  File homeDir;
  File confDir;
  File dataDir;
  /**
   * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
   * on localhost at the specified port.
   */
  public SolrInstance(String name, Integer port) {
    this.name = name;
    this.masterPort = port;
  }
  public String getHomeDir() {
    return homeDir.toString();
  }
  public String getSchemaFile() {
    return CONF_DIR + "schema-replication1.xml";
  }
  public String getConfDir() {
    return confDir.toString();
  }
  public String getDataDir() {
    return dataDir.toString();
  }
  // Masters and slaves start from different solrconfig templates.
  public String getSolrConfigFile() {
    String fname = "";
    if (null == masterPort)
      fname = CONF_DIR + "solrconfig-master.xml";
    else
      fname = SLAVE_CONFIG;
    return fname;
  }
  // Builds the home/data/conf tree under TEMP_DIR and copies solrconfig
  // (with TEST_PORT substituted for slaves) and schema into place.
  public void setUp() throws Exception {
    System.setProperty("solr.test.sys.prop1", "propone");
    System.setProperty("solr.test.sys.prop2", "proptwo");
    File home = new File(TEMP_DIR,
        getClass().getName() + "-" + System.currentTimeMillis());
    if (null == masterPort) {
      homeDir = new File(home, "master");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    } else {
      homeDir = new File(home, "slave");
      dataDir = new File(homeDir, "data");
      confDir = new File(homeDir, "conf");
    }
    homeDir.mkdirs();
    dataDir.mkdirs();
    confDir.mkdirs();
    File f = new File(confDir, "solrconfig.xml");
    copyFile(new File(getSolrConfigFile()), f, masterPort);
    f = new File(confDir, "schema.xml");
    copyFile(new File(getSchemaFile()), f);
  }
  // Recursively deletes the instance's home directory tree.
  public void tearDown() throws Exception {
    AbstractSolrTestCase.recurseDelete(homeDir);
  }
}
}
Unstructured
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
<<<<<<< MINE
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
=======
>>>>>>> YOURS
// One-time fixture: builds master and slave Solr homes, starts a Jetty for
// each, and creates HTTP clients; the slave is wired to the master's
// ephemeral port.
@BeforeClass
public static void beforeClass() throws Exception {
  master = new SolrInstance("master", null);
  master.setUp();
  masterJetty = createJetty(master);
  masterClient = createNewSolrServer(masterJetty.getLocalPort());
  slave = new SolrInstance("slave", masterJetty.getLocalPort());
  slave.setUp();
  slaveJetty = createJetty(slave);
  slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
<<<<<<< MINE
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
=======
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
>>>>>>> YOURS
}
// One-time shutdown of the static master/slave jetties and removal of their
// on-disk instance directories.
@AfterClass
public static void afterClass() throws Exception {
  masterJetty.stop();
  slaveJetty.stop();
  master.tearDown();
  slave.tearDown();
}
// Starts a Jetty on an ephemeral port (0) for the given instance; the system
// properties steering the Solr core must be set before jetty.start().
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
  System.setProperty("solr.solr.home", instance.getHomeDir());
  System.setProperty("solr.data.dir", instance.getDataDir());
  JettySolrRunner jetty = new JettySolrRunner("/solr", 0);
  jetty.start();
  return jetty;
}
// Builds an HTTP SolrJ client against localhost:port + context; checked
// exceptions are wrapped as RuntimeException since callers are fixtures.
private static SolrServer createNewSolrServer(int port) {
  try {
    // setup the server...
    String url = "http://localhost:" + port + context;
    CommonsHttpSolrServer s = new CommonsHttpSolrServer(url);
    s.setDefaultMaxConnectionsPerHost(100);
    s.setMaxTotalConnections(100);
    return s;
  }
  catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
// Adds one document built from (name, value) pairs in 'fields' and returns
// the add request's status code.
int index(SolrServer s, Object... fields) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  for (int i = 0; i < fields.length; i += 2) {
    doc.addField((String) (fields[i]), fields[i + 1]);
  }
  return s.add(doc).getStatus();
}
/**
 * Runs {@code query} against server {@code s} and returns the raw response
 * NamedList. (Fixed: the original allocated a throwaway SimpleOrderedMap
 * that was immediately overwritten — a dead store.)
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/**
 * Polls the server with {@code query} until the result count equals
 * {@code expectedDocCount} or ~30 seconds elapse, returning the last
 * response either way (callers assert on it). Fixed: the original slept
 * 100ms even when the very first query already matched.
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
<<<<<<< MINE
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
=======
@Test
public void testIndexAndConfigReplication() throws Exception {
>>>>>>> YOURS
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
<<<<<<< MINE
public void testStopPoll() throws Exception {
clearIndexWithReplication();
=======
public void testIndexAndConfigAliasReplication() throws Exception {
>>>>>>> YOURS
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
=======
>>>>>>> YOURS
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
}
/**
 * Verifies an explicit fetchindex command with a masterUrl parameter: the
 * slave runs a config with no polling interval, so replication only happens
 * when the command below is fired against the slave.
 */
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
// nDocs is decremented per test so a stale, reused index is detectable
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
// note: the command is sent to the SLAVE port, with the master's
// replication URL passed as the masterUrl request parameter
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
// opening the stream fires the command; the response body is ignored
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
/**
 * Verifies the 'replicateAfter startup' master option: the slave is stopped,
 * the master is reconfigured and restarted, and the slave (started afterwards)
 * must pull the index that the master published at startup.
 */
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
// nDocs is decremented per test so a stale, reused index is detectable
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// slave config embeds the master port; regenerate it for the restarted master
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
=======
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
>>>>>>> YOURS
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
<<<<<<< MINE
=======
@Test
public void testStopPoll() throws Exception {
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(501, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(501, masterQueryResult.getNumFound());
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(500, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
}
@Test
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"));
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
}
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
//add 50 docs to master
int nDocs = 50;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(50, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
NamedList slaveQueryRsp = rQuery(1, "id:555", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
}
>>>>>>> YOURS
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/** character copy of file using UTF-8; convenience overload with no port substitution */
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * character copy of file using UTF-8. If port is non-null, will be substituted any time "TEST_PORT" is found.
 *
 * @param src  source file to read
 * @param dst  destination file (overwritten)
 * @param port when non-null, replaces every "TEST_PORT" token in the copied text
 * @throws IOException if either file cannot be read or written
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  // Use explicit UTF-8 streams: FileReader/FileWriter silently use the
  // platform default charset, contradicting the documented UTF-8 contract.
  BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(src), "UTF-8"));
  Writer out = new OutputStreamWriter(new FileOutputStream(dst), "UTF-8");
  try {
    for (String line = in.readLine(); null != line; line = in.readLine()) {
      if (null != port)
        line = line.replace("TEST_PORT", port.toString());
      // NOTE: line separators are deliberately not re-emitted, matching the
      // original behavior (the copied XML configs still parse on one line)
      out.write(line);
    }
  } finally {
    // close both streams even if the copy failed, without leaking either
    try {
      in.close();
    } finally {
      out.close();
    }
  }
}
/**
 * Holder for one Solr core's on-disk layout (home/conf/data dirs) used by
 * these tests. A null masterPort means the instance is a master; otherwise it
 * is a slave pointed at a master on localhost at that port.
 */
private static class SolrInstance {
String name;
Integer masterPort;
File homeDir;
File confDir;
File dataDir;
/**
 * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is
 * on localhost at the specified port.
 */
public SolrInstance(String name, Integer port) {
this.name = name;
this.masterPort = port;
}
public String getHomeDir() {
return homeDir.toString();
}
public String getSchemaFile() {
return CONF_DIR + "schema-replication1.xml";
}
public String getConfDir() {
return confDir.toString();
}
public String getDataDir() {
return dataDir.toString();
}
public String getSolrConfigFile() {
// masters and slaves start from different solrconfig templates
return (null == masterPort) ? CONF_DIR + "solrconfig-master.xml" : SLAVE_CONFIG;
}
public void setUp() throws Exception {
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
File home = new File(TEMP_DIR,
getClass().getName() + "-" + System.currentTimeMillis());
// masters live under "master", slaves under "slave"
homeDir = new File(home, (null == masterPort) ? "master" : "slave");
dataDir = new File(homeDir, "data");
confDir = new File(homeDir, "conf");
homeDir.mkdirs();
dataDir.mkdirs();
confDir.mkdirs();
// materialize solrconfig.xml (with the master port substituted in, if any)
// and schema.xml into the conf dir
copyFile(new File(getSolrConfigFile()), new File(confDir, "solrconfig.xml"), masterPort);
copyFile(new File(getSchemaFile()), new File(confDir, "schema.xml"));
}
public void tearDown() throws Exception {
AbstractSolrTestCase.recurseDelete(homeDir);
}
}
}/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.TestDistributedSearch;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.net.URL;
/**
* Test for ReplicationHandler
*
* @version $Id$
* @since 1.4
*/
public class TestReplicationHandler extends SolrTestCaseJ4 {
private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator;
private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml";
static JettySolrRunner masterJetty, slaveJetty;
static SolrServer masterClient, slaveClient;
static SolrInstance master = null, slave = null;
static String context = "/solr";
<<<<<<< MINE
// number of docs to index... decremented for each test case to tell if we accidentally reuse
// index from previous test method
static int nDocs = 500;
=======
>>>>>>> YOURS
/**
 * Starts one master and one slave core (each with its own jetty + client).
 * The master must come up first: the slave's setup needs the master's port.
 */
@BeforeClass
public static void beforeClass() throws Exception {
master = new SolrInstance("master", null);
master.setUp();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// slave is constructed with the live master port so its config can point at it
slave = new SolrInstance("slave", masterJetty.getLocalPort());
slave.setUp();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
<<<<<<< MINE
public void clearIndexWithReplication() throws Exception {
NamedList res = query("*:*", masterClient);
SolrDocumentList docs = (SolrDocumentList)res.get("response");
if (docs.getNumFound() != 0) {
masterClient.deleteByQuery("*:*");
masterClient.commit();
// wait for replication to sync
res = rQuery(0, "*:*", slaveClient);
assertEquals(0, ((SolrDocumentList) res.get("response")).getNumFound());
}
=======
@Before
public void setUp() throws Exception {
super.setUp();
masterClient.deleteByQuery("*:*");
masterClient.commit();
rQuery(0, "*:*", masterClient);
slaveClient.deleteByQuery("*:*");
slaveClient.commit();
rQuery(0, "*:*", slaveClient);
>>>>>>> YOURS
}
/** Stops both jetties and deletes both instances' home directories. */
@AfterClass
public static void afterClass() throws Exception {
masterJetty.stop();
slaveJetty.stop();
master.tearDown();
slave.tearDown();
}
/**
 * Starts a jetty on an ephemeral port (0) serving the given instance's core.
 * The solr home/data system properties must be set before the core loads.
 */
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {
System.setProperty("solr.solr.home", instance.getHomeDir());
System.setProperty("solr.data.dir", instance.getDataDir());
JettySolrRunner server = new JettySolrRunner("/solr", 0);
server.start();
return server;
}
/**
 * Builds an HTTP SolrJ client for localhost on the given port, with generous
 * connection limits. Any setup failure is rethrown as a RuntimeException.
 */
private static SolrServer createNewSolrServer(int port) {
try {
// setup the server...
final String serverUrl = "http://localhost:" + port + context;
CommonsHttpSolrServer client = new CommonsHttpSolrServer(serverUrl);
client.setDefaultMaxConnectionsPerHost(100);
client.setMaxTotalConnections(100);
return client;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
 * Adds one document built from (field name, value) pairs to the given server
 * and returns the add response status.
 */
int index(SolrServer s, Object... fields) throws Exception {
SolrInputDocument document = new SolrInputDocument();
// fields arrive as alternating name/value pairs
for (int idx = 0; idx < fields.length; idx += 2) {
document.addField((String) fields[idx], fields[idx + 1]);
}
return s.add(document).getStatus();
}
/**
 * Runs the given query string against the server and returns the raw
 * response NamedList.
 */
NamedList query(String query, SolrServer s) throws SolrServerException {
  // (removed a dead store: the result was previously initialized to a fresh
  // SimpleOrderedMap that was immediately overwritten)
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", query);
  QueryResponse qres = s.query(params);
  return qres.getResponse();
}
/**
 * will sleep up to 30 seconds, looking for expectedDocCount
 *
 * @param expectedDocCount numFound value to wait for
 * @param query            query string to run
 * @param server           server to query
 * @return the last query response (whether or not the count was reached)
 */
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
  int timeSlept = 0;
  // query first, sleep only if the count hasn't been reached yet -- the
  // previous do/while always slept 100ms even when the first query matched
  NamedList res = query(query, server);
  SolrDocumentList docList = (SolrDocumentList) res.get("response");
  while (docList.getNumFound() != expectedDocCount && timeSlept < 30000) {
    Thread.sleep(100);
    timeSlept += 100;
    res = query(query, server);
    docList = (SolrDocumentList) res.get("response");
  }
  return res;
}
<<<<<<< MINE
@Test
public void testReplicateAfterWrite2Slave() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++) {
index(masterClient, "id", i, "name", "name = " + i);
}
=======
@Test
public void testIndexAndConfigReplication() throws Exception {
>>>>>>> YOURS
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=disableReplication";
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// Make sure that both the index version and index generation on the slave is
// higher than that of the master, just to make the test harder.
index(slaveClient, "id", 551, "name", "name = " + 551);
slaveClient.commit(true, true);
index(slaveClient, "id", 552, "name", "name = " + 552);
slaveClient.commit(true, true);
index(slaveClient, "id", 553, "name", "name = " + 553);
slaveClient.commit(true, true);
index(slaveClient, "id", 554, "name", "name = " + 554);
slaveClient.commit(true, true);
index(slaveClient, "id", 555, "name", "name = " + 555);
slaveClient.commit(true, true);
//this doc is added to slave so it should show an item w/ that result
SolrDocumentList slaveQueryResult = null;
NamedList slaveQueryRsp;
slaveQueryRsp = rQuery(1, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(1, slaveQueryResult.getNumFound());
masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=enableReplication";
url = new URL(masterUrl);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
//the slave should have done a full copy of the index so the doc with id:555 should not be there in the slave now
slaveQueryRsp = rQuery(0, "id:555", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(0, slaveQueryResult.getNumFound());
// make sure we replicated the correct index from the master
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
}
@Test
public void testIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
=======
assertEquals(500, slaveQueryResult.getNumFound());
>>>>>>> YOURS
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
}
@Test
<<<<<<< MINE
public void testStopPoll() throws Exception {
clearIndexWithReplication();
=======
public void testIndexAndConfigAliasReplication() throws Exception {
>>>>>>> YOURS
// Test:
// setup master/slave.
// stop polling on slave, add a doc to master and verify slave hasn't picked it.
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
=======
NamedList masterQueryRsp = rQuery(500, "*:*", masterClient);
>>>>>>> YOURS
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
<<<<<<< MINE
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
=======
NamedList slaveQueryRsp = rQuery(500, "*:*", slaveClient);
>>>>>>> YOURS
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
<<<<<<< MINE
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// start stop polling test
String slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=disablepoll";
URL url = new URL(slaveURL);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
index(masterClient, "id", 501, "name", "name = " + 501);
masterClient.commit();
//get docs from master and check if number is equal to master
masterQueryRsp = rQuery(nDocs+1, "*:*", masterClient);
masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs+1, masterQueryResult.getNumFound());
// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
Thread.sleep(3000);
//get docs from slave and check if number is not equal to master; polling is disabled
slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
// re-enable replication
slaveURL = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=enablepoll";
url = new URL(slaveURL);
stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
//e.printStackTrace();
}
=======
>>>>>>> YOURS
slaveQueryRsp = rQuery(nDocs+1, "*:*", slaveClient);
slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs+1, slaveQueryResult.getNumFound());
}
@Test
// Verifies a one-shot replication triggered by the "fetchindex" command with
// an explicit masterUrl parameter, using a slave config that has no polling
// interval (so the slave never pulls on its own). Restores the normal slave
// config at the end so later tests see a polling slave again.
public void testSnapPullWithMasterUrl() throws Exception {
//change solrconfig on slave
//this has no entry for pollinginterval
copyFile(new File(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
masterClient.deleteByQuery("*:*");
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
// snappull
String masterUrl = "http://localhost:" + slaveJetty.getLocalPort() + "/solr/replication?command=fetchindex&masterUrl=";
masterUrl += "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication";
// opening the URL issues the fetchindex command; the response body is unused
URL url = new URL(masterUrl);
InputStream stream = url.openStream();
try {
stream.close();
} catch (IOException e) {
// deliberately ignored: the command was already sent; a failed close is harmless
//e.printStackTrace();
}
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: at this point, the slave is not polling any more
// restore it.
copyFile(new File(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
// Verifies the 'replicateAfter startup' master option: with the slave down,
// index on the master, switch the master to a startup-replicating config and
// restart it, then bring the slave up and check it receives the index.
// Reverts the master to the normal config afterwards so later tests are
// unaffected.
public void testReplicateAfterStartup() throws Exception {
//stop slave
slaveJetty.stop();
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//change solrconfig having 'replicateAfter startup' option on master
copyFile(new File(CONF_DIR + "solrconfig-master2.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
// slave config must point at the master's (possibly new) port
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
// NOTE: the master only replicates after startup now!
// revert that change.
copyFile(new File(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
//start slave
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
}
@Test
public void testIndexAndConfigAliasReplication() throws Exception {
clearIndexWithReplication();
nDocs--;
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, masterQueryResult.getNumFound());
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
copyFile(new File(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort());
slaveJetty.stop();
slaveJetty = createJetty(slave);
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
<<<<<<< MINE
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
=======
NamedList masterQueryRsp2 = rQuery(1, "*:*", masterClient);
SolrDocumentList masterQueryResult2 = (SolrDocumentList) masterQueryRsp2.get("response");
assertEquals(1, masterQueryResult2.getNumFound());
>>>>>>> YOURS
NamedList slaveQueryRsp2 = rQuery(1, "*:*", slaveClient);
SolrDocumentList slaveQueryResult2 = (SolrDocumentList) slaveQueryRsp2.get("response");
assertEquals(1, slaveQueryResult2.getNumFound());
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
// Merge conflict resolved in favor of MINE (empty): the YOURS side
// re-introduced older, hard-coded-500-doc copies of testStopPoll,
// testSnapPullWithMasterUrl, testReplicateAfterStartup and
// testReplicateAfterWrite2Slave. Updated nDocs-based versions of all four
// already exist earlier in this class, so keeping the YOURS block would
// create duplicate method definitions (a compile error) and resurrect stale
// test logic.
@Test
public void testBackup() throws Exception {
masterJetty.stop();
copyFile(new File(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml"));
masterJetty = createJetty(master);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
nDocs--;
masterClient.deleteByQuery("*:*");
for (int i = 0; i < nDocs; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
class BackupThread extends Thread {
volatile String fail = null;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
BackupThread backupThread = new BackupThread();
backupThread.start();
File dataDir = new File(master.getDataDir());
class CheckStatus extends Thread {
volatile String fail = null;
volatile String response = null;
volatile boolean success = false;
public void run() {
String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS;
URL url;
InputStream stream = null;
try {
url = new URL(masterUrl);
stream = url.openStream();
response = IOUtils.toString(stream);
if(response.contains("<str name=\"status\">success</str>")) {
success = true;
}
stream.close();
} catch (Exception e) {
fail = e.getMessage();
} finally {
IOUtils.closeQuietly(stream);
}
};
};
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
while(true) {
checkStatus.run();
if(checkStatus.fail != null) {
fail(checkStatus.fail);
}
if(checkStatus.success) {
break;
}
Thread.sleep(200);
if(waitCnt == 10) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
if(backupThread.fail != null) {
fail(backupThread.fail);
}
File[] files = dataDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if(name.startsWith("snapshot")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
<<<<<<< MINE
assertEquals(nDocs, hits.totalHits);
=======
assertEquals(500, hits.totalHits);
>>>>>>> YOURS
searcher.close();
dir.close();
}
/* character copy of file using UTF-8 */
// Delegates to the port-substituting overload with no substitution (null port).
private static void copyFile(File src, File dst) throws IOException {
copyFile(src, dst, null);
}
/**
 * Character copy of a text file. If port is non-null, "TEST_PORT" is replaced
 * with it any time it is found.
 * <p>
 * NOTE(review): despite the old "UTF-8" comment, FileReader/FileWriter use the
 * platform default charset — confirm before relying on UTF-8 here. Also note
 * that line terminators are dropped (lines are written back-to-back), which is
 * the long-standing behavior and is harmless for the XML configs this copies.
 *
 * @param src  source file to read
 * @param dst  destination file, overwritten
 * @param port if non-null, every literal "TEST_PORT" is replaced with it
 * @throws IOException if reading or writing fails
 */
private static void copyFile(File src, File dst, Integer port) throws IOException {
  BufferedReader in = new BufferedReader(new FileReader(src));
  try {
    Writer out = new FileWriter(dst);
    try {
      for (String line = in.readLine(); null != line; line = in.readLine()) {
        if (null != port)
          line = line.replace("TEST_PORT", port.toString());
        out.write(line);
      }
    } finally {
      // close in finally so an IOException mid-copy doesn't leak the handle
      out.close();
    }
  } finally {
    in.close();
  }
}
/**
 * On-disk layout (home/conf/data directories plus solrconfig.xml and
 * schema.xml) for one Solr core used by these tests, acting either as a
 * master or as a slave.
 */
private static class SolrInstance {
  String name;
  Integer masterPort;
  File homeDir;
  File confDir;
  File dataDir;

  /**
   * if masterPort is null, this instance is a master -- otherwise this
   * instance is a slave, and assumes the master is on localhost at the
   * specified port.
   */
  public SolrInstance(String name, Integer port) {
    this.name = name;
    this.masterPort = port;
  }

  public String getHomeDir() {
    return homeDir.toString();
  }

  public String getSchemaFile() {
    return CONF_DIR + "schema-replication1.xml";
  }

  public String getConfDir() {
    return confDir.toString();
  }

  public String getDataDir() {
    return dataDir.toString();
  }

  public String getSolrConfigFile() {
    // masters get the master config; slaves get the slave config template
    return (null == masterPort)
        ? CONF_DIR + "solrconfig-master.xml"
        : SLAVE_CONFIG;
  }

  public void setUp() throws Exception {
    System.setProperty("solr.test.sys.prop1", "propone");
    System.setProperty("solr.test.sys.prop2", "proptwo");
    File home = new File(TEMP_DIR,
                         getClass().getName() + "-" + System.currentTimeMillis());
    // masters and slaves differ only in the name of their home subdirectory
    String role = (null == masterPort) ? "master" : "slave";
    homeDir = new File(home, role);
    dataDir = new File(homeDir, "data");
    confDir = new File(homeDir, "conf");
    homeDir.mkdirs();
    dataDir.mkdirs();
    confDir.mkdirs();
    // install the config (substituting the master port for slaves) and schema
    File f = new File(confDir, "solrconfig.xml");
    copyFile(new File(getSolrConfigFile()), f, masterPort);
    f = new File(confDir, "schema.xml");
    copyFile(new File(getSchemaFile()), f);
  }

  public void tearDown() throws Exception {
    AbstractSolrTestCase.recurseDelete(homeDir);
  }
}
}
Diff Result
No diff
Case 33 - java_lucenesolr.rev_4469c_f32ac..CompoundFileDirectory.java
Base
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
// underlying directory that physically holds the compound file
private final Directory directory;
// name of the compound file within 'directory'
private final String fileName;
// default buffer size handed to openInput(String) when none is given
private final int readBufferSize;
// sub-file name (segment prefix stripped) -> slice; null after close()
private Map<String,FileEntry> entries;
private boolean openForWrite;
// placeholder value for 'entries' while open for write; never holds real data
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
// lazily created by initWriter() on the first createOutput() call
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
// Switches this instance into write mode; entries stays at the SENTINEL map.
protected final void initForWrite() {
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
// current format: the entry table lives in a separate entries file
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
// Reads the inline entry table used by 3.x (and pre-3.1) compound files.
// Lengths are not stored in the old format; each entry's length is derived
// from the offset of the entry that follows it.
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
// Returns the directory that physically contains the compound file.
public Directory getDirectory() {
return directory;
}
// Returns the name of the compound file itself.
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
ensureOpen();
entries = null;
isOpen = false;
// in write mode, closing the writer flushes and finalizes the compound file
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id) throws IOException {
// Default to readBufferSize passed in when we were opened
return openInput(id, readBufferSize);
}
@Override
public synchronized IndexInput openInput(String id, int readBufferSize) throws IOException {
ensureOpen();
assert !openForWrite;
// entries are keyed without the segment prefix, so strip it from the lookup
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
// (stored keys have the prefix stripped, so re-prepend it for callers)
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// NOTE(review): 'name' is deliberately ignored; every sub-file shares the
// modification time of the compound file itself.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// NOTE(review): 'fileLenght' is the (misspelled) CompoundFileWriter API name
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
initWriter();
return writer.createOutput(name);
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
// NOTE: final to make nested compounding impossible.
// NOTE(review): unlike openCompoundInput above, this method is not actually
// declared final despite the comment -- confirm intent.
throw new UnsupportedOperationException();
}
// Lazily creates the CompoundFileWriter; only legal in write mode before any
// entries have been recorded.
private final void initWriter() {
assert openForWrite;
assert entries == SENTINEL;
if (writer == null) {
writer = new CompoundFileWriter(directory, fileName);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* <p>
* In this variant the CompoundFileWriter is created lazily by initWriter()
* on the first {@link #createOutput(String)} call, and nested compound files
* are rejected outright.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
// Physical directory holding the compound (.cfs) file.
private final Directory directory;
// Name of the compound file within the underlying directory.
private final String fileName;
// Default buffer size, used when openInput(String) is called without one.
private final int readBufferSize;
// filename -> slice table; SENTINEL while open for write, null after close().
private Map<String,FileEntry> entries;
private boolean openForWrite;
// Marker value distinguishing "open for write" from a real entry table.
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
// Created lazily by initWriter() on the first createOutput() call.
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
// Nesting compound files is not supported in this variant.
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
/** Marks this directory as open for writing; entries are tracked by the writer. */
protected final void initForWrite() {
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
// Current format: the entry table lives in a separate entries file.
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
/** Reads the legacy entry table that was stored inline in the CFS file itself. */
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that holds the compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of the compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
// NOTE(review): ensureOpen() here makes a second close() throw; the
// "Left" variant in this corpus tolerates double close instead.
ensureOpen();
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id) throws IOException {
// Default to readBufferSize passed in when we were opened
return openInput(id, readBufferSize);
}
@Override
public synchronized IndexInput openInput(String id, int readBufferSize) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
// NOTE(review): fileName.indexOf('.') is -1 when fileName has no
// extension, which would make substring throw - assumes a '.' exists.
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// All sub-files share the container file's timestamp; "name" is ignored.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// NOTE(review): "fileLenght" is a typo in the writer's API, kept as-is.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
/** Creates a new sub-file; lazily initializes the writer on first use. */
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
initWriter();
return writer.createOutput(name);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
// NOTE: final to make nested compounding impossible.
// NOTE(review): comment says "final" but this method is not declared final.
throw new UnsupportedOperationException();
}
/** Lazily creates the CompoundFileWriter; requires initForWrite() was called. */
private final void initWriter() {
assert openForWrite;
assert entries == SENTINEL;
if (writer == null) {
writer = new CompoundFileWriter(directory, fileName);
}
}
}
Left
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* <p>
* In this variant the CompoundFileWriter is created eagerly in
* {@link #initForWrite()}, and nested compound files can be opened for
* reading via {@link #openCompoundInput(String, int)}.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
// Physical directory holding the compound (.cfs) file.
private final Directory directory;
// Name of the compound file within the underlying directory.
private final String fileName;
// Default buffer size, used when openInput(String) is called without one.
private final int readBufferSize;
// filename -> slice table; SENTINEL while open for write, null after close().
private Map<String,FileEntry> entries;
private boolean openForWrite;
// Marker value distinguishing "open for write" from a real entry table.
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
// Assigned eagerly in initForWrite(); null while reading.
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
/** Opens this directory for writing; eagerly creates the backing writer. */
protected final void initForWrite() {
// Nesting compound files for write is not supported.
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
// Current format: the entry table lives in a separate entries file.
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
/** Reads the legacy entry table that was stored inline in the CFS file itself. */
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that holds the compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of the compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id) throws IOException {
// Default to readBufferSize passed in when we were opened
return openInput(id, readBufferSize);
}
@Override
public synchronized IndexInput openInput(String id, int readBufferSize) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
// NOTE(review): fileName.indexOf('.') is -1 when fileName has no
// extension, which would make substring throw - assumes a '.' exists.
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// All sub-files share the container file's timestamp; "name" is ignored.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// NOTE(review): "fileLenght" is a typo in the writer's API, kept as-is.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
// NOTE(review): writer is only assigned in initForWrite(); calling this on
// a read-opened directory would NPE - confirm callers guarantee the mode.
return writer.createOutput(name);
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
/** Opens a nested compound file stored as a sub-file of this one, read-only. */
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
/** Read-only view of a compound file that is itself stored inside this CFS. */
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
// Offset/length of the nested CFS within the outer compound stream.
private final long cfsOffset;
private final long cfsLength;
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input,
CompoundFileDirectory.this, fileName));
} finally {
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length,
int readBufferSize) throws IOException {
// Translate nested-relative offsets to outer-file offsets.
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* <p>
* In this variant the CompoundFileWriter is created eagerly in
* {@link #initForWrite()}, and nested compound files can be opened for
* reading via {@link #openCompoundInput(String, int)}.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
// Physical directory holding the compound (.cfs) file.
private final Directory directory;
// Name of the compound file within the underlying directory.
private final String fileName;
// Default buffer size, used when openInput(String) is called without one.
private final int readBufferSize;
// filename -> slice table; SENTINEL while open for write, null after close().
private Map<String,FileEntry> entries;
private boolean openForWrite;
// Marker value distinguishing "open for write" from a real entry table.
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
// Assigned eagerly in initForWrite(); null while reading.
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
/** Opens this directory for writing; eagerly creates the backing writer. */
protected final void initForWrite() {
// Nesting compound files for write is not supported.
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
// Current format: the entry table lives in a separate entries file.
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
/** Reads the legacy entry table that was stored inline in the CFS file itself. */
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that holds the compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of the compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id) throws IOException {
// Default to readBufferSize passed in when we were opened
return openInput(id, readBufferSize);
}
@Override
public synchronized IndexInput openInput(String id, int readBufferSize) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
// NOTE(review): fileName.indexOf('.') is -1 when fileName has no
// extension, which would make substring throw - assumes a '.' exists.
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// All sub-files share the container file's timestamp; "name" is ignored.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// NOTE(review): "fileLenght" is a typo in the writer's API, kept as-is.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
// NOTE(review): writer is only assigned in initForWrite(); calling this on
// a read-opened directory would NPE - confirm callers guarantee the mode.
return writer.createOutput(name);
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
/** Opens a nested compound file stored as a sub-file of this one, read-only. */
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
/** Read-only view of a compound file that is itself stored inside this CFS. */
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
// Offset/length of the nested CFS within the outer compound stream.
private final long cfsOffset;
private final long cfsLength;
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input,
CompoundFileDirectory.this, fileName));
} finally {
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length,
int readBufferSize) throws IOException {
// Translate nested-relative offsets to outer-file offsets.
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
}
Right
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
// absolute start position of the sub-file within the compound stream
long offset;
// length of the sub-file in bytes
long length;
}
private final Directory directory;
private final String fileName;
protected final int readBufferSize;
private Map<String,FileEntry> entries;
private boolean openForWrite;
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
// Nesting compound files is not supported.
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
// Derive the read buffer size from the supplied I/O context.
this.readBufferSize = BufferedIndexInput.bufferSize(context);
this.isOpen = false;
}
/** Initialize with a map of filename->slices; marks the directory open for reading. */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
/** Marks this directory as open for writing; SENTINEL signals "no read entries". */
protected final void initForWrite() {
openForWrite = true;
isOpen = true;
entries = SENTINEL;
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
// Current format: the entry table lives in a separate entries file,
// opened with READONCE since it is consumed in a single pass.
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
/**
 * Reads the entry table of a pre-current-format (3.x) compound file.
 * Legacy entries store only offsets; each entry's length is derived from the
 * next entry's offset (or the stream length for the last entry).
 *
 * @param firstInt the value already consumed from the stream by readEntries()
 * @throws CorruptIndexException on an unknown version or an out-of-range offset
 */
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
    int firstInt) throws CorruptIndexException, IOException {
  final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
  final int count;
  final boolean stripSegmentName;
  if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
    if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
      throw new CorruptIndexException("Incompatible format version: "
          + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
    }
    // It's a post-3.1 index, read the count.
    count = stream.readVInt();
    stripSegmentName = false;
  } else {
    // Pre-3.1: the first VInt was the entry count itself.
    count = firstInt;
    stripSegmentName = true;
  }
  // read the directory and init files
  long streamLength = stream.length();
  FileEntry entry = null;
  for (int i=0; i<count; i++) {
    long offset = stream.readLong();
    if (offset < 0 || offset > streamLength) {
      throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
    }
    String id = stream.readString();
    if (stripSegmentName) {
      // Fix the id to not include the segment names. This is relevant for
      // pre-3.1 indexes.
      id = IndexFileNames.stripSegmentName(id);
    }
    if (entry != null) {
      // set length of the previous entry
      entry.length = offset - entry.offset;
    }
    entry = new FileEntry();
    entry.offset = offset;
    entries.put(id, entry);
  }
  // set the length of the final entry
  if (entry != null) {
    entry.length = streamLength - entry.offset;
  }
  return entries;
}
/** Returns the underlying directory that physically stores this compound file. */
public Directory getDirectory() {
  return directory;
}
/** Returns the name of this compound file within the underlying directory. */
public String getName() {
  return fileName;
}
@Override
public synchronized void close() throws IOException {
  // ensureOpen() rejects use after close, so double-close is an error in this variant.
  ensureOpen();
  // Drop the entry table before closing the writer so readers fail fast.
  entries = null;
  isOpen = false;
  if (writer != null) {
    // A non-null writer implies this directory was opened for writing.
    assert openForWrite;
    writer.close();
  }
}
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
  ensureOpen();
  assert !openForWrite;
  // Entry ids are stored without the segment name; normalize the lookup key.
  id = IndexFileNames.stripSegmentName(id);
  final FileEntry entry = entries.get(id);
  if (entry == null)
    throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
  // NOTE: the IOContext parameter is not used here; the slice is sized by readBufferSize.
  return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file.
 * Implementations must honor the given absolute offset and length. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
  ensureOpen();
  if (writer != null) {
    // Write mode: the writer knows the pending sub-files.
    return writer.listAll();
  }
  // Read mode: entry ids were stored with the segment name stripped, so
  // prepend it (everything before the first '.') on the way back out.
  final String segment = fileName.substring(0, fileName.indexOf('.'));
  final String[] names = entries.keySet().toArray(new String[entries.size()]);
  for (int i = 0; i < names.length; i++) {
    names[i] = segment + names[i];
  }
  return names;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
  ensureOpen();
  // In write mode, delegate to the writer; otherwise consult the entry
  // table, stripping the segment name the same way openInput does.
  return writer != null
      ? writer.fileExists(name)
      : entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
  ensureOpen();
  // All sub-files share the compound file's timestamp: the requested sub-file
  // name is deliberately ignored and the CFS itself is queried.
  return directory.fileModified(fileName);
}
/** Not implemented: individual sub-files of a compound file cannot be deleted.
 * @throws UnsupportedOperationException always */
@Override
public void deleteFile(String name) {
  throw new UnsupportedOperationException();
}
/** Not implemented: sub-files of a compound file cannot be renamed.
 * @throws UnsupportedOperationException always */
public void renameFile(String from, String to) {
  throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
 * @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
  ensureOpen();
  if (this.writer != null) {
    // NOTE(review): "fileLenght" mirrors the (misspelled) method name on
    // CompoundFileWriter as called here — verify against that class before
    // considering a rename; this call site cannot be fixed in isolation.
    return writer.fileLenght(name);
  }
  FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
  if (e == null)
    throw new FileNotFoundException(name);
  return e.length;
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
  ensureOpen();
  // Lazily create the CompoundFileWriter on the first output request.
  initWriter();
  return writer.createOutput(name, context);
}
/** Not implemented
 * @throws UnsupportedOperationException always */
@Override
public void sync(Collection<String> names) throws IOException {
  throw new UnsupportedOperationException();
}
/** Not implemented: locking is meaningless inside a read-only compound file.
 * @throws UnsupportedOperationException always */
@Override
public Lock makeLock(String name) {
  throw new UnsupportedOperationException();
}
/** Not implemented
 * @throws UnsupportedOperationException always */
@Override
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
  // NOTE: final to make nested compounding impossible.
  throw new UnsupportedOperationException();
}
/** Not implemented
 * @throws UnsupportedOperationException always */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
    throws IOException {
  // NOTE(review): the original comment claimed this method is final like
  // openCompoundInput, but it is not declared final — consider adding the
  // modifier so nested compounding truly cannot be re-enabled by a subclass.
  throw new UnsupportedOperationException();
}
/** Creates the CompoundFileWriter on first use; callable only in write mode. */
private final void initWriter() {
  assert openForWrite;
  assert entries == SENTINEL;
  if (writer != null) {
    return; // already initialized by an earlier createOutput() call
  }
  writer = new CompoundFileWriter(directory, fileName);
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
  // NOTE(review): this class appears multiple times verbatim in this file
  // (duplicate copies of the same compilation unit around merge markers);
  // only one copy of org.apache.lucene.store.CompoundFileDirectory can
  // exist — confirm which copy is authoritative before building.
  /** Offset/Length for a slice inside of a compound file */
  public static final class FileEntry {
    long offset;   // absolute offset of the sub-file within the compound stream
    long length;   // length of the sub-file in bytes
  }
  private final Directory directory;
  private final String fileName;
  protected final int readBufferSize;
  private Map<String,FileEntry> entries;
  private boolean openForWrite;
  // Immutable sentinel meaning "opened for write"; see initForWrite()/initWriter().
  private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
  private CompoundFileWriter writer;
  /**
   * Create a new CompoundFileDirectory.
   * <p>
   * NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
   */
  public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
    // Nesting compound files is not supported; reject it early.
    assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
    this.directory = directory;
    this.fileName = fileName;
    this.readBufferSize = BufferedIndexInput.bufferSize(context);
    this.isOpen = false;
  }
  /** Initialize with a map of filename->slices */
  protected final void initForRead(Map<String,FileEntry> entries) {
    this.entries = entries;
    this.isOpen = true;
    this.openForWrite = false;
  }
  protected final void initForWrite() {
    this.entries = SENTINEL;
    this.openForWrite = true;
    this.isOpen = true;
  }
  /** Helper method that reads CFS entries from an input stream */
  public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
    // read the first VInt. If it is negative, it's the version number
    // otherwise it's the count (pre-3.1 indexes)
    final int firstInt = stream.readVInt();
    if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
      IndexInput input = null;
      try {
        input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
            IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
        final int readInt = input.readInt(); // unused right now
        assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
        final int numEntries = input.readVInt();
        final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
            numEntries);
        for (int i = 0; i < numEntries; i++) {
          final FileEntry fileEntry = new FileEntry();
          mapping.put(input.readString(), fileEntry);
          fileEntry.offset = input.readLong();
          fileEntry.length = input.readLong();
        }
        return mapping;
      } finally {
        IOUtils.closeSafely(true, input);
      }
    }
    // TODO remove once 3.x is not supported anymore
    return readLegacyEntries(stream, firstInt);
  }
  // Legacy (3.x) entry table: lengths are derived from consecutive offsets.
  private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
      int firstInt) throws CorruptIndexException, IOException {
    final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
    final int count;
    final boolean stripSegmentName;
    if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
      if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
        throw new CorruptIndexException("Incompatible format version: "
            + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
      }
      // It's a post-3.1 index, read the count.
      count = stream.readVInt();
      stripSegmentName = false;
    } else {
      count = firstInt;
      stripSegmentName = true;
    }
    // read the directory and init files
    long streamLength = stream.length();
    FileEntry entry = null;
    for (int i=0; i<count; i++) {
      long offset = stream.readLong();
      if (offset < 0 || offset > streamLength) {
        throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
      }
      String id = stream.readString();
      if (stripSegmentName) {
        // Fix the id to not include the segment names. This is relevant for
        // pre-3.1 indexes.
        id = IndexFileNames.stripSegmentName(id);
      }
      if (entry != null) {
        // set length of the previous entry
        entry.length = offset - entry.offset;
      }
      entry = new FileEntry();
      entry.offset = offset;
      entries.put(id, entry);
    }
    // set the length of the final entry
    if (entry != null) {
      entry.length = streamLength - entry.offset;
    }
    return entries;
  }
  public Directory getDirectory() {
    return directory;
  }
  public String getName() {
    return fileName;
  }
  @Override
  public synchronized void close() throws IOException {
    ensureOpen();
    entries = null;
    isOpen = false;
    if (writer != null) {
      assert openForWrite;
      writer.close();
    }
  }
  @Override
  public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
    ensureOpen();
    assert !openForWrite;
    id = IndexFileNames.stripSegmentName(id);
    final FileEntry entry = entries.get(id);
    if (entry == null)
      throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
    return openInputSlice(id, entry.offset, entry.length, readBufferSize);
  }
  /** Return an IndexInput that represents a "slice" or portion of the CFS file. */
  public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
  /** Returns an array of strings, one for each file in the directory. */
  @Override
  public String[] listAll() {
    ensureOpen();
    String[] res;
    if (writer != null) {
      res = writer.listAll();
    } else {
      res = entries.keySet().toArray(new String[entries.size()]);
      // Add the segment name
      String seg = fileName.substring(0, fileName.indexOf('.'));
      for (int i = 0; i < res.length; i++) {
        res[i] = seg + res[i];
      }
    }
    return res;
  }
  /** Returns true iff a file with the given name exists. */
  @Override
  public boolean fileExists(String name) {
    ensureOpen();
    if (this.writer != null) {
      return writer.fileExists(name);
    }
    return entries.containsKey(IndexFileNames.stripSegmentName(name));
  }
  /** Returns the time the compound file was last modified. */
  @Override
  public long fileModified(String name) throws IOException {
    ensureOpen();
    // The whole compound file shares one timestamp; name is ignored.
    return directory.fileModified(fileName);
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public void deleteFile(String name) {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  public void renameFile(String from, String to) {
    throw new UnsupportedOperationException();
  }
  /** Returns the length of a file in the directory.
   * @throws IOException if the file does not exist */
  @Override
  public long fileLength(String name) throws IOException {
    ensureOpen();
    if (this.writer != null) {
      // NOTE(review): spelling matches CompoundFileWriter's method name — verify there.
      return writer.fileLenght(name);
    }
    FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
    if (e == null)
      throw new FileNotFoundException(name);
    return e.length;
  }
  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    ensureOpen();
    initWriter();
    return writer.createOutput(name, context);
  }
  @Override
  public void sync(Collection<String> names) throws IOException {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public Lock makeLock(String name) {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
    // NOTE: final to make nested compounding impossible.
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
      throws IOException {
    // NOTE: final to make nested compounding impossible.
    throw new UnsupportedOperationException();
  }
  // Lazily creates the writer; only legal after initForWrite().
  private final void initWriter() {
    assert openForWrite;
    assert entries == SENTINEL;
    if (writer == null) {
      writer = new CompoundFileWriter(directory, fileName);
    }
  }
}
MergeMethods
package org.apache.lucene.store;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
  /** Offset/Length for a slice inside of a compound file */
  public static final class FileEntry {
    long offset;   // absolute offset of the sub-file within the compound stream
    long length;   // length of the sub-file in bytes
  }
  private final Directory directory;
  private final String fileName;
  protected final int readBufferSize;
  private Map<String, FileEntry> entries;
  private boolean openForWrite;
  // Immutable sentinel meaning "opened for write"; see initForWrite().
  private static final Map<String, FileEntry> SENTINEL = Collections.emptyMap();
  private CompoundFileWriter writer;
  /**
   * Create a new CompoundFileDirectory.
   * <p>
   * NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
   */
  public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
    this.directory = directory;
    this.fileName = fileName;
    // FIX: was "this.readBufferSize = readBufferSize;" — a self-reference that
    // cannot compile (this constructor has no parameter or local of that name).
    // Derive the buffer size from the IOContext, as the sibling copy of this
    // constructor in this file does.
    this.readBufferSize = BufferedIndexInput.bufferSize(context);
    this.isOpen = false;
  }
  /** Initialize with a map of filename->slices */
  protected final void initForRead(Map<String, FileEntry> entries) {
    this.entries = entries;
    this.isOpen = true;
    this.openForWrite = false;
  }
  protected final void initForWrite() {
    // Nested compound files cannot be written.
    assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
    this.entries = SENTINEL;
    this.openForWrite = true;
    this.isOpen = true;
    writer = new CompoundFileWriter(directory, fileName);
  }
  /** Helper method that reads CFS entries from an input stream */
  public static final Map<String, FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
    // read the first VInt. If it is negative, it's the version number
    // otherwise it's the count (pre-3.1 indexes)
    final int firstInt = stream.readVInt();
    if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
      IndexInput input = null;
      try {
        // Current format keeps the entry table in a separate entries file.
        input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
        // unused right now
        final int readInt = input.readInt();
        assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
        final int numEntries = input.readVInt();
        final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(numEntries);
        // Each record: name (String), offset (long), length (long).
        for (int i = 0; i < numEntries; i++) {
          final FileEntry fileEntry = new FileEntry();
          mapping.put(input.readString(), fileEntry);
          fileEntry.offset = input.readLong();
          fileEntry.length = input.readLong();
        }
        return mapping;
      } finally {
        IOUtils.closeSafely(true, input);
      }
    }
    // TODO remove once 3.x is not supported anymore
    return readLegacyEntries(stream, firstInt);
  }
  // Legacy (3.x) entry table: lengths are derived from consecutive offsets.
  private static Map<String, FileEntry> readLegacyEntries(IndexInput stream, int firstInt) throws CorruptIndexException, IOException {
    final Map<String, FileEntry> entries = new HashMap<String, FileEntry>();
    final int count;
    final boolean stripSegmentName;
    if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
      if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
        throw new CorruptIndexException("Incompatible format version: " + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
      }
      // It's a post-3.1 index, read the count.
      count = stream.readVInt();
      stripSegmentName = false;
    } else {
      count = firstInt;
      stripSegmentName = true;
    }
    // read the directory and init files
    long streamLength = stream.length();
    FileEntry entry = null;
    for (int i = 0; i < count; i++) {
      long offset = stream.readLong();
      if (offset < 0 || offset > streamLength) {
        throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
      }
      String id = stream.readString();
      if (stripSegmentName) {
        // Fix the id to not include the segment names. This is relevant for
        // pre-3.1 indexes.
        id = IndexFileNames.stripSegmentName(id);
      }
      if (entry != null) {
        // set length of the previous entry
        entry.length = offset - entry.offset;
      }
      entry = new FileEntry();
      entry.offset = offset;
      entries.put(id, entry);
    }
    // set the length of the final entry
    if (entry != null) {
      entry.length = streamLength - entry.offset;
    }
    return entries;
  }
  /** Returns the underlying directory that physically stores this compound file. */
  public Directory getDirectory() {
    return directory;
  }
  /** Returns the name of this compound file within the underlying directory. */
  public String getName() {
    return fileName;
  }
  @Override
  public synchronized void close() throws IOException {
    if (!isOpen) {
      // allow double close - usually to be consistent with other closeables
      assert entries == null;
      // already closed
      return;
    }
    entries = null;
    isOpen = false;
    if (writer != null) {
      assert openForWrite;
      writer.close();
    }
  }
  @Override
  public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
    ensureOpen();
    assert !openForWrite;
    // Entry ids are stored without the segment name; normalize the lookup key.
    id = IndexFileNames.stripSegmentName(id);
    final FileEntry entry = entries.get(id);
    if (entry == null)
      throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
    return openInputSlice(id, entry.offset, entry.length, readBufferSize);
  }
  /** Return an IndexInput that represents a "slice" or portion of the CFS file. */
  public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
  /** Returns an array of strings, one for each file in the directory. */
  @Override
  public String[] listAll() {
    ensureOpen();
    String[] res;
    if (writer != null) {
      res = writer.listAll();
    } else {
      res = entries.keySet().toArray(new String[entries.size()]);
      // Add the segment name
      String seg = fileName.substring(0, fileName.indexOf('.'));
      for (int i = 0; i < res.length; i++) {
        res[i] = seg + res[i];
      }
    }
    return res;
  }
  /** Returns true iff a file with the given name exists. */
  @Override
  public boolean fileExists(String name) {
    ensureOpen();
    if (this.writer != null) {
      return writer.fileExists(name);
    }
    return entries.containsKey(IndexFileNames.stripSegmentName(name));
  }
  /** Returns the time the compound file was last modified. */
  @Override
  public long fileModified(String name) throws IOException {
    ensureOpen();
    // The whole compound file shares one timestamp; name is ignored.
    return directory.fileModified(fileName);
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public void deleteFile(String name) {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  public void renameFile(String from, String to) {
    throw new UnsupportedOperationException();
  }
  /** Returns the length of a file in the directory.
   * @throws IOException if the file does not exist */
  @Override
  public long fileLength(String name) throws IOException {
    ensureOpen();
    if (this.writer != null) {
      // NOTE(review): spelling matches CompoundFileWriter's method name — verify there.
      return writer.fileLenght(name);
    }
    FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
    if (e == null)
      throw new FileNotFoundException(name);
    return e.length;
  }
  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    ensureOpen();
    // NOTE(review): in this merged variant the writer is created by
    // initForWrite(); calling this without that setup NPEs. The IOContext is
    // also dropped — confirm CompoundFileWriter.createOutput's signature.
    return writer.createOutput(name);
  }
  @Override
  public void sync(Collection<String> names) throws IOException {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public Lock makeLock(String name) {
    throw new UnsupportedOperationException();
  }
  /** Opens a nested compound file stored as an entry of this compound file. */
  @Override
  public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
    FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
    if (fileEntry == null) {
      throw new FileNotFoundException("file " + name + " does not exists in this CFS");
    }
    // FIX: was "new NestedCompoundFileDirectory(name, bufferSize, ...)" —
    // "bufferSize" is not in scope here; pass the caller's IOContext through
    // (the nested constructor forwards it to the IOContext-taking super ctor).
    return new NestedCompoundFileDirectory(name, context, fileEntry.offset, fileEntry.length);
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public CompoundFileDirectory createCompoundOutput(String name, IOContext context) throws IOException {
    throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
  }
  /** Read-only view of a CFS that is itself stored as an entry of this CFS. */
  private class NestedCompoundFileDirectory extends CompoundFileDirectory {
    private final long cfsOffset;   // offset of the nested CFS inside the outer CFS
    private final long cfsLength;   // length of the nested CFS
    // FIX: the parameter was "int readBufferSize" and was forwarded to the
    // super constructor, which takes an IOContext — that did not compile.
    // Accept the IOContext and forward it instead.
    public NestedCompoundFileDirectory(String fileName, IOContext context, long offset, long length) throws IOException {
      super(directory, fileName, context);
      this.cfsOffset = offset;
      this.cfsLength = length;
      IndexInput input = null;
      try {
        // FIX: was openInput(fileName, 128) — openInput takes an IOContext,
        // not a buffer size; the entry table is read exactly once.
        input = CompoundFileDirectory.this.openInput(fileName, IOContext.READONCE);
        initForRead(CompoundFileDirectory.readEntries(input, CompoundFileDirectory.this, fileName));
      } finally {
        IOUtils.closeSafely(false, input);
      }
    }
    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Translate the nested slice into outer-CFS coordinates.
      assert offset + length <= cfsLength;
      return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
    }
  }
}
package org.apache.lucene.store;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
  /** Offset/Length for a slice inside of a compound file */
  public static final class FileEntry {
    long offset;   // absolute offset of the sub-file within the compound stream
    long length;   // length of the sub-file in bytes
  }
  private final Directory directory;
  private final String fileName;
  protected final int readBufferSize;
  private Map<String, FileEntry> entries;
  private boolean openForWrite;
  // Immutable sentinel meaning "opened for write"; see initForWrite().
  private static final Map<String, FileEntry> SENTINEL = Collections.emptyMap();
  private CompoundFileWriter writer;
  /**
   * Create a new CompoundFileDirectory.
   * <p>
   * NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
   */
  public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
    this.directory = directory;
    this.fileName = fileName;
    // FIX: was "this.readBufferSize = readBufferSize;" — a self-reference that
    // cannot compile (this constructor has no parameter or local of that name).
    // Derive the buffer size from the IOContext, as the sibling copy of this
    // constructor in this file does.
    this.readBufferSize = BufferedIndexInput.bufferSize(context);
    this.isOpen = false;
  }
  /** Initialize with a map of filename->slices */
  protected final void initForRead(Map<String, FileEntry> entries) {
    this.entries = entries;
    this.isOpen = true;
    this.openForWrite = false;
  }
  protected final void initForWrite() {
    // Nested compound files cannot be written.
    assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
    this.entries = SENTINEL;
    this.openForWrite = true;
    this.isOpen = true;
    writer = new CompoundFileWriter(directory, fileName);
  }
  /** Helper method that reads CFS entries from an input stream */
  public static final Map<String, FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
    // read the first VInt. If it is negative, it's the version number
    // otherwise it's the count (pre-3.1 indexes)
    final int firstInt = stream.readVInt();
    if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
      IndexInput input = null;
      try {
        // Current format keeps the entry table in a separate entries file.
        input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
        // unused right now
        final int readInt = input.readInt();
        assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
        final int numEntries = input.readVInt();
        final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(numEntries);
        // Each record: name (String), offset (long), length (long).
        for (int i = 0; i < numEntries; i++) {
          final FileEntry fileEntry = new FileEntry();
          mapping.put(input.readString(), fileEntry);
          fileEntry.offset = input.readLong();
          fileEntry.length = input.readLong();
        }
        return mapping;
      } finally {
        IOUtils.closeSafely(true, input);
      }
    }
    // TODO remove once 3.x is not supported anymore
    return readLegacyEntries(stream, firstInt);
  }
  // Legacy (3.x) entry table: lengths are derived from consecutive offsets.
  private static Map<String, FileEntry> readLegacyEntries(IndexInput stream, int firstInt) throws CorruptIndexException, IOException {
    final Map<String, FileEntry> entries = new HashMap<String, FileEntry>();
    final int count;
    final boolean stripSegmentName;
    if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
      if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
        throw new CorruptIndexException("Incompatible format version: " + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
      }
      // It's a post-3.1 index, read the count.
      count = stream.readVInt();
      stripSegmentName = false;
    } else {
      count = firstInt;
      stripSegmentName = true;
    }
    // read the directory and init files
    long streamLength = stream.length();
    FileEntry entry = null;
    for (int i = 0; i < count; i++) {
      long offset = stream.readLong();
      if (offset < 0 || offset > streamLength) {
        throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
      }
      String id = stream.readString();
      if (stripSegmentName) {
        // Fix the id to not include the segment names. This is relevant for
        // pre-3.1 indexes.
        id = IndexFileNames.stripSegmentName(id);
      }
      if (entry != null) {
        // set length of the previous entry
        entry.length = offset - entry.offset;
      }
      entry = new FileEntry();
      entry.offset = offset;
      entries.put(id, entry);
    }
    // set the length of the final entry
    if (entry != null) {
      entry.length = streamLength - entry.offset;
    }
    return entries;
  }
  /** Returns the underlying directory that physically stores this compound file. */
  public Directory getDirectory() {
    return directory;
  }
  /** Returns the name of this compound file within the underlying directory. */
  public String getName() {
    return fileName;
  }
  @Override
  public synchronized void close() throws IOException {
    if (!isOpen) {
      // allow double close - usually to be consistent with other closeables
      assert entries == null;
      // already closed
      return;
    }
    entries = null;
    isOpen = false;
    if (writer != null) {
      assert openForWrite;
      writer.close();
    }
  }
  @Override
  public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
    ensureOpen();
    assert !openForWrite;
    // Entry ids are stored without the segment name; normalize the lookup key.
    id = IndexFileNames.stripSegmentName(id);
    final FileEntry entry = entries.get(id);
    if (entry == null)
      throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
    return openInputSlice(id, entry.offset, entry.length, readBufferSize);
  }
  /** Return an IndexInput that represents a "slice" or portion of the CFS file. */
  public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
  /** Returns an array of strings, one for each file in the directory. */
  @Override
  public String[] listAll() {
    ensureOpen();
    String[] res;
    if (writer != null) {
      res = writer.listAll();
    } else {
      res = entries.keySet().toArray(new String[entries.size()]);
      // Add the segment name
      String seg = fileName.substring(0, fileName.indexOf('.'));
      for (int i = 0; i < res.length; i++) {
        res[i] = seg + res[i];
      }
    }
    return res;
  }
  /** Returns true iff a file with the given name exists. */
  @Override
  public boolean fileExists(String name) {
    ensureOpen();
    if (this.writer != null) {
      return writer.fileExists(name);
    }
    return entries.containsKey(IndexFileNames.stripSegmentName(name));
  }
  /** Returns the time the compound file was last modified. */
  @Override
  public long fileModified(String name) throws IOException {
    ensureOpen();
    // The whole compound file shares one timestamp; name is ignored.
    return directory.fileModified(fileName);
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public void deleteFile(String name) {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  public void renameFile(String from, String to) {
    throw new UnsupportedOperationException();
  }
  /** Returns the length of a file in the directory.
   * @throws IOException if the file does not exist */
  @Override
  public long fileLength(String name) throws IOException {
    ensureOpen();
    if (this.writer != null) {
      // NOTE(review): spelling matches CompoundFileWriter's method name — verify there.
      return writer.fileLenght(name);
    }
    FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
    if (e == null)
      throw new FileNotFoundException(name);
    return e.length;
  }
  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    ensureOpen();
    // NOTE(review): in this merged variant the writer is created by
    // initForWrite(); calling this without that setup NPEs. The IOContext is
    // also dropped — confirm CompoundFileWriter.createOutput's signature.
    return writer.createOutput(name);
  }
  @Override
  public void sync(Collection<String> names) throws IOException {
    throw new UnsupportedOperationException();
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public Lock makeLock(String name) {
    throw new UnsupportedOperationException();
  }
  /** Opens a nested compound file stored as an entry of this compound file. */
  @Override
  public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
    FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
    if (fileEntry == null) {
      throw new FileNotFoundException("file " + name + " does not exists in this CFS");
    }
    // FIX: was "new NestedCompoundFileDirectory(name, bufferSize, ...)" —
    // "bufferSize" is not in scope here; pass the caller's IOContext through
    // (the nested constructor forwards it to the IOContext-taking super ctor).
    return new NestedCompoundFileDirectory(name, context, fileEntry.offset, fileEntry.length);
  }
  /** Not implemented
   * @throws UnsupportedOperationException */
  @Override
  public CompoundFileDirectory createCompoundOutput(String name, IOContext context) throws IOException {
    throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
  }
  /** Read-only view of a CFS that is itself stored as an entry of this CFS. */
  private class NestedCompoundFileDirectory extends CompoundFileDirectory {
    private final long cfsOffset;   // offset of the nested CFS inside the outer CFS
    private final long cfsLength;   // length of the nested CFS
    // FIX: the parameter was "int readBufferSize" and was forwarded to the
    // super constructor, which takes an IOContext — that did not compile.
    // Accept the IOContext and forward it instead.
    public NestedCompoundFileDirectory(String fileName, IOContext context, long offset, long length) throws IOException {
      super(directory, fileName, context);
      this.cfsOffset = offset;
      this.cfsLength = length;
      IndexInput input = null;
      try {
        // FIX: was openInput(fileName, 128) — openInput takes an IOContext,
        // not a buffer size; the entry table is read exactly once.
        input = CompoundFileDirectory.this.openInput(fileName, IOContext.READONCE);
        initForRead(CompoundFileDirectory.readEntries(input, CompoundFileDirectory.this, fileName));
      } finally {
        IOUtils.closeSafely(false, input);
      }
    }
    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Translate the nested slice into outer-CFS coordinates.
      assert offset + length <= cfsLength;
      return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
    }
  }
}
KeepBothMethods
package org.apache.lucene.store;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
// Read-mostly Directory over a single compound (".cfs") file. Sub-files are
// stored as contiguous slices of the stream and addressed via the entry
// table. This copy keeps BOTH the legacy int-bufferSize and the IOContext
// method overloads side by side (merge resolution artifact).
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset; // start of the sub-file within the compound stream
long length; // length of the sub-file in bytes
}
// Directory that physically contains the compound file.
private final Directory directory;
// Name of the compound file inside {@link #directory}.
private final String fileName;
protected final int readBufferSize;
// Sub-file name -> slice. Null once closed; SENTINEL while open for write.
private Map<String, FileEntry> entries;
private boolean openForWrite;
private static final Map<String, FileEntry> SENTINEL = Collections.emptyMap();
// Set when write mode is entered; remains null in read-only mode.
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = BufferedIndexInput.bufferSize(context);
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String, FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
// Switch into write mode: entries becomes a sentinel map and all file
// operations are delegated to a CompoundFileWriter.
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String, FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
// Current format: the entry table lives in a separate sidecar file
// derived from the segment name plus the entries extension.
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
// unused right now
final int readInt = input.readInt();
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
// First arg true: a close failure here is rethrown to the caller.
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
// Legacy table: only offsets are stored; each entry's length is derived
// from the next entry's offset (or the stream length for the last entry).
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream, int firstInt) throws CorruptIndexException, IOException {
final Map<String, FileEntry> entries = new HashMap<String, FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: " + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i = 0; i < count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that physically contains this compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of this compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
// already closed
return;
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
// Opens a sub-file; the returned input is a slice of the compound stream.
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// All sub-files share the compound file's own modification time.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// "fileLenght" [sic] — presumably the CompoundFileWriter API's
// spelling; TODO confirm against that class.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
// NOTE(review): unlike the IOContext overload below, this variant does not
// call initWriter() first, so writer may be null unless write mode has
// already been entered — confirm intended usage.
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
return writer.createOutput(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
// NOTE(review): initWriter() is not defined in this class as shown (only
// initForWrite() is) — verify it exists in the final merged version.
initWriter();
return writer.createOutput(name, context);
}
// Not supported: the compound file is synced as a whole by its owner.
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
// Opens a compound file that is itself stored as a sub-file of this one
// (legacy int-bufferSize overload; the IOContext overload below forbids it).
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
// Read-only view of a nested compound file; all slice reads are shifted by
// cfsOffset into the outer file's coordinate space.
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
private final long cfsOffset; // start of the nested CFS inside the outer file
private final long cfsLength; // total length of the nested CFS
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length) throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
// NOTE(review): openInput(String, int) is not declared in this copy
// of the class (only the IOContext overload) — verify it resolves.
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input, CompoundFileDirectory.this, fileName));
} finally {
// Close quietly (first arg false): do not mask an in-flight exception.
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
// Offsets are relative to the nested CFS; translate before delegating.
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
}
package org.apache.lucene.store;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
// Read-mostly Directory over a single compound (".cfs") file. Sub-files are
// stored as contiguous slices of the stream and addressed via the entry
// table. Duplicate of the copy earlier in this file; it likewise keeps BOTH
// the legacy int-bufferSize and the IOContext method overloads.
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset; // start of the sub-file within the compound stream
long length; // length of the sub-file in bytes
}
// Directory that physically contains the compound file.
private final Directory directory;
// Name of the compound file inside {@link #directory}.
private final String fileName;
protected final int readBufferSize;
// Sub-file name -> slice. Null once closed; SENTINEL while open for write.
private Map<String, FileEntry> entries;
private boolean openForWrite;
private static final Map<String, FileEntry> SENTINEL = Collections.emptyMap();
// Set when write mode is entered; remains null in read-only mode.
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = BufferedIndexInput.bufferSize(context);
this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String, FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
// Switch into write mode: entries becomes a sentinel map and all file
// operations are delegated to a CompoundFileWriter.
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String, FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
// Current format: the entry table lives in a separate sidecar file
// derived from the segment name plus the entries extension.
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
// unused right now
final int readInt = input.readInt();
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
// First arg true: a close failure here is rethrown to the caller.
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
// Legacy table: only offsets are stored; each entry's length is derived
// from the next entry's offset (or the stream length for the last entry).
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream, int firstInt) throws CorruptIndexException, IOException {
final Map<String, FileEntry> entries = new HashMap<String, FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: " + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i = 0; i < count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that physically contains this compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of this compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
// already closed
return;
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
// Opens a sub-file; the returned input is a slice of the compound stream.
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
// All sub-files share the compound file's own modification time.
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// "fileLenght" [sic] — presumably the CompoundFileWriter API's
// spelling; TODO confirm against that class.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
// NOTE(review): unlike the IOContext overload below, this variant does not
// call initWriter() first, so writer may be null unless write mode has
// already been entered — confirm intended usage.
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
return writer.createOutput(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
// NOTE(review): initWriter() is not defined in this class as shown (only
// initForWrite() is) — verify it exists in the final merged version.
initWriter();
return writer.createOutput(name, context);
}
// Not supported: the compound file is synced as a whole by its owner.
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
// Opens a compound file that is itself stored as a sub-file of this one
// (legacy int-bufferSize overload; the IOContext overload below forbids it).
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
// Read-only view of a nested compound file; all slice reads are shifted by
// cfsOffset into the outer file's coordinate space.
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
private final long cfsOffset; // start of the nested CFS inside the outer file
private final long cfsLength; // total length of the nested CFS
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length) throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
// NOTE(review): openInput(String, int) is not declared in this copy
// of the class (only the IOContext overload) — verify it resolves.
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input, CompoundFileDirectory.this, fileName));
} finally {
// Close quietly (first arg false): do not mask an in-flight exception.
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
// Offsets are relative to the nested CFS; translate before delegating.
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
}
Safe
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
// Third copy of CompoundFileDirectory in this file ("Safe" variant).
// NOTE(review): this copy contains an UNRESOLVED merge conflict in the
// constructor area (markers below) and therefore does not compile as-is.
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset; // start of the sub-file within the compound stream
long length; // length of the sub-file in bytes
}
private final Directory directory;
private final String fileName;
protected final int readBufferSize;
// Sub-file name -> slice. Null once closed; SENTINEL while open for write.
private Map<String,FileEntry> entries;
private boolean openForWrite;
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
// NOTE(review): unresolved conflict — "MINE" keeps the IOContext
// constructor, "YOURS" the int-readBufferSize one; resolve before build.
<<<<<<< MINE
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = BufferedIndexInput.bufferSize(context);
this.isOpen = false;
}
=======
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
this.directory = directory;
this.fileName = fileName;
this.readBufferSize = readBufferSize;
this.isOpen = false;
}
>>>>>>> YOURS
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
// Switch into write mode, backed by a CompoundFileWriter.
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
// First arg true: a close failure here is rethrown to the caller.
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
// Legacy table: only offsets are stored; lengths are derived from the
// following entry's offset (or from the stream length for the last one).
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
/** Returns the directory that physically contains this compound file. */
public Directory getDirectory() {
return directory;
}
/** Returns the name of this compound file. */
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
// Opens a sub-file; the returned input is a slice of the compound stream.
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
// "fileLenght" [sic] — presumably the writer API's spelling; confirm.
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
// NOTE(review): does not enter write mode first — writer may be null.
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
return writer.createOutput(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
// NOTE(review): initWriter() is not defined in this copy of the class
// (only initForWrite() is) — verify it exists after the merge.
initWriter();
return writer.createOutput(name, context);
}
// Not supported: the compound file is synced as a whole by its owner.
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
// Opens a compound file stored as a sub-file of this one (legacy overload;
// the IOContext overload below forbids nesting).
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
// Read-only view of a nested compound file; all slice reads are shifted by
// cfsOffset into the outer file's coordinate space.
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
private final long cfsOffset;
private final long cfsLength;
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input,
CompoundFileDirectory.this, fileName));
} finally {
// Close quietly (first arg false): do not mask an in-flight exception.
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length,
int readBufferSize) throws IOException {
// Offsets are relative to the nested CFS; translate before delegating.
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
private final Directory directory;
private final String fileName;
protected final int readBufferSize;
private Map<String,FileEntry> entries;
private boolean openForWrite;
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
/** Constructs a directory over the compound file {@code fileName} in
 * {@code directory}, using {@code readBufferSize} for sub-file reads.
 * Resolved merge conflict: this copy of the class keeps the int-based
 * constructor, because NestedCompoundFileDirectory below invokes
 * super(directory, fileName, readBufferSize) with an int — the IOContext
 * variant would not compile here. The "no compound inside compound" assert
 * is enforced in initForWrite(), where nesting is actually forbidden. */
public CompoundFileDirectory(Directory directory, String fileName, int readBufferSize) throws IOException {
  this.directory = directory;
  this.fileName = fileName;
  this.readBufferSize = readBufferSize;
  // Not usable until initForRead(Map) or initForWrite() is called.
  this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream.
 * Supports two layouts: the current format, whose entry table lives in a
 * separate ".cfe"-style entries file, and the legacy pre-3.1 format, whose
 * entry table is inline at the start of the stream.
 * @param stream the compound file stream, positioned at its start
 * @param dir directory used to open the separate entries file (current format)
 * @param name the compound file's name, used to derive the entries file name
 * @return map from sub-file id to its offset/length slice */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
// Current format: entries live in a sibling file with the
// COMPOUND_FILE_ENTRIES_EXTENSION, read once and then closed.
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
// Each record is: name (string), offset (long), length (long).
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
// true: suppress close exceptions so a parse error is not masked.
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
/** Reads the legacy inline entry table (3.x and pre-3.1 formats).
 * Legacy entries store only offsets; each entry's length is reconstructed as
 * the distance to the next entry's offset (or to the end of the stream for
 * the last entry), so entries are processed one behind the read cursor.
 * @param firstInt the already-consumed first VInt of the stream: either a
 *        negative format version (3.x) or the entry count (pre-3.1)
 * @throws CorruptIndexException on an unknown version or an invalid offset */
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
// Version is newer than anything this reader understands.
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
// Pre-3.1: firstInt was the count itself, and ids embed the segment name.
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
public Directory getDirectory() {
return directory;
}
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
/** Opens a sub-file of this compound file for reading.
 * The id is normalized by stripping the segment prefix, then resolved to a
 * slice of the underlying compound stream.
 * @throws IOException if no entry with the given id exists
 * NOTE(review): the IOContext argument is accepted but not used here — the
 * slice is opened with the directory-wide readBufferSize; confirm intended. */
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Lists the names of every sub-file contained in this compound file. */
@Override
public String[] listAll() {
  ensureOpen();
  if (writer != null) {
    // Still being written: the writer knows the current contents.
    return writer.listAll();
  }
  // Entry keys are stored without the segment prefix; re-attach it so the
  // returned names match what callers see elsewhere in the index.
  final String segmentName = fileName.substring(0, fileName.indexOf('.'));
  final String[] names = entries.keySet().toArray(new String[entries.size()]);
  for (int idx = 0; idx < names.length; idx++) {
    names[idx] = segmentName + names[idx];
  }
  return names;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
return writer.createOutput(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
initWriter();
return writer.createOutput(name, context);
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
private final long cfsOffset;
private final long cfsLength;
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input,
CompoundFileDirectory.this, fileName));
} finally {
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length,
int readBufferSize) throws IOException {
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
}
}
Unstructured
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
private final Directory directory;
private final String fileName;
protected final int readBufferSize;
private Map<String,FileEntry> entries;
private boolean openForWrite;
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
/** Constructs a directory over the compound file {@code fileName} in
 * {@code directory}; the read buffer size is derived from the IOContext.
 * Resolved merge conflict: the unconflicted body below reads
 * BufferedIndexInput.bufferSize(context), so only the IOContext-taking
 * signature can compile here; the int-based variant was discarded.
 * NOTE(review): NestedCompoundFileDirectory later in this copy still calls
 * super(directory, fileName, readBufferSize) with an int — confirm it is
 * updated or removed alongside this change. */
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
  // Forbid compound-inside-compound at construction time.
  assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
  this.directory = directory;
  this.fileName = fileName;
  this.readBufferSize = BufferedIndexInput.bufferSize(context);
  // Not usable until initForRead(Map) or initForWrite() is called.
  this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
public Directory getDirectory() {
return directory;
}
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
<<<<<<< MINE
return writer.createOutput(name);
=======
initWriter();
return writer.createOutput(name, context);
>>>>>>> YOURS
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
@Override
<<<<<<< MINE
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
=======
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
>>>>>>> YOURS
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
private final long cfsOffset;
private final long cfsLength;
public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
throws IOException {
super(directory, fileName, readBufferSize);
this.cfsOffset = offset;
this.cfsLength = length;
IndexInput input = null;
try {
input = CompoundFileDirectory.this.openInput(fileName, 128);
initForRead(CompoundFileDirectory.readEntries(input,
CompoundFileDirectory.this, fileName));
} finally {
IOUtils.closeSafely(false, input);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length,
int readBufferSize) throws IOException {
assert offset + length <= cfsLength;
return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
* @lucene.experimental
*/
public abstract class CompoundFileDirectory extends Directory {
/** Offset/Length for a slice inside of a compound file */
public static final class FileEntry {
long offset;
long length;
}
private final Directory directory;
private final String fileName;
protected final int readBufferSize;
private Map<String,FileEntry> entries;
private boolean openForWrite;
private static final Map<String,FileEntry> SENTINEL = Collections.emptyMap();
private CompoundFileWriter writer;
/**
* Create a new CompoundFileDirectory.
* <p>
* NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
*/
/** Constructs a directory over the compound file {@code fileName} in
 * {@code directory}; the read buffer size is derived from the IOContext.
 * Resolved merge conflict: the unconflicted body below reads
 * BufferedIndexInput.bufferSize(context), so only the IOContext-taking
 * signature can compile here; the int-based variant was discarded.
 * NOTE(review): NestedCompoundFileDirectory later in this copy still calls
 * super(directory, fileName, readBufferSize) with an int — confirm it is
 * updated or removed alongside this change. */
public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
  // Forbid compound-inside-compound at construction time.
  assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
  this.directory = directory;
  this.fileName = fileName;
  this.readBufferSize = BufferedIndexInput.bufferSize(context);
  // Not usable until initForRead(Map) or initForWrite() is called.
  this.isOpen = false;
}
/** Initialize with a map of filename->slices */
protected final void initForRead(Map<String,FileEntry> entries) {
this.entries = entries;
this.isOpen = true;
this.openForWrite = false;
}
protected final void initForWrite() {
assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
this.entries = SENTINEL;
this.openForWrite = true;
this.isOpen = true;
writer = new CompoundFileWriter(directory, fileName);
}
/** Helper method that reads CFS entries from an input stream */
public static final Map<String,FileEntry> readEntries(IndexInput stream, Directory dir, String name) throws IOException {
// read the first VInt. If it is negative, it's the version number
// otherwise it's the count (pre-3.1 indexes)
final int firstInt = stream.readVInt();
if (firstInt == CompoundFileWriter.FORMAT_CURRENT) {
IndexInput input = null;
try {
input = dir.openInput(IndexFileNames.segmentFileName(IndexFileNames.stripExtension(name), "",
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION), IOContext.READONCE);
final int readInt = input.readInt(); // unused right now
assert readInt == CompoundFileWriter.ENTRY_FORMAT_CURRENT;
final int numEntries = input.readVInt();
final Map<String, FileEntry> mapping = new HashMap<String, CompoundFileDirectory.FileEntry>(
numEntries);
for (int i = 0; i < numEntries; i++) {
final FileEntry fileEntry = new FileEntry();
mapping.put(input.readString(), fileEntry);
fileEntry.offset = input.readLong();
fileEntry.length = input.readLong();
}
return mapping;
} finally {
IOUtils.closeSafely(true, input);
}
}
// TODO remove once 3.x is not supported anymore
return readLegacyEntries(stream, firstInt);
}
private static Map<String, FileEntry> readLegacyEntries(IndexInput stream,
int firstInt) throws CorruptIndexException, IOException {
final Map<String,FileEntry> entries = new HashMap<String,FileEntry>();
final int count;
final boolean stripSegmentName;
if (firstInt < CompoundFileWriter.FORMAT_PRE_VERSION) {
if (firstInt < CompoundFileWriter.FORMAT_CURRENT) {
throw new CorruptIndexException("Incompatible format version: "
+ firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT);
}
// It's a post-3.1 index, read the count.
count = stream.readVInt();
stripSegmentName = false;
} else {
count = firstInt;
stripSegmentName = true;
}
// read the directory and init files
long streamLength = stream.length();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
if (offset < 0 || offset > streamLength) {
throw new CorruptIndexException("Invalid CFS entry offset: " + offset);
}
String id = stream.readString();
if (stripSegmentName) {
// Fix the id to not include the segment names. This is relevant for
// pre-3.1 indexes.
id = IndexFileNames.stripSegmentName(id);
}
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = streamLength - entry.offset;
}
return entries;
}
public Directory getDirectory() {
return directory;
}
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (!isOpen) {
// allow double close - usually to be consistent with other closeables
assert entries == null;
return; // already closed
}
entries = null;
isOpen = false;
if (writer != null) {
assert openForWrite;
writer.close();
}
}
@Override
public synchronized IndexInput openInput(String id, IOContext context) throws IOException {
ensureOpen();
assert !openForWrite;
id = IndexFileNames.stripSegmentName(id);
final FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found (files: " + entries.keySet() + ")");
return openInputSlice(id, entry.offset, entry.length, readBufferSize);
}
/** Return an IndexInput that represents a "slice" or portion of the CFS file. */
public abstract IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException;
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
if (this.writer != null) {
return writer.fileExists(name);
}
return entries.containsKey(IndexFileNames.stripSegmentName(name));
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
ensureOpen();
return directory.fileModified(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name) {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to) {
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
if (this.writer != null) {
return writer.fileLenght(name);
}
FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
if (e == null)
throw new FileNotFoundException(name);
return e.length;
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
ensureOpen();
<<<<<<< MINE
return writer.createOutput(name);
=======
initWriter();
return writer.createOutput(name, context);
>>>>>>> YOURS
}
@Override
public void sync(Collection<String> names) throws IOException {
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name) {
throw new UnsupportedOperationException();
}
@Override
<<<<<<< MINE
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
if (fileEntry == null) {
throw new FileNotFoundException("file " + name + " does not exists in this CFS");
}
return new NestedCompoundFileDirectory(name, bufferSize, fileEntry.offset, fileEntry.length);
=======
public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
// NOTE: final to make nested compounding impossible.
throw new UnsupportedOperationException();
>>>>>>> YOURS
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
throws IOException {
throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
}
/** View of a compound file that is itself stored inside the enclosing
 * compound file.  All slice reads are translated by the nested file's
 * base offset within the outer CFS. */
private class NestedCompoundFileDirectory extends CompoundFileDirectory {
    // Offset and length of the nested CFS within the enclosing compound file.
    private final long cfsOffset;
    private final long cfsLength;
    public NestedCompoundFileDirectory(String fileName, int readBufferSize, long offset, long length)
        throws IOException {
        super(directory, fileName, readBufferSize);
        this.cfsOffset = offset;
        this.cfsLength = length;
        IndexInput input = null;
        try {
            // Read the nested file's entry table through the outer directory.
            input = CompoundFileDirectory.this.openInput(fileName, 128);
            initForRead(CompoundFileDirectory.readEntries(input,
                CompoundFileDirectory.this, fileName));
        } finally {
            // NOTE(review): first arg presumably suppressExceptions=false —
            // confirm against IOUtils.closeSafely contract.
            IOUtils.closeSafely(false, input);
        }
    }
    @Override
    public IndexInput openInputSlice(String id, long offset, long length,
        int readBufferSize) throws IOException {
        assert offset + length <= cfsLength;
        // Delegate to the outer CFS, shifting by this nested file's base offset.
        return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
    }
}
}
Diff Result
No diff
Case 34 - java_lucenesolr.rev_6a128_03676..DocumentsWriter.java
Base
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMFile;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RecyclingByteBlockAllocator;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.RamUsageEstimator;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
IndexWriter writer;
Directory directory;
String segment; // Current segment we are working on
private int nextDocID; // Next docID to be added
private int numDocs; // # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
boolean bufferIsFull; // True when it's time to write segment
private boolean aborting; // True if an abort is pending
PrintStream infoStream;
Similarity similarity;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
private final int maxThreadStates;
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
Similarity similarity;
int docID;
Document doc;
String maxTermPrefix;
// Only called by asserts
public boolean testPoint(String name) {
return docWriter.writer.testPoint(name);
}
public void clear() {
// don't hold onto doc nor analyzer, in case it is
// largish:
doc = null;
analyzer = null;
}
}
/** Consumer returns this on each doc. This holds any
* state that must be flushed synchronized "in docID
* order". We gather these and flush them in order. */
abstract static class DocWriter {
DocWriter next;
int docID;
abstract void finish() throws IOException;
abstract void abort();
abstract long sizeInBytes();
void setNext(DocWriter next) {
this.next = next;
}
}
/**
* Create and return a new DocWriterBuffer.
*/
PerDocBuffer newPerDocBuffer() {
return new PerDocBuffer();
}
/**
* RAMFile buffer for DocWriters.
*/
@SuppressWarnings("serial")
class PerDocBuffer extends RAMFile {
/**
* Allocate bytes used from shared pool.
*/
@Override
protected byte[] newBuffer(int size) {
assert size == PER_DOC_BLOCK_SIZE;
return perDocAllocator.getByteBlock();
}
/**
* Recycle the bytes used.
*/
synchronized void recycle() {
if (buffers.size() > 0) {
setLength(0);
// Recycle the blocks
perDocAllocator.recycleByteBlocks(buffers);
buffers.clear();
sizeInBytes = 0;
assert numBuffers() == 0;
}
}
}
/**
* The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
* which returns the DocConsumer that the DocumentsWriter calls to process the
* documents.
*/
abstract static class IndexingChain {
abstract DocConsumer getChain(DocumentsWriter documentsWriter);
}
static final IndexingChain defaultIndexingChain = new IndexingChain() {
@Override
DocConsumer getChain(DocumentsWriter documentsWriter) {
/*
This is the current indexing chain:
DocConsumer / DocConsumerPerThread
--> code: DocFieldProcessor / DocFieldProcessorPerThread
--> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
--> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
--> code: DocInverter / DocInverterPerThread / DocInverterPerField
--> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: TermsHash / TermsHashPerThread / TermsHashPerField
--> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
--> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
--> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
--> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
--> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
*/
// Build up indexing chain:
final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
/*
* nesting TermsHash instances here to allow the secondary (TermVectors) share the interned postings
* via a shared ByteBlockPool. See TermsHashPerField for details.
*/
final TermsHash termVectorsTermHash = new TermsHash(documentsWriter, false, termVectorsWriter, null);
final InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, termVectorsTermHash);
final NormsWriter normsWriter = new NormsWriter();
final DocInverter docInverter = new DocInverter(termsHash, normsWriter);
return new DocFieldProcessor(documentsWriter, docInverter);
}
};
final DocConsumer consumer;
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private boolean closed;
private final FieldInfos fieldInfos;
private final BufferedDeletes bufferedDeletes;
private final IndexWriter.FlushControl flushControl;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarity = writer.getConfig().getSimilarity();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion.  Currently only
// used when we hit an exception while adding a document;
// deleting the partially-indexed doc keeps adds "all or none".
synchronized void deleteDocID(int docIDUpto) {
    pendingDeletes.addDocID(docIDUpto);
    // NOTE: we do not trigger flush here.  This is
    // potentially a RAM leak, if you have an app that tries
    // to add docs but every single doc always hits a
    // non-aborting exception.  Allowing a flush here gets
    // very messy because we are only invoked when handling
    // exceptions so to do this properly, while handling an
    // exception we'd have to go off and flush new deletes
    // which is risky (likely would hit some other
    // confounding exception).
}
/** Buffers the given delete Queries against all docs buffered so far;
 * returns true if the caller should trigger a flush. */
boolean deleteQueries(Query... queries) {
    final boolean flushRequired = flushControl.waitUpdate(0, queries.length);
    synchronized (this) {
        for (int i = 0; i < queries.length; i++) {
            pendingDeletes.addQuery(queries[i], numDocs);
        }
    }
    return flushRequired;
}
boolean deleteQuery(Query query) {
final boolean doFlush = flushControl.waitUpdate(0, 1);
synchronized(this) {
pendingDeletes.addQuery(query, numDocs);
}
return doFlush;
}
/** Buffers the given delete Terms against all docs buffered so far;
 * returns true if the caller should trigger a flush. */
boolean deleteTerms(Term... terms) {
    final boolean flushRequired = flushControl.waitUpdate(0, terms.length);
    synchronized (this) {
        for (int i = 0; i < terms.length; i++) {
            pendingDeletes.addTerm(terms[i], numDocs);
        }
    }
    return flushRequired;
}
boolean deleteTerm(Term term, boolean skipWait) {
final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
synchronized(this) {
pendingDeletes.addTerm(term, numDocs);
}
return doFlush;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** If non-null, various details of indexing are printed here.
 * Propagates the stream to every existing thread state. */
synchronized void setInfoStream(PrintStream infoStream) {
    this.infoStream = infoStream;
    for (DocumentsWriterThreadState state : threadStates) {
        state.docState.infoStream = infoStream;
    }
}
/** Updates the Similarity used for indexing, propagating it to every
 * existing thread state. */
synchronized void setSimilarity(Similarity similarity) {
    this.similarity = similarity;
    for (DocumentsWriterThreadState state : threadStates) {
        state.docState.similarity = similarity;
    }
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
    if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
        // Auto-flush disabled: fall back to fixed wait-queue thresholds.
        ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
        waitQueuePauseBytes = 4*1024*1024;
        waitQueueResumeBytes = 2*1024*1024;
    } else {
        ramBufferSize = (long) (mb*1024*1024);
        // Pause producers when waiting bytes exceed 10% of the buffer;
        // resume once they drain below 5%.
        waitQueuePauseBytes = (long) (ramBufferSize*0.1);
        waitQueueResumeBytes = (long) (ramBufferSize*0.05);
        // Once over budget, balanceRAM frees down to 95% of the buffer.
        freeLevel = (long) (0.95 * ramBufferSize);
    }
}
/** Returns the RAM flush threshold in MB, or the DISABLE_AUTO_FLUSH
 * sentinel (returned as-is, not converted to MB). */
synchronized double getRAMBufferSizeMB() {
    if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
        return ramBufferSize;
    } else {
        return ramBufferSize/1024./1024.;
    }
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
synchronized String getSegment() {
return segment;
}
/** Returns how many docs are currently buffered in RAM. */
synchronized int getNumDocs() {
return numDocs;
}
/** Emits a diagnostic line via the IndexWriter, prefixed with "DW: ",
 * but only when an infoStream has been set. */
void message(String message) {
    if (infoStream == null) {
        return;
    }
    writer.message("DW: " + message);
}
/** Flags that an abort is pending; indexing threads observe this flag
 * and forcefully idle themselves (see finishDocument). */
synchronized void setAborting() {
    if (infoStream != null) {
        message("setAborting");
    }
    aborting = true;
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
if (infoStream != null) {
message("docWriter: abort");
}
boolean success = false;
try {
// Forcefully remove waiting ThreadStates from line
waitQueue.abort();
// Wait for all other threads to finish with
// DocumentsWriter:
waitIdle();
if (infoStream != null) {
message("docWriter: abort waitIdle done");
}
assert 0 == waitQueue.numWaiting: "waitQueue.numWaiting=" + waitQueue.numWaiting;
waitQueue.waitingBytes = 0;
pendingDeletes.clear();
for (DocumentsWriterThreadState threadState : threadStates)
try {
threadState.consumer.abort();
} catch (Throwable t) {
}
try {
consumer.abort();
} catch (Throwable t) {
}
// Reset all postings data
doAfterFlush();
success = true;
} finally {
aborting = false;
notifyAll();
if (infoStream != null) {
message("docWriter: done abort; success=" + success);
}
}
}
/** Reset after a flush */
private void doAfterFlush() throws IOException {
// All ThreadStates should be idle when we are called
assert allThreadsIdle();
threadBindings.clear();
waitQueue.reset();
segment = null;
numDocs = 0;
nextDocID = 0;
bufferIsFull = false;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].doAfterFlush();
}
}
/** Returns true only when no ThreadState is actively indexing a document. */
private synchronized boolean allThreadsIdle() {
    for (DocumentsWriterThreadState state : threadStates) {
        if (!state.isIdle) {
            return false;
        }
    }
    return true;
}
/** True if there are buffered docs or buffered deletes pending flush. */
synchronized boolean anyChanges() {
    return numDocs != 0 || pendingDeletes.any();
}
// for testing
public SegmentDeletes getPendingDeletes() {
return pendingDeletes;
}
private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
// Lock order: DW -> BD
if (pendingDeletes.any()) {
if (newSegment != null) {
if (infoStream != null) {
message("flush: push buffered deletes to newSegment");
}
bufferedDeletes.pushDeletes(pendingDeletes, newSegment);
} else if (segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
pendingDeletes = new SegmentDeletes();
}
}
public boolean anyDeletions() {
return pendingDeletes.any();
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
final long startTime = System.currentTimeMillis();
// We change writer's segmentInfos:
assert Thread.holdsLock(writer);
waitIdle();
if (numDocs == 0) {
// nothing to do!
if (infoStream != null) {
message("flush: no docs; skipping");
}
// Lock order: IW -> DW -> BD
pushDeletes(null, segmentInfos);
return null;
}
if (aborting) {
if (infoStream != null) {
message("flush: skip because aborting is set");
}
return null;
}
boolean success = false;
SegmentInfo newSegment;
try {
assert nextDocID == numDocs;
assert waitQueue.numWaiting == 0;
assert waitQueue.waitingBytes == 0;
if (infoStream != null) {
message("flush postings as segment " + segment + " numDocs=" + numDocs);
}
final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
numDocs, writer.getConfig().getTermIndexInterval(),
SegmentCodecs.build(fieldInfos, writer.codecs));
newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
for (DocumentsWriterThreadState threadState : threadStates) {
threads.add(threadState.consumer);
}
double startMBUsed = bytesUsed()/1024./1024.;
consumer.flush(threads, flushState);
newSegment.setHasVectors(flushState.hasVectors);
if (infoStream != null) {
message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
message("flushedFiles=" + newSegment.files());
message("flushed codecs=" + newSegment.getSegmentCodecs());
}
if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
if (infoStream != null) {
message("flush: create compound file \"" + cfsFileName + "\"");
}
CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
for(String fileName : newSegment.files()) {
cfsWriter.addFile(fileName);
}
cfsWriter.close();
deleter.deleteNewFiles(newSegment.files());
newSegment.setUseCompoundFile(true);
}
if (infoStream != null) {
message("flush: segment=" + newSegment);
final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
message(" ramUsed=" + nf.format(startMBUsed) + " MB" +
" newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
" (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
" docs/MB=" + nf.format(numDocs / newSegmentSize) +
" new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
}
success = true;
} finally {
notifyAll();
if (!success) {
if (segment != null) {
deleter.refresh(segment);
}
abort();
}
}
doAfterFlush();
// Lock order: IW -> DW -> BD
pushDeletes(newSegment, segmentInfos);
if (infoStream != null) {
message("flush time " + (System.currentTimeMillis()-startTime) + " msec");
}
return newSegment;
}
/** Marks this writer closed and wakes all waiting threads so they can
 * observe the closed state (waitReady then throws AlreadyClosedException). */
synchronized void close() {
    closed = true;
    notifyAll();
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
final Thread currentThread = Thread.currentThread();
assert !Thread.holdsLock(writer);
// First, find a thread state. If this thread already
// has affinity to a specific ThreadState, use that one
// again.
DocumentsWriterThreadState state = threadBindings.get(currentThread);
if (state == null) {
// First time this thread has called us since last
// flush. Find the least loaded thread state:
DocumentsWriterThreadState minThreadState = null;
for(int i=0;i<threadStates.length;i++) {
DocumentsWriterThreadState ts = threadStates[i];
if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
minThreadState = ts;
}
}
if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
state = minThreadState;
state.numThreads++;
} else {
// Just create a new "private" thread state
DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
if (threadStates.length > 0) {
System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
}
state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
threadStates = newArray;
}
threadBindings.put(currentThread, state);
}
// Next, wait until my thread state is idle (in case
// it's shared with other threads), and no flush/abort
// pending
waitReady(state);
// Allocate segment name if this is the first doc since
// last flush:
if (segment == null) {
segment = writer.newSegmentName();
assert numDocs == 0;
}
state.docState.docID = nextDocID++;
if (delTerm != null) {
pendingDeletes.addTerm(delTerm, state.docState.docID);
}
numDocs++;
state.isIdle = false;
return state;
}
boolean addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
return updateDocument(doc, analyzer, null);
}
boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
throws CorruptIndexException, IOException {
// Possibly trigger a flush, or wait until any running flush completes:
boolean doFlush = flushControl.waitUpdate(1, delTerm != null ? 1 : 0);
// This call is synchronized but fast
final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
final DocState docState = state.docState;
docState.doc = doc;
docState.analyzer = analyzer;
boolean success = false;
try {
// This call is not synchronized and does all the
// work
final DocWriter perDoc;
try {
perDoc = state.consumer.processDocument();
} finally {
docState.clear();
}
// This call is synchronized but fast
finishDocument(state, perDoc);
success = true;
} finally {
if (!success) {
// If this thread state had decided to flush, we
// must clear it so another thread can flush
if (doFlush) {
flushControl.clearFlushPending();
}
if (infoStream != null) {
message("exception in updateDocument aborting=" + aborting);
}
synchronized(this) {
state.isIdle = true;
notifyAll();
if (aborting) {
abort();
} else {
skipDocWriter.docID = docState.docID;
boolean success2 = false;
try {
waitQueue.add(skipDocWriter);
success2 = true;
} finally {
if (!success2) {
abort();
return false;
}
}
// Immediately mark this document as deleted
// since likely it was partially added. This
// keeps indexing as "all or none" (atomic) when
// adding a document:
deleteDocID(state.docState.docID);
}
}
}
}
doFlush |= flushControl.flushByRAMUsage("new document");
return doFlush;
}
/** Blocks until every ThreadState is idle, i.e. no thread is mid-way
 * through adding/updating a document.  An interrupt is rethrown as the
 * unchecked ThreadInterruptedException. */
public synchronized void waitIdle() {
    while (!allThreadsIdle()) {
        try {
            wait();
        } catch (InterruptedException ie) {
            throw new ThreadInterruptedException(ie);
        }
    }
}
synchronized void waitReady(DocumentsWriterThreadState state) {
while (!closed && (!state.isIdle || aborting)) {
try {
wait();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
}
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Does the synchronized work to finish/flush the
* inverted document. */
private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
// Must call this w/o holding synchronized(this) else
// we'll hit deadlock:
balanceRAM();
synchronized(this) {
assert docWriter == null || docWriter.docID == perThread.docState.docID;
if (aborting) {
// We are currently aborting, and another thread is
// waiting for me to become idle. We just forcefully
// idle this threadState; it will be fully reset by
// abort()
if (docWriter != null) {
try {
docWriter.abort();
} catch (Throwable t) {
}
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
return;
}
final boolean doPause;
if (docWriter != null) {
doPause = waitQueue.add(docWriter);
} else {
skipDocWriter.docID = perThread.docState.docID;
doPause = waitQueue.add(skipDocWriter);
}
if (doPause) {
waitForWaitQueue();
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
}
}
/** Blocks the calling thread until the wait queue drains below the
 * resume threshold (waitQueue.doResume()).  An interrupt is rethrown
 * as the unchecked ThreadInterruptedException. */
synchronized void waitForWaitQueue() {
    do {
        try {
            wait();
        } catch (InterruptedException ie) {
            throw new ThreadInterruptedException(ie);
        }
    } while (!waitQueue.doResume());
}
private static class SkipDocWriter extends DocWriter {
@Override
void finish() {
}
@Override
void abort() {
}
@Override
long sizeInBytes() {
return 0;
}
}
final SkipDocWriter skipDocWriter = new SkipDocWriter();
NumberFormat nf = NumberFormat.getInstance();
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
/* Initial chunks size of the shared int[] blocks used to
store postings data */
final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
private List<int[]> freeIntBlocks = new ArrayList<int[]>();
/* Allocate another int[] from the shared pool, reusing a recycled
 * block when one is available.  Only fresh allocations are charged to
 * bytesUsed; recycled blocks were already counted. */
synchronized int[] getIntBlock() {
    final int size = freeIntBlocks.size();
    final int[] b;
    if (0 == size) {
        b = new int[INT_BLOCK_SIZE];
        bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
    } else {
        b = freeIntBlocks.remove(size-1);
    }
    return b;
}
long bytesUsed() {
return bytesUsed.get() + pendingDeletes.bytesUsed.get();
}
/* Return the int[] buffers in [start, end) to the shared pool and
 * clear the caller's references so they can be garbage-collected. */
synchronized void recycleIntBlocks(int[][] blocks, int start, int end) {
    for (int idx = start; idx < end; idx++) {
        final int[] block = blocks[idx];
        blocks[idx] = null;
        freeIntBlocks.add(block);
    }
}
final RecyclingByteBlockAllocator byteBlockAllocator = new RecyclingByteBlockAllocator(BYTE_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
final static int PER_DOC_BLOCK_SIZE = 1024;
final RecyclingByteBlockAllocator perDocAllocator = new RecyclingByteBlockAllocator(PER_DOC_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
/** Formats a byte count as megabytes using the shared NumberFormat. */
String toMB(long v) {
    final double megabytes = v / 1024. / 1024.;
    return nf.format(megabytes);
}
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
void balanceRAM() {
final boolean doBalance;
final long deletesRAMUsed;
deletesRAMUsed = bufferedDeletes.bytesUsed();
synchronized(this) {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
return;
}
doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
}
if (doBalance) {
if (infoStream != null) {
message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
" vs trigger=" + toMB(ramBufferSize) +
" deletesMB=" + toMB(deletesRAMUsed) +
" byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
" perDocFree=" + toMB(perDocAllocator.bytesUsed()));
}
final long startBytesUsed = bytesUsed() + deletesRAMUsed;
int iter = 0;
// We free equally from each pool in 32 KB
// chunks until we are below our threshold
// (freeLevel)
boolean any = true;
while(bytesUsed()+deletesRAMUsed > freeLevel) {
synchronized(this) {
if (0 == perDocAllocator.numBufferedBlocks() &&
0 == byteBlockAllocator.numBufferedBlocks() &&
0 == freeIntBlocks.size() && !any) {
// Nothing else to free -- must flush now.
bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
if (infoStream != null) {
if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
message(" nothing to free; set bufferIsFull");
} else {
message(" nothing to free");
}
}
break;
}
if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
byteBlockAllocator.freeBlocks(1);
}
if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
freeIntBlocks.remove(freeIntBlocks.size()-1);
bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
}
if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
}
}
if ((3 == iter % 4) && any) {
// Ask consumer to free any recycled state
any = consumer.freeRAM();
}
iter++;
}
if (infoStream != null) {
message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
}
}
}
final WaitQueue waitQueue = new WaitQueue();
private class WaitQueue {
DocWriter[] waiting;
int nextWriteDocID;
int nextWriteLoc;
int numWaiting;
long waitingBytes;
public WaitQueue() {
waiting = new DocWriter[10];
}
synchronized void reset() {
// NOTE: nextWriteLoc doesn't need to be reset
assert numWaiting == 0;
assert waitingBytes == 0;
nextWriteDocID = 0;
}
synchronized boolean doResume() {
return waitingBytes <= waitQueueResumeBytes;
}
synchronized boolean doPause() {
return waitingBytes > waitQueuePauseBytes;
}
synchronized void abort() {
int count = 0;
for(int i=0;i<waiting.length;i++) {
final DocWriter doc = waiting[i];
if (doc != null) {
doc.abort();
waiting[i] = null;
count++;
}
}
waitingBytes = 0;
assert count == numWaiting;
numWaiting = 0;
}
private void writeDocument(DocWriter doc) throws IOException {
assert doc == skipDocWriter || nextWriteDocID == doc.docID;
boolean success = false;
try {
doc.finish();
nextWriteDocID++;
nextWriteLoc++;
assert nextWriteLoc <= waiting.length;
if (nextWriteLoc == waiting.length) {
nextWriteLoc = 0;
}
success = true;
} finally {
if (!success) {
setAborting();
}
}
}
synchronized public boolean add(DocWriter doc) throws IOException {
assert doc.docID >= nextWriteDocID;
if (doc.docID == nextWriteDocID) {
writeDocument(doc);
while(true) {
doc = waiting[nextWriteLoc];
if (doc != null) {
numWaiting--;
waiting[nextWriteLoc] = null;
waitingBytes -= doc.sizeInBytes();
writeDocument(doc);
} else {
break;
}
}
} else {
// I finished before documents that were added
// before me. This can easily happen when I am a
// small doc and the docs before me were large, or,
// just due to luck in the thread scheduling. Just
// add myself to the queue and when that large doc
// finishes, it will flush me:
int gap = doc.docID - nextWriteDocID;
if (gap >= waiting.length) {
// Grow queue
DocWriter[] newArray = new DocWriter[ArrayUtil.oversize(gap, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
assert nextWriteLoc >= 0;
System.arraycopy(waiting, nextWriteLoc, newArray, 0, waiting.length-nextWriteLoc);
System.arraycopy(waiting, 0, newArray, waiting.length-nextWriteLoc, nextWriteLoc);
nextWriteLoc = 0;
waiting = newArray;
gap = doc.docID - nextWriteDocID;
}
int loc = nextWriteLoc + gap;
if (loc >= waiting.length) {
loc -= waiting.length;
}
// We should only wrap one time
assert loc < waiting.length;
// Nobody should be in my spot!
assert waiting[loc] == null;
waiting[loc] = doc;
numWaiting++;
waitingBytes += doc.sizeInBytes();
}
return doPause();
}
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMFile;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RecyclingByteBlockAllocator;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.RamUsageEstimator;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// RAM accounted to buffered postings/doc data (shared with block allocators).
final AtomicLong bytesUsed = new AtomicLong(0);
IndexWriter writer;
Directory directory;
String segment; // Current segment we are working on
private int nextDocID; // Next docID to be added
private int numDocs; // # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
// Thread-affinity map: each indexing thread reuses the same ThreadState.
private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
boolean bufferIsFull; // True when it's time to write segment
private boolean aborting; // True if an abort is pending
PrintStream infoStream; // If non-null, diagnostic messages are written here
Similarity similarity;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
private final int maxThreadStates;
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
/** Per-thread state for the document currently being processed:
 * the doc itself, its docID, and the analyzer/similarity/infoStream
 * in effect while it is indexed. */
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
Similarity similarity;
int docID;
Document doc;
String maxTermPrefix;
// Only called by asserts
public boolean testPoint(String name) {
return docWriter.writer.testPoint(name);
}
public void clear() {
// don't hold onto doc nor analyzer, in case it is
// largish:
doc = null;
analyzer = null;
}
}
/** Consumer returns this on each doc. This holds any
 * state that must be flushed synchronized "in docID
 * order". We gather these and flush them in order. */
abstract static class DocWriter {
DocWriter next; // intrusive linked list of per-doc writers
int docID;
// Write this doc's buffered bytes to the directory.
abstract void finish() throws IOException;
// Discard this doc's buffered bytes.
abstract void abort();
abstract long sizeInBytes();
void setNext(DocWriter next) {
this.next = next;
}
}
/**
 * Create and return a new DocWriterBuffer.
 */
PerDocBuffer newPerDocBuffer() {
return new PerDocBuffer();
}
/**
 * RAMFile buffer for DocWriters. Blocks come from (and return to) the
 * shared perDocAllocator pool rather than being allocated per buffer.
 */
@SuppressWarnings("serial")
class PerDocBuffer extends RAMFile {
/**
 * Allocate bytes used from shared pool.
 */
@Override
protected byte[] newBuffer(int size) {
assert size == PER_DOC_BLOCK_SIZE;
return perDocAllocator.getByteBlock();
}
/**
 * Recycle the bytes used.
 */
synchronized void recycle() {
if (buffers.size() > 0) {
setLength(0);
// Recycle the blocks
perDocAllocator.recycleByteBlocks(buffers);
buffers.clear();
sizeInBytes = 0;
assert numBuffers() == 0;
}
}
}
/**
 * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
 * which returns the DocConsumer that the DocumentsWriter calls to process the
 * documents.
 */
abstract static class IndexingChain {
abstract DocConsumer getChain(DocumentsWriter documentsWriter);
}
// Default chain wiring: field processing -> inversion -> terms hash
// (freq/prox + term vectors) and norms, plus stored fields.
static final IndexingChain defaultIndexingChain = new IndexingChain() {
@Override
DocConsumer getChain(DocumentsWriter documentsWriter) {
/*
This is the current indexing chain:
DocConsumer / DocConsumerPerThread
--> code: DocFieldProcessor / DocFieldProcessorPerThread
--> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
--> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
--> code: DocInverter / DocInverterPerThread / DocInverterPerField
--> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: TermsHash / TermsHashPerThread / TermsHashPerField
--> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
--> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
--> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
--> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
--> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
*/
// Build up indexing chain:
final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
/*
* nesting TermsHash instances here to allow the secondary (TermVectors) share the interned postings
* via a shared ByteBlockPool. See TermsHashPerField for details.
*/
final TermsHash termVectorsTermHash = new TermsHash(documentsWriter, false, termVectorsWriter, null);
final InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, termVectorsTermHash);
final NormsWriter normsWriter = new NormsWriter();
final DocInverter docInverter = new DocInverter(termsHash, normsWriter);
return new DocFieldProcessor(documentsWriter, docInverter);
}
};
final DocConsumer consumer;
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Wait-queue back-pressure thresholds: pause producers at 10% of the
// RAM budget, resume at 5%.
private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private boolean closed;
private final FieldInfos fieldInfos;
private final BufferedDeletes bufferedDeletes;
private final IndexWriter.FlushControl flushControl;
/** Wires this writer to its owning IndexWriter, its flush control, and
 * the DocConsumer chain built by {@code indexingChain}. */
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarity = writer.getConfig().getSimilarity();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
synchronized void deleteDocID(int docIDUpto) {
pendingDeletes.addDocID(docIDUpto);
// NOTE: we do not trigger flush here. This is
// potentially a RAM leak, if you have an app that tries
// to add docs but every single doc always hits a
// non-aborting exception. Allowing a flush here gets
// very messy because we are only invoked when handling
// exceptions so to do this properly, while handling an
// exception we'd have to go off and flush new deletes
// which is risky (likely would hit some other
// confounding exception).
}
/** Buffers delete-by-query for each query; returns true if a flush is
 * now required. Note: flushControl is consulted before taking this
 * writer's monitor (lock order). */
boolean deleteQueries(Query... queries) {
final boolean doFlush = flushControl.waitUpdate(0, queries.length);
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, numDocs);
}
}
return doFlush;
}
/** Single-query variant of {@link #deleteQueries}. */
boolean deleteQuery(Query query) {
final boolean doFlush = flushControl.waitUpdate(0, 1);
synchronized(this) {
pendingDeletes.addQuery(query, numDocs);
}
return doFlush;
}
/** Buffers delete-by-term for each term; returns true if a flush is
 * now required. */
boolean deleteTerms(Term... terms) {
final boolean doFlush = flushControl.waitUpdate(0, terms.length);
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, numDocs);
}
}
return doFlush;
}
/** Single-term variant; {@code skipWait} is passed through to
 * flushControl.waitUpdate. */
boolean deleteTerm(Term term, boolean skipWait) {
final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
synchronized(this) {
pendingDeletes.addTerm(term, numDocs);
}
return doFlush;
}
/** Returns the FieldInfos shared by this writer's indexing chain. */
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Sets the stream that receives diagnostic output (null disables it),
 * and propagates it to every existing per-thread doc state. */
synchronized void setInfoStream(PrintStream infoStream) {
  this.infoStream = infoStream;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.infoStream = infoStream;
  }
}
/** Installs a new Similarity and pushes it into every existing
 * per-thread doc state. */
synchronized void setSimilarity(Similarity similarity) {
  this.similarity = similarity;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.similarity = similarity;
  }
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
// With auto-flush disabled, fall back to fixed 4 MB / 2 MB
// pause/resume thresholds for the wait queue.
waitQueuePauseBytes = 4*1024*1024;
waitQueueResumeBytes = 2*1024*1024;
} else {
ramBufferSize = (long) (mb*1024*1024);
// Derived thresholds: pause at 10%, resume at 5%, free down to 95%.
waitQueuePauseBytes = (long) (ramBufferSize*0.1);
waitQueueResumeBytes = (long) (ramBufferSize*0.05);
freeLevel = (long) (0.95 * ramBufferSize);
}
}
/** Returns the RAM budget in MB, or the DISABLE_AUTO_FLUSH sentinel
 * unchanged when auto-flush is off. */
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
 * doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
synchronized String getSegment() {
return segment;
}
/** Returns how many docs are currently buffered in RAM. */
synchronized int getNumDocs() {
return numDocs;
}
/** Emits a diagnostic line via the owning IndexWriter, prefixed "DW: ".
 * No-op when infoStream is null. */
void message(String message) {
if (infoStream != null) {
writer.message("DW: " + message);
}
}
/** Marks this writer as aborting; in-flight documents will be discarded
 * by {@link #abort}. */
synchronized void setAborting() {
if (infoStream != null) {
message("setAborting");
}
aborting = true;
}
/** Called if we hit an exception at a bad time (when
 * updating the index files) and must discard all
 * currently buffered docs. This resets our state,
 * discarding any docs added since last flush. */
synchronized void abort() throws IOException {
if (infoStream != null) {
message("docWriter: abort");
}
boolean success = false;
try {
// Forcefully remove waiting ThreadStates from line
waitQueue.abort();
// Wait for all other threads to finish with
// DocumentsWriter:
waitIdle();
if (infoStream != null) {
message("docWriter: abort waitIdle done");
}
assert 0 == waitQueue.numWaiting: "waitQueue.numWaiting=" + waitQueue.numWaiting;
waitQueue.waitingBytes = 0;
pendingDeletes.clear();
for (DocumentsWriterThreadState threadState : threadStates)
try {
threadState.consumer.abort();
} catch (Throwable t) {
// NOTE(review): deliberately swallowed so every consumer gets a
// chance to abort; confirm nothing here should at least be logged.
}
try {
consumer.abort();
} catch (Throwable t) {
// Deliberately ignored: abort must proceed regardless.
}
// Reset all postings data
doAfterFlush();
success = true;
} finally {
// Always clear the aborting flag and wake blocked threads, even
// if the abort itself failed.
aborting = false;
notifyAll();
if (infoStream != null) {
message("docWriter: done abort; success=" + success);
}
}
}
/** Reset after a flush: clears thread bindings, the wait queue, the
 * current segment name, and all per-thread buffered state. */
private void doAfterFlush() throws IOException {
// All ThreadStates should be idle when we are called
assert allThreadsIdle();
threadBindings.clear();
waitQueue.reset();
segment = null;
numDocs = 0;
nextDocID = 0;
bufferIsFull = false;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].doAfterFlush();
}
}
/** True only when every allocated ThreadState is idle (no document
 * currently being processed by any indexing thread). */
private synchronized boolean allThreadsIdle() {
  for (DocumentsWriterThreadState threadState : threadStates) {
    if (!threadState.isIdle) {
      return false;
    }
  }
  return true;
}
/** True if there are buffered docs or buffered deletes to flush. */
synchronized boolean anyChanges() {
return numDocs != 0 || pendingDeletes.any();
}
// for testing
public SegmentDeletes getPendingDeletes() {
return pendingDeletes;
}
/** Hands the buffered deletes to BufferedDeletes: against the newly
 * flushed segment if there is one, else against the last existing
 * segment; with no segments at all they are safely dropped. */
private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
// Lock order: DW -> BD
if (pendingDeletes.any()) {
if (newSegment != null) {
if (infoStream != null) {
message("flush: push buffered deletes to newSegment");
}
bufferedDeletes.pushDeletes(pendingDeletes, newSegment);
} else if (segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
// Start a fresh accumulator for the next segment's deletes.
pendingDeletes = new SegmentDeletes();
}
}
/** True if any deletes are buffered for the in-RAM segment. */
public boolean anyDeletions() {
return pendingDeletes.any();
}
/** Flush all pending docs to a new segment. Waits for all indexing
 * threads to go idle, writes the segment via the consumer chain,
 * optionally wraps it in a compound file, then resets buffered state
 * and pushes buffered deletes. Returns null if there was nothing to
 * flush or an abort is pending. */
// Lock order: IW -> DW
synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
final long startTime = System.currentTimeMillis();
// We change writer's segmentInfos:
assert Thread.holdsLock(writer);
waitIdle();
if (numDocs == 0) {
// nothing to do!
if (infoStream != null) {
message("flush: no docs; skipping");
}
// Lock order: IW -> DW -> BD
pushDeletes(null, segmentInfos);
return null;
}
if (aborting) {
if (infoStream != null) {
message("flush: skip because aborting is set");
}
return null;
}
boolean success = false;
SegmentInfo newSegment;
try {
// All docs must have been handed off in order before a flush.
assert nextDocID == numDocs;
assert waitQueue.numWaiting == 0;
assert waitQueue.waitingBytes == 0;
if (infoStream != null) {
message("flush postings as segment " + segment + " numDocs=" + numDocs);
}
final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
numDocs, writer.getConfig().getTermIndexInterval(),
SegmentCodecs.build(fieldInfos, writer.codecs));
newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
for (DocumentsWriterThreadState threadState : threadStates) {
threads.add(threadState.consumer);
}
double startMBUsed = bytesUsed()/1024./1024.;
consumer.flush(threads, flushState);
newSegment.setHasVectors(flushState.hasVectors);
if (infoStream != null) {
message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
message("flushedFiles=" + newSegment.files());
message("flushed codecs=" + newSegment.getSegmentCodecs());
}
if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
if (infoStream != null) {
message("flush: create compound file \"" + cfsFileName + "\"");
}
CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
for(String fileName : newSegment.files()) {
cfsWriter.addFile(fileName);
}
cfsWriter.close();
// The individual files are now packed into the CFS; drop them.
deleter.deleteNewFiles(newSegment.files());
newSegment.setUseCompoundFile(true);
}
if (infoStream != null) {
message("flush: segment=" + newSegment);
final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
message(" ramUsed=" + nf.format(startMBUsed) + " MB" +
" newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
" (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
" docs/MB=" + nf.format(numDocs / newSegmentSize) +
" new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
}
success = true;
} finally {
notifyAll();
if (!success) {
// On failure, remove partially written files and discard all
// buffered docs.
if (segment != null) {
deleter.refresh(segment);
}
abort();
}
}
doAfterFlush();
// Lock order: IW -> DW -> BD
pushDeletes(newSegment, segmentInfos);
if (infoStream != null) {
message("flush time " + (System.currentTimeMillis()-startTime) + " msec");
}
return newSegment;
}
/** Marks this writer closed and wakes any threads blocked in
 * waitReady/waitIdle so they can observe the closed state. */
synchronized void close() {
closed = true;
notifyAll();
}
/** Returns a free (idle) ThreadState that may be used for
 * indexing this one document. This call also pauses if a
 * flush is pending. If delTerm is non-null then we
 * buffer this deleted term after the thread state has
 * been acquired. */
synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
final Thread currentThread = Thread.currentThread();
// Must not hold IW's lock here (lock order is IW -> DW).
assert !Thread.holdsLock(writer);
// First, find a thread state. If this thread already
// has affinity to a specific ThreadState, use that one
// again.
DocumentsWriterThreadState state = threadBindings.get(currentThread);
if (state == null) {
// First time this thread has called us since last
// flush. Find the least loaded thread state:
DocumentsWriterThreadState minThreadState = null;
for(int i=0;i<threadStates.length;i++) {
DocumentsWriterThreadState ts = threadStates[i];
if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
minThreadState = ts;
}
}
// Share an existing state if it is unused or we are at the cap.
if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
state = minThreadState;
state.numThreads++;
} else {
// Just create a new "private" thread state
DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
if (threadStates.length > 0) {
System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
}
state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
threadStates = newArray;
}
threadBindings.put(currentThread, state);
}
// Next, wait until my thread state is idle (in case
// it's shared with other threads), and no flush/abort
// pending
waitReady(state);
// Allocate segment name if this is the first doc since
// last flush:
if (segment == null) {
segment = writer.newSegmentName();
assert numDocs == 0;
}
state.docState.docID = nextDocID++;
if (delTerm != null) {
pendingDeletes.addTerm(delTerm, state.docState.docID);
}
numDocs++;
state.isIdle = false;
return state;
}
/** Adds a document; equivalent to updateDocument with no delete term.
 * @return true if the caller (IndexWriter) should trigger a flush. */
boolean addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
return updateDocument(doc, analyzer, null);
}
/** Indexes one document (optionally buffering a delete term for its
 * predecessor). Processing runs outside this writer's monitor; only
 * acquiring the thread state and finishing the doc are synchronized.
 * On failure the doc is marked deleted so adds stay atomic.
 * @return true if the caller should trigger a flush. */
boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
throws CorruptIndexException, IOException {
// Possibly trigger a flush, or wait until any running flush completes:
boolean doFlush = flushControl.waitUpdate(1, delTerm != null ? 1 : 0);
// This call is synchronized but fast
final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
final DocState docState = state.docState;
docState.doc = doc;
docState.analyzer = analyzer;
boolean success = false;
try {
// This call is not synchronized and does all the
// work
final DocWriter perDoc;
try {
perDoc = state.consumer.processDocument();
} finally {
docState.clear();
}
// This call is synchronized but fast
finishDocument(state, perDoc);
success = true;
} finally {
if (!success) {
// If this thread state had decided to flush, we
// must clear it so another thread can flush
if (doFlush) {
flushControl.clearFlushPending();
}
if (infoStream != null) {
message("exception in updateDocument aborting=" + aborting);
}
synchronized(this) {
state.isIdle = true;
notifyAll();
if (aborting) {
abort();
} else {
// Non-aborting exception: keep docID ordering intact by
// pushing a placeholder through the wait queue.
skipDocWriter.docID = docState.docID;
boolean success2 = false;
try {
waitQueue.add(skipDocWriter);
success2 = true;
} finally {
if (!success2) {
abort();
return false;
}
}
// Immediately mark this document as deleted
// since likely it was partially added. This
// keeps indexing as "all or none" (atomic) when
// adding a document:
deleteDocID(state.docState.docID);
}
}
}
}
doFlush |= flushControl.flushByRAMUsage("new document");
return doFlush;
}
/** Blocks until every ThreadState is idle. Wakeups come from
 * notifyAll() in finishDocument/close/abort/flush. */
public synchronized void waitIdle() {
while (!allThreadsIdle()) {
try {
wait();
} catch (InterruptedException ie) {
// Lucene convention: surface interrupts as unchecked.
throw new ThreadInterruptedException(ie);
}
}
}
/** Blocks until the given (possibly shared) ThreadState is idle and no
 * abort is pending; throws if the writer was closed meanwhile. */
synchronized void waitReady(DocumentsWriterThreadState state) {
while (!closed && (!state.isIdle || aborting)) {
try {
wait();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
}
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Does the synchronized work to finish/flush the
 * inverted document: hands the per-doc writer to the wait queue (or a
 * skip placeholder when there is none), pausing if the queue is over
 * budget, then marks the thread state idle. */
private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
// Must call this w/o holding synchronized(this) else
// we'll hit deadlock:
balanceRAM();
synchronized(this) {
assert docWriter == null || docWriter.docID == perThread.docState.docID;
if (aborting) {
// We are currently aborting, and another thread is
// waiting for me to become idle. We just forcefully
// idle this threadState; it will be fully reset by
// abort()
if (docWriter != null) {
try {
docWriter.abort();
} catch (Throwable t) {
// Deliberately ignored: abort path must not fail here.
}
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
return;
}
final boolean doPause;
if (docWriter != null) {
doPause = waitQueue.add(docWriter);
} else {
skipDocWriter.docID = perThread.docState.docID;
doPause = waitQueue.add(skipDocWriter);
}
if (doPause) {
waitForWaitQueue();
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
}
}
/** Blocks until the wait queue drains below its resume threshold. */
synchronized void waitForWaitQueue() {
do {
try {
wait();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
} while (!waitQueue.doResume());
}
/** No-op DocWriter used as a placeholder in the wait queue for docs that
 * produced no per-doc state (or failed non-abortingly), so docID
 * ordering is preserved. */
private static class SkipDocWriter extends DocWriter {
@Override
void finish() {
}
@Override
void abort() {
}
@Override
long sizeInBytes() {
return 0;
}
}
// Shared singleton; its docID is overwritten before each use.
final SkipDocWriter skipDocWriter = new SkipDocWriter();
NumberFormat nf = NumberFormat.getInstance();
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
/* Initial chunks size of the shared int[] blocks used to
store postings data */
final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
/* Allocate another int[] from the shared pool */
synchronized int[] getIntBlock() {
final int size = freeIntBlocks.size();
final int[] b;
if (0 == size) {
b = new int[INT_BLOCK_SIZE];
bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
} else {
b = freeIntBlocks.remove(size-1);
}
return b;
}
long bytesUsed() {
return bytesUsed.get() + pendingDeletes.bytesUsed.get();
}
/* Return int[]s to the pool */
synchronized void recycleIntBlocks(int[][] blocks, int start, int end) {
for(int i=start;i<end;i++) {
freeIntBlocks.add(blocks[i]);
blocks[i] = null;
}
}
final RecyclingByteBlockAllocator byteBlockAllocator = new RecyclingByteBlockAllocator(BYTE_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
final static int PER_DOC_BLOCK_SIZE = 1024;
final RecyclingByteBlockAllocator perDocAllocator = new RecyclingByteBlockAllocator(PER_DOC_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
String toMB(long v) {
return nf.format(v/1024./1024.);
}
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
void balanceRAM() {
final boolean doBalance;
final long deletesRAMUsed;
deletesRAMUsed = bufferedDeletes.bytesUsed();
synchronized(this) {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
return;
}
doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
}
if (doBalance) {
if (infoStream != null) {
message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
" vs trigger=" + toMB(ramBufferSize) +
" deletesMB=" + toMB(deletesRAMUsed) +
" byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
" perDocFree=" + toMB(perDocAllocator.bytesUsed()));
}
final long startBytesUsed = bytesUsed() + deletesRAMUsed;
int iter = 0;
// We free equally from each pool in 32 KB
// chunks until we are below our threshold
// (freeLevel)
boolean any = true;
while(bytesUsed()+deletesRAMUsed > freeLevel) {
synchronized(this) {
if (0 == perDocAllocator.numBufferedBlocks() &&
0 == byteBlockAllocator.numBufferedBlocks() &&
0 == freeIntBlocks.size() && !any) {
// Nothing else to free -- must flush now.
bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
if (infoStream != null) {
if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
message(" nothing to free; set bufferIsFull");
} else {
message(" nothing to free");
}
}
break;
}
// Rotate through the pools (iter % 4) so we shrink them evenly.
if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
byteBlockAllocator.freeBlocks(1);
}
if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
freeIntBlocks.remove(freeIntBlocks.size()-1);
bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
}
if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
}
}
// NOTE: called outside synchronized(this) — consumer.freeRAM()
// must not be invoked while holding this writer's monitor.
if ((3 == iter % 4) && any) {
// Ask consumer to free any recycled state
any = consumer.freeRAM();
}
iter++;
}
if (infoStream != null) {
message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
}
}
}
final WaitQueue waitQueue = new WaitQueue();
/** Circular buffer that serializes finished docs back into docID order
 * before they are written, since threads may finish out of order. */
private class WaitQueue {
DocWriter[] waiting; // circular buffer, indexed by docID gap from nextWriteDocID
int nextWriteDocID; // next docID that must be written
int nextWriteLoc; // buffer slot corresponding to nextWriteDocID
int numWaiting; // occupied slots
long waitingBytes; // RAM held by queued docs
public WaitQueue() {
waiting = new DocWriter[10];
}
synchronized void reset() {
// NOTE: nextWriteLoc doesn't need to be reset
assert numWaiting == 0;
assert waitingBytes == 0;
nextWriteDocID = 0;
}
/** True when queued bytes have drained below the resume threshold. */
synchronized boolean doResume() {
return waitingBytes <= waitQueueResumeBytes;
}
/** True when queued bytes exceed the pause threshold. */
synchronized boolean doPause() {
return waitingBytes > waitQueuePauseBytes;
}
/** Aborts and drops every queued doc; see outer abort(). */
synchronized void abort() {
int count = 0;
for(int i=0;i<waiting.length;i++) {
final DocWriter doc = waiting[i];
if (doc != null) {
doc.abort();
waiting[i] = null;
count++;
}
}
waitingBytes = 0;
assert count == numWaiting;
numWaiting = 0;
}
/** Writes one doc in order; sets aborting on failure. */
private void writeDocument(DocWriter doc) throws IOException {
assert doc == skipDocWriter || nextWriteDocID == doc.docID;
boolean success = false;
try {
doc.finish();
nextWriteDocID++;
nextWriteLoc++;
assert nextWriteLoc <= waiting.length;
if (nextWriteLoc == waiting.length) {
nextWriteLoc = 0;
}
success = true;
} finally {
if (!success) {
setAborting();
}
}
}
/** Adds a finished doc; writes it (plus any now-unblocked successors)
 * if it is next in order, else parks it. Returns true if the caller
 * should pause. */
synchronized public boolean add(DocWriter doc) throws IOException {
assert doc.docID >= nextWriteDocID;
if (doc.docID == nextWriteDocID) {
writeDocument(doc);
while(true) {
doc = waiting[nextWriteLoc];
if (doc != null) {
numWaiting--;
waiting[nextWriteLoc] = null;
waitingBytes -= doc.sizeInBytes();
writeDocument(doc);
} else {
break;
}
}
} else {
// I finished before documents that were added
// before me. This can easily happen when I am a
// small doc and the docs before me were large, or,
// just due to luck in the thread scheduling. Just
// add myself to the queue and when that large doc
// finishes, it will flush me:
int gap = doc.docID - nextWriteDocID;
if (gap >= waiting.length) {
// Grow queue
DocWriter[] newArray = new DocWriter[ArrayUtil.oversize(gap, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
assert nextWriteLoc >= 0;
System.arraycopy(waiting, nextWriteLoc, newArray, 0, waiting.length-nextWriteLoc);
System.arraycopy(waiting, 0, newArray, waiting.length-nextWriteLoc, nextWriteLoc);
nextWriteLoc = 0;
waiting = newArray;
gap = doc.docID - nextWriteDocID;
}
int loc = nextWriteLoc + gap;
if (loc >= waiting.length) {
loc -= waiting.length;
}
// We should only wrap one time
assert loc < waiting.length;
// Nobody should be in my spot!
assert waiting[loc] == null;
waiting[loc] = doc;
numWaiting++;
waitingBytes += doc.sizeInBytes();
}
return doPause();
}
}
}
Left
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMFile;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RecyclingByteBlockAllocator;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.RamUsageEstimator;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
IndexWriter writer;
Directory directory;
String segment; // Current segment we are working on
private int nextDocID; // Next docID to be added
private int numDocs; // # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
boolean bufferIsFull; // True when it's time to write segment
private boolean aborting; // True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
private final int maxThreadStates;
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
/** Mutable per-thread state handed to the indexing chain while a
 * single document is being processed. */
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
SimilarityProvider similarityProvider;
int docID;   // docID assigned to the document currently being processed
Document doc;   // the document currently being indexed
String maxTermPrefix;   // NOTE(review): set elsewhere in the chain; presumably records an over-long term — confirm
// Only called by asserts
public boolean testPoint(String name) {
return docWriter.writer.testPoint(name);
}
public void clear() {
// don't hold onto doc nor analyzer, in case it is
// largish:
doc = null;
analyzer = null;
}
}
/** Consumer returns this on each doc. This holds any
* state that must be flushed synchronized "in docID
* order". We gather these and flush them in order. */
abstract static class DocWriter {
DocWriter next;   // next DocWriter in a chain (set via setNext)
int docID;        // the doc this buffered state belongs to
// Write this doc's buffered state out; called in docID order.
abstract void finish() throws IOException;
// Discard this doc's buffered state.
abstract void abort();
// RAM currently held by this doc's buffered state.
abstract long sizeInBytes();
void setNext(DocWriter next) {
this.next = next;
}
}
/**
* Create and return a new DocWriterBuffer.
*/
PerDocBuffer newPerDocBuffer() {
// Buffer blocks come from the shared perDocAllocator (see PerDocBuffer.newBuffer).
return new PerDocBuffer();
}
/**
* RAMFile buffer for DocWriters.
*/
@SuppressWarnings("serial")
class PerDocBuffer extends RAMFile {
/**
 * Allocate bytes used from shared pool.
 */
@Override
protected byte[] newBuffer(int size) {
// RAMFile must always ask for exactly one allocator-sized block.
assert size == PER_DOC_BLOCK_SIZE;
return perDocAllocator.getByteBlock();
}
/**
 * Recycle the bytes used.
 * Returns all blocks to the shared per-doc pool and resets this file
 * to zero length so it can be reused.
 */
synchronized void recycle() {
if (buffers.size() > 0) {
setLength(0);
// Recycle the blocks
perDocAllocator.recycleByteBlocks(buffers);
buffers.clear();
sizeInBytes = 0;
assert numBuffers() == 0;
}
}
}
/**
* The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
* which returns the DocConsumer that the DocumentsWriter calls to process the
* documents.
*/
abstract static class IndexingChain {
// Factory for the DocConsumer pipeline that processes each added document.
abstract DocConsumer getChain(DocumentsWriter documentsWriter);
}
static final IndexingChain defaultIndexingChain = new IndexingChain() {
@Override
DocConsumer getChain(DocumentsWriter documentsWriter) {
/*
This is the current indexing chain:
DocConsumer / DocConsumerPerThread
--> code: DocFieldProcessor / DocFieldProcessorPerThread
--> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
--> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
--> code: DocInverter / DocInverterPerThread / DocInverterPerField
--> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: TermsHash / TermsHashPerThread / TermsHashPerField
--> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
--> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
--> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
--> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
--> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
--> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
*/
// Build up indexing chain:
final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
/*
* nesting TermsHash instances here to allow the secondary (TermVectors) share the interned postings
* via a shared ByteBlockPool. See TermsHashPerField for details.
*/
final TermsHash termVectorsTermHash = new TermsHash(documentsWriter, false, termVectorsWriter, null);
final InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, termVectorsTermHash);
final NormsWriter normsWriter = new NormsWriter();
final DocInverter docInverter = new DocInverter(termsHash, normsWriter);
return new DocFieldProcessor(documentsWriter, docInverter);
}
};
final DocConsumer consumer;
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private boolean closed;
private final FieldInfos fieldInfos;
private final BufferedDeletes bufferedDeletes;
private final IndexWriter.FlushControl flushControl;
/** Sole constructor: wires this DocumentsWriter to its owning IndexWriter,
 * target directory, delete buffers, and indexing chain. */
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
// Build the DocConsumer pipeline that will process every added document.
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
/** Buffers a single docID for deletion; only used after a non-aborting
 * exception while adding a document, to keep that add atomic. */
synchronized void deleteDocID(int docIDUpto) {
pendingDeletes.addDocID(docIDUpto);
// NOTE: we do not trigger flush here. This is
// potentially a RAM leak, if you have an app that tries
// to add docs but every single doc always hits a
// non-aborting exception. Allowing a flush here gets
// very messy because we are only invoked when handling
// exceptions so to do this properly, while handling an
// exception we'd have to go off and flush new deletes
// which is risky (likely would hit some other
// confounding exception).
}
/** Buffers a delete-by-query for each supplied query, tagging each with
 * the current buffered doc count. Returns true when the caller should
 * trigger a flush. */
boolean deleteQueries(Query... queries) {
  final boolean shouldFlush = flushControl.waitUpdate(0, queries.length);
  synchronized (this) {
    for (int i = 0; i < queries.length; i++) {
      pendingDeletes.addQuery(queries[i], numDocs);
    }
  }
  return shouldFlush;
}
/** Buffers a single delete-by-query; returns true when a flush is needed. */
boolean deleteQuery(Query query) {
  final boolean shouldFlush = flushControl.waitUpdate(0, 1);
  synchronized (this) {
    pendingDeletes.addQuery(query, numDocs);
  }
  return shouldFlush;
}
/** Buffers a delete-by-term for each supplied term, tagging each with
 * the current buffered doc count. Returns true when the caller should
 * trigger a flush. */
boolean deleteTerms(Term... terms) {
  final boolean shouldFlush = flushControl.waitUpdate(0, terms.length);
  synchronized (this) {
    for (int i = 0; i < terms.length; i++) {
      pendingDeletes.addTerm(terms[i], numDocs);
    }
  }
  return shouldFlush;
}
/** Buffers a single delete-by-term; skipWait is forwarded to flush
 * control. Returns true when a flush is needed. */
boolean deleteTerm(Term term, boolean skipWait) {
  final boolean shouldFlush = flushControl.waitUpdate(0, 1, skipWait);
  synchronized (this) {
    pendingDeletes.addTerm(term, numDocs);
  }
  return shouldFlush;
}
/** Returns this writer's FieldInfos. */
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
  this.infoStream = infoStream;
  // Propagate to every existing per-thread doc state as well.
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.infoStream = infoStream;
  }
}
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
  this.similarityProvider = similarity;
  // Propagate to every existing per-thread doc state as well.
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.similarityProvider = similarity;
  }
}
/** Set how much RAM we can use before flushing. */
/** Set how much RAM we can use before flushing. Passing
 * DISABLE_AUTO_FLUSH disables RAM-based flushing and falls back to
 * fixed wait-queue thresholds. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
// Fixed thresholds since there is no RAM budget to derive them from.
waitQueuePauseBytes = 4*1024*1024;
waitQueueResumeBytes = 2*1024*1024;
} else {
ramBufferSize = (long) (mb*1024*1024);
// Pause producers at 10% of the budget queued, resume at 5%.
waitQueuePauseBytes = (long) (ramBufferSize*0.1);
waitQueueResumeBytes = (long) (ramBufferSize*0.05);
// balanceRAM frees down to 95% of the budget.
freeLevel = (long) (0.95 * ramBufferSize);
}
}
synchronized double getRAMBufferSizeMB() {
  // DISABLE_AUTO_FLUSH is a sentinel value and is returned unchanged.
  return ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH
      ? ramBufferSize
      : ramBufferSize / 1024. / 1024.;
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
// NOTE(review): unsynchronized write; presumably a racy read elsewhere only
// shifts when the next flush triggers — confirm.
maxBufferedDocs = count;
}
/** Returns the current doc-count flush trigger. */
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
synchronized String getSegment() {
// Null if no docs have been added since the last flush (see doAfterFlush).
return segment;
}
/** Returns how many docs are currently buffered in RAM. */
synchronized int getNumDocs() {
// Snapshot under lock; the value can change as soon as the lock is released.
return numDocs;
}
/** Writes a "DW: "-prefixed line to the infoStream, if one is set. */
void message(String message) {
if (infoStream != null) {
writer.message("DW: " + message);
}
}
/** Flags that an aborting exception was hit; abort() must then discard
 * all buffered docs before indexing can continue. */
synchronized void setAborting() {
if (infoStream != null) {
message("setAborting");
}
aborting = true;
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
if (infoStream != null) {
message("docWriter: abort");
}
boolean success = false;
try {
// Forcefully remove waiting ThreadStates from line
waitQueue.abort();
// Wait for all other threads to finish with
// DocumentsWriter:
waitIdle();
if (infoStream != null) {
message("docWriter: abort waitIdle done");
}
assert 0 == waitQueue.numWaiting: "waitQueue.numWaiting=" + waitQueue.numWaiting;
waitQueue.waitingBytes = 0;
pendingDeletes.clear();
// Best-effort per-thread cleanup: suppress secondary exceptions so the
// remaining threads and the global consumer still get aborted.
for (DocumentsWriterThreadState threadState : threadStates)
try {
threadState.consumer.abort();
} catch (Throwable t) {
}
try {
consumer.abort();
} catch (Throwable t) {
}
// Reset all postings data
doAfterFlush();
success = true;
} finally {
// Always clear the aborting flag and wake blocked threads, even if
// cleanup itself threw.
aborting = false;
notifyAll();
if (infoStream != null) {
message("docWriter: done abort; success=" + success);
}
}
}
/** Reset after a flush */
private void doAfterFlush() throws IOException {
// All ThreadStates should be idle when we are called
assert allThreadsIdle();
// Threads re-bind lazily on their next getThreadState call.
threadBindings.clear();
waitQueue.reset();
segment = null;
numDocs = 0;
nextDocID = 0;
bufferIsFull = false;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].doAfterFlush();
}
}
/** Returns true only when every ThreadState is idle. */
private synchronized boolean allThreadsIdle() {
  for (DocumentsWriterThreadState threadState : threadStates) {
    if (!threadState.isIdle) {
      return false;
    }
  }
  return true;
}
/** True if there are buffered docs or buffered deletes to flush. */
synchronized boolean anyChanges() {
return numDocs != 0 || pendingDeletes.any();
}
// for testing
/** Exposes the in-RAM deletes for the current segment (test hook). */
public SegmentDeletes getPendingDeletes() {
return pendingDeletes;
}
/** Hands the buffered deletes off to the global BufferedDeletes:
 * against the newly flushed segment if there is one, otherwise against
 * the last previously flushed segment, or dropped entirely if the
 * index has no segments at all. Always swaps in a fresh SegmentDeletes. */
private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
// Lock order: DW -> BD
if (pendingDeletes.any()) {
if (newSegment != null) {
if (infoStream != null) {
message("flush: push buffered deletes to newSegment");
}
bufferedDeletes.pushDeletes(pendingDeletes, newSegment);
} else if (segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
pendingDeletes = new SegmentDeletes();
}
}
/** True if any deletes are buffered for the in-RAM segment. */
public boolean anyDeletions() {
return pendingDeletes.any();
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
final long startTime = System.currentTimeMillis();
// We change writer's segmentInfos:
assert Thread.holdsLock(writer);
waitIdle();
if (numDocs == 0) {
// nothing to do!
if (infoStream != null) {
message("flush: no docs; skipping");
}
// Lock order: IW -> DW -> BD
pushDeletes(null, segmentInfos);
return null;
}
if (aborting) {
if (infoStream != null) {
message("flush: skip because aborting is set");
}
return null;
}
boolean success = false;
SegmentInfo newSegment;
try {
assert nextDocID == numDocs;
assert waitQueue.numWaiting == 0;
assert waitQueue.waitingBytes == 0;
if (infoStream != null) {
message("flush postings as segment " + segment + " numDocs=" + numDocs);
}
final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
numDocs, writer.getConfig().getTermIndexInterval(),
SegmentCodecs.build(fieldInfos, writer.codecs));
newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
for (DocumentsWriterThreadState threadState : threadStates) {
threads.add(threadState.consumer);
}
double startMBUsed = bytesUsed()/1024./1024.;
consumer.flush(threads, flushState);
newSegment.setHasVectors(flushState.hasVectors);
if (infoStream != null) {
message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
message("flushedFiles=" + newSegment.files());
message("flushed codecs=" + newSegment.getSegmentCodecs());
}
if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
if (infoStream != null) {
message("flush: create compound file \"" + cfsFileName + "\"");
}
CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
for(String fileName : newSegment.files()) {
cfsWriter.addFile(fileName);
}
cfsWriter.close();
deleter.deleteNewFiles(newSegment.files());
newSegment.setUseCompoundFile(true);
}
if (infoStream != null) {
message("flush: segment=" + newSegment);
final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
message(" ramUsed=" + nf.format(startMBUsed) + " MB" +
" newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
" (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
" docs/MB=" + nf.format(numDocs / newSegmentSize) +
" new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
}
success = true;
} finally {
notifyAll();
if (!success) {
if (segment != null) {
deleter.refresh(segment);
}
abort();
}
}
doAfterFlush();
// Lock order: IW -> DW -> BD
pushDeletes(newSegment, segmentInfos);
if (infoStream != null) {
message("flush time " + (System.currentTimeMillis()-startTime) + " msec");
}
return newSegment;
}
/** Marks this writer closed and wakes all waiting threads so they can
 * observe the flag (waitReady then throws AlreadyClosedException). */
synchronized void close() {
closed = true;
notifyAll();
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
final Thread currentThread = Thread.currentThread();
assert !Thread.holdsLock(writer);
// First, find a thread state. If this thread already
// has affinity to a specific ThreadState, use that one
// again.
DocumentsWriterThreadState state = threadBindings.get(currentThread);
if (state == null) {
// First time this thread has called us since last
// flush. Find the least loaded thread state:
DocumentsWriterThreadState minThreadState = null;
for(int i=0;i<threadStates.length;i++) {
DocumentsWriterThreadState ts = threadStates[i];
if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
minThreadState = ts;
}
}
if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
state = minThreadState;
state.numThreads++;
} else {
// Just create a new "private" thread state
DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
if (threadStates.length > 0) {
System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
}
state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
threadStates = newArray;
}
threadBindings.put(currentThread, state);
}
// Next, wait until my thread state is idle (in case
// it's shared with other threads), and no flush/abort
// pending
waitReady(state);
// Allocate segment name if this is the first doc since
// last flush:
if (segment == null) {
segment = writer.newSegmentName();
assert numDocs == 0;
}
state.docState.docID = nextDocID++;
if (delTerm != null) {
pendingDeletes.addTerm(delTerm, state.docState.docID);
}
numDocs++;
state.isIdle = false;
return state;
}
/** Adds a document with no deletion term; see updateDocument. */
boolean addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
return updateDocument(doc, analyzer, null);
}
boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
throws CorruptIndexException, IOException {
// Possibly trigger a flush, or wait until any running flush completes:
boolean doFlush = flushControl.waitUpdate(1, delTerm != null ? 1 : 0);
// This call is synchronized but fast
final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
final DocState docState = state.docState;
docState.doc = doc;
docState.analyzer = analyzer;
boolean success = false;
try {
// This call is not synchronized and does all the
// work
final DocWriter perDoc;
try {
perDoc = state.consumer.processDocument();
} finally {
docState.clear();
}
// This call is synchronized but fast
finishDocument(state, perDoc);
success = true;
} finally {
if (!success) {
// If this thread state had decided to flush, we
// must clear it so another thread can flush
if (doFlush) {
flushControl.clearFlushPending();
}
if (infoStream != null) {
message("exception in updateDocument aborting=" + aborting);
}
synchronized(this) {
state.isIdle = true;
notifyAll();
if (aborting) {
abort();
} else {
skipDocWriter.docID = docState.docID;
boolean success2 = false;
try {
waitQueue.add(skipDocWriter);
success2 = true;
} finally {
if (!success2) {
abort();
return false;
}
}
// Immediately mark this document as deleted
// since likely it was partially added. This
// keeps indexing as "all or none" (atomic) when
// adding a document:
deleteDocID(state.docState.docID);
}
}
}
}
doFlush |= flushControl.flushByRAMUsage("new document");
return doFlush;
}
/** Blocks the caller until every ThreadState is idle. */
public synchronized void waitIdle() {
while (!allThreadsIdle()) {
try {
wait();
} catch (InterruptedException ie) {
// Interrupts are surfaced as an unchecked exception rather than
// the checked InterruptedException.
throw new ThreadInterruptedException(ie);
}
}
}
/** Blocks until the given ThreadState is idle and no abort is in
 * progress; throws if the writer is closed (or closes while waiting). */
synchronized void waitReady(DocumentsWriterThreadState state) {
while (!closed && (!state.isIdle || aborting)) {
try {
wait();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
}
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Does the synchronized work to finish/flush the
* inverted document. */
private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
// Must call this w/o holding synchronized(this) else
// we'll hit deadlock:
balanceRAM();
synchronized(this) {
assert docWriter == null || docWriter.docID == perThread.docState.docID;
if (aborting) {
// We are currently aborting, and another thread is
// waiting for me to become idle. We just forcefully
// idle this threadState; it will be fully reset by
// abort()
if (docWriter != null) {
try {
docWriter.abort();
} catch (Throwable t) {
// best-effort: abort() will discard all buffered state anyway
}
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
return;
}
final boolean doPause;
if (docWriter != null) {
doPause = waitQueue.add(docWriter);
} else {
// Nothing to write for this doc: push the skip placeholder so the
// wait queue's docID cursor still advances.
skipDocWriter.docID = perThread.docState.docID;
doPause = waitQueue.add(skipDocWriter);
}
if (doPause) {
// Too many bytes queued out of order; stall until drained.
waitForWaitQueue();
}
perThread.isIdle = true;
// wakes up any threads waiting on the wait queue
notifyAll();
}
}
/** Blocks until the wait queue drains below its resume threshold
 * (see WaitQueue.doResume). */
synchronized void waitForWaitQueue() {
do {
try {
wait();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
} while (!waitQueue.doResume());
}
/** No-op DocWriter used for docs with nothing to write; it exists only
 * to advance the wait queue's docID cursor. */
private static class SkipDocWriter extends DocWriter {
@Override
void finish() {
}
@Override
void abort() {
}
@Override
long sizeInBytes() {
return 0;
}
}
final SkipDocWriter skipDocWriter = new SkipDocWriter();
NumberFormat nf = NumberFormat.getInstance();
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
/* Initial chunks size of the shared int[] blocks used to
store postings data */
final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
private List<int[]> freeIntBlocks = new ArrayList<int[]>();
/* Allocate another int[] from the shared pool */
synchronized int[] getIntBlock() {
  final int free = freeIntBlocks.size();
  if (free == 0) {
    // Pool is empty: allocate a fresh block and charge it to the
    // shared RAM accounting.
    bytesUsed.addAndGet(INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
    return new int[INT_BLOCK_SIZE];
  }
  return freeIntBlocks.remove(free - 1);
}
/** Total RAM in use: indexing structures plus this segment's buffered deletes. */
long bytesUsed() {
return bytesUsed.get() + pendingDeletes.bytesUsed.get();
}
/* Return int[]s to the pool */
/** Returns the int[] blocks in [start, end) to the shared pool and
 * clears the caller's slots. */
synchronized void recycleIntBlocks(int[][] blocks, int start, int end) {
  for (int idx = start; idx < end; idx++) {
    freeIntBlocks.add(blocks[idx]);
    // Clear the caller's slot so it no longer pins the block.
    blocks[idx] = null;
  }
}
final RecyclingByteBlockAllocator byteBlockAllocator = new RecyclingByteBlockAllocator(BYTE_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
final static int PER_DOC_BLOCK_SIZE = 1024;
final RecyclingByteBlockAllocator perDocAllocator = new RecyclingByteBlockAllocator(PER_DOC_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
/** Formats a byte count as megabytes using the shared NumberFormat. */
String toMB(long v) {
  final double megabytes = v / 1024. / 1024.;
  return nf.format(megabytes);
}
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
void balanceRAM() {
final boolean doBalance;
final long deletesRAMUsed;
deletesRAMUsed = bufferedDeletes.bytesUsed();
synchronized(this) {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
return;
}
doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
}
if (doBalance) {
if (infoStream != null) {
message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
" vs trigger=" + toMB(ramBufferSize) +
" deletesMB=" + toMB(deletesRAMUsed) +
" byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
" perDocFree=" + toMB(perDocAllocator.bytesUsed()));
}
final long startBytesUsed = bytesUsed() + deletesRAMUsed;
int iter = 0;
// We free equally from each pool in 32 KB
// chunks until we are below our threshold
// (freeLevel)
boolean any = true;
while(bytesUsed()+deletesRAMUsed > freeLevel) {
synchronized(this) {
if (0 == perDocAllocator.numBufferedBlocks() &&
0 == byteBlockAllocator.numBufferedBlocks() &&
0 == freeIntBlocks.size() && !any) {
// Nothing else to free -- must flush now.
bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
if (infoStream != null) {
if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
message(" nothing to free; set bufferIsFull");
} else {
message(" nothing to free");
}
}
break;
}
if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
byteBlockAllocator.freeBlocks(1);
}
if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
freeIntBlocks.remove(freeIntBlocks.size()-1);
bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
}
if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
}
}
if ((3 == iter % 4) && any) {
// Ask consumer to free any recycled state
any = consumer.freeRAM();
}
iter++;
}
if (infoStream != null) {
message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
}
}
}
final WaitQueue waitQueue = new WaitQueue();
/** Ensures docs reach the directory in docID order even though indexing
 * threads finish out of order: a doc that finishes "early" waits here
 * (in a circular buffer) until all earlier docIDs have been written. */
private class WaitQueue {
DocWriter[] waiting;   // circular buffer, indexed relative to nextWriteLoc
int nextWriteDocID;    // the docID that must be written next
int nextWriteLoc;      // slot in waiting corresponding to nextWriteDocID
int numWaiting;        // number of queued (non-null) docs
long waitingBytes;     // RAM held by queued docs
public WaitQueue() {
waiting = new DocWriter[10];
}
synchronized void reset() {
// NOTE: nextWriteLoc doesn't need to be reset
assert numWaiting == 0;
assert waitingBytes == 0;
nextWriteDocID = 0;
}
// True once queued bytes dropped enough for paused producers to resume.
synchronized boolean doResume() {
return waitingBytes <= waitQueueResumeBytes;
}
// True when so many bytes are queued that producers should pause.
synchronized boolean doPause() {
return waitingBytes > waitQueuePauseBytes;
}
// Discard every queued doc (on abort), zeroing the accounting.
synchronized void abort() {
int count = 0;
for(int i=0;i<waiting.length;i++) {
final DocWriter doc = waiting[i];
if (doc != null) {
doc.abort();
waiting[i] = null;
count++;
}
}
waitingBytes = 0;
assert count == numWaiting;
numWaiting = 0;
}
// Write one doc (must be the next docID or the skip placeholder) and
// advance the write cursor; any failure forces a global abort.
private void writeDocument(DocWriter doc) throws IOException {
assert doc == skipDocWriter || nextWriteDocID == doc.docID;
boolean success = false;
try {
doc.finish();
nextWriteDocID++;
nextWriteLoc++;
assert nextWriteLoc <= waiting.length;
if (nextWriteLoc == waiting.length) {
nextWriteLoc = 0;
}
success = true;
} finally {
if (!success) {
setAborting();
}
}
}
// Accept a finished doc: write it now if it is next in line (draining
// any consecutively-numbered queued docs), otherwise park it in the
// circular buffer. Returns true if the caller should pause.
synchronized public boolean add(DocWriter doc) throws IOException {
assert doc.docID >= nextWriteDocID;
if (doc.docID == nextWriteDocID) {
writeDocument(doc);
// Drain docs that were waiting directly behind us:
while(true) {
doc = waiting[nextWriteLoc];
if (doc != null) {
numWaiting--;
waiting[nextWriteLoc] = null;
waitingBytes -= doc.sizeInBytes();
writeDocument(doc);
} else {
break;
}
}
} else {
// I finished before documents that were added
// before me. This can easily happen when I am a
// small doc and the docs before me were large, or,
// just due to luck in the thread scheduling. Just
// add myself to the queue and when that large doc
// finishes, it will flush me:
int gap = doc.docID - nextWriteDocID;
if (gap >= waiting.length) {
// Grow queue
DocWriter[] newArray = new DocWriter[ArrayUtil.oversize(gap, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
assert nextWriteLoc >= 0;
// Unwrap the circular buffer into the front of the new array:
System.arraycopy(waiting, nextWriteLoc, newArray, 0, waiting.length-nextWriteLoc);
System.arraycopy(waiting, 0, newArray, waiting.length-nextWriteLoc, nextWriteLoc);
nextWriteLoc = 0;
waiting = newArray;
gap = doc.docID - nextWriteDocID;
}
int loc = nextWriteLoc + gap;
if (loc >= waiting.length) {
loc -= waiting.length;
}
// We should only wrap one time
assert loc < waiting.length;
// Nobody should be in my spot!
assert waiting[loc] == null;
waiting[loc] = doc;
numWaiting++;
waitingBytes += doc.sizeInBytes();
}
return doPause();
}
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMFile;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RecyclingByteBlockAllocator;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.RamUsageEstimator;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// Total bytes buffered in RAM by the postings/int-block/byte-block pools.
final AtomicLong bytesUsed = new AtomicLong(0);
IndexWriter writer;
Directory directory;
String segment; // Current segment we are working on
private int nextDocID; // Next docID to be added
private int numDocs; // # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
// Thread -> ThreadState affinity map; cleared on every flush/abort.
private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
boolean bufferIsFull; // True when it's time to write segment
private boolean aborting; // True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
private final int maxThreadStates;
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
// Per-document state handed to the consumer chain while one document is
// being processed by one thread.
static class DocState {
  DocumentsWriter docWriter;
  Analyzer analyzer;
  PrintStream infoStream;
  SimilarityProvider similarityProvider;
  int docID;
  Document doc;
  String maxTermPrefix;
  // Only called by asserts
  public boolean testPoint(String name) {
    return docWriter.writer.testPoint(name);
  }
  public void clear() {
    // don't hold onto doc nor analyzer, in case it is
    // largish:
    doc = null;
    analyzer = null;
  }
}
/** Consumer returns this on each doc. This holds any
 * state that must be flushed synchronized "in docID
 * order". We gather these and flush them in order. */
abstract static class DocWriter {
  DocWriter next;
  int docID;
  // Flushes this doc's pending state to the directory.
  abstract void finish() throws IOException;
  // Discards this doc's pending state (never throws).
  abstract void abort();
  // RAM currently held by this doc's pending state.
  abstract long sizeInBytes();
  void setNext(DocWriter next) {
    this.next = next;
  }
}
/**
 * Create and return a new DocWriterBuffer.
 */
PerDocBuffer newPerDocBuffer() {
  return new PerDocBuffer();
}
/**
 * RAMFile buffer for DocWriters; blocks come from the shared
 * perDocAllocator pool rather than being allocated per-instance.
 */
@SuppressWarnings("serial")
class PerDocBuffer extends RAMFile {
  /**
   * Allocate bytes used from shared pool.
   */
  @Override
  protected byte[] newBuffer(int size) {
    assert size == PER_DOC_BLOCK_SIZE;
    return perDocAllocator.getByteBlock();
  }
  /**
   * Recycle the bytes used.
   */
  synchronized void recycle() {
    if (buffers.size() > 0) {
      setLength(0);
      // Recycle the blocks
      perDocAllocator.recycleByteBlocks(buffers);
      buffers.clear();
      sizeInBytes = 0;
      assert numBuffers() == 0;
    }
  }
}
/**
 * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
 * which returns the DocConsumer that the DocumentsWriter calls to process the
 * documents.
 */
abstract static class IndexingChain {
  abstract DocConsumer getChain(DocumentsWriter documentsWriter);
}
// Default chain wiring; see the diagram below for how consumers nest.
static final IndexingChain defaultIndexingChain = new IndexingChain() {
  @Override
  DocConsumer getChain(DocumentsWriter documentsWriter) {
    /*
    This is the current indexing chain:
    DocConsumer / DocConsumerPerThread
    --> code: DocFieldProcessor / DocFieldProcessorPerThread
    --> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
    --> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
    --> code: DocInverter / DocInverterPerThread / DocInverterPerField
    --> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
    --> code: TermsHash / TermsHashPerThread / TermsHashPerField
    --> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
    --> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
    --> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
    --> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
    --> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
    --> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
    */
    // Build up indexing chain:
    final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
    final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
    /*
     * nesting TermsHash instances here to allow the secondary (TermVectors) share the interned postings
     * via a shared ByteBlockPool. See TermsHashPerField for details.
     */
    final TermsHash termVectorsTermHash = new TermsHash(documentsWriter, false, termVectorsWriter, null);
    final InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, termVectorsTermHash);
    final NormsWriter normsWriter = new NormsWriter();
    final DocInverter docInverter = new DocInverter(termsHash, normsWriter);
    return new DocFieldProcessor(documentsWriter, docInverter);
  }
};
// Head of the indexing chain built by the IndexingChain.
final DocConsumer consumer;
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Pause producers when the wait queue holds >10% of the RAM budget;
// resume once it drains below 5%.
private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private boolean closed;
private final FieldInfos fieldInfos;
private final BufferedDeletes bufferedDeletes;
private final IndexWriter.FlushControl flushControl;
// Wires this writer to its owning IndexWriter and builds the consumer
// chain via the supplied IndexingChain.
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
  this.directory = directory;
  this.writer = writer;
  this.similarityProvider = writer.getConfig().getSimilarityProvider();
  this.maxThreadStates = maxThreadStates;
  this.fieldInfos = fieldInfos;
  this.bufferedDeletes = bufferedDeletes;
  flushControl = writer.flushControl;
  consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
synchronized void deleteDocID(int docIDUpto) {
  pendingDeletes.addDocID(docIDUpto);
  // NOTE: we do not trigger flush here. This is
  // potentially a RAM leak, if you have an app that tries
  // to add docs but every single doc always hits a
  // non-aborting exception. Allowing a flush here gets
  // very messy because we are only invoked when handling
  // exceptions so to do this properly, while handling an
  // exception we'd have to go off and flush new deletes
  // which is risky (likely would hit some other
  // confounding exception).
}
// Buffers delete-by-query for all given queries; returns true when the
// caller should flush (decided by flushControl, outside our monitor).
boolean deleteQueries(Query... queries) {
  final boolean doFlush = flushControl.waitUpdate(0, queries.length);
  synchronized(this) {
    for (Query query : queries) {
      pendingDeletes.addQuery(query, numDocs);
    }
  }
  return doFlush;
}
// Single-query variant of deleteQueries.
boolean deleteQuery(Query query) {
  final boolean doFlush = flushControl.waitUpdate(0, 1);
  synchronized(this) {
    pendingDeletes.addQuery(query, numDocs);
  }
  return doFlush;
}
// Buffers delete-by-term for all given terms; returns true when the
// caller should flush.
boolean deleteTerms(Term... terms) {
  final boolean doFlush = flushControl.waitUpdate(0, terms.length);
  synchronized(this) {
    for (Term term : terms) {
      pendingDeletes.addTerm(term, numDocs);
    }
  }
  return doFlush;
}
// Single-term variant; skipWait is forwarded to flushControl.
boolean deleteTerm(Term term, boolean skipWait) {
  final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
  synchronized(this) {
    pendingDeletes.addTerm(term, numDocs);
  }
  return doFlush;
}
public FieldInfos getFieldInfos() {
  return fieldInfos;
}
/** If non-null, various details of indexing are printed
 * here. */
synchronized void setInfoStream(PrintStream infoStream) {
  this.infoStream = infoStream;
  // propagate to every existing per-thread DocState
  for(int i=0;i<threadStates.length;i++) {
    threadStates[i].docState.infoStream = infoStream;
  }
}
// Replaces the similarity provider on this writer and every existing
// per-thread DocState.
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
  this.similarityProvider = similarity;
  for(int i=0;i<threadStates.length;i++) {
    threadStates[i].docState.similarityProvider = similarity;
  }
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
  if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
    ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
    // fixed pause/resume thresholds when no RAM budget is configured
    waitQueuePauseBytes = 4*1024*1024;
    waitQueueResumeBytes = 2*1024*1024;
  } else {
    ramBufferSize = (long) (mb*1024*1024);
    // pause at 10% of the budget queued, resume at 5%, free down to 95%
    waitQueuePauseBytes = (long) (ramBufferSize*0.1);
    waitQueueResumeBytes = (long) (ramBufferSize*0.05);
    freeLevel = (long) (0.95 * ramBufferSize);
  }
}
synchronized double getRAMBufferSizeMB() {
  if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
    // sentinel is returned as-is, not converted to MB
    return ramBufferSize;
  } else {
    return ramBufferSize/1024./1024.;
  }
}
/** Set max buffered docs, which means we will flush by
 * doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
  maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
  return maxBufferedDocs;
}
/** Get current segment name we are writing. */
synchronized String getSegment() {
  return segment;
}
/** Returns how many docs are currently buffered in RAM. */
synchronized int getNumDocs() {
  return numDocs;
}
// Emits a prefixed line to the info stream, if one is set.
void message(String message) {
  if (infoStream != null) {
    writer.message("DW: " + message);
  }
}
// Marks this writer as aborting; updateDocument/finishDocument observe
// the flag and route their cleanup through abort().
synchronized void setAborting() {
  if (infoStream != null) {
    message("setAborting");
  }
  aborting = true;
}
/** Called if we hit an exception at a bad time (when
 * updating the index files) and must discard all
 * currently buffered docs. This resets our state,
 * discarding any docs added since last flush. */
synchronized void abort() throws IOException {
  if (infoStream != null) {
    message("docWriter: abort");
  }
  boolean success = false;
  try {
    // Forcefully remove waiting ThreadStates from line
    waitQueue.abort();
    // Wait for all other threads to finish with
    // DocumentsWriter:
    waitIdle();
    if (infoStream != null) {
      message("docWriter: abort waitIdle done");
    }
    assert 0 == waitQueue.numWaiting: "waitQueue.numWaiting=" + waitQueue.numWaiting;
    waitQueue.waitingBytes = 0;
    pendingDeletes.clear();
    // Best-effort: abort each per-thread consumer; exceptions are
    // deliberately swallowed so the abort itself always completes.
    for (DocumentsWriterThreadState threadState : threadStates)
      try {
        threadState.consumer.abort();
      } catch (Throwable t) {
      }
    try {
      consumer.abort();
    } catch (Throwable t) {
    }
    // Reset all postings data
    doAfterFlush();
    success = true;
  } finally {
    // always clear the aborting flag and wake blocked threads, even if
    // the reset above failed
    aborting = false;
    notifyAll();
    if (infoStream != null) {
      message("docWriter: done abort; success=" + success);
    }
  }
}
/** Resets all per-segment buffering state after a flush or abort. */
private void doAfterFlush() throws IOException {
  // Callers must already have forced every ThreadState idle.
  assert allThreadsIdle();
  threadBindings.clear();
  waitQueue.reset();
  bufferIsFull = false;
  nextDocID = 0;
  numDocs = 0;
  segment = null;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.doAfterFlush();
  }
}
// Reports whether every ThreadState is currently idle (no document
// mid-processing on any thread).
private synchronized boolean allThreadsIdle() {
  for (DocumentsWriterThreadState threadState : threadStates) {
    if (!threadState.isIdle) {
      return false;
    }
  }
  return true;
}
// True when there is anything (docs or deletes) that a flush would write.
synchronized boolean anyChanges() {
  return numDocs != 0 || pendingDeletes.any();
}
// for testing
public SegmentDeletes getPendingDeletes() {
  return pendingDeletes;
}
// Hands the buffered deletes to BufferedDeletes, attached either to the
// just-flushed segment or to the last previously-flushed segment; a
// fresh SegmentDeletes is installed afterwards.
private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
  // Lock order: DW -> BD
  if (pendingDeletes.any()) {
    if (newSegment != null) {
      if (infoStream != null) {
        message("flush: push buffered deletes to newSegment");
      }
      bufferedDeletes.pushDeletes(pendingDeletes, newSegment);
    } else if (segmentInfos.size() > 0) {
      if (infoStream != null) {
        message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement());
      }
      bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true);
    } else {
      if (infoStream != null) {
        message("flush: drop buffered deletes: no segments");
      }
      // We can safely discard these deletes: since
      // there are no segments, the deletions cannot
      // affect anything.
    }
    pendingDeletes = new SegmentDeletes();
  }
}
public boolean anyDeletions() {
  return pendingDeletes.any();
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
  final long startTime = System.currentTimeMillis();
  // We change writer's segmentInfos:
  assert Thread.holdsLock(writer);
  waitIdle();
  if (numDocs == 0) {
    // nothing to do!
    if (infoStream != null) {
      message("flush: no docs; skipping");
    }
    // Lock order: IW -> DW -> BD
    pushDeletes(null, segmentInfos);
    return null;
  }
  if (aborting) {
    if (infoStream != null) {
      message("flush: skip because aborting is set");
    }
    return null;
  }
  boolean success = false;
  SegmentInfo newSegment;
  try {
    // All docs must have been fully written in docID order by now.
    assert nextDocID == numDocs;
    assert waitQueue.numWaiting == 0;
    assert waitQueue.waitingBytes == 0;
    if (infoStream != null) {
      message("flush postings as segment " + segment + " numDocs=" + numDocs);
    }
    final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
    numDocs, writer.getConfig().getTermIndexInterval(),
    SegmentCodecs.build(fieldInfos, writer.codecs));
    newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
    Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
    for (DocumentsWriterThreadState threadState : threadStates) {
      threads.add(threadState.consumer);
    }
    double startMBUsed = bytesUsed()/1024./1024.;
    // Ask the whole consumer chain to write its buffered state.
    consumer.flush(threads, flushState);
    newSegment.setHasVectors(flushState.hasVectors);
    if (infoStream != null) {
      message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
      message("flushedFiles=" + newSegment.files());
      message("flushed codecs=" + newSegment.getSegmentCodecs());
    }
    if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
      // Pack the new segment's files into a single CFS file, then
      // delete the loose originals.
      final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
      if (infoStream != null) {
        message("flush: create compound file \"" + cfsFileName + "\"");
      }
      CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
      for(String fileName : newSegment.files()) {
        cfsWriter.addFile(fileName);
      }
      cfsWriter.close();
      deleter.deleteNewFiles(newSegment.files());
      newSegment.setUseCompoundFile(true);
    }
    if (infoStream != null) {
      message("flush: segment=" + newSegment);
      final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
      final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
      message(" ramUsed=" + nf.format(startMBUsed) + " MB" +
      " newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
      " (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
      " docs/MB=" + nf.format(numDocs / newSegmentSize) +
      " new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startBytesToMB(startMBUsed)) + "%");
    }
    success = true;
  } finally {
    notifyAll();
    if (!success) {
      // Partial flush: drop any files already written for this segment,
      // then discard all buffered state.
      if (segment != null) {
        deleter.refresh(segment);
      }
      abort();
    }
  }
  doAfterFlush();
  // Lock order: IW -> DW -> BD
  pushDeletes(newSegment, segmentInfos);
  if (infoStream != null) {
    message("flush time " + (System.currentTimeMillis()-startTime) + " msec");
  }
  return newSegment;
}
// Marks this writer closed and wakes any threads blocked in waitReady,
// which will then throw AlreadyClosedException.
synchronized void close() {
  closed = true;
  notifyAll();
}
/** Returns a free (idle) ThreadState that may be used for
 * indexing this one document. This call also pauses if a
 * flush is pending. If delTerm is non-null then we
 * buffer this deleted term after the thread state has
 * been acquired. */
synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
  final Thread currentThread = Thread.currentThread();
  assert !Thread.holdsLock(writer);
  // First, find a thread state. If this thread already
  // has affinity to a specific ThreadState, use that one
  // again.
  DocumentsWriterThreadState state = threadBindings.get(currentThread);
  if (state == null) {
    // First time this thread has called us since last
    // flush. Find the least loaded thread state:
    DocumentsWriterThreadState minThreadState = null;
    for(int i=0;i<threadStates.length;i++) {
      DocumentsWriterThreadState ts = threadStates[i];
      if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
        minThreadState = ts;
      }
    }
    // Share an existing state when one is unused or when we have hit
    // the maxThreadStates cap; otherwise grow the array by one.
    if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
      state = minThreadState;
      state.numThreads++;
    } else {
      // Just create a new "private" thread state
      DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
      if (threadStates.length > 0) {
        System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
      }
      state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
      threadStates = newArray;
    }
    threadBindings.put(currentThread, state);
  }
  // Next, wait until my thread state is idle (in case
  // it's shared with other threads), and no flush/abort
  // pending
  waitReady(state);
  // Allocate segment name if this is the first doc since
  // last flush:
  if (segment == null) {
    segment = writer.newSegmentName();
    assert numDocs == 0;
  }
  state.docState.docID = nextDocID++;
  if (delTerm != null) {
    pendingDeletes.addTerm(delTerm, state.docState.docID);
  }
  numDocs++;
  state.isIdle = false;
  return state;
}
// Adds a document with no associated delete term; returns true when the
// caller (IndexWriter) should trigger a flush.
boolean addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
  return updateDocument(doc, analyzer, null);
}
// Processes one document on this thread's ThreadState, then finishes it
// in docID order; on failure the docID is marked deleted so indexing
// stays atomic per document. Returns true when a flush should follow.
boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
throws CorruptIndexException, IOException {
  // Possibly trigger a flush, or wait until any running flush completes:
  boolean doFlush = flushControl.waitUpdate(1, delTerm != null ? 1 : 0);
  // This call is synchronized but fast
  final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
  final DocState docState = state.docState;
  docState.doc = doc;
  docState.analyzer = analyzer;
  boolean success = false;
  try {
    // This call is not synchronized and does all the
    // work
    final DocWriter perDoc;
    try {
      perDoc = state.consumer.processDocument();
    } finally {
      docState.clear();
    }
    // This call is synchronized but fast
    finishDocument(state, perDoc);
    success = true;
  } finally {
    if (!success) {
      // If this thread state had decided to flush, we
      // must clear it so another thread can flush
      if (doFlush) {
        flushControl.clearFlushPending();
      }
      if (infoStream != null) {
        message("exception in updateDocument aborting=" + aborting);
      }
      synchronized(this) {
        state.isIdle = true;
        notifyAll();
        if (aborting) {
          abort();
        } else {
          skipDocWriter.docID = docState.docID;
          boolean success2 = false;
          try {
            waitQueue.add(skipDocWriter);
            success2 = true;
          } finally {
            if (!success2) {
              // NOTE(review): this return inside finally discards the
              // exception currently propagating (JLS 14.20.2). It looks
              // deliberate (we already called abort()), but confirm the
              // original failure is not silently lost to callers.
              abort();
              return false;
            }
          }
          // Immediately mark this document as deleted
          // since likely it was partially added. This
          // keeps indexing as "all or none" (atomic) when
          // adding a document:
          deleteDocID(state.docState.docID);
        }
      }
    }
  }
  doFlush |= flushControl.flushByRAMUsage("new document");
  return doFlush;
}
// Blocks until every ThreadState is idle; interrupted waits are
// rethrown as ThreadInterruptedException per Lucene convention.
public synchronized void waitIdle() {
  while (!allThreadsIdle()) {
    try {
      wait();
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
  }
}
// Blocks until the given ThreadState is free and no flush/abort is in
// progress; throws AlreadyClosedException once the writer is closed.
synchronized void waitReady(DocumentsWriterThreadState state) {
  while (!closed && (!state.isIdle || aborting)) {
    try {
      wait();
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
  }
  if (closed) {
    throw new AlreadyClosedException("this IndexWriter is closed");
  }
}
/** Does the synchronized work to finish/flush the
 * inverted document. */
private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
  // Must call this w/o holding synchronized(this) else
  // we'll hit deadlock:
  balanceRAM();
  synchronized(this) {
    assert docWriter == null || docWriter.docID == perThread.docState.docID;
    if (aborting) {
      // We are currently aborting, and another thread is
      // waiting for me to become idle. We just forcefully
      // idle this threadState; it will be fully reset by
      // abort()
      if (docWriter != null) {
        try {
          docWriter.abort();
        } catch (Throwable t) {
          // best-effort: abort() will do the full reset
        }
      }
      perThread.isIdle = true;
      // wakes up any threads waiting on the wait queue
      notifyAll();
      return;
    }
    // Enqueue this doc (or a no-op placeholder) for in-docID-order write.
    final boolean doPause;
    if (docWriter != null) {
      doPause = waitQueue.add(docWriter);
    } else {
      skipDocWriter.docID = perThread.docState.docID;
      doPause = waitQueue.add(skipDocWriter);
    }
    if (doPause) {
      waitForWaitQueue();
    }
    perThread.isIdle = true;
    // wakes up any threads waiting on the wait queue
    notifyAll();
  }
}
// Blocks until the wait queue has drained below the resume threshold.
synchronized void waitForWaitQueue() {
  do {
    try {
      wait();
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
  } while (!waitQueue.doResume());
}
// No-op DocWriter used as a placeholder so docIDs with nothing to write
// (e.g. a doc that hit a non-aborting exception) still advance the
// in-order wait queue.
private static class SkipDocWriter extends DocWriter {
  @Override
  void finish() {
  }
  @Override
  void abort() {
  }
  @Override
  long sizeInBytes() {
    return 0;
  }
}
final SkipDocWriter skipDocWriter = new SkipDocWriter();
// NOTE(review): NumberFormat is not thread-safe and nf is reachable from
// paths that may not hold this writer's monitor (e.g. balanceRAM's
// logging) — worst case appears to be garbled log text; confirm.
NumberFormat nf = NumberFormat.getInstance();
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
/* if you increase this, you must fix field cache impl for
 * getTerms/getTermsIndex requires <= 32768. */
final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
/* Initial chunks size of the shared int[] blocks used to
store postings data */
final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
// Pool of recycled int[] blocks shared by all threads.
private List<int[]> freeIntBlocks = new ArrayList<int[]>();
/* Allocate another int[] from the shared pool */
synchronized int[] getIntBlock() {
  final int size = freeIntBlocks.size();
  final int[] b;
  if (0 == size) {
    // pool empty: allocate fresh and account for the new RAM
    b = new int[INT_BLOCK_SIZE];
    bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
  } else {
    b = freeIntBlocks.remove(size-1);
  }
  return b;
}
// Total RAM attributed to this writer: pools plus buffered deletes.
long bytesUsed() {
  return bytesUsed.get() + pendingDeletes.bytesUsed.get();
}
/* Return int[]s to the pool */
synchronized void recycleIntBlocks(int[][] blocks, int start, int end) {
  for(int i=start;i<end;i++) {
    freeIntBlocks.add(blocks[i]);
    blocks[i] = null;
  }
}
// Shared byte-block pool for postings data (freq/prox).
final RecyclingByteBlockAllocator byteBlockAllocator = new RecyclingByteBlockAllocator(BYTE_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
final static int PER_DOC_BLOCK_SIZE = 1024;
// Shared byte-block pool for per-doc buffers (stored fields/vectors).
final RecyclingByteBlockAllocator perDocAllocator = new RecyclingByteBlockAllocator(PER_DOC_BLOCK_SIZE, Integer.MAX_VALUE, bytesUsed);
// Formats a byte count as megabytes for log messages.
String toMB(long v) {
  return nf.format(v/1024./1024.);
}
/* We have three pools of RAM: Postings, byte blocks
 * (holds freq/prox posting data) and per-doc buffers
 * (stored fields/term vectors). Different docs require
 * varying amount of storage from these classes. For
 * example, docs with many unique single-occurrence short
 * terms will use up the Postings RAM and hardly any of
 * the other two. Whereas docs with very large terms will
 * use alot of byte blocks RAM. This method just frees
 * allocations from the pools once we are over-budget,
 * which balances the pools to match the current docs. */
void balanceRAM() {
  final boolean doBalance;
  final long deletesRAMUsed;
  deletesRAMUsed = bufferedDeletes.bytesUsed();
  synchronized(this) {
    if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
      return;
    }
    doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
  }
  if (doBalance) {
    if (infoStream != null) {
      message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
      " vs trigger=" + toMB(ramBufferSize) +
      " deletesMB=" + toMB(deletesRAMUsed) +
      " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
      " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
    }
    final long startBytesUsed = bytesUsed() + deletesRAMUsed;
    int iter = 0;
    // We free equally from each pool in 32 KB
    // chunks until we are below our threshold
    // (freeLevel)
    boolean any = true;
    while(bytesUsed()+deletesRAMUsed > freeLevel) {
      synchronized(this) {
        if (0 == perDocAllocator.numBufferedBlocks() &&
        0 == byteBlockAllocator.numBufferedBlocks() &&
        0 == freeIntBlocks.size() && !any) {
          // Nothing else to free -- must flush now.
          bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
          if (infoStream != null) {
            if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
              message(" nothing to free; set bufferIsFull");
            } else {
              message(" nothing to free");
            }
          }
          break;
        }
        // Round-robin over the three pools, one pool per iteration.
        if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
          byteBlockAllocator.freeBlocks(1);
        }
        if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
          freeIntBlocks.remove(freeIntBlocks.size()-1);
          bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
        }
        if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
          perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
        }
      }
      // NOTE(review): consumer.freeRAM() is invoked outside the monitor,
      // presumably to avoid calling into the consumer chain while
      // holding this lock — confirm that is intentional.
      if ((3 == iter % 4) && any) {
        // Ask consumer to free any recycled state
        any = consumer.freeRAM();
      }
      iter++;
    }
    if (infoStream != null) {
      message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
    }
  }
}
// Serializes out-of-order document completion back into docID order.
final WaitQueue waitQueue = new WaitQueue();
// Circular buffer of finished-but-not-yet-written DocWriters, keyed by
// the gap between a doc's ID and the next docID due to be written.
private class WaitQueue {
  DocWriter[] waiting;
  int nextWriteDocID;   // next docID that must be written
  int nextWriteLoc;     // circular-buffer slot for nextWriteDocID
  int numWaiting;       // occupied slots
  long waitingBytes;    // RAM held by queued DocWriters
  public WaitQueue() {
    waiting = new DocWriter[10];
  }
  synchronized void reset() {
    // NOTE: nextWriteLoc doesn't need to be reset
    assert numWaiting == 0;
    assert waitingBytes == 0;
    nextWriteDocID = 0;
  }
  // Producers paused by doPause() may resume below this threshold.
  synchronized boolean doResume() {
    return waitingBytes <= waitQueueResumeBytes;
  }
  // True when enough bytes are queued that producers should pause.
  synchronized boolean doPause() {
    return waitingBytes > waitQueuePauseBytes;
  }
  // Discards every queued DocWriter during an abort.
  synchronized void abort() {
    int count = 0;
    for(int i=0;i<waiting.length;i++) {
      final DocWriter doc = waiting[i];
      if (doc != null) {
        doc.abort();
        waiting[i] = null;
        count++;
      }
    }
    waitingBytes = 0;
    assert count == numWaiting;
    numWaiting = 0;
  }
  // Writes one doc via doc.finish() and advances the in-order cursor;
  // a failure in finish() escalates to setAborting().
  private void writeDocument(DocWriter doc) throws IOException {
    assert doc == skipDocWriter || nextWriteDocID == doc.docID;
    boolean success = false;
    try {
      doc.finish();
      nextWriteDocID++;
      nextWriteLoc++;
      assert nextWriteLoc <= waiting.length;
      if (nextWriteLoc == waiting.length) {
        // wrap the circular buffer
        nextWriteLoc = 0;
      }
      success = true;
    } finally {
      if (!success) {
        setAborting();
      }
    }
  }
  // Adds a finished doc: written immediately when it is next in docID
  // order (draining any now-unblocked successors), otherwise parked in
  // the buffer. Returns doPause() so the caller can throttle.
  synchronized public boolean add(DocWriter doc) throws IOException {
    assert doc.docID >= nextWriteDocID;
    if (doc.docID == nextWriteDocID) {
      writeDocument(doc);
      while(true) {
        doc = waiting[nextWriteLoc];
        if (doc != null) {
          numWaiting--;
          waiting[nextWriteLoc] = null;
          waitingBytes -= doc.sizeInBytes();
          writeDocument(doc);
        } else {
          break;
        }
      }
    } else {
      // I finished before documents that were added
      // before me. This can easily happen when I am a
      // small doc and the docs before me were large, or,
      // just due to luck in the thread scheduling. Just
      // add myself to the queue and when that large doc
      // finishes, it will flush me:
      int gap = doc.docID - nextWriteDocID;
      if (gap >= waiting.length) {
        // Grow queue; rotate so nextWriteLoc lands at index 0
        DocWriter[] newArray = new DocWriter[ArrayUtil.oversize(gap, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
        assert nextWriteLoc >= 0;
        System.arraycopy(waiting, nextWriteLoc, newArray, 0, waiting.length-nextWriteLoc);
        System.arraycopy(waiting, 0, newArray, waiting.length-nextWriteLoc, nextWriteLoc);
        nextWriteLoc = 0;
        waiting = newArray;
        // gap is unchanged by the resize; re-evaluated for clarity
        gap = doc.docID - nextWriteDocID;
      }
      int loc = nextWriteLoc + gap;
      if (loc >= waiting.length) {
        loc -= waiting.length;
      }
      // We should only wrap one time
      assert loc < waiting.length;
      // Nobody should be in my spot!
      assert waiting[loc] == null;
      waiting[loc] = doc;
      numWaiting++;
      waitingBytes += doc.sizeInBytes();
    }
    return doPause();
  }
}
}
Right
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
boolean bufferIsFull; // True when it's time to write segment
// Set once by close(); checked by ensureOpen() before accepting new work.
private volatile boolean closed;
PrintStream infoStream;
Similarity similarity;
List<String> newFiles;
final IndexWriter indexWriter;
// Number of docs currently buffered in RAM across all per-thread writers.
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
// Aggregate RAM delta across all per-thread writers; updated via CAS loop.
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
// Deletes buffered here (not in any per-thread writer); pushed on flush.
SegmentDeletes pendingDeletes;
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
// Registers this DocumentsWriter with the pool so ThreadStates can be created.
this.perThreadPool.initialize(this);
}
/**
 * Buffers delete-by-query ops into every active per-thread writer, or into
 * the global {@code pendingDeletes} if no per-thread writer is active.
 *
 * NOTE(review): {@code deleted} is tracked but the method always returns
 * false (unlike {@code deleteTerm(Term, ThreadState)} which returns it) —
 * confirm callers ignore this return value before relying on it.
 */
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
// No active per-thread writers: buffer globally until the next flush.
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
/** Convenience single-query form of {@link #deleteQueries}. */
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
/**
 * Buffers delete-by-term ops into every active per-thread writer, or into
 * the global {@code pendingDeletes} if no per-thread writer is active.
 *
 * NOTE(review): like {@code deleteQueries}, always returns false even when
 * deletes were applied — confirm callers ignore the return value.
 */
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
/** Convenience single-term form of {@link #deleteTerms}. */
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
/**
 * Applies a delete term to every active per-thread writer except
 * {@code exclude} (used by updateDocument so the updating thread does not
 * lock its own DWPT twice).
 *
 * @return true if at least one per-thread writer was visited
 */
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
 * buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
 * here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
// Propagates infoStream/similarity to every per-thread writer (active or not).
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
/** Returns the RAM flush trigger in MB, or DISABLE_AUTO_FLUSH if disabled. */
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
 * doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles; // List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
/** Writes a message to the IndexWriter's info stream, if one is set. */
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
 * updating the index files) and must discard all
 * currently buffered docs. This resets our state,
 * discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
// Abort every active per-thread writer under its own lock.
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Returns true if there are buffered docs or buffered deletes. */
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
/** Sums buffered delete-term map sizes across all DWPTs plus the global buffer. */
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
// TODO: can we improve performance of this method by keeping track
// here in DW of whether any DWPT has deletions?
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
// NOTE(review): close() only flips the flag; it does not flush or release
// per-thread state — confirm callers flush before closing.
void close() {
closed = true;
}
/**
 * Adds or updates one document: locks a per-thread writer, indexes the doc,
 * and — if that DWPT hit its flush trigger — publishes the new segment and
 * its deletes to the IndexWriter.
 *
 * @return true if a segment was flushed as a side effect of this add
 */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
// Detach this DWPT's deletes so they can be pushed after we unlock.
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
// Publish outside the ThreadState lock to respect the DW -> IW lock order.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/**
 * Post-add bookkeeping: flushes the DWPT if it reached maxBufferedDocs and
 * folds its RAM delta into the global counter.
 *
 * @return the flushed segment, or null if no flush was triggered
 */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
// CAS loop: add deltaRAM atomically without blocking other writer threads.
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
/** Atomically subtracts flushed docs from the global buffered-doc count. */
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
/**
 * Hands buffered deletes to the IndexWriter's BufferedDeletes, anchored to
 * the given new segment, or to the last existing segment if none, or drops
 * them when the index has no segments at all.
 */
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
/**
 * Flushes every active per-thread writer that has buffered docs, publishing
 * each resulting segment and its deletes to the IndexWriter.
 *
 * @param flushDeletes if true, also push globally-buffered and per-thread
 *                     deletes even for DWPTs with no buffered docs
 * @return true if at least one segment was flushed
 */
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
// No docs to flush, but deletes were requested: detach them anyway.
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important to unlock the perThread before addFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
// /* We have three pools of RAM: Postings, byte blocks
// * (holds freq/prox posting data) and per-doc buffers
// * (stored fields/term vectors). Different docs require
// * varying amount of storage from these classes. For
// * example, docs with many unique single-occurrence short
// * terms will use up the Postings RAM and hardly any of
// * the other two. Whereas docs with very large terms will
// * use a lot of byte blocks RAM. This method just frees
// * allocations from the pools once we are over-budget,
// * which balances the pools to match the current docs. */
// void balanceRAM() {
//
// final boolean doBalance;
// final long deletesRAMUsed;
//
// deletesRAMUsed = bufferedDeletes.bytesUsed();
//
// synchronized(this) {
// if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
// return;
// }
//
// doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
// }
//
// if (doBalance) {
//
// if (infoStream != null)
// message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
// " vs trigger=" + toMB(ramBufferSize) +
// " deletesMB=" + toMB(deletesRAMUsed) +
// " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
// " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
//
// final long startBytesUsed = bytesUsed() + deletesRAMUsed;
//
// int iter = 0;
//
// // We free equally from each pool in 32 KB
// // chunks until we are below our threshold
// // (freeLevel)
//
// boolean any = true;
//
// while(bytesUsed()+deletesRAMUsed > freeLevel) {
//
// synchronized(this) {
// if (0 == perDocAllocator.numBufferedBlocks() &&
// 0 == byteBlockAllocator.numBufferedBlocks() &&
// 0 == freeIntBlocks.size() && !any) {
// // Nothing else to free -- must flush now.
// bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
// if (infoStream != null) {
// if (bytesUsed()+deletesRAMUsed > ramBufferSize)
// message(" nothing to free; set bufferIsFull");
// else
// message(" nothing to free");
// }
// break;
// }
//
// if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
// byteBlockAllocator.freeBlocks(1);
// }
// if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
// freeIntBlocks.remove(freeIntBlocks.size()-1);
// bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
// }
// if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
// perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
// }
// }
//
// if ((3 == iter % 4) && any)
// // Ask consumer to free any recycled state
// any = consumer.freeRAM();
//
// iter++;
// }
//
// if (infoStream != null)
// message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
// }
// }
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
boolean bufferIsFull; // True when it's time to write segment
private volatile boolean closed;
PrintStream infoStream;
Similarity similarity;
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
SegmentDeletes pendingDeletes;
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles; // List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
// TODO: can we improve performance of this method by keeping track
// here in DW of whether any DWPT has deletions?
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
void close() {
closed = true;
}
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
/**
 * Hands a batch of buffered deletes to the global BufferedDeletes stream,
 * anchored to the given segment (or, when {@code segmentInfo} is null, to
 * the last already-flushed segment). Synchronizes on the IndexWriter to
 * honor the documented DW -> BD lock order.
 */
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
// Deletes belong to a freshly flushed segment: attach them to it.
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
// No new segment: anchor the deletes to the most recent flushed one.
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
/**
 * Flushes every active per-thread writer that has buffered docs, and
 * (optionally) pushes all pending deletes. Returns true if at least one
 * segment was flushed. Each ThreadState is locked only while its DWPT is
 * flushed; deletes and the new segment are published afterwards.
 */
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
// Move writer-level pending deletes out first, under the DW monitor.
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
// Steal the DWPT's pending deletes and give it a fresh set.
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
// No buffered docs, but deletes were requested: still steal them.
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
// Publish deletes/segment only after releasing the per-thread lock,
// so we never hold a ThreadState lock across IndexWriter calls.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
// /* We have three pools of RAM: Postings, byte blocks
// * (holds freq/prox posting data) and per-doc buffers
// * (stored fields/term vectors). Different docs require
// * varying amount of storage from these classes. For
// * example, docs with many unique single-occurrence short
// * terms will use up the Postings RAM and hardly any of
// * the other two. Whereas docs with very large terms will
// * use alot of byte blocks RAM. This method just frees
// * allocations from the pools once we are over-budget,
// * which balances the pools to match the current docs. */
// void balanceRAM() {
//
// final boolean doBalance;
// final long deletesRAMUsed;
//
// deletesRAMUsed = bufferedDeletes.bytesUsed();
//
// synchronized(this) {
// if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
// return;
// }
//
// doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
// }
//
// if (doBalance) {
//
// if (infoStream != null)
// message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
// " vs trigger=" + toMB(ramBufferSize) +
// " deletesMB=" + toMB(deletesRAMUsed) +
// " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
// " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
//
// final long startBytesUsed = bytesUsed() + deletesRAMUsed;
//
// int iter = 0;
//
// // We free equally from each pool in 32 KB
// // chunks until we are below our threshold
// // (freeLevel)
//
// boolean any = true;
//
// while(bytesUsed()+deletesRAMUsed > freeLevel) {
//
// synchronized(this) {
// if (0 == perDocAllocator.numBufferedBlocks() &&
// 0 == byteBlockAllocator.numBufferedBlocks() &&
// 0 == freeIntBlocks.size() && !any) {
// // Nothing else to free -- must flush now.
// bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
// if (infoStream != null) {
// if (bytesUsed()+deletesRAMUsed > ramBufferSize)
// message(" nothing to free; set bufferIsFull");
// else
// message(" nothing to free");
// }
// break;
// }
//
// if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
// byteBlockAllocator.freeBlocks(1);
// }
// if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
// freeIntBlocks.remove(freeIntBlocks.size()-1);
// bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
// }
// if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
// perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
// }
// }
//
// if ((3 == iter % 4) && any)
// // Ask consumer to free any recycled state
// any = consumer.freeRAM();
//
// iter++;
// }
//
// if (infoStream != null)
// message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
// }
// }
}
// MergeMethods (section label left over from an automated merge; not Java code)
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// Merge conflict resolved: the nested DocState class was removed from
// DocumentsWriter. In the per-thread (DWPT) design the per-document state
// lives on DocumentsWriterPerThread -- pushConfigChange() writes
// perThread.docState directly -- so no writer-level DocState is needed.
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
/**
 * Creates a DocumentsWriter backed by a per-thread pool (DWPT model).
 *
 * @param directory where segments are written
 * @param writer the owning IndexWriter
 * @param chain indexing chain used to build each per-thread consumer
 * @param indexerThreadPool pool handing out per-thread writer states
 * @param fieldInfos shared field metadata, updated on each flush
 * @param bufferedDeletes global stream receiving pushed deletes
 * @throws IOException if pool initialization fails
 */
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
  this.directory = directory;
  this.indexWriter = writer;
  // Fixed: this class declares a 'similarityProvider' field (there is no
  // 'similarity' field), sourced from the config's SimilarityProvider.
  this.similarityProvider = writer.getConfig().getSimilarityProvider();
  this.fieldInfos = fieldInfos;
  this.bufferedDeletes = bufferedDeletes;
  this.perThreadPool = indexerThreadPool;
  this.pendingDeletes = new SegmentDeletes();
  this.chain = chain;
  this.perThreadPool.initialize(this);
}
/**
 * Buffers the given delete queries on every active per-thread writer; if
 * no active ThreadState exists, buffers them on the writer-level
 * pendingDeletes instead.
 * NOTE(review): this method always returns false, even after buffering
 * deletes -- confirm callers ignore the result before relying on it.
 */
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
// No per-thread states saw the deletes: park them at the writer level.
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
/**
 * Buffers the given delete terms on every active per-thread writer; if no
 * active ThreadState exists, buffers them on the writer-level
 * pendingDeletes instead.
 * NOTE(review): 'deleted' is set before the lock is acquired (unlike
 * deleteQueries, which sets it after the delete succeeds), and the method
 * always returns false -- confirm both are intentional.
 */
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
// No per-thread states saw the deletes: park them at the writer level.
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
/**
 * Buffers the deleted term on every active ThreadState except
 * {@code exclude} (the state that already applied it inline).
 *
 * @return true if at least one active ThreadState was seen
 */
boolean deleteTerm(final Term term, ThreadState exclude) {
  boolean sawAnyState = false;
  final Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
  while (it.hasNext()) {
    final ThreadState current = it.next();
    sawAnyState = true;
    if (current == exclude) {
      continue;  // this state already has the delete
    }
    current.lock();
    try {
      current.perThread.deleteTerms(term);
    } finally {
      current.unlock();
    }
  }
  return sawAnyState;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
/**
 * Swaps in a new SimilarityProvider and propagates it to every
 * per-thread DocState.
 */
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
  // Fixed: store into the declared 'similarityProvider' field; this class
  // has no 'similarity' field.
  this.similarityProvider = similarity;
  pushConfigChange();
}
/**
 * Pushes the current infoStream and similarityProvider down to the
 * DocState of every per-thread writer (active or not).
 */
private final void pushConfigChange() {
  final Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
  while (it.hasNext()) {
    DocumentsWriterPerThread perThread = it.next().perThread;
    perThread.docState.infoStream = this.infoStream;
    // Fixed: both this class and DocState declare 'similarityProvider',
    // not 'similarity'.
    perThread.docState.similarityProvider = this.similarityProvider;
  }
}
/** Set how much RAM we can use before flushing. */
/**
 * Sets the RAM budget (in MB) that triggers a flush;
 * {@code IndexWriterConfig.DISABLE_AUTO_FLUSH} turns the trigger off.
 */
synchronized void setRAMBufferSizeMB(double mb) {
  ramBufferSize = (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH)
      ? IndexWriterConfig.DISABLE_AUTO_FLUSH
      : (long) (mb * 1024 * 1024);
}
/**
 * Returns the RAM flush trigger in MB, or the raw
 * {@code DISABLE_AUTO_FLUSH} sentinel when auto-flush is disabled.
 */
synchronized double getRAMBufferSizeMB() {
  return ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH
      ? ramBufferSize
      : ramBufferSize / 1024. / 1024.;
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles;
// List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
/** Emits a "DW:"-prefixed message via the IndexWriter when an infoStream is set. */
void message(String message) {
  if (infoStream != null) {
    indexWriter.message("DW: " + message);
  }
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
// Abort every active per-thread writer, locking each state in turn so
// no document add can interleave with its abort.
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
// Log the outcome even if one of the per-thread aborts threw.
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Reset after a flush */
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing
// for testing: total count of distinct buffered delete terms across all
// active per-thread states plus the writer-level pending deletes.
public int getBufferedDeleteTermsSize() {
  int total = pendingDeletes.terms.size();
  final Iterator<ThreadState> states = perThreadPool.getActivePerThreadsIterator();
  while (states.hasNext()) {
    total += states.next().perThread.pendingDeletes.terms.size();
  }
  return total;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
/**
 * Returns true if any deletes are buffered, either at the writer level
 * or on any active per-thread state.
 */
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
// Check each per-thread pending-delete set under its own lock.
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
void close() {
closed = true;
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
/**
 * Adds/updates one document on this thread's per-thread writer (DWPT).
 * Returns true if the add triggered a segment flush that was published to
 * the IndexWriter; otherwise buffers {@code delTerm} (if any) on the other
 * DWPTs and returns false.
 */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
// Acquire (and lock) this thread's ThreadState via thread affinity.
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
// May flush this DWPT if it hit the doc-count trigger.
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
// Steal pending deletes so they travel with the flushed segment.
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
// Publish deletes/segment only after dropping the per-thread lock.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/** Does the synchronized work to finish/flush the
* inverted document. */
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
/* Initial chunks size of the shared int[] blocks used to
store postings data */
/* Allocate another int[] from the shared pool */
/* Return int[]s to the pool */
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
/**
 * Completes one document add on the given per-thread writer: flushes that
 * writer to a new segment when its buffered doc count reaches
 * {@code maxBufferedDocs}, then accounts the RAM delta the add produced.
 *
 * @param perThread the per-thread writer that just consumed a document
 * @param perThreadRAMUsedBeforeAdd bytes used by {@code perThread} before the add
 * @return the flushed {@link SegmentInfo}, or null if no flush was triggered
 * @throws IOException if the flush fails
 */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
    long perThreadRAMUsedBeforeAdd) throws IOException {
  SegmentInfo newSegment = null;
  // == (not >=) so a disabled doc-count trigger (e.g. DISABLE_AUTO_FLUSH)
  // can never match.
  if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
    newSegment = perThread.flush();
  }
  // AtomicLong.addAndGet is already an atomic read-modify-write; the
  // original hand-rolled compareAndSet retry loop was redundant.
  ramUsed.addAndGet(perThread.bytesUsed() - perThreadRAMUsedBeforeAdd);
  return newSegment;
}
/**
 * Subtracts the number of just-flushed documents from the global in-RAM
 * doc counter. (The misspelled name is kept: callers depend on it.)
 *
 * @param numFlushed how many buffered docs were flushed to disk
 */
final void substractFlushedNumDocs(int numFlushed) {
  // addAndGet is atomic; no manual compareAndSet loop is needed.
  numDocsInRAM.addAndGet(-numFlushed);
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// Merge conflict resolved: the nested DocState class was removed from
// DocumentsWriter. In the per-thread (DWPT) design the per-document state
// lives on DocumentsWriterPerThread -- pushConfigChange() writes
// perThread.docState directly -- so no writer-level DocState is needed.
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
/**
 * Creates a DocumentsWriter backed by a per-thread pool (DWPT model).
 *
 * @param directory where segments are written
 * @param writer the owning IndexWriter
 * @param chain indexing chain used to build each per-thread consumer
 * @param indexerThreadPool pool handing out per-thread writer states
 * @param fieldInfos shared field metadata, updated on each flush
 * @param bufferedDeletes global stream receiving pushed deletes
 * @throws IOException if pool initialization fails
 */
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
  this.directory = directory;
  this.indexWriter = writer;
  // Fixed: this class declares a 'similarityProvider' field (there is no
  // 'similarity' field), sourced from the config's SimilarityProvider.
  this.similarityProvider = writer.getConfig().getSimilarityProvider();
  this.fieldInfos = fieldInfos;
  this.bufferedDeletes = bufferedDeletes;
  this.perThreadPool = indexerThreadPool;
  this.pendingDeletes = new SegmentDeletes();
  this.chain = chain;
  this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
/**
 * Swaps in a new SimilarityProvider and propagates it to every
 * per-thread DocState.
 */
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
  // Fixed: store into the declared 'similarityProvider' field; this class
  // has no 'similarity' field.
  this.similarityProvider = similarity;
  pushConfigChange();
}
/**
 * Propagates the current infoStream and similarity settings into the doc
 * state of every per-thread writer (active or not).
 */
private final void pushConfigChange() {
  for (Iterator<ThreadState> all = perThreadPool.getAllPerThreadsIterator(); all.hasNext(); ) {
    final DocumentsWriterPerThread dwpt = all.next().perThread;
    dwpt.docState.infoStream = this.infoStream;
    dwpt.docState.similarity = this.similarity;
  }
}
/** Set how much RAM we can use before flushing; DISABLE_AUTO_FLUSH
 * turns RAM-based flushing off entirely. */
synchronized void setRAMBufferSizeMB(double mb) {
  ramBufferSize = (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH)
      ? IndexWriterConfig.DISABLE_AUTO_FLUSH
      : (long) (mb * 1024 * 1024);
}
/** Returns the RAM flush trigger in MB, or DISABLE_AUTO_FLUSH if
 * RAM-based flushing is off. */
synchronized double getRAMBufferSizeMB() {
  return (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH)
      ? ramBufferSize
      : ramBufferSize / 1024. / 1024.;
}
/** Set max buffered docs, which means we will flush by
 * doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
  maxBufferedDocs = count;
}
/** Returns the per-DWPT doc-count flush trigger. */
int getMaxBufferedDocs() {
  return maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM
 * across all per-thread writers. */
int getNumDocs() {
  return numDocsInRAM.get();
}
// List of files that were written before last abort().
// NOTE(review): nothing in this chunk assigns it — presumably set by the
// abort path in code outside this view; confirm.
private Collection<String> abortedFiles;
/** Returns the files written before the last abort(), or null. */
Collection<String> abortedFiles() {
  return abortedFiles;
}
/** Forwards a diagnostic message, prefixed with "DW: ", to the
 * IndexWriter's info stream when one is configured. */
void message(String message) {
  if (infoStream != null) {
    indexWriter.message("DW: " + message);
  }
}
/**
 * Guards mutating entry points.
 *
 * @throws AlreadyClosedException if close() has been called
 */
private void ensureOpen() throws AlreadyClosedException {
  if (closed) {
    throw new AlreadyClosedException("this IndexWriter is closed");
  }
}
/** Called if we hit an exception at a bad time (when
 * updating the index files) and must discard all
 * currently buffered docs. This resets our state,
 * discarding any docs added since last flush.
 *
 * Locks each active ThreadState in turn and aborts its DWPT;
 * never holds more than one ThreadState lock at a time.
 * NOTE(review): {@code abortedFiles} is logged below but never
 * repopulated here — confirm the per-thread abort() fills it. */
synchronized void abort() throws IOException {
  boolean success = false;
  try {
    if (infoStream != null) {
      message("docWriter: abort");
    }
    Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
    while (threadsIterator.hasNext()) {
      ThreadState perThread = threadsIterator.next();
      perThread.lock();
      try {
        perThread.perThread.abort();
      } finally {
        perThread.unlock();
      }
    }
    success = true;
  } finally {
    // Log outcome even when a per-thread abort threw.
    if (infoStream != null) {
      message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
    }
  }
}
/** Returns true if any docs are buffered in RAM or any
 * deletes are pending (globally or in any per-thread writer). */
synchronized boolean anyChanges() {
  return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing: counts distinct buffered delete terms, global buffer plus
// every active per-thread writer (no ThreadState locking — test-only
// snapshot, same as the original).
public int getBufferedDeleteTermsSize() {
  int size = pendingDeletes.terms.size();
  for (Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator(); it.hasNext(); ) {
    size += it.next().perThread.pendingDeletes.terms.size();
  }
  return size;
}
// for testing: total number of term deletes buffered, global buffer plus
// every active per-thread writer (unlocked snapshot, as before).
public int getNumBufferedDeleteTerms() {
  int numDeletes = pendingDeletes.numTermDeletes.get();
  for (Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator(); it.hasNext(); ) {
    numDeletes += it.next().perThread.pendingDeletes.numTermDeletes.get();
  }
  return numDeletes;
}
/**
 * Returns true if any deletes are buffered, either in the global
 * pendingDeletes or in any active per-thread writer. Locks each
 * ThreadState only long enough to inspect it.
 */
public synchronized boolean anyDeletions() {
  if (pendingDeletes.any()) {
    return true;
  }
  Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
  while (threadsIterator.hasNext()) {
    ThreadState state = threadsIterator.next();
    state.lock();
    try {
      if (state.perThread.pendingDeletes.any()) {
        return true;
      }
    } finally {
      state.unlock();
    }
  }
  return false;
}
/** Marks this writer closed; subsequent mutating calls fail in
 * ensureOpen() with AlreadyClosedException. Does not flush —
 * flushing is driven separately via flushAllThreads(). */
void close() {
  closed = true;
}
/**
 * Indexes one document (optionally buffering a delete-by-term) on a
 * ThreadState obtained from the pool, then handles a resulting segment
 * flush if the per-thread doc limit was hit.
 *
 * Lock discipline: only the acquired ThreadState's lock is held while
 * indexing; the delete term is fanned out to the other DWPTs afterwards
 * (see deleteTerm(Term, ThreadState)) so this thread never holds two
 * DWPT locks at once.
 *
 * @return true if a new segment was flushed as a side effect
 */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
    throws CorruptIndexException, IOException {
  ensureOpen();
  SegmentInfo newSegment = null;
  SegmentDeletes segmentDeletes = null;
  ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
  try {
    DocumentsWriterPerThread dwpt = perThread.perThread;
    long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
    dwpt.updateDocument(doc, analyzer, delTerm);
    numDocsInRAM.incrementAndGet();
    newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
    if (newSegment != null) {
      fieldInfos.update(dwpt.getFieldInfos());
      if (dwpt.pendingDeletes.any()) {
        // Detach the flushed segment's private deletes so they can be
        // pushed after the ThreadState lock is released.
        segmentDeletes = dwpt.pendingDeletes;
        dwpt.pendingDeletes = new SegmentDeletes();
      }
    }
  } finally {
    perThread.unlock();
  }
  if (segmentDeletes != null) {
    pushDeletes(newSegment, segmentDeletes);
  }
  if (newSegment != null) {
    // NOTE(review): clearThreadBindings runs after perThread.unlock();
    // confirm the pool tolerates concurrent re-binding here.
    perThreadPool.clearThreadBindings(perThread);
    indexWriter.addFlushedSegment(newSegment);
    return true;
  }
  // delete term from other DWPTs later, so that this thread
  // doesn't have to lock multiple DWPTs at the same time
  if (delTerm != null) {
    deleteTerm(delTerm, perThread);
  }
  return false;
}
/** Does the synchronized work to finish/flush the
* inverted document. */
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
/* Initial chunks size of the shared int[] blocks used to
store postings data */
/* Allocate another int[] from the shared pool */
/* Return int[]s to the pool */
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
/**
 * Post-add bookkeeping: flushes the per-thread writer when its buffered
 * doc count hits maxBufferedDocs, and folds the per-thread RAM delta into
 * the global counter.
 *
 * @param perThread the DWPT the document was just added to
 * @param perThreadRAMUsedBeforeAdd that DWPT's RAM usage before the add
 * @return the flushed SegmentInfo, or null if no flush happened
 */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
    long perThreadRAMUsedBeforeAdd) throws IOException {
  SegmentInfo newSegment = null;
  // NOTE(review): uses == (not >=); if maxBufferedDocs is lowered while
  // more docs are already buffered this trigger is skipped — confirm intended.
  if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
    newSegment = perThread.flush();
  }
  // AtomicLong.addAndGet is already an atomic read-modify-write; it
  // replaces the hand-rolled compareAndSet retry loop with identical effect.
  ramUsed.addAndGet(perThread.bytesUsed() - perThreadRAMUsedBeforeAdd);
  return newSegment;
}
/**
 * Atomically subtracts the number of docs just flushed from the global
 * buffered-doc counter. (The "substract" typo is kept: the name is part
 * of the interface callers use.)
 *
 * @param numFlushed docs moved out of RAM by a flush
 */
final void substractFlushedNumDocs(int numFlushed) {
  // AtomicInteger.addAndGet replaces the hand-rolled compareAndSet retry
  // loop with the same atomic decrement.
  numDocsInRAM.addAndGet(-numFlushed);
}
/**
 * Hands a flushed segment's private deletes to the global BufferedDeletes.
 * If the flush produced no segment, the deletes are attached to the most
 * recently flushed segment instead (they can only affect earlier segments),
 * or dropped entirely when no segments exist.
 *
 * Lock order: DW -> IW -> BD (synchronizes on indexWriter first).
 */
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
  synchronized(indexWriter) {
    // Lock order: DW -> BD
    if (segmentDeletes.any()) {
      if (segmentInfo != null) {
        bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
      } else if (indexWriter.segmentInfos.size() > 0) {
        if (infoStream != null) {
          message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
        }
        bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
      } else {
        if (infoStream != null) {
          message("flush: drop buffered deletes: no segments");
        }
        // We can safely discard these deletes: since
        // there are no segments, the deletions cannot
        // affect anything.
      }
    }
  }
}
/**
 * Flushes every active per-thread writer that has buffered docs, pushing
 * each flushed segment (and its private deletes) to the IndexWriter.
 * When {@code flushDeletes} is set, the global pendingDeletes buffer is
 * pushed first and per-thread deletes are pushed even for DWPTs with no
 * docs to flush.
 *
 * Lock discipline: one ThreadState lock at a time; the segment is handed
 * to the IndexWriter only after that lock is released (see comment below).
 *
 * @return true if at least one new segment was flushed
 */
final boolean flushAllThreads(final boolean flushDeletes)
    throws IOException {
  if (flushDeletes) {
    synchronized (this) {
      pushDeletes(null, pendingDeletes);
      pendingDeletes = new SegmentDeletes();
    }
  }
  Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
  boolean anythingFlushed = false;
  while (threadsIterator.hasNext()) {
    SegmentInfo newSegment = null;
    SegmentDeletes segmentDeletes = null;
    ThreadState perThread = threadsIterator.next();
    perThread.lock();
    try {
      DocumentsWriterPerThread dwpt = perThread.perThread;
      final int numDocs = dwpt.getNumDocsInRAM();
      // Always flush docs if there are any
      boolean flushDocs = numDocs > 0;
      String segment = dwpt.getSegment();
      // If we are flushing docs, segment must not be null:
      assert segment != null || !flushDocs;
      if (flushDocs) {
        newSegment = dwpt.flush();
        if (newSegment != null) {
          fieldInfos.update(dwpt.getFieldInfos());
          anythingFlushed = true;
          perThreadPool.clearThreadBindings(perThread);
          if (dwpt.pendingDeletes.any()) {
            // Detach the segment's private deletes for pushing below.
            segmentDeletes = dwpt.pendingDeletes;
            dwpt.pendingDeletes = new SegmentDeletes();
          }
        }
      } else if (flushDeletes && dwpt.pendingDeletes.any()) {
        segmentDeletes = dwpt.pendingDeletes;
        dwpt.pendingDeletes = new SegmentDeletes();
      }
    } finally {
      perThread.unlock();
    }
    if (segmentDeletes != null) {
      pushDeletes(newSegment, segmentDeletes);
    }
    if (newSegment != null) {
      // important do unlock the perThread before finishFlushedSegment
      // is called to prevent deadlock on IndexWriter mutex
      indexWriter.addFlushedSegment(newSegment);
    }
  }
  return anythingFlushed;
}
}
// ===== KeepBothMethods: boundary between concatenated file variants (dataset artifact) =====
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// NOTE(review): resolved leftover merge-conflict markers (<<<<<<< MINE /
// ======= / >>>>>>> YOURS). Kept the MINE side: other methods in this copy
// (e.g. setSimilarityProvider's threadStates loop) still reference docState
// fields, and dropping DocState would strand them. Confirm against the
// intended DWPT refactoring, where DocState moves into DocumentsWriterPerThread.
static class DocState {
  DocumentsWriter docWriter;
  Analyzer analyzer;
  PrintStream infoStream;
  SimilarityProvider similarityProvider;
  int docID;
  Document doc;
  String maxTermPrefix;
  // Only called by asserts
  public boolean testPoint(String name) {
    return docWriter.writer.testPoint(name);
  }
  public void clear() {
    // don't hold onto doc nor analyzer, in case it is
    // largish:
    doc = null;
    analyzer = null;
  }
}
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
// NOTE(review): this constructor appears to be a pre-DWPT leftover — it
// assigns this.writer, this.maxThreadStates, flushControl and consumer,
// none of which are declared as fields in this class copy (the declared
// field is indexWriter, and the pool-based constructor below supersedes
// the maxThreadStates model). Confirm it should be deleted in favor of the
// DocumentsWriterPerThreadPool constructor.
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
  this.directory = directory;
  this.writer = writer;
  this.similarityProvider = writer.getConfig().getSimilarityProvider();
  this.maxThreadStates = maxThreadStates;
  this.fieldInfos = fieldInfos;
  this.bufferedDeletes = bufferedDeletes;
  flushControl = writer.flushControl;
  consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
/**
 * Installs a new similarity provider and pushes it into each thread state.
 * NOTE(review): 'threadStates' is not declared in this class copy — the
 * DWPT pool replaced the fixed ThreadState array, so this loop looks like
 * stale pre-DWPT code; confirm whether it should call pushConfigChange()
 * instead (as the sibling copy of this method does).
 */
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
  this.similarityProvider = similarity;
  for(int i=0;i<threadStates.length;i++) {
    threadStates[i].docState.similarityProvider = similarity;
  }
}
/**
 * NOTE(review): duplicate/remnant — setSimilarityProvider above serves the
 * same purpose, and neither a 'similarity' field nor the Similarity type is
 * declared/imported in this class copy. Confirm which variant should survive
 * the merge.
 */
synchronized void setSimilarity(Similarity similarity) {
  this.similarity = similarity;
  pushConfigChange();
}
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles;
// List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Reset after a flush */
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
void close() {
closed = true;
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/** Does the synchronized work to finish/flush the
* inverted document. */
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
/* Initial chunks size of the shared int[] blocks used to
store postings data */
/* Allocate another int[] from the shared pool */
/* Return int[]s to the pool */
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
// NOTE(review): resolved leftover merge-conflict markers (<<<<<<< MINE /
// ======= / >>>>>>> YOURS). Kept the MINE side (DocState) for the same
// reason as the earlier copy: sibling methods still reference docState
// fields. Confirm against the DWPT refactoring before shipping.
static class DocState {
  DocumentsWriter docWriter;
  Analyzer analyzer;
  PrintStream infoStream;
  SimilarityProvider similarityProvider;
  int docID;
  Document doc;
  String maxTermPrefix;
  // Only called by asserts
  public boolean testPoint(String name) {
    return docWriter.writer.testPoint(name);
  }
  public void clear() {
    // don't hold onto doc nor analyzer, in case it is
    // largish:
    doc = null;
    analyzer = null;
  }
}
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
this.similarityProvider = similarity;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarityProvider = similarity;
}
}
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles;
// List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Reset after a flush */
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
void close() {
closed = true;
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/** Does the synchronized work to finish/flush the
* inverted document. */
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
/* Initial chunks size of the shared int[] blocks used to
store postings data */
/* Allocate another int[] from the shared pool */
/* Return int[]s to the pool */
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
}
Safe
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
<<<<<<< MINE
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
SimilarityProvider similarityProvider;
int docID;
Document doc;
String maxTermPrefix;
// Only called by asserts
public boolean testPoint(String name) {
return docWriter.writer.testPoint(name);
}
public void clear() {
// don't hold onto doc nor analyzer, in case it is
// largish:
doc = null;
analyzer = null;
}
}
=======
>>>>>>> YOURS
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
<<<<<<< MINE
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
=======
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
this.similarityProvider = similarity;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarityProvider = similarity;
}
}
>>>>>>> YOURS
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Get current segment name we are writing. */
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles;
// List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Reset after a flush */
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
void close() {
closed = true;
}
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
* buffer this deleted term after the thread state has
* been acquired. */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/** Does the synchronized work to finish/flush the
* inverted document. */
/* Initial chunks size of the shared byte[] blocks used to
store postings data */
/* if you increase this, you must fix field cache impl for
* getTerms/getTermsIndex requires <= 32768. */
/* Initial chunks size of the shared int[] blocks used to
store postings data */
/* Allocate another int[] from the shared pool */
/* Return int[]s to the pool */
/* We have three pools of RAM: Postings, byte blocks
* (holds freq/prox posting data) and per-doc buffers
* (stored fields/term vectors). Different docs require
* varying amount of storage from these classes. For
* example, docs with many unique single-occurrence short
* terms will use up the Postings RAM and hardly any of
* the other two. Whereas docs with very large terms will
* use alot of byte blocks RAM. This method just frees
* allocations from the pools once we are over-budget,
* which balances the pools to match the current docs. */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
/** Flushes every active per-thread writer.  When flushDeletes is true the
 *  globally buffered deletes are pushed first, then each thread's docs and/or
 *  deletes are flushed.  Returns true if any thread produced a new segment. */
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
// Hand the globally buffered deletes to BufferedDeletes before touching the
// per-thread writers, so they apply to previously flushed segments.
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
// Hold the ThreadState lock so no indexing thread mutates this DWPT mid-flush.
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
// Detach this thread's buffered deletes so they can be published
// against the freshly flushed segment below.
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
// Publish outside the per-thread lock to keep its scope minimal.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
}
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
<<<<<<< MINE
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
SimilarityProvider similarityProvider;
int docID;
Document doc;
String maxTermPrefix;
// Only called by asserts
public boolean testPoint(String name) {
return docWriter.writer.testPoint(name);
}
public void clear() {
// don't hold onto doc nor analyzer, in case it is
// largish:
doc = null;
analyzer = null;
}
}
=======
>>>>>>> YOURS
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
// Current segment we are working on
// Next docID to be added
// # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
boolean bufferIsFull;
// True when it's time to write segment
private volatile boolean closed;
// True if an abort is pending
PrintStream infoStream;
SimilarityProvider similarityProvider;
// max # simultaneous threads; if there are more than
// this, they wait for others to finish first
List<String> newFiles;
final IndexWriter indexWriter;
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// If we've allocated 5% over our RAM budget, we then
// free down to 95%
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
}
// Buffer a specific docID for deletion. Currently only
// used when we hit a exception when adding a document
// Deletes for our still-in-RAM (to be flushed next) segment
SegmentDeletes pendingDeletes;
/**
* Create and return a new DocWriterBuffer.
*/
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
  // Push the query deletes down to every active per-thread writer; if no
  // thread states are active, buffer them globally for the next flush.
  boolean appliedToThreadStates = false;
  final Iterator<ThreadState> states = perThreadPool.getActivePerThreadsIterator();
  while (states.hasNext()) {
    final ThreadState state = states.next();
    state.lock();
    try {
      state.perThread.deleteQueries(queries);
      appliedToThreadStates = true;
    } finally {
      state.unlock();
    }
  }
  if (!appliedToThreadStates) {
    synchronized (this) {
      for (final Query q : queries) {
        pendingDeletes.addQuery(q, SegmentDeletes.MAX_INT);
      }
    }
  }
  // Matches the original contract: the caller cannot tell whether docs matched.
  return false;
}
boolean deleteQuery(final Query query) throws IOException {
  // Single-query convenience wrapper over the varargs path.
  return deleteQueries(new Query[] { query });
}
boolean deleteTerms(final Term... terms) throws IOException {
  // Apply the term deletes to every active per-thread writer; if none exist,
  // buffer them globally so the next flush picks them up.
  boolean appliedToThreadStates = false;
  final Iterator<ThreadState> states = perThreadPool.getActivePerThreadsIterator();
  while (states.hasNext()) {
    final ThreadState state = states.next();
    appliedToThreadStates = true;
    state.lock();
    try {
      state.perThread.deleteTerms(terms);
    } finally {
      state.unlock();
    }
  }
  if (!appliedToThreadStates) {
    synchronized (this) {
      for (final Term t : terms) {
        pendingDeletes.addTerm(t, SegmentDeletes.MAX_INT);
      }
    }
  }
  // Matches the original contract: always reports false.
  return false;
}
boolean deleteTerm(final Term term) throws IOException {
  // Single-term convenience wrapper over the varargs path.
  return deleteTerms(new Term[] { term });
}
/** Applies a single-term delete to every active ThreadState except
 *  {@code exclude} (the state that already buffered it directly).
 *  NOTE(review): {@code deleted} is set true for every active state seen,
 *  including the excluded one — the return value means "any active states
 *  existed", not "a delete was applied"; confirm callers expect that. */
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
// Lock the state so the delete cannot race an in-flight document.
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
  // Field metadata shared across all per-thread writers.
  return this.fieldInfos;
}
/** Returns true if any of the fields in the current
 *  buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
  return this.fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
// Propagate the new stream to every per-thread writer's DocState.
pushConfigChange();
}
<<<<<<< MINE
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
=======
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
this.similarityProvider = similarity;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarityProvider = similarity;
}
}
>>>>>>> YOURS
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
  ramBufferSize = (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH)
      ? IndexWriterConfig.DISABLE_AUTO_FLUSH
      : (long) (mb * 1024 * 1024);
}
/** Returns the RAM flush trigger in MB, or the DISABLE_AUTO_FLUSH sentinel. */
synchronized double getRAMBufferSizeMB() {
  if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
    return ramBufferSize;
  }
  return ramBufferSize / 1024. / 1024.;
}
/** Set max buffered docs, which means we will flush by
 *  doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
  this.maxBufferedDocs = count;
}
/** Returns the current doc-count flush trigger. */
int getMaxBufferedDocs() {
  return this.maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM across all threads.
 *  (Removed an orphaned "Get current segment name" javadoc whose method no
 *  longer exists on this class after the DWPT refactor.) */
int getNumDocs() {
  return numDocsInRAM.get();
}
// Files written before the last abort(); exposed so IndexWriter can clean up.
private Collection<String> abortedFiles;
// List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
/** Forwards a diagnostic line to the IndexWriter's info stream, if enabled. */
void message(String message) {
  if (infoStream != null) {
    indexWriter.message("DW: " + message);
  }
}
/** Guards update paths: fails fast once close() has been called. */
private void ensureOpen() throws AlreadyClosedException {
  if (this.closed) {
    throw new AlreadyClosedException("this IndexWriter is closed");
  }
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
// Abort each active per-thread writer in turn, holding its lock so no
// indexing thread can race with the reset.
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
// Log the outcome even when an individual per-thread abort threw.
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
/** Returns true if this writer holds any buffered docs or buffered deletes.
 *  (Replaced a stale "Reset after a flush" javadoc left over from a method
 *  removed in the DWPT refactor.) */
synchronized boolean anyChanges() {
  return numDocsInRAM.get() != 0 || anyDeletions();
}
// for testing
public int getBufferedDeleteTermsSize() {
  // Global buffer first, then each active per-thread writer's buffer.
  int total = pendingDeletes.terms.size();
  final Iterator<ThreadState> states = perThreadPool.getActivePerThreadsIterator();
  while (states.hasNext()) {
    total += states.next().perThread.pendingDeletes.terms.size();
  }
  return total;
}
//for testing
public int getNumBufferedDeleteTerms() {
  // Global counter first, then each active per-thread writer's counter.
  int total = pendingDeletes.numTermDeletes.get();
  final Iterator<ThreadState> states = perThreadPool.getActivePerThreadsIterator();
  while (states.hasNext()) {
    total += states.next().perThread.pendingDeletes.numTermDeletes.get();
  }
  return total;
}
/** Returns true if deletes are buffered globally or in any active
 *  per-thread writer. */
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
// Lock each state so its pendingDeletes cannot be swapped out mid-check.
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
/** Marks this writer as closed; subsequent update calls fail in ensureOpen().
 *  (Removed stale "Flush all pending docs" / "Lock order: IW -> DW" comments
 *  that belonged to a flush method deleted in the DWPT refactor.) */
void close() {
  closed = true;
}
/** Indexes one document (optionally buffering a delete term).  Acquires a
 *  per-thread writer from the pool (which may pause while a flush is
 *  pending), updates the doc, and — if that writer hit its flush trigger —
 *  publishes the new segment and its buffered deletes to the IndexWriter.
 *  Returns true if a segment was flushed as a side effect. */
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
// Snapshot RAM usage so finishAddDocument accounts only this doc's delta.
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
// Detach the writer's buffered deletes for publication below.
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
// Publish outside the ThreadState lock to avoid holding it across IW calls.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
/** If the per-thread writer just reached the maxBufferedDocs limit, flushes
 *  it to a new segment; in all cases folds the RAM delta this document added
 *  into the global counter.  Returns the flushed SegmentInfo, or null when no
 *  flush happened.
 *  (Removed ~20 lines of orphaned comments about postings byte/int block
 *  pools that described the pre-DWPT indexing chain, not this method.) */
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
                                            long perThreadRAMUsedBeforeAdd) throws IOException {
  SegmentInfo newSegment = null;
  // NOTE(review): '==' assumes the counter is checked after every single add;
  // '>=' would be more defensive but changes behavior — left as-is.
  if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
    newSegment = perThread.flush();
  }
  // addAndGet replaces the original hand-rolled compareAndSet retry loop —
  // same atomicity, less code.
  ramUsed.addAndGet(perThread.bytesUsed() - perThreadRAMUsedBeforeAdd);
  return newSegment;
}
/** Subtracts {@code numFlushed} from the in-RAM doc counter after a flush.
 *  (Keeps the historical "substract" spelling — callers depend on the name.
 *  addAndGet replaces the hand-rolled compareAndSet retry loop.) */
final void substractFlushedNumDocs(int numFlushed) {
  numDocsInRAM.addAndGet(-numFlushed);
}
/** Hands a packet of buffered deletes to the BufferedDeletes stream.  When
 *  segmentInfo is non-null the deletes belong to that just-flushed segment;
 *  otherwise they are attached to the last segment in the index, or dropped
 *  entirely when the index has no segments. */
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
/** Flushes every active per-thread writer.  When flushDeletes is true the
 *  globally buffered deletes are pushed first, then each thread's docs and/or
 *  deletes are flushed.  Returns true if any thread produced a new segment. */
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
// Hand the globally buffered deletes to BufferedDeletes before touching the
// per-thread writers, so they apply to previously flushed segments.
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
// Hold the ThreadState lock so no indexing thread mutates this DWPT mid-flush.
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
// Detach this thread's buffered deletes so they can be published
// against the freshly flushed segment below.
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
// Publish outside the per-thread lock to keep its scope minimal.
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
}
// Unstructured variant: raw merged DocumentsWriter.java with unresolved conflict markers
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
boolean bufferIsFull; // True when it's time to write segment
private volatile boolean closed;
PrintStream infoStream;
SimilarityProvider similarityProvider;
List<String> newFiles;
<<<<<<< MINE
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
SimilarityProvider similarityProvider;
int docID;
Document doc;
String maxTermPrefix;
=======
final IndexWriter indexWriter;
>>>>>>> YOURS
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
SegmentDeletes pendingDeletes;
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
  // Merge conflict resolved: keep the per-thread-pool wiring (indexWriter
  // field) and initialize similarityProvider — the declared field — from the
  // writer config.  The stale maxThreadStates assignment had no matching
  // field and was dropped.
  this.directory = directory;
  this.indexWriter = writer;
  this.similarityProvider = writer.getConfig().getSimilarityProvider();
  this.fieldInfos = fieldInfos;
  this.bufferedDeletes = bufferedDeletes;
  this.perThreadPool = indexerThreadPool;
  this.pendingDeletes = new SegmentDeletes();
  this.chain = chain;
  this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
<<<<<<< MINE
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
this.similarityProvider = similarity;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarityProvider = similarity;
=======
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
>>>>>>> YOURS
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles; // List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
// TODO: can we improve performance of this method by keeping track
// here in DW of whether any DWPT has deletions?
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
void close() {
closed = true;
}
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
// /* We have three pools of RAM: Postings, byte blocks
// * (holds freq/prox posting data) and per-doc buffers
// * (stored fields/term vectors). Different docs require
// * varying amount of storage from these classes. For
// * example, docs with many unique single-occurrence short
// * terms will use up the Postings RAM and hardly any of
// * the other two. Whereas docs with very large terms will
// * use alot of byte blocks RAM. This method just frees
// * allocations from the pools once we are over-budget,
// * which balances the pools to match the current docs. */
// void balanceRAM() {
//
// final boolean doBalance;
// final long deletesRAMUsed;
//
// deletesRAMUsed = bufferedDeletes.bytesUsed();
//
// synchronized(this) {
// if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
// return;
// }
//
// doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
// }
//
// if (doBalance) {
//
// if (infoStream != null)
// message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
// " vs trigger=" + toMB(ramBufferSize) +
// " deletesMB=" + toMB(deletesRAMUsed) +
// " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
// " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
//
// final long startBytesUsed = bytesUsed() + deletesRAMUsed;
//
// int iter = 0;
//
// // We free equally from each pool in 32 KB
// // chunks until we are below our threshold
// // (freeLevel)
//
// boolean any = true;
//
// while(bytesUsed()+deletesRAMUsed > freeLevel) {
//
// synchronized(this) {
// if (0 == perDocAllocator.numBufferedBlocks() &&
// 0 == byteBlockAllocator.numBufferedBlocks() &&
// 0 == freeIntBlocks.size() && !any) {
// // Nothing else to free -- must flush now.
// bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
// if (infoStream != null) {
// if (bytesUsed()+deletesRAMUsed > ramBufferSize)
// message(" nothing to free; set bufferIsFull");
// else
// message(" nothing to free");
// }
// break;
// }
//
// if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
// byteBlockAllocator.freeBlocks(1);
// }
// if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
// freeIntBlocks.remove(freeIntBlocks.size()-1);
// bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
// }
// if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
// perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
// }
// }
//
// if ((3 == iter % 4) && any)
// // Ask consumer to free any recycled state
// any = consumer.freeRAM();
//
// iter++;
// }
//
// if (infoStream != null)
// message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
// }
// }
}package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
final AtomicLong bytesUsed = new AtomicLong(0);
Directory directory;
boolean bufferIsFull; // True when it's time to write segment
private volatile boolean closed;
PrintStream infoStream;
SimilarityProvider similarityProvider;
List<String> newFiles;
<<<<<<< MINE
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
static class DocState {
DocumentsWriter docWriter;
Analyzer analyzer;
PrintStream infoStream;
SimilarityProvider similarityProvider;
int docID;
Document doc;
String maxTermPrefix;
=======
final IndexWriter indexWriter;
>>>>>>> YOURS
private AtomicInteger numDocsInRAM = new AtomicInteger(0);
private AtomicLong ramUsed = new AtomicLong(0);
// How much RAM we can use before flushing. This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
// Flush @ this number of docs. If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
private final FieldInfos fieldInfos;
final BufferedDeletes bufferedDeletes;
SegmentDeletes pendingDeletes;
final IndexingChain chain;
final DocumentsWriterPerThreadPool perThreadPool;
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain chain, DocumentsWriterPerThreadPool indexerThreadPool, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
this.directory = directory;
<<<<<<< MINE
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
=======
this.indexWriter = writer;
this.similarity = writer.getConfig().getSimilarity();
>>>>>>> YOURS
this.fieldInfos = fieldInfos;
this.bufferedDeletes = bufferedDeletes;
this.perThreadPool = indexerThreadPool;
this.pendingDeletes = new SegmentDeletes();
this.chain = chain;
this.perThreadPool.initialize(this);
}
boolean deleteQueries(final Query... queries) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
state.perThread.deleteQueries(queries);
deleted = true;
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Query query : queries) {
pendingDeletes.addQuery(query, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteQuery(final Query query) throws IOException {
return deleteQueries(query);
}
boolean deleteTerms(final Term... terms) throws IOException {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
deleted = true;
state.lock();
try {
state.perThread.deleteTerms(terms);
} finally {
state.unlock();
}
}
if (!deleted) {
synchronized(this) {
for (Term term : terms) {
pendingDeletes.addTerm(term, SegmentDeletes.MAX_INT);
}
}
}
return false;
}
boolean deleteTerm(final Term term) throws IOException {
return deleteTerms(term);
}
boolean deleteTerm(final Term term, ThreadState exclude) {
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean deleted = false;
while (threadsIterator.hasNext()) {
deleted = true;
ThreadState state = threadsIterator.next();
if (state != exclude) {
state.lock();
try {
state.perThread.deleteTerms(term);
} finally {
state.unlock();
}
}
}
return deleted;
}
public FieldInfos getFieldInfos() {
return fieldInfos;
}
/** Returns true if any of the fields in the current
* buffered docs have omitTermFreqAndPositions==false */
boolean hasProx() {
return fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
pushConfigChange();
}
<<<<<<< MINE
synchronized void setSimilarityProvider(SimilarityProvider similarity) {
this.similarityProvider = similarity;
for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarityProvider = similarity;
=======
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
pushConfigChange();
}
private final void pushConfigChange() {
Iterator<ThreadState> it = perThreadPool.getAllPerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread perThread = it.next().perThread;
perThread.docState.infoStream = this.infoStream;
perThread.docState.similarity = this.similarity;
>>>>>>> YOURS
}
}
/** Set how much RAM we can use before flushing. */
synchronized void setRAMBufferSizeMB(double mb) {
if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
} else {
ramBufferSize = (long) (mb*1024*1024);
}
}
synchronized double getRAMBufferSizeMB() {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
return ramBufferSize;
} else {
return ramBufferSize/1024./1024.;
}
}
/** Set max buffered docs, which means we will flush by
* doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
maxBufferedDocs = count;
}
int getMaxBufferedDocs() {
return maxBufferedDocs;
}
/** Returns how many docs are currently buffered in RAM. */
int getNumDocs() {
return numDocsInRAM.get();
}
private Collection<String> abortedFiles; // List of files that were written before last abort()
Collection<String> abortedFiles() {
return abortedFiles;
}
void message(String message) {
if (infoStream != null)
indexWriter.message("DW: " + message);
}
private void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Called if we hit an exception at a bad time (when
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
boolean success = false;
try {
if (infoStream != null) {
message("docWriter: abort");
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
perThread.perThread.abort();
} finally {
perThread.unlock();
}
}
success = true;
} finally {
if (infoStream != null) {
message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
}
}
}
synchronized boolean anyChanges() {
return numDocsInRAM.get() != 0 || anyDeletions();
}
public int getBufferedDeleteTermsSize() {
int size = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
size += dwpt.pendingDeletes.terms.size();
}
size += pendingDeletes.terms.size();
return size;
}
//for testing
public int getNumBufferedDeleteTerms() {
int numDeletes = 0;
Iterator<ThreadState> it = perThreadPool.getActivePerThreadsIterator();
while (it.hasNext()) {
DocumentsWriterPerThread dwpt = it.next().perThread;
numDeletes += dwpt.pendingDeletes.numTermDeletes.get();
}
numDeletes += pendingDeletes.numTermDeletes.get();
return numDeletes;
}
// TODO: can we improve performance of this method by keeping track
// here in DW of whether any DWPT has deletions?
public synchronized boolean anyDeletions() {
if (pendingDeletes.any()) {
return true;
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
while (threadsIterator.hasNext()) {
ThreadState state = threadsIterator.next();
state.lock();
try {
if (state.perThread.pendingDeletes.any()) {
return true;
}
} finally {
state.unlock();
}
}
return false;
}
void close() {
closed = true;
}
boolean updateDocument(final Document doc, final Analyzer analyzer, final Term delTerm)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this, doc);
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
long perThreadRAMUsedBeforeAdd = dwpt.bytesUsed();
dwpt.updateDocument(doc, analyzer, delTerm);
numDocsInRAM.incrementAndGet();
newSegment = finishAddDocument(dwpt, perThreadRAMUsedBeforeAdd);
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
perThreadPool.clearThreadBindings(perThread);
indexWriter.addFlushedSegment(newSegment);
return true;
}
// delete term from other DWPTs later, so that this thread
// doesn't have to lock multiple DWPTs at the same time
if (delTerm != null) {
deleteTerm(delTerm, perThread);
}
return false;
}
private final SegmentInfo finishAddDocument(DocumentsWriterPerThread perThread,
long perThreadRAMUsedBeforeAdd) throws IOException {
SegmentInfo newSegment = null;
if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
newSegment = perThread.flush();
}
long deltaRAM = perThread.bytesUsed() - perThreadRAMUsedBeforeAdd;
long oldValue = ramUsed.get();
while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
oldValue = ramUsed.get();
}
return newSegment;
}
final void substractFlushedNumDocs(int numFlushed) {
int oldValue = numDocsInRAM.get();
while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
oldValue = numDocsInRAM.get();
}
}
private final void pushDeletes(SegmentInfo segmentInfo, SegmentDeletes segmentDeletes) {
synchronized(indexWriter) {
// Lock order: DW -> BD
if (segmentDeletes.any()) {
if (segmentInfo != null) {
bufferedDeletes.pushDeletes(segmentDeletes, segmentInfo);
} else if (indexWriter.segmentInfos.size() > 0) {
if (infoStream != null) {
message("flush: push buffered deletes to previously flushed segment " + indexWriter.segmentInfos.lastElement());
}
bufferedDeletes.pushDeletes(segmentDeletes, indexWriter.segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
}
// We can safely discard these deletes: since
// there are no segments, the deletions cannot
// affect anything.
}
}
}
}
final boolean flushAllThreads(final boolean flushDeletes)
throws IOException {
if (flushDeletes) {
synchronized (this) {
pushDeletes(null, pendingDeletes);
pendingDeletes = new SegmentDeletes();
}
}
Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
boolean anythingFlushed = false;
while (threadsIterator.hasNext()) {
SegmentInfo newSegment = null;
SegmentDeletes segmentDeletes = null;
ThreadState perThread = threadsIterator.next();
perThread.lock();
try {
DocumentsWriterPerThread dwpt = perThread.perThread;
final int numDocs = dwpt.getNumDocsInRAM();
// Always flush docs if there are any
boolean flushDocs = numDocs > 0;
String segment = dwpt.getSegment();
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
if (flushDocs) {
newSegment = dwpt.flush();
if (newSegment != null) {
fieldInfos.update(dwpt.getFieldInfos());
anythingFlushed = true;
perThreadPool.clearThreadBindings(perThread);
if (dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
}
} else if (flushDeletes && dwpt.pendingDeletes.any()) {
segmentDeletes = dwpt.pendingDeletes;
dwpt.pendingDeletes = new SegmentDeletes();
}
} finally {
perThread.unlock();
}
if (segmentDeletes != null) {
pushDeletes(newSegment, segmentDeletes);
}
if (newSegment != null) {
// important do unlock the perThread before finishFlushedSegment
// is called to prevent deadlock on IndexWriter mutex
indexWriter.addFlushedSegment(newSegment);
}
}
return anythingFlushed;
}
// /* We have three pools of RAM: Postings, byte blocks
// * (holds freq/prox posting data) and per-doc buffers
// * (stored fields/term vectors). Different docs require
// * varying amount of storage from these classes. For
// * example, docs with many unique single-occurrence short
// * terms will use up the Postings RAM and hardly any of
// * the other two. Whereas docs with very large terms will
// * use alot of byte blocks RAM. This method just frees
// * allocations from the pools once we are over-budget,
// * which balances the pools to match the current docs. */
// void balanceRAM() {
//
// final boolean doBalance;
// final long deletesRAMUsed;
//
// deletesRAMUsed = bufferedDeletes.bytesUsed();
//
// synchronized(this) {
// if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
// return;
// }
//
// doBalance = bytesUsed() + deletesRAMUsed >= ramBufferSize;
// }
//
// if (doBalance) {
//
// if (infoStream != null)
// message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
// " vs trigger=" + toMB(ramBufferSize) +
// " deletesMB=" + toMB(deletesRAMUsed) +
// " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
// " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
//
// final long startBytesUsed = bytesUsed() + deletesRAMUsed;
//
// int iter = 0;
//
// // We free equally from each pool in 32 KB
// // chunks until we are below our threshold
// // (freeLevel)
//
// boolean any = true;
//
// while(bytesUsed()+deletesRAMUsed > freeLevel) {
//
// synchronized(this) {
// if (0 == perDocAllocator.numBufferedBlocks() &&
// 0 == byteBlockAllocator.numBufferedBlocks() &&
// 0 == freeIntBlocks.size() && !any) {
// // Nothing else to free -- must flush now.
// bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
// if (infoStream != null) {
// if (bytesUsed()+deletesRAMUsed > ramBufferSize)
// message(" nothing to free; set bufferIsFull");
// else
// message(" nothing to free");
// }
// break;
// }
//
// if ((0 == iter % 4) && byteBlockAllocator.numBufferedBlocks() > 0) {
// byteBlockAllocator.freeBlocks(1);
// }
// if ((1 == iter % 4) && freeIntBlocks.size() > 0) {
// freeIntBlocks.remove(freeIntBlocks.size()-1);
// bytesUsed.addAndGet(-INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT);
// }
// if ((2 == iter % 4) && perDocAllocator.numBufferedBlocks() > 0) {
// perDocAllocator.freeBlocks(32); // Remove upwards of 32 blocks (each block is 1K)
// }
// }
//
// if ((3 == iter % 4) && any)
// // Ask consumer to free any recycled state
// any = consumer.freeRAM();
//
// iter++;
// }
//
// if (infoStream != null)
// message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
// }
// }
}
Diff Result
No diff
Case 35 - java_lucenesolr.rev_6e8e0_28cae..BitVector.java
Base
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/** Optimized implementation of a vector of bits. This is more-or-less like
java.util.BitSet, but also includes the following:
<ul>
<li>a count() method, which efficiently computes the number of one bits;</li>
<li>optimized read from and write to disk;</li>
<li>inlinable get() method;</li>
<li>store and load, as bit set or d-gaps, depending on sparseness;</li>
</ul>
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[(size >> 3) + 1];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1)
count++;
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
// @Override -- not until Java 1.6
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
if (isSparse()) {
writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size
output.writeInt(count()); // write count
output.writeBytes(bits, bits.length);
}
/** Write as a d-gaps list */
private void writeDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int n = count();
int m = bits.length;
for (int i=0; i<m && n>0; i++) {
if (bits[i]!=0) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
n -= BYTE_COUNTS[bits[i] & 0xFF];
}
}
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
// note: order of comparisons below set to favor smaller values (no binary range search.)
// note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
// note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
// multiplying count by (8+8) or (8+16) or (8+24) etc.:
// - first 8 for writing bits[i] (1 byte vs. 1 bit), and
// - second part for writing the byte-number d-gap as vint.
// note: factor is for read/write of byte-arrays being faster than vints.
int factor = 10;
if (bits.length < (1<< 7)) return factor * (4 + (8+ 8)*count()) < size();
if (bits.length < (1<<14)) return factor * (4 + (8+16)*count()) < size();
if (bits.length < (1<<21)) return factor * (4 + (8+24)*count()) < size();
if (bits.length < (1<<28)) return factor * (4 + (8+32)*count()) < size();
return factor * (4 + (8+40)*count()) < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
size = input.readInt(); // read size
if (size == -1) {
readDgaps(input);
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set: count, then the raw packed bytes (size was already
read by the caller). */
private void readBits(IndexInput input) throws IOException {
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
int last=0;
int n = count();
// Each entry is a vint byte-gap from the previously stored byte followed
// by the byte value; stop once every set bit has been accounted for.
// Relies on the stream containing exactly the entries written by
// writeDgaps, in order.
while (n>0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
}
}
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
public BitVector subset(int start, int end) {
if (start < 0 || end > size() || end < start)
throw new IndexOutOfBoundsException();
// Special case -- return empty vector if start == end
if (end == start) return new BitVector(0);
byte[] bits = new byte[((end - start - 1) >>> 3) + 1];
int s = start >>> 3;
for (int i = 0; i < bits.length; i++) {
// Each result byte is stitched from two adjacent source bytes, shifted
// so that bit `start` becomes bit 0 of the new vector; `next` is 0 past
// the end of the source array.
int cur = 0xFF & this.bits[i + s];
int next = i + s + 1 >= this.bits.length ? 0 : 0xFF & this.bits[i + s + 1];
bits[i] = (byte) ((cur >>> (start & 7)) | ((next << (8 - (start & 7)))));
}
// Zero the padding bits past `end` in the final byte so they cannot leak
// into a later count(). When bitsToClear is 0 the mask keeps every bit.
int bitsToClear = (bits.length * 8 - (end - start)) % 8;
bits[bits.length - 1] &= ~(0xFF << (8 - bitsToClear));
return new BitVector(bits, end - start);
}
}
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/** Optimized implementation of a vector of bits. This is more-or-less like
java.util.BitSet, but also includes the following:
<ul>
<li>a count() method, which efficiently computes the number of one bits;</li>
<li>optimized read from and write to disk;</li>
<li>inlinable get() method;</li>
<li>store and load, as bit set or d-gaps, depending on sparseness;</li>
</ul>
<p>Not thread-safe; callers must synchronize externally.</p>
*/
public final class BitVector implements Cloneable, Bits {
// Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
private byte[] bits;
// Number of valid bits; one greater than the largest legal bit index.
private int size;
// Cached population count; -1 marks it stale (count() recomputes lazily).
private int count;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
// NOTE(review): allocates one extra byte when size is a multiple of 8;
// harmless here (the extra byte stays zero) but wasteful — confirm.
bits = new byte[(size >> 3) + 1];
count = 0;
}
// Package-private: adopts (does not copy) the given array; the cached
// count is marked stale.
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
// invalidate the cached count
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
// keep the cached count valid instead of invalidating it
if (count != -1)
count++;
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
// @Override -- not until Java 1.6
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
return c;
}
// Lookup table: BYTE_COUNTS[b] is the popcount of the unsigned byte value b.
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
if (isSparse()) {
writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size
output.writeInt(count()); // write count
output.writeBytes(bits, bits.length);
}
/** Write as a d-gaps list */
private void writeDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int n = count();
int m = bits.length;
// One (vint byte-gap, byte value) pair per non-zero byte; stop early once
// every set bit has been accounted for.
for (int i=0; i<m && n>0; i++) {
if (bits[i]!=0) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
n -= BYTE_COUNTS[bits[i] & 0xFF];
}
}
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
// note: order of comparisons below set to favor smaller values (no binary range search.)
// note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
// note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
// multiplying count by (8+8) or (8+16) or (8+24) etc.:
// - first 8 for writing bits[i] (1 byte vs. 1 bit), and
// - second part for writing the byte-number d-gap as vint.
// note: factor is for read/write of byte-arrays being faster than vints.
int factor = 10;
if (bits.length < (1<< 7)) return factor * (4 + (8+ 8)*count()) < size();
if (bits.length < (1<<14)) return factor * (4 + (8+16)*count()) < size();
if (bits.length < (1<<21)) return factor * (4 + (8+24)*count()) < size();
if (bits.length < (1<<28)) return factor * (4 + (8+32)*count()) < size();
return factor * (4 + (8+40)*count()) < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
size = input.readInt(); // read size
// -1 is the format marker written by writeDgaps; otherwise the first
// int is the size itself (plain bit-set format).
if (size == -1) {
readDgaps(input);
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set */
private void readBits(IndexInput input) throws IOException {
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
int last=0;
int n = count();
// Reconstruct absolute byte positions with a running sum of the gaps;
// stop once every set bit has been accounted for.
while (n>0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
}
}
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
public BitVector subset(int start, int end) {
if (start < 0 || end > size() || end < start)
throw new IndexOutOfBoundsException();
// Special case -- return empty vector if start == end
if (end == start) return new BitVector(0);
byte[] bits = new byte[((end - start - 1) >>> 3) + 1];
int s = start >>> 3;
for (int i = 0; i < bits.length; i++) {
// Stitch each result byte from two adjacent source bytes, shifted so
// that bit `start` becomes bit 0 of the new vector.
int cur = 0xFF & this.bits[i + s];
int next = i + s + 1 >= this.bits.length ? 0 : 0xFF & this.bits[i + s + 1];
bits[i] = (byte) ((cur >>> (start & 7)) | ((next << (8 - (start & 7)))));
}
// Zero the padding bits past `end` in the final byte.
int bitsToClear = (bits.length * 8 - (end - start)) % 8;
bits[bits.length - 1] &= ~(0xFF << (8 - bitsToClear));
return new BitVector(bits, end - start);
}
}
Left
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/** Optimized implementation of a vector of bits. This is more-or-less like
 * java.util.BitSet, but also includes the following:
 * <ul>
 * <li>a count() method, which efficiently computes the number of one bits;</li>
 * <li>optimized read from and write to disk;</li>
 * <li>inlinable get() method;</li>
 * <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
 * </ul>
 *
 * <p>Not thread-safe; callers must provide external synchronization.
 *
 * @lucene.internal
 */
public final class BitVector implements Cloneable, Bits {
  // Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
  private byte[] bits;
  // Number of valid bits; one greater than the largest legal bit index.
  private int size;
  // Cached population count; -1 marks it stale (count() recomputes lazily).
  private int count;
  // On-disk format version this vector was read from; stays at its default
  // (0) for vectors created in memory.
  private int version;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[getNumBytes(size)];
    count = 0;
  }

  // Package-private: adopts (does not copy) the given array; the cached
  // count is marked stale.
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  // Number of bytes needed to hold `size` bits: ceil(size / 8).
  private int getNumBytes(int size) {
    int bytesLength = size >>> 3;
    if ((size & 7) != 0) {
      bytesLength++;
    }
    return bytesLength;
  }

  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one. */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1; // invalidate the cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   * returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;
    else {
      bits[pos] = (byte) (v | flag);
      if (count != -1) {
        // keep the cached count valid instead of invalidating it
        count++;
        assert count <= size;
      }
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero. */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1;
  }

  /** Sets the value of <code>bit</code> to zero, and
   * returns true if bit was previously set. */
  public final boolean getAndClear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) == 0) {
      return false;
    } else {
      bits[pos] &= ~flag;
      if (count != -1) {
        // keep the cached count valid instead of invalidating it
        count--;
        assert count >= 0;
      }
      return true;
    }
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
      <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector. This is also one greater than
      the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  @Override
  public int length() {
    return size;
  }

  /** Returns the total number of one bits in this vector. This is efficiently
      computed and cached, so that, if the vector is not changed, no
      recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++) {
        c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
      }
      count = c;
    }
    return count;
  }

  /** For testing */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++) {
      c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
    }
    return c;
  }

  // Lookup table: BYTE_COUNTS[b] is the popcount of the unsigned byte value b.
  private static final byte[] BYTE_COUNTS = { // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };

  private static String CODEC = "BitVector";
  // Version before version tracking was added:
  public final static int VERSION_PRE = -1;
  // First version:
  public final static int VERSION_START = 0;
  // Changed DGaps to encode gaps between cleared bits, not
  // set:
  public final static int VERSION_DGAPS_CLEARED = 1;
  // Increment version to change it:
  public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;

  /** Returns the on-disk format version this vector was read from. */
  public int getVersion() {
    return version;
  }

  /** Writes this vector to the file <code>name</code> in Directory
      <code>d</code>, in a format that can be read by the constructor {@link
      #BitVector(Directory, String)}. */
  public final void write(Directory d, String name) throws IOException {
    IndexOutput output = d.createOutput(name);
    try {
      output.writeInt(-2); // -2 marks the versioned header format
      CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
      if (isSparse()) {
        // sparse bit-set more efficiently saved as d-gaps.
        writeClearedDgaps(output);
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Invert all bits */
  public void invertAll() {
    if (count != -1) {
      count = size - count;
    }
    if (bits.length > 0) {
      for (int idx = 0; idx < bits.length; idx++) {
        bits[idx] = (byte) (~bits[idx]);
      }
      clearUnusedBits();
    }
  }

  // Zero the padding bits past `size` in the last byte so they cannot leak
  // into count() or the serialized form.
  private void clearUnusedBits() {
    // Take care not to invert the "unused" bits in the
    // last byte:
    if (bits.length > 0) {
      final int lastNBits = size & 7;
      if (lastNBits != 0) {
        final int mask = (1 << lastNBits) - 1;
        bits[bits.length - 1] &= mask;
      }
    }
  }

  /** Set all bits */
  public void setAll() {
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    count = size;
  }

  /** Write as a bit set */
  private void writeBits(IndexOutput output) throws IOException {
    output.writeInt(size()); // write size
    output.writeInt(count()); // write count
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list over the bytes that contain cleared bits. */
  private void writeClearedDgaps(IndexOutput output) throws IOException {
    output.writeInt(-1); // mark using d-gaps
    output.writeInt(size()); // write size
    output.writeInt(count()); // write count
    int last = 0;
    int numCleared = size() - count();
    int m = bits.length;
    for (int i = 0; i < m && numCleared > 0; i++) {
      // FIX: compare against (byte) 0xff. bits[i] is a byte (a fully set
      // byte is -1), so the original comparison against the int literal
      // 0xff was always true and emitted an entry for EVERY byte, defeating
      // the sparse encoding. The reader is driven by numCleared and the
      // gaps, so the corrected (smaller) output decodes identically.
      if (bits[i] != (byte) 0xff) {
        output.writeVInt(i - last); // gap from the previously emitted byte
        output.writeByte(bits[i]);
        last = i;
        numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
        assert numCleared >= 0;
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    final int clearedCount = size() - count();
    if (clearedCount == 0) {
      return true;
    }
    final int avgGapLength = bits.length / clearedCount;
    // expected number of bytes for vInt encoding of each gap
    final int expectedDGapBytes;
    if (avgGapLength <= (1 << 7)) {
      expectedDGapBytes = 1;
    } else if (avgGapLength <= (1 << 14)) {
      expectedDGapBytes = 2;
    } else if (avgGapLength <= (1 << 21)) {
      expectedDGapBytes = 3;
    } else if (avgGapLength <= (1 << 28)) {
      expectedDGapBytes = 4;
    } else {
      expectedDGapBytes = 5;
    }
    // +1 because we write the byte itself that contains the
    // set bit
    final int bytesPerSetBit = expectedDGapBytes + 1;
    // note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
    // FIX: use 8L so the estimate is computed in long arithmetic; the
    // original all-int expression could overflow for very large counts.
    final long expectedBits = 32 + 8L * bytesPerSetBit * count();
    // note: factor is for read/write of byte-arrays being faster than vints.
    final long factor = 10;
    return factor * expectedBits < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
      <code>d</code>, as written by the {@link #write} method.
   */
  public BitVector(Directory d, String name) throws IOException {
    IndexInput input = d.openInput(name);
    try {
      final int firstInt = input.readInt();
      if (firstInt == -2) {
        // New format, with full header & version:
        version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
        size = input.readInt();
      } else {
        version = VERSION_PRE;
        size = firstInt;
      }
      if (size == -1) {
        // -1 size marks a d-gaps payload; its meaning changed at
        // VERSION_DGAPS_CLEARED from set-bit gaps to cleared-bit gaps.
        if (version >= VERSION_DGAPS_CLEARED) {
          readClearedDgaps(input);
        } else {
          readSetDgaps(input);
        }
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    count = input.readInt(); // read count
    bits = new byte[getNumBytes(size)]; // allocate bits
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list (pre-VERSION_DGAPS_CLEARED: gaps between set bytes) */
  private void readSetDgaps(IndexInput input) throws IOException {
    size = input.readInt(); // (re)read size
    count = input.readInt(); // read count
    bits = new byte[(size >> 3) + 1]; // allocate bits (legacy sizing)
    int last = 0;
    int n = count();
    while (n > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
      assert n >= 0;
    }
  }

  /** read as a d-gaps cleared bits list */
  private void readClearedDgaps(IndexInput input) throws IOException {
    size = input.readInt(); // (re)read size
    count = input.readInt(); // read count
    // NOTE(review): when size is a multiple of 8 this allocates one byte more
    // than getNumBytes(size); Arrays.fill sets it to 0xff and clearUnusedBits
    // does not mask it (lastNBits == 0), so a later recomputed count would see
    // 8 phantom bits. Kept as-is for format compatibility — confirm whether
    // getNumBytes(size) is safe here.
    bits = new byte[(size >> 3) + 1]; // allocate bits
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    int last = 0;
    int numCleared = size() - count();
    while (numCleared > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
      assert numCleared >= 0;
    }
  }
}
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/** Optimized implementation of a vector of bits. This is more-or-less like
 * java.util.BitSet, but also includes the following:
 * <ul>
 * <li>a count() method, which efficiently computes the number of one bits;</li>
 * <li>optimized read from and write to disk;</li>
 * <li>inlinable get() method;</li>
 * <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
 * </ul>
 *
 * <p>Not thread-safe; callers must provide external synchronization.
 *
 * @lucene.internal
 */
public final class BitVector implements Cloneable, Bits {
  // Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
  private byte[] bits;
  // Number of valid bits; one greater than the largest legal bit index.
  private int size;
  // Cached population count; -1 marks it stale (count() recomputes lazily).
  private int count;
  // On-disk format version this vector was read from; stays at its default
  // (0) for vectors created in memory.
  private int version;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[getNumBytes(size)];
    count = 0;
  }

  // Package-private: adopts (does not copy) the given array; the cached
  // count is marked stale.
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  // Number of bytes needed to hold `size` bits: ceil(size / 8).
  private int getNumBytes(int size) {
    int bytesLength = size >>> 3;
    if ((size & 7) != 0) {
      bytesLength++;
    }
    return bytesLength;
  }

  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one. */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1; // invalidate the cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   * returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;
    else {
      bits[pos] = (byte) (v | flag);
      if (count != -1) {
        // keep the cached count valid instead of invalidating it
        count++;
        assert count <= size;
      }
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero. */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1;
  }

  /** Sets the value of <code>bit</code> to zero, and
   * returns true if bit was previously set. */
  public final boolean getAndClear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) == 0) {
      return false;
    } else {
      bits[pos] &= ~flag;
      if (count != -1) {
        // keep the cached count valid instead of invalidating it
        count--;
        assert count >= 0;
      }
      return true;
    }
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
      <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector. This is also one greater than
      the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  @Override
  public int length() {
    return size;
  }

  /** Returns the total number of one bits in this vector. This is efficiently
      computed and cached, so that, if the vector is not changed, no
      recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++) {
        c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
      }
      count = c;
    }
    return count;
  }

  /** For testing */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++) {
      c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
    }
    return c;
  }

  // Lookup table: BYTE_COUNTS[b] is the popcount of the unsigned byte value b.
  private static final byte[] BYTE_COUNTS = { // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };

  private static String CODEC = "BitVector";
  // Version before version tracking was added:
  public final static int VERSION_PRE = -1;
  // First version:
  public final static int VERSION_START = 0;
  // Changed DGaps to encode gaps between cleared bits, not
  // set:
  public final static int VERSION_DGAPS_CLEARED = 1;
  // Increment version to change it:
  public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;

  /** Returns the on-disk format version this vector was read from. */
  public int getVersion() {
    return version;
  }

  /** Writes this vector to the file <code>name</code> in Directory
      <code>d</code>, in a format that can be read by the constructor {@link
      #BitVector(Directory, String)}. */
  public final void write(Directory d, String name) throws IOException {
    IndexOutput output = d.createOutput(name);
    try {
      output.writeInt(-2); // -2 marks the versioned header format
      CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
      if (isSparse()) {
        // sparse bit-set more efficiently saved as d-gaps.
        writeClearedDgaps(output);
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Invert all bits */
  public void invertAll() {
    if (count != -1) {
      count = size - count;
    }
    if (bits.length > 0) {
      for (int idx = 0; idx < bits.length; idx++) {
        bits[idx] = (byte) (~bits[idx]);
      }
      clearUnusedBits();
    }
  }

  // Zero the padding bits past `size` in the last byte so they cannot leak
  // into count() or the serialized form.
  private void clearUnusedBits() {
    // Take care not to invert the "unused" bits in the
    // last byte:
    if (bits.length > 0) {
      final int lastNBits = size & 7;
      if (lastNBits != 0) {
        final int mask = (1 << lastNBits) - 1;
        bits[bits.length - 1] &= mask;
      }
    }
  }

  /** Set all bits */
  public void setAll() {
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    count = size;
  }

  /** Write as a bit set */
  private void writeBits(IndexOutput output) throws IOException {
    output.writeInt(size()); // write size
    output.writeInt(count()); // write count
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list over the bytes that contain cleared bits. */
  private void writeClearedDgaps(IndexOutput output) throws IOException {
    output.writeInt(-1); // mark using d-gaps
    output.writeInt(size()); // write size
    output.writeInt(count()); // write count
    int last = 0;
    int numCleared = size() - count();
    int m = bits.length;
    for (int i = 0; i < m && numCleared > 0; i++) {
      // FIX: compare against (byte) 0xff. bits[i] is a byte (a fully set
      // byte is -1), so the original comparison against the int literal
      // 0xff was always true and emitted an entry for EVERY byte, defeating
      // the sparse encoding. The reader is driven by numCleared and the
      // gaps, so the corrected (smaller) output decodes identically.
      if (bits[i] != (byte) 0xff) {
        output.writeVInt(i - last); // gap from the previously emitted byte
        output.writeByte(bits[i]);
        last = i;
        numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
        assert numCleared >= 0;
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    final int clearedCount = size() - count();
    if (clearedCount == 0) {
      return true;
    }
    final int avgGapLength = bits.length / clearedCount;
    // expected number of bytes for vInt encoding of each gap
    final int expectedDGapBytes;
    if (avgGapLength <= (1 << 7)) {
      expectedDGapBytes = 1;
    } else if (avgGapLength <= (1 << 14)) {
      expectedDGapBytes = 2;
    } else if (avgGapLength <= (1 << 21)) {
      expectedDGapBytes = 3;
    } else if (avgGapLength <= (1 << 28)) {
      expectedDGapBytes = 4;
    } else {
      expectedDGapBytes = 5;
    }
    // +1 because we write the byte itself that contains the
    // set bit
    final int bytesPerSetBit = expectedDGapBytes + 1;
    // note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
    // FIX: use 8L so the estimate is computed in long arithmetic; the
    // original all-int expression could overflow for very large counts.
    final long expectedBits = 32 + 8L * bytesPerSetBit * count();
    // note: factor is for read/write of byte-arrays being faster than vints.
    final long factor = 10;
    return factor * expectedBits < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
      <code>d</code>, as written by the {@link #write} method.
   */
  public BitVector(Directory d, String name) throws IOException {
    IndexInput input = d.openInput(name);
    try {
      final int firstInt = input.readInt();
      if (firstInt == -2) {
        // New format, with full header & version:
        version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
        size = input.readInt();
      } else {
        version = VERSION_PRE;
        size = firstInt;
      }
      if (size == -1) {
        // -1 size marks a d-gaps payload; its meaning changed at
        // VERSION_DGAPS_CLEARED from set-bit gaps to cleared-bit gaps.
        if (version >= VERSION_DGAPS_CLEARED) {
          readClearedDgaps(input);
        } else {
          readSetDgaps(input);
        }
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    count = input.readInt(); // read count
    bits = new byte[getNumBytes(size)]; // allocate bits
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list (pre-VERSION_DGAPS_CLEARED: gaps between set bytes) */
  private void readSetDgaps(IndexInput input) throws IOException {
    size = input.readInt(); // (re)read size
    count = input.readInt(); // read count
    bits = new byte[(size >> 3) + 1]; // allocate bits (legacy sizing)
    int last = 0;
    int n = count();
    while (n > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
      assert n >= 0;
    }
  }

  /** read as a d-gaps cleared bits list */
  private void readClearedDgaps(IndexInput input) throws IOException {
    size = input.readInt(); // (re)read size
    count = input.readInt(); // read count
    // NOTE(review): when size is a multiple of 8 this allocates one byte more
    // than getNumBytes(size); Arrays.fill sets it to 0xff and clearUnusedBits
    // does not mask it (lastNBits == 0), so a later recomputed count would see
    // 8 phantom bits. Kept as-is for format compatibility — confirm whether
    // getNumBytes(size) is safe here.
    bits = new byte[(size >> 3) + 1]; // allocate bits
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    int last = 0;
    int numCleared = size() - count();
    while (numCleared > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
      assert numCleared >= 0;
    }
  }
}
Right
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
  java.util.BitSet, but also includes the following:
  <ul>
  <li>a count() method, which efficiently computes the number of one bits;</li>
  <li>optimized read from and write to disk;</li>
  <li>inlinable get() method;</li>
  <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
  </ul>
*/
public final class BitVector implements Cloneable, Bits {

  // Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
  private byte[] bits;
  // Number of addressable bits; valid indexes are 0 .. size-1.
  private int size;
  // Cached population count; -1 is a sentinel meaning "stale, recompute in count()".
  private int count;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[(size >> 3) + 1];
    count = 0;
  }

  /** Wraps an existing byte array without copying; the cached count starts stale. */
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  /** Returns a deep copy: the underlying byte array is duplicated. */
  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1; // invalidate cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   * returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;
    else {
      bits[pos] = (byte) (v | flag);
      // keep the cache coherent only when it is currently valid
      if (count != -1)
        count++;
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1; // invalidate cached count
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
    <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector. This is also one greater than
    the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  // @Override -- not until Java 1.6
  public int length() {
    return size;
  }

  /** Returns the total number of one bits in this vector. This is efficiently
    computed and cached, so that, if the vector is not changed, no
    recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified (count == -1 marks the cache as stale)
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++)
        c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
      count = c;
    }
    return count;
  }

  /** For testing: always recomputes, bypassing the cache. */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++)
      c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
    return c;
  }

  // Lookup table: BYTE_COUNTS[b] is the number of set bits in byte value b.
  private static final byte[] BYTE_COUNTS = { // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };

  /** Writes this vector to the file <code>name</code> in Directory
    <code>d</code>, in a format that can be read by the constructor {@link
    #BitVector(Directory, String)}. */
  public final void write(Directory d, String name, IOContext context) throws IOException {
    IndexOutput output = d.createOutput(name, context);
    try {
      if (isSparse()) {
        writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Write as a bit set */
  private void writeBits(IndexOutput output) throws IOException {
    output.writeInt(size());  // write size
    output.writeInt(count()); // write count
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list: only non-zero bytes are stored, each as a
   * vInt gap from the previously stored byte index followed by the byte. */
  private void writeDgaps(IndexOutput output) throws IOException {
    output.writeInt(-1);      // mark using d-gaps
    output.writeInt(size());  // write size
    output.writeInt(count()); // write count
    int last=0;
    int n = count();
    int m = bits.length;
    for (int i=0; i<m && n>0; i++) {
      if (bits[i]!=0) {
        output.writeVInt(i-last);
        output.writeByte(bits[i]);
        last = i;
        n -= BYTE_COUNTS[bits[i] & 0xFF];
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    // note: order of comparisons below set to favor smaller values (no binary range search.)
    // note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
    // note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
    // multiplying count by (8+8) or (8+16) or (8+24) etc.:
    // - first 8 for writing bits[i] (1 byte vs. 1 bit), and
    // - second part for writing the byte-number d-gap as vint.
    // note: factor is for read/write of byte-arrays being faster than vints.
    int factor = 10;
    if (bits.length < (1<< 7)) return factor * (4 + (8+ 8)*count()) < size();
    if (bits.length < (1<<14)) return factor * (4 + (8+16)*count()) < size();
    if (bits.length < (1<<21)) return factor * (4 + (8+24)*count()) < size();
    if (bits.length < (1<<28)) return factor * (4 + (8+32)*count()) < size();
    return factor * (4 + (8+40)*count()) < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
  */
  public BitVector(Directory d, String name, IOContext context) throws IOException {
    IndexInput input = d.openInput(name, context);
    try {
      size = input.readInt(); // read size
      // A stored size of -1 marks the d-gaps format; the real size follows.
      if (size == -1) {
        readDgaps(input);
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    count = input.readInt();           // read count
    bits = new byte[(size >> 3) + 1];  // allocate bits
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list (inverse of {@link #writeDgaps}) */
  private void readDgaps(IndexInput input) throws IOException {
    size = input.readInt();            // (re)read size
    count = input.readInt();           // read count
    bits = new byte[(size >> 3) + 1];  // allocate bits
    int last=0;
    int n = count();
    // Stop once all 'count' set bits have been accounted for.
    while (n>0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
    }
  }

  /**
   * Retrieve a subset of this BitVector.
   *
   * @param start
   *          starting index, inclusive
   * @param end
   *          ending index, exclusive
   * @return subset
   */
  public BitVector subset(int start, int end) {
    if (start < 0 || end > size() || end < start)
      throw new IndexOutOfBoundsException();
    // Special case -- return empty vector if start == end
    if (end == start) return new BitVector(0);
    byte[] bits = new byte[((end - start - 1) >>> 3) + 1];
    int s = start >>> 3;
    for (int i = 0; i < bits.length; i++) {
      int cur = 0xFF & this.bits[i + s];
      int next = i + s + 1 >= this.bits.length ? 0 : 0xFF & this.bits[i + s + 1];
      // Stitch each destination byte from two adjacent source bytes, shifted
      // so that bit 'start' becomes bit 0 of the result.
      bits[i] = (byte) ((cur >>> (start & 7)) | ((next << (8 - (start & 7)))));
    }
    // Zero out any bits past 'end' in the final destination byte.
    int bitsToClear = (bits.length * 8 - (end - start)) % 8;
    bits[bits.length - 1] &= ~(0xFF << (8 - bitsToClear));
    return new BitVector(bits, end - start);
  }
}
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
  java.util.BitSet, but also includes the following:
  <ul>
  <li>a count() method, which efficiently computes the number of one bits;</li>
  <li>optimized read from and write to disk;</li>
  <li>inlinable get() method;</li>
  <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
  </ul>
*/
public final class BitVector implements Cloneable, Bits {

  // Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
  private byte[] bits;
  // Number of addressable bits; valid indexes are 0 .. size-1.
  private int size;
  // Cached population count; -1 is a sentinel meaning "stale, recompute in count()".
  private int count;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[(size >> 3) + 1];
    count = 0;
  }

  /** Wraps an existing byte array without copying; the cached count starts stale. */
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  /** Returns a deep copy: the underlying byte array is duplicated. */
  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1; // invalidate cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   * returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;
    else {
      bits[pos] = (byte) (v | flag);
      // keep the cache coherent only when it is currently valid
      if (count != -1)
        count++;
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1; // invalidate cached count
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
    <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector. This is also one greater than
    the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  // @Override -- not until Java 1.6
  public int length() {
    return size;
  }

  /** Returns the total number of one bits in this vector. This is efficiently
    computed and cached, so that, if the vector is not changed, no
    recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified (count == -1 marks the cache as stale)
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++)
        c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
      count = c;
    }
    return count;
  }

  /** For testing: always recomputes, bypassing the cache. */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++)
      c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
    return c;
  }

  // Lookup table: BYTE_COUNTS[b] is the number of set bits in byte value b.
  private static final byte[] BYTE_COUNTS = { // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };

  /** Writes this vector to the file <code>name</code> in Directory
    <code>d</code>, in a format that can be read by the constructor {@link
    #BitVector(Directory, String)}. */
  public final void write(Directory d, String name, IOContext context) throws IOException {
    IndexOutput output = d.createOutput(name, context);
    try {
      if (isSparse()) {
        writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Write as a bit set */
  private void writeBits(IndexOutput output) throws IOException {
    output.writeInt(size());  // write size
    output.writeInt(count()); // write count
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list: only non-zero bytes are stored, each as a
   * vInt gap from the previously stored byte index followed by the byte. */
  private void writeDgaps(IndexOutput output) throws IOException {
    output.writeInt(-1);      // mark using d-gaps
    output.writeInt(size());  // write size
    output.writeInt(count()); // write count
    int last=0;
    int n = count();
    int m = bits.length;
    for (int i=0; i<m && n>0; i++) {
      if (bits[i]!=0) {
        output.writeVInt(i-last);
        output.writeByte(bits[i]);
        last = i;
        n -= BYTE_COUNTS[bits[i] & 0xFF];
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    // note: order of comparisons below set to favor smaller values (no binary range search.)
    // note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
    // note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
    // multiplying count by (8+8) or (8+16) or (8+24) etc.:
    // - first 8 for writing bits[i] (1 byte vs. 1 bit), and
    // - second part for writing the byte-number d-gap as vint.
    // note: factor is for read/write of byte-arrays being faster than vints.
    int factor = 10;
    if (bits.length < (1<< 7)) return factor * (4 + (8+ 8)*count()) < size();
    if (bits.length < (1<<14)) return factor * (4 + (8+16)*count()) < size();
    if (bits.length < (1<<21)) return factor * (4 + (8+24)*count()) < size();
    if (bits.length < (1<<28)) return factor * (4 + (8+32)*count()) < size();
    return factor * (4 + (8+40)*count()) < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
  */
  public BitVector(Directory d, String name, IOContext context) throws IOException {
    IndexInput input = d.openInput(name, context);
    try {
      size = input.readInt(); // read size
      // A stored size of -1 marks the d-gaps format; the real size follows.
      if (size == -1) {
        readDgaps(input);
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    count = input.readInt();           // read count
    bits = new byte[(size >> 3) + 1];  // allocate bits
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list (inverse of {@link #writeDgaps}) */
  private void readDgaps(IndexInput input) throws IOException {
    size = input.readInt();            // (re)read size
    count = input.readInt();           // read count
    bits = new byte[(size >> 3) + 1];  // allocate bits
    int last=0;
    int n = count();
    // Stop once all 'count' set bits have been accounted for.
    while (n>0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
    }
  }

  /**
   * Retrieve a subset of this BitVector.
   *
   * @param start
   *          starting index, inclusive
   * @param end
   *          ending index, exclusive
   * @return subset
   */
  public BitVector subset(int start, int end) {
    if (start < 0 || end > size() || end < start)
      throw new IndexOutOfBoundsException();
    // Special case -- return empty vector if start == end
    if (end == start) return new BitVector(0);
    byte[] bits = new byte[((end - start - 1) >>> 3) + 1];
    int s = start >>> 3;
    for (int i = 0; i < bits.length; i++) {
      int cur = 0xFF & this.bits[i + s];
      int next = i + s + 1 >= this.bits.length ? 0 : 0xFF & this.bits[i + s + 1];
      // Stitch each destination byte from two adjacent source bytes, shifted
      // so that bit 'start' becomes bit 0 of the result.
      bits[i] = (byte) ((cur >>> (start & 7)) | ((next << (8 - (start & 7)))));
    }
    // Zero out any bits past 'end' in the final destination byte.
    int bitsToClear = (bits.length * 8 - (end - start)) % 8;
    bits[bits.length - 1] &= ~(0xFF << (8 - bitsToClear));
    return new BitVector(bits, end - start);
  }
}
MergeMethods
package org.apache.lucene.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
 * java.util.BitSet, but also includes the following:
 * <ul>
 * <li>a count() method, which efficiently computes the number of one bits;</li>
 * <li>optimized read from and write to disk;</li>
 * <li>inlinable get() method;</li>
 * <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
 * </ul>
 *
 * @lucene.internal
 */
public final class BitVector implements Cloneable, Bits {

  // Packed storage: bit i lives at bits[i >> 3] under mask (1 << (i & 7)).
  private byte[] bits;
  // Number of addressable bits; valid indexes are 0 .. size-1.
  private int size;
  // Cached population count; -1 is a sentinel meaning "stale, recompute in count()".
  private int count;
  // On-disk format version read by BitVector(Directory, String, IOContext).
  private int version;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[getNumBytes(size)];
    count = 0;
  }

  /** Wraps an existing byte array without copying; the cached count starts stale. */
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  /** Returns the minimum number of bytes needed to hold {@code size} bits. */
  private int getNumBytes(int size) {
    int bytesLength = size >>> 3;
    if ((size & 7) != 0) {
      bytesLength++;
    }
    return bytesLength;
  }

  /** Returns a deep copy: the underlying byte array is duplicated. */
  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1; // invalidate cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   * returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;
    else {
      bits[pos] = (byte) (v | flag);
      // keep the cache coherent only when it is currently valid
      if (count != -1) {
        count++;
        assert count <= size;
      }
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero.
   * @throws ArrayIndexOutOfBoundsException if {@code bit >= size()} */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1; // invalidate cached count
  }

  /** Sets the value of <code>bit</code> to zero, and
   * returns true if the bit was set before clearing. */
  public final boolean getAndClear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) == 0) {
      return false;
    } else {
      bits[pos] &= ~flag;
      // keep the cache coherent only when it is currently valid
      if (count != -1) {
        count--;
        assert count >= 0;
      }
      return true;
    }
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
    <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size : "bit " + bit + " is out of bounds 0.." + (size - 1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector. This is also one greater than
    the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  @Override
  public int length() {
    return size;
  }

  /** Returns the total number of one bits in this vector. This is efficiently
    computed and cached, so that, if the vector is not changed, no
    recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified (count == -1 marks the cache as stale)
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++) {
        // sum bits per byte
        c += BYTE_COUNTS[bits[i] & 0xFF];
      }
      count = c;
    }
    return count;
  }

  /** For testing: always recomputes, bypassing the cache. */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++) {
      // sum bits per byte
      c += BYTE_COUNTS[bits[i] & 0xFF];
    }
    return c;
  }

  // Lookup table: BYTE_COUNTS[b] is the number of set bits in byte value b.
  private static final byte[] BYTE_COUNTS = { // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };

  // Codec header name; constant, so declared final.
  private static final String CODEC = "BitVector";
  // Version before version tracking was added:
  public static final int VERSION_PRE = -1;
  // First version:
  public static final int VERSION_START = 0;
  // Changed DGaps to encode gaps between cleared bits, not
  // set:
  public static final int VERSION_DGAPS_CLEARED = 1;
  // Increment version to change it:
  public static final int VERSION_CURRENT = VERSION_DGAPS_CLEARED;

  /** Returns the on-disk format version this vector was read from,
   * or {@link #VERSION_CURRENT} semantics do not apply to in-memory vectors. */
  public int getVersion() {
    return version;
  }

  /** Writes this vector to the file <code>name</code> in Directory
    <code>d</code>, in a format that can be read by the constructor {@link
    #BitVector(Directory, String)}. */
  public final void write(Directory d, String name, IOContext context) throws IOException {
    // Pass the IOContext through instead of silently ignoring it, matching
    // the read path and the other write implementations in this file.
    IndexOutput output = d.createOutput(name, context);
    try {
      // -2 distinguishes the new versioned header from legacy files, whose
      // first int is either the size or the -1 d-gaps marker.
      output.writeInt(-2);
      CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
      if (isSparse()) {
        // sparse bit-set more efficiently saved as d-gaps.
        writeClearedDgaps(output);
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Invert all bits */
  public void invertAll() {
    if (count != -1) {
      count = size - count; // cached count inverts too
    }
    if (bits.length > 0) {
      for (int idx = 0; idx < bits.length; idx++) {
        bits[idx] = (byte) (~bits[idx]);
      }
      // inversion sets the padding bits past 'size'; clear them again
      clearUnusedBits();
    }
  }

  /** Zeroes the padding bits of the last byte (indexes >= size). */
  private void clearUnusedBits() {
    // last byte:
    if (bits.length > 0) {
      final int lastNBits = size & 7;
      if (lastNBits != 0) {
        final int mask = (1 << lastNBits) - 1;
        bits[bits.length - 1] &= mask;
      }
    }
  }

  /** Set all bits */
  public void setAll() {
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    count = size;
  }

  /** Write as a bit set */
  private void writeBits(IndexOutput output) throws IOException {
    // write size
    output.writeInt(size());
    // write count
    output.writeInt(count());
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list over CLEARED bits: only bytes that are not fully
   * set are stored, each as a vInt gap from the previously stored byte index
   * followed by the byte itself. */
  private void writeClearedDgaps(IndexOutput output) throws IOException {
    // mark using d-gaps
    output.writeInt(-1);
    // write size
    output.writeInt(size());
    // write count
    output.writeInt(count());
    int last = 0;
    int numCleared = size() - count();
    int m = bits.length;
    for (int i = 0; i < m && numCleared > 0; i++) {
      // BUGFIX: compare byte-to-byte.  bits[i] is a signed byte promoted to
      // int, so "bits[i] != 0xff" (int 255) was always true and every byte
      // was written, defeating the d-gap compression.
      if (bits[i] != (byte) 0xff) {
        output.writeVInt(i - last);
        output.writeByte(bits[i]);
        last = i;
        numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
        assert numCleared >= 0;
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    final int clearedCount = size() - count();
    if (clearedCount == 0) {
      return true;
    }
    final int avgGapLength = bits.length / clearedCount;
    // expected number of bytes for vInt encoding of each gap
    final int expectedDGapBytes;
    if (avgGapLength <= (1 << 7)) {
      expectedDGapBytes = 1;
    } else if (avgGapLength <= (1 << 14)) {
      expectedDGapBytes = 2;
    } else if (avgGapLength <= (1 << 21)) {
      expectedDGapBytes = 3;
    } else if (avgGapLength <= (1 << 28)) {
      expectedDGapBytes = 4;
    } else {
      expectedDGapBytes = 5;
    }
    // +1 because we write the byte itself that contains the
    // set bit
    final int bytesPerSetBit = expectedDGapBytes + 1;
    // note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
    final long expectedBits = 32 + 8 * bytesPerSetBit * count();
    // note: factor is for read/write of byte-arrays being faster than vints.
    final long factor = 10;
    return factor * expectedBits < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
  */
  public BitVector(Directory d, String name, IOContext context) throws IOException {
    // Pass the IOContext through instead of silently ignoring it (see write()).
    IndexInput input = d.openInput(name, context);
    try {
      final int firstInt = input.readInt();
      if (firstInt == -2) {
        // New format, with full header & version:
        version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
        size = input.readInt();
      } else {
        version = VERSION_PRE;
        size = firstInt;
      }
      // A stored size of -1 marks the d-gaps format; the real size follows.
      if (size == -1) {
        if (version >= VERSION_DGAPS_CLEARED) {
          readClearedDgaps(input);
        } else {
          readSetDgaps(input);
        }
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    // read count
    count = input.readInt();
    // allocate bits
    bits = new byte[getNumBytes(size)];
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list over SET bits (legacy, pre-VERSION_DGAPS_CLEARED) */
  private void readSetDgaps(IndexInput input) throws IOException {
    // (re)read size
    size = input.readInt();
    // read count
    count = input.readInt();
    // allocate bits
    bits = new byte[(size >> 3) + 1];
    int last = 0;
    int n = count();
    // stop once all 'count' set bits have been accounted for
    while (n > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
      assert n >= 0;
    }
  }

  /** read as a d-gaps cleared bits list (inverse of {@link #writeClearedDgaps}) */
  private void readClearedDgaps(IndexInput input) throws IOException {
    // (re)read size
    size = input.readInt();
    // read count
    count = input.readInt();
    // allocate bits
    bits = new byte[(size >> 3) + 1];
    // start from all-ones, then patch in the bytes containing cleared bits
    Arrays.fill(bits, (byte) 0xff);
    clearUnusedBits();
    int last = 0;
    int numCleared = size() - count();
    while (numCleared > 0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
      assert numCleared >= 0;
    }
  }
}
package org.apache.lucene.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
private int getNumBytes(int size) {
int bytesLength = size >>> 3;
if ((size & 7) != 0) {
bytesLength++;
}
return bytesLength;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size : "bit " + bit + " is out of bounds 0.." + (size - 1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 };
private static String CODEC = "BitVector";
// Version before version tracking was added:
public static final int VERSION_PRE = -1;
// First version:
public static final int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public static final int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public static final int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name, IOContext context) throws IOException {
IndexOutput output = d.createOutput(name);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for (int idx = 0; idx < bits.length; idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
/** Zeroes the padding bits beyond {@code size} in the final byte so they
 *  never leak into {@link #count()} or serialized output. */
private void clearUnusedBits() {
// Only the last byte can contain padding bits.
if (bits.length > 0) {
final int lastNBits = size & 7;
// lastNBits == 0 means the final byte is fully used; nothing to mask.
if (lastNBits != 0) {
final int mask = (1 << lastNBits) - 1;
bits[bits.length - 1] &= mask;
}
}
}
/** Turns every bit on and refreshes the cached count. */
public void setAll() {
  for (int i = 0; i < bits.length; i++) {
    bits[i] = (byte) 0xff;
  }
  // Padding bits in the last byte must stay zero.
  clearUnusedBits();
  count = size;
}
/** Serializes this vector as a dense bit set: size, count, then the raw
 *  byte array.
 *  @param output destination stream; the caller is responsible for closing it */
private void writeBits(IndexOutput output) throws IOException {
// write size
output.writeInt(size());
// write count
output.writeInt(count());
output.writeBytes(bits, bits.length);
}
/** Serializes this vector as a d-gaps list: only bytes that contain at
 *  least one cleared bit are stored, each preceded by the vInt gap from
 *  the previously written byte index.
 *  @param output destination stream; the caller is responsible for closing it */
private void writeClearedDgaps(IndexOutput output) throws IOException {
  output.writeInt(-1); // mark using d-gaps
  output.writeInt(size()); // write size
  output.writeInt(count()); // write count
  int last = 0;
  int numCleared = size() - count();
  int m = bits.length;
  for (int i = 0; i < m && numCleared > 0; i++) {
    // BUGFIX: bits[i] is a byte, so it sign-extends in the comparison and
    // (bits[i] != 0xff) was always true (0xff byte promotes to -1, never
    // 255). Fully-set bytes were therefore written needlessly, bloating
    // the "sparse" encoding. Compare the unsigned value instead.
    if ((bits[i] & 0xFF) != 0xFF) {
      output.writeVInt(i - last);
      output.writeByte(bits[i]);
      last = i;
      numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
      assert numCleared >= 0;
    }
  }
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set.
 *  Pure heuristic: estimates the d-gaps size and compares it (scaled) to
 *  the dense size. */
private boolean isSparse() {
final int clearedCount = size() - count();
// All bits set: the d-gaps list is empty, so it is trivially cheaper.
if (clearedCount == 0) {
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
// (vInt stores 7 payload bits per byte)
final int expectedDGapBytes;
if (avgGapLength <= (1 << 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1 << 14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1 << 21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1 << 28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
// NOTE(review): the estimate multiplies by count(), not by the number of
// written (cleared-bit) bytes — looks intentional as a rough bound; confirm.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
 *  <code>d</code>, as written by the {@link #write} method.
 *  @param d       directory to read from
 *  @param name    file name to open
 *  @param context I/O context forwarded to the directory
 *  @throws IOException on read failure or codec-header mismatch */
public BitVector(Directory d, String name, IOContext context) throws IOException {
  // FIX: the context parameter was previously ignored; forward it so the
  // directory can pick an appropriate read strategy.
  IndexInput input = d.openInput(name, context);
  try {
    final int firstInt = input.readInt();
    if (firstInt == -2) {
      // New format, with full header & version:
      version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
      size = input.readInt();
    } else {
      // Pre-versioning format: the first int is the size itself.
      version = VERSION_PRE;
      size = firstInt;
    }
    if (size == -1) {
      // Sentinel size: payload is a d-gaps list; flavor depends on version.
      if (version >= VERSION_DGAPS_CLEARED) {
        readClearedDgaps(input);
      } else {
        readSetDgaps(input);
      }
    } else {
      readBits(input);
    }
  } finally {
    input.close();
  }
}
/** Deserializes the dense bit-set encoding. Assumes {@code size} has
 *  already been read by the caller (the constructor).
 *  @param input source stream; the caller is responsible for closing it */
private void readBits(IndexInput input) throws IOException {
// read count
count = input.readInt();
// allocate bits
bits = new byte[getNumBytes(size)];
input.readBytes(bits, 0, bits.length);
}
/** Deserializes the legacy (pre-{@code VERSION_DGAPS_CLEARED}) d-gaps
 *  encoding, where gaps are between bytes containing SET bits.
 *  @param input source stream; the caller is responsible for closing it */
private void readSetDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits (all zero; only bytes with set bits are stored)
bits = new byte[(size >> 3) + 1];
int last = 0;
int n = count();
while (n > 0) {
// gap to the next stored byte, then the byte itself
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}
/** Deserializes the current d-gaps encoding, where gaps are between bytes
 *  containing CLEARED bits; all other bytes are implicitly fully set.
 *  @param input source stream; the caller is responsible for closing it */
private void readClearedDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits, pre-filled to all-ones since only cleared-bit bytes are stored
bits = new byte[(size >> 3) + 1];
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last = 0;
int numCleared = size() - count();
while (numCleared > 0) {
// gap to the next stored byte, then the byte itself
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
}
KeepBothMethods
package org.apache.lucene.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
/** Returns the number of bytes needed to hold {@code size} bits. */
private int getNumBytes(int size) {
  // Divide by 8, rounding up when size is not byte-aligned.
  return (size & 7) == 0 ? size >>> 3 : (size >>> 3) + 1;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and returns true if the
 *  bit was already set. */
public final boolean getAndSet(int bit) {
  if (bit >= size) {
    throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
  }
  final int pos = bit >> 3;
  final int mask = 1 << (bit & 7);
  final int old = bits[pos];
  if ((old & mask) != 0) {
    return true;
  }
  bits[pos] = (byte) (old | mask);
  // Keep the cached count valid when it has not been invalidated.
  if (count != -1) {
    count++;
    assert count <= size;
  }
  return false;
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size : "bit " + bit + " is out of bounds 0.." + (size - 1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 };
private static String CODEC = "BitVector";
// Version before version tracking was added:
public static final int VERSION_PRE = -1;
// First version:
public static final int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public static final int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public static final int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for (int idx = 0; idx < bits.length; idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
private void clearUnusedBits() {
// last byte:
if (bits.length > 0) {
final int lastNBits = size & 7;
if (lastNBits != 0) {
final int mask = (1 << lastNBits) - 1;
bits[bits.length - 1] &= mask;
}
}
}
/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
count = size;
}
/** Writes this vector to the file <code>name</code> in Directory
 *  <code>d</code>, in a format that can be read by the constructor {@link
 *  #BitVector(Directory, String)}.
 *  @param d       directory to create the file in
 *  @param name    file name to create
 *  @param context I/O context forwarded to the directory
 *  @throws IOException on any write failure */
public final void write(Directory d, String name, IOContext context) throws IOException {
  IndexOutput output = d.createOutput(name, context);
  try {
    // FIX: this overload called the nonexistent writeDgaps(...) and wrote
    // no header. Emit the versioned header and the cleared-d-gaps
    // encoding so both write overloads produce the same, readable format.
    output.writeInt(-2);
    CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
    if (isSparse()) {
      // sparse bit-set more efficiently saved as d-gaps.
      writeClearedDgaps(output);
    } else {
      writeBits(output);
    }
  } finally {
    output.close();
  }
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
// write size
output.writeInt(size());
// write count
output.writeInt(count());
output.writeBytes(bits, bits.length);
}
/** Serializes this vector as a d-gaps list: only bytes that contain at
 *  least one cleared bit are stored, each preceded by the vInt gap from
 *  the previously written byte index.
 *  @param output destination stream; the caller is responsible for closing it */
private void writeClearedDgaps(IndexOutput output) throws IOException {
  output.writeInt(-1); // mark using d-gaps
  output.writeInt(size()); // write size
  output.writeInt(count()); // write count
  int last = 0;
  int numCleared = size() - count();
  int m = bits.length;
  for (int i = 0; i < m && numCleared > 0; i++) {
    // BUGFIX: bits[i] sign-extends, so (bits[i] != 0xff) was always true
    // and fully-set bytes were written needlessly. Compare unsigned.
    if ((bits[i] & 0xFF) != 0xFF) {
      output.writeVInt(i - last);
      output.writeByte(bits[i]);
      last = i;
      numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
      assert numCleared >= 0;
    }
  }
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
final int clearedCount = size() - count();
if (clearedCount == 0) {
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;
if (avgGapLength <= (1 << 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1 << 14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1 << 21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1 << 28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
final int firstInt = input.readInt();
if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name, IOContext context) throws IOException {
IndexInput input = d.openInput(name, context);
try {
// read size
size = input.readInt();
if (size == -1) {
readDgaps(input);
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set */
private void readBits(IndexInput input) throws IOException {
// read count
count = input.readInt();
// allocate bits
bits = new byte[getNumBytes(size)];
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readSetDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits
bits = new byte[(size >> 3) + 1];
int last = 0;
int n = count();
while (n > 0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}
/** read as a d-gaps cleared bits list */
private void readClearedDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits
bits = new byte[(size >> 3) + 1];
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last = 0;
int numCleared = size() - count();
while (numCleared > 0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
}
package org.apache.lucene.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
private int getNumBytes(int size) {
int bytesLength = size >>> 3;
if ((size & 7) != 0) {
bytesLength++;
}
return bytesLength;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size : "bit " + bit + " is out of bounds 0.." + (size - 1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
// sum bits per byte
c += BYTE_COUNTS[bits[i] & 0xFF];
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 };
private static String CODEC = "BitVector";
// Version before version tracking was added:
public static final int VERSION_PRE = -1;
// First version:
public static final int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public static final int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public static final int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for (int idx = 0; idx < bits.length; idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
private void clearUnusedBits() {
// last byte:
if (bits.length > 0) {
final int lastNBits = size & 7;
if (lastNBits != 0) {
final int mask = (1 << lastNBits) - 1;
bits[bits.length - 1] &= mask;
}
}
}
/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
count = size;
}
/** Writes this vector to the file <code>name</code> in Directory
 *  <code>d</code>, in a format that can be read by the constructor {@link
 *  #BitVector(Directory, String)}.
 *  @param d       directory to create the file in
 *  @param name    file name to create
 *  @param context I/O context forwarded to the directory
 *  @throws IOException on any write failure */
public final void write(Directory d, String name, IOContext context) throws IOException {
  IndexOutput output = d.createOutput(name, context);
  try {
    // FIX: this overload called the nonexistent writeDgaps(...) and wrote
    // no header. Emit the versioned header and the cleared-d-gaps
    // encoding so both write overloads produce the same, readable format.
    output.writeInt(-2);
    CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
    if (isSparse()) {
      // sparse bit-set more efficiently saved as d-gaps.
      writeClearedDgaps(output);
    } else {
      writeBits(output);
    }
  } finally {
    output.close();
  }
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
// write size
output.writeInt(size());
// write count
output.writeInt(count());
output.writeBytes(bits, bits.length);
}
/** Serializes this vector as a d-gaps list: only bytes that contain at
 *  least one cleared bit are stored, each preceded by the vInt gap from
 *  the previously written byte index.
 *  @param output destination stream; the caller is responsible for closing it */
private void writeClearedDgaps(IndexOutput output) throws IOException {
  output.writeInt(-1); // mark using d-gaps
  output.writeInt(size()); // write size
  output.writeInt(count()); // write count
  int last = 0;
  int numCleared = size() - count();
  int m = bits.length;
  for (int i = 0; i < m && numCleared > 0; i++) {
    // BUGFIX: bits[i] sign-extends, so (bits[i] != 0xff) was always true
    // and fully-set bytes were written needlessly. Compare unsigned.
    if ((bits[i] & 0xFF) != 0xFF) {
      output.writeVInt(i - last);
      output.writeByte(bits[i]);
      last = i;
      numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
      assert numCleared >= 0;
    }
  }
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
final int clearedCount = size() - count();
if (clearedCount == 0) {
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;
if (avgGapLength <= (1 << 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1 << 14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1 << 21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1 << 28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
final int firstInt = input.readInt();
if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
public BitVector(Directory d, String name, IOContext context) throws IOException {
IndexInput input = d.openInput(name, context);
try {
// read size
size = input.readInt();
if (size == -1) {
readDgaps(input);
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set */
private void readBits(IndexInput input) throws IOException {
// read count
count = input.readInt();
// allocate bits
bits = new byte[getNumBytes(size)];
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readSetDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits
bits = new byte[(size >> 3) + 1];
int last = 0;
int n = count();
while (n > 0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}
/** read as a d-gaps cleared bits list */
private void readClearedDgaps(IndexInput input) throws IOException {
// (re)read size
size = input.readInt();
// read count
count = input.readInt();
// allocate bits
bits = new byte[(size >> 3) + 1];
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last = 0;
int numCleared = size() - count();
while (numCleared > 0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
}
Safe
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
private int getNumBytes(int size) {
int bytesLength = size >>> 3;
if ((size & 7) != 0) {
bytesLength++;
}
return bytesLength;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
private static String CODEC = "BitVector";
// Version before version tracking was added:
public final static int VERSION_PRE = -1;
// First version:
public final static int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public final static int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for(int idx=0;idx<bits.length;idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
/** Zeroes the padding bits beyond {@code size} in the final byte. */
private void clearUnusedBits() {
  if (bits.length == 0) {
    return;
  }
  final int lastNBits = size & 7;
  if (lastNBits != 0) {
    // Keep only the low lastNBits of the last byte.
    bits[bits.length - 1] &= (1 << lastNBits) - 1;
  }
}
/** Turns on every bit in the vector. */
public void setAll() {
  Arrays.fill(bits, (byte) 0xff);
  clearUnusedBits();  // padding bits past `size` must stay zero
  count = size;       // every addressable bit is now set
}
/** Writes this vector to the file <code>name</code> in Directory
    <code>d</code> using the given IOContext, in a format that can be read
    back by the reading constructor.
    Fixed: this overload previously called the nonexistent writeDgaps(...)
    (a compile error) and omitted the -2 sentinel / codec header, so files
    it produced could not carry version information; it now mirrors the
    other write overload. */
public final void write(Directory d, String name, IOContext context) throws IOException {
  IndexOutput output = d.createOutput(name, context);
  try {
    output.writeInt(-2);  // sentinel marking the versioned format
    CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
    if (isSparse()) {
      writeClearedDgaps(output);  // sparse bit-set saved more compactly as d-gaps
    } else {
      writeBits(output);
    }
  } finally {
    output.close();
  }
}
/** Stores the vector verbatim: size, count, then the raw bit bytes. */
private void writeBits(IndexOutput output) throws IOException {
  output.writeInt(size());   // write size
  output.writeInt(count());  // write count
  output.writeBytes(bits, bits.length);
}
/** Writes the vector as a d-gaps list over bytes containing <b>cleared</b>
    bits: for each such byte, the gap (in bytes) from the previous entry
    followed by the byte itself. */
private void writeClearedDgaps(IndexOutput output) throws IOException {
  output.writeInt(-1);       // mark using d-gaps
  output.writeInt(size());   // write size
  output.writeInt(count());  // write count
  int last = 0;
  int numCleared = size() - count();
  int m = bits.length;
  for (int i = 0; i < m && numCleared > 0; i++) {
    // Fixed: must compare against (byte) 0xFF.  The old `bits[i] != 0xff`
    // compared a sign-extended byte (-128..127) with int 255, which is
    // always true, so an entry was emitted for every byte (bloated output).
    if (bits[i] != (byte) 0xFF) {
      output.writeVInt(i - last);
      output.writeByte(bits[i]);
      last = i;
      numCleared -= (8 - BYTE_COUNTS[bits[i] & 0xFF]);
      assert numCleared >= 0;
    }
  }
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
  final int clearedCount = size() - count();
  if (clearedCount == 0) {
    return true;  // nothing to encode — trivially "sparse"
  }
  final int avgGapLength = bits.length / clearedCount;
  // Expected vInt width, in bytes, for a gap of that average length.
  final int expectedDGapBytes;
  if (avgGapLength <= (1 << 7)) {
    expectedDGapBytes = 1;
  } else if (avgGapLength <= (1 << 14)) {
    expectedDGapBytes = 2;
  } else if (avgGapLength <= (1 << 21)) {
    expectedDGapBytes = 3;
  } else if (avgGapLength <= (1 << 28)) {
    expectedDGapBytes = 4;
  } else {
    expectedDGapBytes = 5;
  }
  // Each entry also stores the byte holding the bit(s), hence +1.
  final int bytesPerSetBit = expectedDGapBytes + 1;
  // 32 extra bits for the leading ((int) -1) d-gaps marker.
  // NOTE(review): this multiplies by count() although the d-gaps encode
  // *cleared* bytes; clearedCount may have been intended — confirm before
  // changing, as it only affects the sparse/dense heuristic, not correctness.
  final long expectedBits = 32 + 8 * bytesPerSetBit * count();
  // Factor of 10: raw byte-array I/O is much faster than vInt decoding.
  final long factor = 10;
  return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
    Merge resolution: combines the IOContext-aware signature (MINE) with the
    versioned-header reading logic (YOURS).  The old readDgaps(...) no longer
    exists; the header version selects between the cleared-bits and the
    legacy set-bits d-gap decoders. */
public BitVector(Directory d, String name, IOContext context) throws IOException {
  IndexInput input = d.openInput(name, context);
  try {
    final int firstInt = input.readInt();
    if (firstInt == -2) {
      // New format, with full header & version:
      version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
      size = input.readInt();
    } else {
      // Pre-versioning format: the first int is the size itself
      // (or the -1 marker indicating a d-gaps payload).
      version = VERSION_PRE;
      size = firstInt;
    }
    if (size == -1) {
      if (version >= VERSION_DGAPS_CLEARED) {
        readClearedDgaps(input);
      } else {
        readSetDgaps(input);
      }
    } else {
      readBits(input);
    }
  } finally {
    input.close();
  }
}
/** Loads the dense representation: count, then the raw bit bytes
    (size was already read by the constructor). */
private void readBits(IndexInput input) throws IOException {
  count = input.readInt();  // read count
  bits = new byte[getNumBytes(size)];
  input.readBytes(bits, 0, bits.length);
}
/** Loads the legacy d-gaps form, where gaps run between bytes containing
    <b>set</b> bits (pre-{@link #VERSION_DGAPS_CLEARED} files). */
private void readSetDgaps(IndexInput input) throws IOException {
  size = input.readInt();   // (re)read size
  count = input.readInt();  // read count
  bits = new byte[(size >> 3) + 1];
  int last = 0;
  int remaining = count();
  while (remaining > 0) {
    last += input.readVInt();       // advance by the stored byte gap
    bits[last] = input.readByte();  // the byte holding set bits
    remaining -= BYTE_COUNTS[bits[last] & 0xFF];
    assert remaining >= 0;
  }
}
/** Loads the current d-gaps form, where gaps run between bytes containing
    <b>cleared</b> bits: start all-set, then patch in the stored bytes. */
private void readClearedDgaps(IndexInput input) throws IOException {
  size = input.readInt();   // (re)read size
  count = input.readInt();  // read count
  bits = new byte[(size >> 3) + 1];
  Arrays.fill(bits, (byte) 0xff);
  clearUnusedBits();  // padding bits past `size` must stay zero
  int last = 0;
  int remaining = size() - count();  // cleared bits still unaccounted for
  while (remaining > 0) {
    last += input.readVInt();
    bits[last] = input.readByte();
    remaining -= 8 - BYTE_COUNTS[bits[last] & 0xFF];
    assert remaining >= 0;
  }
}
// NOTE(review): the Javadoc below is orphaned — the subset(start, end) method
// it documents is not present in this revision; remove it or restore the method.
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
}
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
private int getNumBytes(int size) {
int bytesLength = size >>> 3;
if ((size & 7) != 0) {
bytesLength++;
}
return bytesLength;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
private static String CODEC = "BitVector";
// Version before version tracking was added:
public final static int VERSION_PRE = -1;
// First version:
public final static int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public final static int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for(int idx=0;idx<bits.length;idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
private void clearUnusedBits() {
// Take care not to invert the "unused" bits in the
// last byte:
if (bits.length > 0) {
final int lastNBits = size & 7;
if (lastNBits != 0) {
final int mask = (1 << lastNBits)-1;
bits[bits.length-1] &= mask;
}
}
}
/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
count = size;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name, IOContext context) throws IOException {
IndexOutput output = d.createOutput(name, context);
try {
if (isSparse()) {
writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size
output.writeInt(count()); // write count
output.writeBytes(bits, bits.length);
}
/** Write as a d-gaps list */
private void writeClearedDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int numCleared = size()-count();
int m = bits.length;
for (int i=0; i<m && numCleared>0; i++) {
if (bits[i]!=0xff) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
numCleared -= (8-BYTE_COUNTS[bits[i] & 0xFF]);
assert numCleared >= 0;
}
}
}
/** Write as a d-gaps list */
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
final int clearedCount = size() - count();
if (clearedCount == 0) {
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;
if (avgGapLength <= (1<< 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1<<14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1<<21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1<<28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
<<<<<<< MINE
public BitVector(Directory d, String name, IOContext context) throws IOException {
IndexInput input = d.openInput(name, context);
try {
size = input.readInt(); // read size
if (size == -1) {
readDgaps(input);
} else {
readBits(input);
}
} finally {
input.close();
}
}
=======
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
final int firstInt = input.readInt();
if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}
} finally {
input.close();
}
}
>>>>>>> YOURS
/** Read as a bit set */
private void readBits(IndexInput input) throws IOException {
count = input.readInt(); // read count
bits = new byte[getNumBytes(size)]; // allocate bits
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readSetDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
int last=0;
int n = count();
while (n>0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}
/** read as a d-gaps cleared bits list */
private void readClearedDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last=0;
int numCleared = size()-count();
while (numCleared>0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8-BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
/** read as a d-gaps list */
/**
* Retrieve a subset of this BitVector.
*
* @param start
* starting index, inclusive
* @param end
* ending index, exclusive
* @return subset
*/
}
// ----- Unstructured merge variant below (dataset section marker) -----
package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
size = n;
bits = new byte[getNumBytes(size)];
count = 0;
}
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
private int getNumBytes(int size) {
int bytesLength = size >>> 3;
if ((size & 7) != 0) {
bytesLength++;
}
return bytesLength;
}
@Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
BitVector clone = new BitVector(copyBits, size);
clone.count = count;
return clone;
}
/** Sets the value of <code>bit</code> to one. */
public final void set(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
bits[bit >> 3] |= 1 << (bit & 7);
count = -1;
}
/** Sets the value of <code>bit</code> to true, and
* returns true if bit was already set */
public final boolean getAndSet(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) != 0)
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}
/** Sets the value of <code>bit</code> to zero. */
public final void clear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
bits[bit >> 3] &= ~(1 << (bit & 7));
count = -1;
}
public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}
/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {
assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
computed and cached, so that, if the vector is not changed, no
recomputation is done for repeated calls. */
public final int count() {
// if the vector has been modified
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
count = c;
}
return count;
}
/** For testing */
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
return c;
}
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
private static String CODEC = "BitVector";
// Version before version tracking was added:
public final static int VERSION_PRE = -1;
// First version:
public final static int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public final static int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name, IOContext context) throws IOException {
IndexOutput output = d.createOutput(name, context);
try {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for(int idx=0;idx<bits.length;idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}
private void clearUnusedBits() {
// Take care not to invert the "unused" bits in the
// last byte:
if (bits.length > 0) {
final int lastNBits = size & 7;
if (lastNBits != 0) {
final int mask = (1 << lastNBits)-1;
bits[bits.length-1] &= mask;
}
}
}
/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
count = size;
}
/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size
output.writeInt(count()); // write count
output.writeBytes(bits, bits.length);
}
/** Write as a d-gaps list */
private void writeClearedDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int numCleared = size()-count();
int m = bits.length;
for (int i=0; i<m && numCleared>0; i++) {
if (bits[i]!=0xff) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
numCleared -= (8-BYTE_COUNTS[bits[i] & 0xFF]);
assert numCleared >= 0;
}
}
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
final int clearedCount = size() - count();
if (clearedCount == 0) {
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;
if (avgGapLength <= (1<< 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1<<14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1<<21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1<<28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
<code>d</code>, as written by the {@link #write} method.
*/
<<<<<<< MINE
public BitVector(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
=======
public BitVector(Directory d, String name, IOContext context) throws IOException {
IndexInput input = d.openInput(name, context);
>>>>>>> YOURS
try {
final int firstInt = input.readInt();
if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set */
private void readBits(IndexInput input) throws IOException {
count = input.readInt(); // read count
bits = new byte[getNumBytes(size)]; // allocate bits
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list */
private void readSetDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
int last=0;
int n = count();
while (n>0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}
/** read as a d-gaps cleared bits list */
private void readClearedDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last=0;
int numCleared = size()-count();
while (numCleared>0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8-BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
}package org.apache.lucene.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IOContext.Context;
/** Optimized implementation of a vector of bits. This is more-or-less like
* java.util.BitSet, but also includes the following:
* <ul>
* <li>a count() method, which efficiently computes the number of one bits;</li>
* <li>optimized read from and write to disk;</li>
* <li>inlinable get() method;</li>
* <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
* </ul>
*
* @lucene.internal
*/
public final class BitVector implements Cloneable, Bits {
// Packed bit storage; bit i lives at bits[i >> 3], mask 1 << (i & 7).
private byte[] bits;
// Number of addressable bits.
private int size;
// Cached number of set bits; -1 means stale/unknown.
private int count;
// On-disk format version; populated by the file-reading constructor.
private int version;
/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {
  this.size = n;
  this.bits = new byte[getNumBytes(n)];
  this.count = 0;  // a fresh vector has no set bits
}
// Wraps an existing byte[] without copying; the set-bit count is unknown
// (-1) until count() recomputes it.
BitVector(byte[] bits, int size) {
this.bits = bits;
this.size = size;
count = -1;
}
/** Number of bytes needed to hold <code>size</code> bits (rounds up). */
private int getNumBytes(int size) {
  final int whole = size >>> 3;
  // One extra byte when size is not a multiple of 8.
  return ((size & 7) != 0) ? whole + 1 : whole;
}
@Override
public Object clone() {
  // Deep-copy the backing array so the clone is independent.
  final byte[] copied = bits.clone();
  final BitVector result = new BitVector(copied, size);
  result.count = count;  // the cached count is equally valid for the copy
  return result;
}
/** Sets the value of <code>bit</code> to one; invalidates the cached count. */
public final void set(int bit) {
  if (bit >= size) {
    throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
  }
  final int pos = bit >> 3;
  final int mask = 1 << (bit & 7);
  bits[pos] |= mask;
  count = -1;  // cached population count is now stale
}
/** Sets the value of <code>bit</code> to true, and
 *  returns true if bit was already set. */
public final boolean getAndSet(int bit) {
  if (bit >= size) {
    throw new ArrayIndexOutOfBoundsException("bit=" + bit + " size=" + size);
  }
  final int pos = bit >> 3;
  final int old = bits[pos];
  final int mask = 1 << (bit & 7);
  if ((old & mask) != 0) {
    return true;  // already set — nothing to do
  }
  bits[pos] = (byte) (old | mask);
  if (count != -1) {
    count++;  // keep the cache valid instead of invalidating it
    assert count <= size;
  }
  return false;
}
/** Sets the value of <code>bit</code> to zero; invalidates the cached count. */
public final void clear(int bit) {
  if (bit >= size) {
    throw new ArrayIndexOutOfBoundsException(bit);
  }
  final int pos = bit >> 3;
  final int mask = 1 << (bit & 7);
  bits[pos] &= ~mask;
  count = -1;  // cached population count is now stale
}
/** Clears <code>bit</code> and returns whether it was previously set. */
public final boolean getAndClear(int bit) {
  if (bit >= size) {
    throw new ArrayIndexOutOfBoundsException(bit);
  }
  final int pos = bit >> 3;
  final int old = bits[pos];
  final int mask = 1 << (bit & 7);
  if ((old & mask) == 0) {
    return false;  // already clear — nothing to do
  }
  bits[pos] &= ~mask;
  if (count != -1) {
    count--;  // keep the cache valid instead of invalidating it
    assert count >= 0;
  }
  return true;
}
/** Returns <code>true</code> if <code>bit</code> is one and
    <code>false</code> if it is zero. */
public final boolean get(int bit) {
  assert bit >= 0 && bit < size : "bit " + bit + " is out of bounds 0.." + (size - 1);
  final int mask = 1 << (bit & 7);
  return (bits[bit >> 3] & mask) != 0;
}
/** Returns the number of bits in this vector. This is also one greater than
the number of the largest valid bit number. */
public final int size() {
return size;
}
// Bits-interface accessor; same value as size().
@Override
public int length() {
return size;
}
/** Returns the total number of one bits in this vector. This is efficiently
    computed and cached, so that, if the vector is not changed, no
    recomputation is done for repeated calls. */
public final int count() {
  if (count == -1) {  // cache invalidated by a mutation
    int total = 0;
    for (final byte b : bits) {
      total += BYTE_COUNTS[b & 0xFF];  // sum bits per byte
    }
    count = total;
  }
  return count;
}
/** For testing: recomputes the set-bit count from scratch, ignoring the cache. */
public final int getRecomputedCount() {
  int total = 0;
  for (final byte b : bits) {
    total += BYTE_COUNTS[b & 0xFF];  // sum bits per byte
  }
  return total;
}
// Lookup table: BYTE_COUNTS[b] is the number of set bits in the byte value b
// (0..255). Used to popcount the backing array one byte at a time.
private static final byte[] BYTE_COUNTS = { // table of bits/byte
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
// Codec name recorded in the on-disk header by write()/checked by the
// reading constructor. Made final: it is a constant and must never be
// reassigned (a mutable static String invites accidental corruption of
// every file header written afterwards).
private static final String CODEC = "BitVector";
// Version before version tracking was added:
public final static int VERSION_PRE = -1;
// First version:
public final static int VERSION_START = 0;
// Changed DGaps to encode gaps between cleared bits, not
// set:
public final static int VERSION_DGAPS_CLEARED = 1;
// Increment version to change it:
public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;
/** Returns the on-disk format version this vector was read with
 * (one of the {@code VERSION_*} constants). */
public int getVersion() {
return version;
}
/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link
#BitVector(Directory, String)}. */
public final void write(Directory d, String name, IOContext context) throws IOException {
IndexOutput output = d.createOutput(name, context);
try {
// -2 is a sentinel distinguishing this versioned header format from
// pre-versioning files, which wrote size (or -1 for d-gaps) first —
// see the reading constructor's firstInt == -2 check.
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}
} finally {
output.close();
}
}
/** Invert all bits */
public void invertAll() {
  if (count != -1) {
    // cached popcount stays valid: ones become zeros and vice versa
    count = size - count;
  }
  if (bits.length > 0) {
    for (int i = 0; i < bits.length; i++) {
      bits[i] = (byte) ~bits[i];
    }
    clearUnusedBits(); // pad bits beyond 'size' must remain zero
  }
}
/** Zeroes the pad bits in the last byte (bit positions >= size). */
private void clearUnusedBits() {
  // Take care not to invert the "unused" bits in the
  // last byte:
  if (bits.length > 0) {
    final int bitsInLastByte = size & 7;
    if (bitsInLastByte != 0) {
      bits[bits.length - 1] &= (1 << bitsInLastByte) - 1;
    }
  }
}
/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits(); // pad bits beyond 'size' in the last byte must stay zero
count = size; // cached popcount is exact: every valid bit is now set
}
/** Write as a bit set: size, count, then the raw backing bytes. */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size
output.writeInt(count()); // write count
output.writeBytes(bits, bits.length); // raw backing array
}
/** Write as a d-gaps list: for each byte that contains at least one cleared
 * bit, write the vInt gap from the previous such byte index, then the byte
 * itself. Terminates once every cleared bit has been accounted for. */
private void writeClearedDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int numCleared = size()-count();
int m = bits.length;
for (int i=0; i<m && numCleared>0; i++) {
// Skip all-ones bytes: they contain no cleared bits. The previous
// comparison against the int literal 0xff was ALWAYS true (bits[i] is a
// signed byte, range -128..127, never 255), so every byte — including
// full 0xff ones — was written, bloating the d-gaps encoding.
if (bits[i] != (byte) 0xff) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
numCleared -= (8-BYTE_COUNTS[bits[i] & 0xFF]);
assert numCleared >= 0;
}
}
}
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {
// number of cleared bits drives the length of the d-gaps list
final int clearedCount = size() - count();
if (clearedCount == 0) {
// no cleared bits: the d-gaps list is empty — the cheapest possible encoding
return true;
}
final int avgGapLength = bits.length / clearedCount;
// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;
if (avgGapLength <= (1<< 7)) {
expectedDGapBytes = 1;
} else if (avgGapLength <= (1<<14)) {
expectedDGapBytes = 2;
} else if (avgGapLength <= (1<<21)) {
expectedDGapBytes = 3;
} else if (avgGapLength <= (1<<28)) {
expectedDGapBytes = 4;
} else {
expectedDGapBytes = 5;
}
// +1 because we write the byte itself that contains the
// set bit
final int bytesPerSetBit = expectedDGapBytes + 1;
// note: adding 32 because we start with ((int) -1) to indicate d-gaps format.
final long expectedBits = 32 + 8 * bytesPerSetBit * count();
// note: factor is for read/write of byte-arrays being faster than vints.
final long factor = 10;
// NOTE(review): the estimate multiplies by count() (set bits) although the
// d-gaps list is keyed on bytes containing cleared bits — verify intent
// against upstream before changing; this is a heuristic, not a contract.
return factor * expectedBits < size();
}
/** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
 */
// Merge conflict resolved in favor of the IOContext overload: write() in
// this class already takes an IOContext and passes it to
// d.createOutput(name, context), so the reading side must mirror it.
public BitVector(Directory d, String name, IOContext context) throws IOException {
IndexInput input = d.openInput(name, context);
try {
final int firstInt = input.readInt();
if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
// Pre-versioning file: the first int is the size itself.
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
// -1 marks the d-gaps encoding; its meaning flipped at VERSION_DGAPS_CLEARED
// from gaps-between-set-bits to gaps-between-cleared-bits.
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}
} finally {
input.close();
}
}
/** Read as a bit set. 'size' was already read by the constructor before
 * dispatching here. */
private void readBits(IndexInput input) throws IOException {
count = input.readInt(); // read count
bits = new byte[getNumBytes(size)]; // allocate bits
input.readBytes(bits, 0, bits.length);
}
/** read as a d-gaps list (pre-VERSION_DGAPS_CLEARED files: the list is keyed
 * on SET bits). Each entry is a vInt gap to the next interesting byte index
 * followed by the byte value itself. */
private void readSetDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
int last=0;
int n = count();
while (n>0) {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF]; // consume this byte's set bits
assert n >= 0;
}
}
/** read as a d-gaps cleared bits list: start from all-ones and overwrite the
 * bytes that contain cleared bits, as written by writeClearedDgaps. */
private void readClearedDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits(); // pad bits beyond 'size' must stay zero
int last=0;
int numCleared = size()-count();
while (numCleared>0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8-BYTE_COUNTS[bits[last] & 0xFF]; // cleared bits consumed
assert numCleared >= 0;
}
}
}
Diff Result
No diff
Case 36 - java_lucenesolr.rev_6e8e0_28cae..Directory.java
Base
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
public abstract IndexInput openInput(String name)
throws IOException;
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public IndexInput openInput(String name, int bufferSize) throws IOException {
return openInput(name);
}
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
* overwrite it if it does.
*/
public void copy(Directory to, String src, String dest) throws IOException {
// Both streams are opened inside the try: previously the output was
// created before the try, so a failure in openInput(src) leaked the
// already-opened IndexOutput. closeSafely tolerates nulls for anything
// not yet opened.
IndexOutput os = null;
IndexInput is = null;
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
public abstract IndexInput openInput(String name)
throws IOException;
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public IndexInput openInput(String name, int bufferSize) throws IOException {
return openInput(name);
}
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
* overwrite it if it does.
*/
public void copy(Directory to, String src, String dest) throws IOException {
// Both streams are opened inside the try: previously the output was
// created before the try, so a failure in openInput(src) leaked the
// already-opened IndexOutput. closeSafely tolerates nulls for anything
// not yet opened.
IndexOutput os = null;
IndexInput is = null;
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
Left
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
public abstract IndexInput openInput(String name)
throws IOException;
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
public IndexInput openInput(String name, int bufferSize) throws IOException {
return openInput(name);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
* overwrite it if it does.
*/
public void copy(Directory to, String src, String dest) throws IOException {
// Streams are opened inside the try so a failure in createOutput/openInput
// cannot leak the other stream; closeSafely ignores nulls.
IndexOutput os = null;
IndexInput is = null;
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {

  // Set to false by close() in concrete subclasses; checked via ensureOpen().
  // Canonical JLS modifier order: access modifier before 'volatile'.
  protected volatile boolean isOpen = true;

  /** Holds the LockFactory instance (implements locking for
   *  this Directory instance). */
  protected LockFactory lockFactory;

  /**
   * Returns an array of strings, one for each file in the directory.
   *
   * @throws NoSuchDirectoryException if the directory is not prepared for any
   *         write operations (such as {@link #createOutput(String)}).
   * @throws IOException in case of other IO errors
   */
  public abstract String[] listAll() throws IOException;

  /** Returns true iff a file with the given name exists. */
  public abstract boolean fileExists(String name) throws IOException;

  /** Returns the time the named file was last modified. */
  public abstract long fileModified(String name) throws IOException;

  /** Removes an existing file in the directory. */
  public abstract void deleteFile(String name) throws IOException;

  /**
   * Returns the length of a file in the directory. This method follows the
   * following contract:
   * <ul>
   * <li>Throws {@link FileNotFoundException} if the file does not exist
   * <li>Returns a value &ge;0 if the file exists, which specifies its length.
   * </ul>
   *
   * @param name the name of the file for which to return the length.
   * @throws FileNotFoundException if the file does not exist.
   * @throws IOException if there was an IO error while retrieving the file's
   *         length.
   */
  public abstract long fileLength(String name) throws IOException;

  /** Creates a new, empty file in the directory with the given name.
   *  Returns a stream writing this file. */
  public abstract IndexOutput createOutput(String name) throws IOException;

  /**
   * Ensure that any writes to these files are moved to
   * stable storage. Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index.<br/>
   * <br/>
   * NOTE: Clients may call this method for same files over
   * and over again, so some impls might optimize for that.
   * For other impls the operation can be a noop, for various
   * reasons.
   */
  public abstract void sync(Collection<String> names) throws IOException;

  /** Returns a stream reading an existing file. */
  public abstract IndexInput openInput(String name) throws IOException;

  /** Returns a stream reading an existing file, with the
   *  specified read buffer size. The particular Directory
   *  implementation may ignore the buffer size. Currently
   *  the only Directory implementations that respect this
   *  parameter are {@link FSDirectory} and {@link
   *  CompoundFileDirectory}.
   */
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    // Default: ignore the hint and delegate to the single-arg overload.
    return openInput(name);
  }

  /**
   * Returns a {@link CompoundFileDirectory} capable of
   * reading the Lucene compound file format.
   * <p>
   * The default implementation returns
   * {@link DefaultCompoundFileDirectory}.
   * @lucene.experimental
   */
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
  }

  /**
   * Returns a {@link CompoundFileDirectory} capable of
   * writing the Lucene compound file format.
   * <p>
   * The default implementation returns
   * {@link DefaultCompoundFileDirectory}.
   * @lucene.experimental
   */
  public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
    return new DefaultCompoundFileDirectory(this, name, 1024, true);
  }

  /** Construct a {@link Lock}.
   * @param name the name of the lock file
   */
  public Lock makeLock(String name) {
    return lockFactory.makeLock(name);
  }

  /**
   * Attempt to clear (forcefully unlock and remove) the
   * specified lock. Only call this at a time when you are
   * certain this lock is no longer in use.
   * @param name name of the lock to be cleared.
   */
  public void clearLock(String name) throws IOException {
    if (lockFactory != null) {
      lockFactory.clearLock(name);
    }
  }

  /** Closes the store. */
  public abstract void close() throws IOException;

  /**
   * Set the LockFactory that this Directory instance should
   * use for its locking implementation. Each instance of
   * LockFactory should only be used for one directory (ie,
   * do not share a single instance across multiple
   * Directories).
   *
   * @param lockFactory instance of {@link LockFactory}.
   */
  public void setLockFactory(LockFactory lockFactory) throws IOException {
    assert lockFactory != null;
    this.lockFactory = lockFactory;
    lockFactory.setLockPrefix(this.getLockID());
  }

  /**
   * Get the LockFactory that this Directory instance is
   * using for its locking implementation. Note that this
   * may be null for Directory implementations that provide
   * their own locking implementation.
   */
  public LockFactory getLockFactory() {
    return this.lockFactory;
  }

  /**
   * Return a string identifier that uniquely differentiates
   * this Directory instance from other Directory instances.
   * This ID should be the same if two Directory instances
   * (even in different JVMs and/or on different machines)
   * are considered "the same index". This is how locking
   * "scopes" to the right index.
   */
  public String getLockID() {
    return this.toString();
  }

  @Override
  public String toString() {
    return super.toString() + " lockFactory=" + getLockFactory();
  }

  /**
   * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
   * file name <i>dest</i>.
   * <p>
   * If you want to copy the entire source directory to the destination one, you
   * can do so like this:
   *
   * <pre>
   * Directory to; // the directory to copy to
   * for (String file : dir.listAll()) {
   *   dir.copy(to, file, newFile); // newFile can be either file, or a new name
   * }
   * </pre>
   * <p>
   * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
   * overwrite it if it does.
   */
  public void copy(Directory to, String src, String dest) throws IOException {
    IndexOutput os = null;
    IndexInput is = null;
    IOException priorException = null;
    try {
      os = to.createOutput(dest);
      is = openInput(src);
      is.copyBytes(os, is.length());
    } catch (IOException ioe) {
      priorException = ioe;
    } finally {
      // closeSafely must tolerate null arguments (either acquisition may have
      // failed) and rethrows priorException after closing both streams.
      IOUtils.closeSafely(priorException, os, is);
    }
  }

  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (!isOpen)
      throw new AlreadyClosedException("this Directory is closed");
  }
}
// ----- merge variant: Right -----
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {

  // Set to false by close() in concrete subclasses; checked via ensureOpen().
  // Canonical JLS modifier order: access modifier before 'volatile'.
  protected volatile boolean isOpen = true;

  /** Holds the LockFactory instance (implements locking for
   *  this Directory instance). */
  protected LockFactory lockFactory;

  /**
   * Returns an array of strings, one for each file in the directory.
   *
   * @throws NoSuchDirectoryException if the directory is not prepared for any
   *         write operations (such as {@link #createOutput(String)}).
   * @throws IOException in case of other IO errors
   */
  public abstract String[] listAll() throws IOException;

  /** Returns true iff a file with the given name exists. */
  public abstract boolean fileExists(String name) throws IOException;

  /** Returns the time the named file was last modified. */
  public abstract long fileModified(String name) throws IOException;

  /** Removes an existing file in the directory. */
  public abstract void deleteFile(String name) throws IOException;

  /**
   * Returns the length of a file in the directory. This method follows the
   * following contract:
   * <ul>
   * <li>Throws {@link FileNotFoundException} if the file does not exist
   * <li>Returns a value &ge;0 if the file exists, which specifies its length.
   * </ul>
   *
   * @param name the name of the file for which to return the length.
   * @throws FileNotFoundException if the file does not exist.
   * @throws IOException if there was an IO error while retrieving the file's
   *         length.
   */
  public abstract long fileLength(String name) throws IOException;

  /** Creates a new, empty file in the directory with the given name.
   *  Returns a stream writing this file. */
  public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;

  /**
   * Ensure that any writes to these files are moved to
   * stable storage. Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index.<br/>
   * <br/>
   * NOTE: Clients may call this method for same files over
   * and over again, so some impls might optimize for that.
   * For other impls the operation can be a noop, for various
   * reasons.
   */
  public abstract void sync(Collection<String> names) throws IOException;

  /** Returns a stream reading an existing file, with the
   *  specified read buffer size. The particular Directory
   *  implementation may ignore the buffer size. Currently
   *  the only Directory implementations that respect this
   *  parameter are {@link FSDirectory} and {@link
   *  org.apache.lucene.index.CompoundFileReader}.
   */
  public abstract IndexInput openInput(String name, IOContext context) throws IOException;

  /** Construct a {@link Lock}.
   * @param name the name of the lock file
   */
  public Lock makeLock(String name) {
    return lockFactory.makeLock(name);
  }

  /**
   * Attempt to clear (forcefully unlock and remove) the
   * specified lock. Only call this at a time when you are
   * certain this lock is no longer in use.
   * @param name name of the lock to be cleared.
   */
  public void clearLock(String name) throws IOException {
    if (lockFactory != null) {
      lockFactory.clearLock(name);
    }
  }

  /** Closes the store. */
  public abstract void close() throws IOException;

  /**
   * Set the LockFactory that this Directory instance should
   * use for its locking implementation. Each instance of
   * LockFactory should only be used for one directory (ie,
   * do not share a single instance across multiple
   * Directories).
   *
   * @param lockFactory instance of {@link LockFactory}.
   */
  public void setLockFactory(LockFactory lockFactory) throws IOException {
    assert lockFactory != null;
    this.lockFactory = lockFactory;
    lockFactory.setLockPrefix(this.getLockID());
  }

  /**
   * Get the LockFactory that this Directory instance is
   * using for its locking implementation. Note that this
   * may be null for Directory implementations that provide
   * their own locking implementation.
   */
  public LockFactory getLockFactory() {
    return this.lockFactory;
  }

  /**
   * Return a string identifier that uniquely differentiates
   * this Directory instance from other Directory instances.
   * This ID should be the same if two Directory instances
   * (even in different JVMs and/or on different machines)
   * are considered "the same index". This is how locking
   * "scopes" to the right index.
   */
  public String getLockID() {
    return this.toString();
  }

  @Override
  public String toString() {
    return super.toString() + " lockFactory=" + getLockFactory();
  }

  /**
   * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
   * file name <i>dest</i>.
   * <p>
   * If you want to copy the entire source directory to the destination one, you
   * can do so like this:
   *
   * <pre>
   * Directory to; // the directory to copy to
   * for (String file : dir.listAll()) {
   *   dir.copy(to, file, newFile); // newFile can be either file, or a new name
   * }
   * </pre>
   * <p>
   * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
   * overwrite it if it does.
   */
  public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
    // FIX: acquire both streams inside the try block. Previously the output
    // was created before the try, so a failure in openInput leaked the
    // already-created IndexOutput (and left a partial file unclosed).
    IndexOutput os = null;
    IndexInput is = null;
    IOException priorException = null;
    try {
      os = to.createOutput(dest, context);
      is = openInput(src, context);
      is.copyBytes(os, is.length());
    } catch (IOException ioe) {
      priorException = ioe;
    } finally {
      // closeSafely must tolerate null arguments (either acquisition may have
      // failed) and rethrows priorException after closing both streams.
      IOUtils.closeSafely(priorException, os, is);
    }
  }

  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (!isOpen)
      throw new AlreadyClosedException("this Directory is closed");
  }
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {

  // Set to false by close() in concrete subclasses; checked via ensureOpen().
  // Canonical JLS modifier order: access modifier before 'volatile'.
  protected volatile boolean isOpen = true;

  /** Holds the LockFactory instance (implements locking for
   *  this Directory instance). */
  protected LockFactory lockFactory;

  /**
   * Returns an array of strings, one for each file in the directory.
   *
   * @throws NoSuchDirectoryException if the directory is not prepared for any
   *         write operations (such as {@link #createOutput(String)}).
   * @throws IOException in case of other IO errors
   */
  public abstract String[] listAll() throws IOException;

  /** Returns true iff a file with the given name exists. */
  public abstract boolean fileExists(String name) throws IOException;

  /** Returns the time the named file was last modified. */
  public abstract long fileModified(String name) throws IOException;

  /** Removes an existing file in the directory. */
  public abstract void deleteFile(String name) throws IOException;

  /**
   * Returns the length of a file in the directory. This method follows the
   * following contract:
   * <ul>
   * <li>Throws {@link FileNotFoundException} if the file does not exist
   * <li>Returns a value &ge;0 if the file exists, which specifies its length.
   * </ul>
   *
   * @param name the name of the file for which to return the length.
   * @throws FileNotFoundException if the file does not exist.
   * @throws IOException if there was an IO error while retrieving the file's
   *         length.
   */
  public abstract long fileLength(String name) throws IOException;

  /** Creates a new, empty file in the directory with the given name.
   *  Returns a stream writing this file. */
  public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;

  /**
   * Ensure that any writes to these files are moved to
   * stable storage. Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index.<br/>
   * <br/>
   * NOTE: Clients may call this method for same files over
   * and over again, so some impls might optimize for that.
   * For other impls the operation can be a noop, for various
   * reasons.
   */
  public abstract void sync(Collection<String> names) throws IOException;

  /** Returns a stream reading an existing file, with the
   *  specified read buffer size. The particular Directory
   *  implementation may ignore the buffer size. Currently
   *  the only Directory implementations that respect this
   *  parameter are {@link FSDirectory} and {@link
   *  org.apache.lucene.index.CompoundFileReader}.
   */
  public abstract IndexInput openInput(String name, IOContext context) throws IOException;

  /** Construct a {@link Lock}.
   * @param name the name of the lock file
   */
  public Lock makeLock(String name) {
    return lockFactory.makeLock(name);
  }

  /**
   * Attempt to clear (forcefully unlock and remove) the
   * specified lock. Only call this at a time when you are
   * certain this lock is no longer in use.
   * @param name name of the lock to be cleared.
   */
  public void clearLock(String name) throws IOException {
    if (lockFactory != null) {
      lockFactory.clearLock(name);
    }
  }

  /** Closes the store. */
  public abstract void close() throws IOException;

  /**
   * Set the LockFactory that this Directory instance should
   * use for its locking implementation. Each instance of
   * LockFactory should only be used for one directory (ie,
   * do not share a single instance across multiple
   * Directories).
   *
   * @param lockFactory instance of {@link LockFactory}.
   */
  public void setLockFactory(LockFactory lockFactory) throws IOException {
    assert lockFactory != null;
    this.lockFactory = lockFactory;
    lockFactory.setLockPrefix(this.getLockID());
  }

  /**
   * Get the LockFactory that this Directory instance is
   * using for its locking implementation. Note that this
   * may be null for Directory implementations that provide
   * their own locking implementation.
   */
  public LockFactory getLockFactory() {
    return this.lockFactory;
  }

  /**
   * Return a string identifier that uniquely differentiates
   * this Directory instance from other Directory instances.
   * This ID should be the same if two Directory instances
   * (even in different JVMs and/or on different machines)
   * are considered "the same index". This is how locking
   * "scopes" to the right index.
   */
  public String getLockID() {
    return this.toString();
  }

  @Override
  public String toString() {
    return super.toString() + " lockFactory=" + getLockFactory();
  }

  /**
   * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
   * file name <i>dest</i>.
   * <p>
   * If you want to copy the entire source directory to the destination one, you
   * can do so like this:
   *
   * <pre>
   * Directory to; // the directory to copy to
   * for (String file : dir.listAll()) {
   *   dir.copy(to, file, newFile); // newFile can be either file, or a new name
   * }
   * </pre>
   * <p>
   * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
   * overwrite it if it does.
   */
  public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
    // FIX: acquire both streams inside the try block. Previously the output
    // was created before the try, so a failure in openInput leaked the
    // already-created IndexOutput (and left a partial file unclosed).
    IndexOutput os = null;
    IndexInput is = null;
    IOException priorException = null;
    try {
      os = to.createOutput(dest, context);
      is = openInput(src, context);
      is.copyBytes(os, is.length());
    } catch (IOException ioe) {
      priorException = ioe;
    } finally {
      // closeSafely must tolerate null arguments (either acquisition may have
      // failed) and rethrows priorException after closing both streams.
      IOUtils.closeSafely(priorException, os, is);
    }
  }

  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (!isOpen)
      throw new AlreadyClosedException("this Directory is closed");
  }
}
// ----- merge variant: MergeMethods -----
package org.apache.lucene.store;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
// for javadocs
import java.util.Collection;
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {

  // Set to false by close() in concrete subclasses; checked via ensureOpen().
  protected volatile boolean isOpen = true;

  /** Holds the LockFactory instance (implements locking for
   *  this Directory instance). */
  protected LockFactory lockFactory;

  /**
   * Returns an array of strings, one for each file in the directory.
   *
   * @throws NoSuchDirectoryException if the directory is not prepared for any
   *         write operations (such as {@link #createOutput(String)}).
   * @throws IOException in case of other IO errors
   */
  public abstract String[] listAll() throws IOException;

  /** Returns true iff a file with the given name exists. */
  public abstract boolean fileExists(String name) throws IOException;

  /** Returns the time the named file was last modified. */
  public abstract long fileModified(String name) throws IOException;

  /** Removes an existing file in the directory. */
  public abstract void deleteFile(String name) throws IOException;

  /**
   * Returns the length of a file in the directory. This method follows the
   * following contract:
   * <ul>
   * <li>Throws {@link FileNotFoundException} if the file does not exist
   * <li>Returns a value &ge;0 if the file exists, which specifies its length.
   * </ul>
   *
   * @param name the name of the file for which to return the length.
   * @throws FileNotFoundException if the file does not exist.
   * @throws IOException if there was an IO error while retrieving the file's
   *         length.
   */
  public abstract long fileLength(String name) throws IOException;

  /** Creates a new, empty file in the directory with the given name.
   *  Returns a stream writing this file. */
  public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;

  /**
   * Ensure that any writes to these files are moved to
   * stable storage. Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index.<br/>
   * <br/>
   * NOTE: Clients may call this method for same files over
   * and over again, so some impls might optimize for that.
   * For other impls the operation can be a noop, for various
   * reasons.
   */
  public abstract void sync(Collection<String> names) throws IOException;

  // NOTE: the dangling javadoc fragments for the removed openInput(String)
  // and openInput(String,int) overloads were deleted here; the only read
  // entry point in this variant is openInput(String, IOContext) below.

  /**
   * Returns a {@link CompoundFileDirectory} capable of
   * reading the Lucene compound file format.
   * <p>
   * The default implementation returns
   * {@link DefaultCompoundFileDirectory}.
   * @lucene.experimental
   */
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
  }

  /**
   * Returns a {@link CompoundFileDirectory} capable of
   * writing the Lucene compound file format.
   * <p>
   * The default implementation returns
   * {@link DefaultCompoundFileDirectory}.
   * @lucene.experimental
   */
  public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
    return new DefaultCompoundFileDirectory(this, name, 1024, true);
  }

  /** Returns a stream reading an existing file, with the
   *  specified read buffer size. The particular Directory
   *  implementation may ignore the buffer size. Currently
   *  the only Directory implementations that respect this
   *  parameter are {@link FSDirectory} and {@link
   *  org.apache.lucene.index.CompoundFileReader}.
   */
  public abstract IndexInput openInput(String name, IOContext context) throws IOException;

  /** Construct a {@link Lock}.
   * @param name the name of the lock file
   */
  public Lock makeLock(String name) {
    return lockFactory.makeLock(name);
  }

  /**
   * Attempt to clear (forcefully unlock and remove) the
   * specified lock. Only call this at a time when you are
   * certain this lock is no longer in use.
   * @param name name of the lock to be cleared.
   */
  public void clearLock(String name) throws IOException {
    if (lockFactory != null) {
      lockFactory.clearLock(name);
    }
  }

  /** Closes the store. */
  public abstract void close() throws IOException;

  /**
   * Set the LockFactory that this Directory instance should
   * use for its locking implementation. Each instance of
   * LockFactory should only be used for one directory (ie,
   * do not share a single instance across multiple
   * Directories).
   *
   * @param lockFactory instance of {@link LockFactory}.
   */
  public void setLockFactory(LockFactory lockFactory) throws IOException {
    assert lockFactory != null;
    this.lockFactory = lockFactory;
    lockFactory.setLockPrefix(this.getLockID());
  }

  /**
   * Get the LockFactory that this Directory instance is
   * using for its locking implementation. Note that this
   * may be null for Directory implementations that provide
   * their own locking implementation.
   */
  public LockFactory getLockFactory() {
    return this.lockFactory;
  }

  /**
   * Return a string identifier that uniquely differentiates
   * this Directory instance from other Directory instances.
   * This ID should be the same if two Directory instances
   * (even in different JVMs and/or on different machines)
   * are considered "the same index". This is how locking
   * "scopes" to the right index.
   */
  public String getLockID() {
    return this.toString();
  }

  @Override
  public String toString() {
    return super.toString() + " lockFactory=" + getLockFactory();
  }

  /**
   * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
   * file name <i>dest</i>.
   * <p>
   * If you want to copy the entire source directory to the destination one, you
   * can do so like this:
   *
   * <pre>
   * Directory to; // the directory to copy to
   * for (String file : dir.listAll()) {
   *   dir.copy(to, file, newFile); // newFile can be either file, or a new name
   * }
   * </pre>
   * <p>
   * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
   * overwrite it if it does.
   */
  public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
    IndexOutput os = null;
    IndexInput is = null;
    IOException priorException = null;
    try {
      // FIX: the one-arg createOutput/openInput overloads no longer exist in
      // this variant (both abstract methods require an IOContext), so the old
      // calls did not compile and silently ignored the context parameter.
      os = to.createOutput(dest, context);
      is = openInput(src, context);
      is.copyBytes(os, is.length());
    } catch (IOException ioe) {
      priorException = ioe;
    } finally {
      // closeSafely must tolerate null arguments (either acquisition may have
      // failed) and rethrows priorException after closing both streams.
      IOUtils.closeSafely(priorException, os, is);
    }
  }

  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (!isOpen)
      throw new AlreadyClosedException("this Directory is closed");
  }
}
package org.apache.lucene.store;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
// for javadocs
import java.util.Collection;
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
protected volatile boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name) throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name) throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name) throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close() throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
/** Describes this Directory, including the lock factory in use. */
@Override
public String toString() {
  return new StringBuilder(super.toString())
      .append(" lockFactory=")
      .append(getLockFactory())
      .toString();
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * If you want to copy the entire source directory to the destination one, you
 * can do so like this:
 *
 * <pre>
 * Directory to; // the directory to copy to
 * for (String file : dir.listAll()) {
 *   dir.copy(to, file, newFile, context); // newFile can be either file, or a new name
 * }
 * </pre>
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
  IndexOutput os = null;
  IndexInput is = null;
  IOException priorException = null;
  try {
    // Bug fix: the IOContext parameter was previously dropped, and this class
    // declares only the (String, IOContext) forms of createOutput/openInput,
    // so the context-free calls could not resolve.
    os = to.createOutput(dest, context);
    is = openInput(src, context);
    is.copyBytes(os, is.length());
  } catch (IOException ioe) {
    priorException = ioe;
  } finally {
    // Closes both streams; suppresses secondary exceptions if one is pending.
    IOUtils.closeSafely(priorException, os, is);
  }
}
/**
 * Verifies this Directory has not been closed.
 *
 * @throws AlreadyClosedException if this Directory is closed
 */
protected final void ensureOpen() throws AlreadyClosedException {
  if (isOpen) {
    return;
  }
  throw new AlreadyClosedException("this Directory is closed");
}
}
// ===== merge-resolution variant: KeepBothMethods =====
package org.apache.lucene.store;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
// for javadocs
import java.util.Collection;
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
protected volatile boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name) throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name) throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name) throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close() throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * Delegates to {@link #copy(Directory, String, String, IOContext)} with
 * {@link IOContext#DEFAULT}. The previous body called context-free
 * {@code createOutput}/{@code openInput} overloads that this class no longer
 * declares, so it could not compile.
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest) throws IOException {
  copy(to, src, dest, IOContext.DEFAULT);
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * If you want to copy the entire source directory to the destination one, you
 * can do so like this:
 *
 * <pre>
 * Directory to; // the directory to copy to
 * for (String file : dir.listAll()) {
 *   dir.copy(to, file, newFile, context); // newFile can be either file, or a new name
 * }
 * </pre>
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
  IndexOutput os = null;
  IndexInput is = null;
  IOException priorException = null;
  try {
    // Bug fix: acquire inside try — previously a throw from openInput leaked
    // the already-created output, since the finally could not see it.
    os = to.createOutput(dest, context);
    is = openInput(src, context);
    is.copyBytes(os, is.length());
  } catch (IOException ioe) {
    priorException = ioe;
  } finally {
    IOUtils.closeSafely(priorException, os, is);
  }
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
package org.apache.lucene.store;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
// for javadocs
import java.util.Collection;
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
protected volatile boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name) throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name) throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name) throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context) throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close() throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * Delegates to {@link #copy(Directory, String, String, IOContext)} with
 * {@link IOContext#DEFAULT}. The previous body called context-free
 * {@code createOutput}/{@code openInput} overloads that this class no longer
 * declares, so it could not compile.
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest) throws IOException {
  copy(to, src, dest, IOContext.DEFAULT);
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * If you want to copy the entire source directory to the destination one, you
 * can do so like this:
 *
 * <pre>
 * Directory to; // the directory to copy to
 * for (String file : dir.listAll()) {
 *   dir.copy(to, file, newFile, context); // newFile can be either file, or a new name
 * }
 * </pre>
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
  IndexOutput os = null;
  IndexInput is = null;
  IOException priorException = null;
  try {
    // Bug fix: acquire inside try — previously a throw from openInput leaked
    // the already-created output, since the finally could not see it.
    os = to.createOutput(dest, context);
    is = openInput(src, context);
    is.copyBytes(os, is.length());
  } catch (IOException ioe) {
    priorException = ioe;
  } finally {
    IOUtils.closeSafely(priorException, os, is);
  }
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
// ===== merge-resolution variant: Safe =====
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest<i> exist and will
* overwrite it if it does.
*/
<<<<<<< MINE
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
IndexOutput os = to.createOutput(dest, context);
IndexInput is = openInput(src, context);
IOException priorException = null;
try {
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
=======
public void copy(Directory to, String src, String dest) throws IOException {
IndexOutput os = null;
IndexInput is = null;
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
>>>>>>> YOURS
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file. */
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* org.apache.lucene.index.CompoundFileReader}.
*/
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest<i> exist and will
* overwrite it if it does.
*/
<<<<<<< MINE
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
IndexOutput os = to.createOutput(dest, context);
IndexInput is = openInput(src, context);
IOException priorException = null;
try {
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
=======
public void copy(Directory to, String src, String dest) throws IOException {
IndexOutput os = null;
IndexInput is = null;
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
>>>>>>> YOURS
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
Unstructured
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
<<<<<<< MINE
public IndexInput openInput(String name, int bufferSize) throws IOException {
return openInput(name);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
=======
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
>>>>>>> YOURS
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest<i> exist and will
* overwrite it if it does.
*/
<<<<<<< MINE
public void copy(Directory to, String src, String dest) throws IOException {
IndexOutput os = null;
IndexInput is = null;
=======
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
IndexOutput os = to.createOutput(dest, context);
IndexInput is = openInput(src, context);
>>>>>>> YOURS
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Closeable;
import java.util.Collection; // for javadocs
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
volatile protected boolean isOpen = true;
/** Holds the LockFactory instance (implements locking for
* this Directory instance). */
protected LockFactory lockFactory;
/**
* Returns an array of strings, one for each file in the directory.
*
* @throws NoSuchDirectoryException if the directory is not prepared for any
* write operations (such as {@link #createOutput(String)}).
* @throws IOException in case of other IO errors
*/
public abstract String[] listAll() throws IOException;
/** Returns true iff a file with the given name exists. */
public abstract boolean fileExists(String name)
throws IOException;
/** Returns the time the named file was last modified. */
public abstract long fileModified(String name)
throws IOException;
/** Removes an existing file in the directory. */
public abstract void deleteFile(String name)
throws IOException;
/**
* Returns the length of a file in the directory. This method follows the
* following contract:
* <ul>
* <li>Throws {@link FileNotFoundException} if the file does not exist
* <li>Returns a value ≥0 if the file exists, which specifies its length.
* </ul>
*
* @param name the name of the file for which to return the length.
* @throws FileNotFoundException if the file does not exist.
* @throws IOException if there was an IO error while retrieving the file's
* length.
*/
public abstract long fileLength(String name) throws IOException;
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name, IOContext context)
throws IOException;
/**
* Ensure that any writes to these files are moved to
* stable storage. Lucene uses this to properly commit
* changes to the index, to prevent a machine/OS crash
* from corrupting the index.<br/>
* <br/>
* NOTE: Clients may call this method for same files over
* and over again, so some impls might optimize for that.
* For other impls the operation can be a noop, for various
* reasons.
*/
public abstract void sync(Collection<String> names) throws IOException;
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
* the only Directory implementations that respect this
* parameter are {@link FSDirectory} and {@link
* CompoundFileDirectory}.
*/
<<<<<<< MINE
public IndexInput openInput(String name, int bufferSize) throws IOException {
return openInput(name);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* reading the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new DefaultCompoundFileDirectory(this, name, bufferSize, false);
}
/**
* Returns a {@link CompoundFileDirectory} capable of
* writing the Lucene compound file format.
* <p>
* The default implementation returns
* {@link DefaultCompoundFileDirectory}.
* @lucene.experimental
*/
public CompoundFileDirectory createCompoundOutput(String name) throws IOException {
return new DefaultCompoundFileDirectory(this, name, 1024, true);
}
=======
public abstract IndexInput openInput(String name, IOContext context) throws IOException;
>>>>>>> YOURS
/** Construct a {@link Lock}.
* @param name the name of the lock file
*/
public Lock makeLock(String name) {
return lockFactory.makeLock(name);
}
/**
* Attempt to clear (forcefully unlock and remove) the
* specified lock. Only call this at a time when you are
* certain this lock is no longer in use.
* @param name name of the lock to be cleared.
*/
public void clearLock(String name) throws IOException {
if (lockFactory != null) {
lockFactory.clearLock(name);
}
}
/** Closes the store. */
public abstract void close()
throws IOException;
/**
* Set the LockFactory that this Directory instance should
* use for its locking implementation. Each * instance of
* LockFactory should only be used for one directory (ie,
* do not share a single instance across multiple
* Directories).
*
* @param lockFactory instance of {@link LockFactory}.
*/
public void setLockFactory(LockFactory lockFactory) throws IOException {
assert lockFactory != null;
this.lockFactory = lockFactory;
lockFactory.setLockPrefix(this.getLockID());
}
/**
* Get the LockFactory that this Directory instance is
* using for its locking implementation. Note that this
* may be null for Directory implementations that provide
* their own locking implementation.
*/
public LockFactory getLockFactory() {
return this.lockFactory;
}
/**
* Return a string identifier that uniquely differentiates
* this Directory instance from other Directory instances.
* This ID should be the same if two Directory instances
* (even in different JVMs and/or on different machines)
* are considered "the same index". This is how locking
* "scopes" to the right index.
*/
public String getLockID() {
return this.toString();
}
@Override
public String toString() {
return super.toString() + " lockFactory=" + getLockFactory();
}
/**
* Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
* file name <i>dest</i>.
* <p>
* If you want to copy the entire source directory to the destination one, you
* can do so like this:
*
* <pre>
* Directory to; // the directory to copy to
* for (String file : dir.listAll()) {
* dir.copy(to, file, newFile); // newFile can be either file, or a new name
* }
* </pre>
* <p>
* <b>NOTE:</b> this method does not check whether <i>dest<i> exist and will
* overwrite it if it does.
*/
<<<<<<< MINE
public void copy(Directory to, String src, String dest) throws IOException {
IndexOutput os = null;
IndexInput is = null;
=======
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
IndexOutput os = to.createOutput(dest, context);
IndexInput is = openInput(src, context);
>>>>>>> YOURS
IOException priorException = null;
try {
os = to.createOutput(dest);
is = openInput(src);
is.copyBytes(os, is.length());
} catch (IOException ioe) {
priorException = ioe;
} finally {
IOUtils.closeSafely(priorException, os, is);
}
}
/**
* @throws AlreadyClosedException if this Directory is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (!isOpen)
throw new AlreadyClosedException("this Directory is closed");
}
}
Diff Result
No diff
Case 37 - java_lucenesolr.rev_6e8e0_28cae..SimpleFSDirectory.java
Base
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
if (i == -1) {
throw new IOException("read past EOF");
}
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return file.length;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
if (i == -1) {
throw new IOException("read past EOF");
}
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return file.length;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
Left
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
  }

  /** Opens a compound (.cfs) file as a nested Directory view. */
  @Override
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new SimpleFSCompoundFileDirectory(name, bufferSize);
  }

  /**
   * Compound-file view backed by a single shared {@link SimpleFSIndexInput.Descriptor};
   * every slice handed out by {@link #openInputSlice} reads through that one descriptor.
   */
  private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
    // Shared file descriptor for the whole compound file; closed in close().
    private SimpleFSIndexInput.Descriptor fd;

    public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
      super(SimpleFSDirectory.this, fileName, readBufferSize);
      IndexInput stream = null;
      try {
        final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
        fd = new SimpleFSIndexInput.Descriptor(f, "r");
        // Temporary stream over the whole file, used only to read the entry table.
        stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
            getReadChunkSize());
        initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
        stream.close();
      } catch (IOException e) {
        // throw our original exception, closing fd/stream and attaching any
        // secondary close failure to it
        IOUtils.closeSafely(e, fd, stream);
      }
    }

    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Slices share fd; the returned input marks itself as a clone so closing
      // a slice does not close the shared descriptor.
      return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
    }

    @Override
    public synchronized void close() throws IOException {
      try {
        fd.close();
      } finally {
        super.close();
      }
    }
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen=true;
        length=length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen=false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with another input and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;
    // Absolute start offset of this input within the underlying file (for slices).
    protected final long off;
    // Absolute end offset (exclusive) of this input within the underlying file.
    protected final long end;

    /** Opens a stand-alone input over the whole file at {@code path}. */
    public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
      this.off = 0L;
      this.end = file.length;
    }

    /** Opens a slice [off, off+length) over an already-open shared descriptor. */
    public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = file;
      this.chunkSize = chunkSize;
      this.off = off;
      this.end = off + length;
      this.isClone = true; // well, we are sorta?
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len)
      throws IOException {
      // The descriptor may be shared by clones/slices, so all seeking and
      // reading happens under its monitor.
      synchronized (file) {
        long position = off + getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        if (position + len > end) {
          throw new IOException("read past EOF");
        }
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            if (i == -1) {
              // FIX: read() returns -1 at EOF. The pre-check above only guards
              // against the length captured at open time; if the file shrank
              // since then, the old code did `total += -1` and looped forever
              // while corrupting file.position.
              throw new IOException("read past EOF");
            }
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM Users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
              "OutOfMemoryError likely caused by the Sun VM Bug described in "
              + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
              + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone) file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return end - off;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
  }

  /** Opens a compound (.cfs) file as a nested Directory view. */
  @Override
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new SimpleFSCompoundFileDirectory(name, bufferSize);
  }

  /**
   * Compound-file view backed by a single shared {@link SimpleFSIndexInput.Descriptor};
   * every slice handed out by {@link #openInputSlice} reads through that one descriptor.
   */
  private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
    // Shared file descriptor for the whole compound file; closed in close().
    private SimpleFSIndexInput.Descriptor fd;

    public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
      super(SimpleFSDirectory.this, fileName, readBufferSize);
      IndexInput stream = null;
      try {
        final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
        fd = new SimpleFSIndexInput.Descriptor(f, "r");
        // Temporary stream over the whole file, used only to read the entry table.
        stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
            getReadChunkSize());
        initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
        stream.close();
      } catch (IOException e) {
        // throw our original exception
        // (closeSafely closes fd/stream, attaching any secondary failure to e;
        // NOTE(review): presumably it tolerates fd == null when the Descriptor
        // constructor itself threw — confirm against IOUtils.)
        IOUtils.closeSafely(e, fd, stream);
      }
    }

    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Slices share fd; the returned input marks itself as a clone so closing
      // a slice does not close the shared descriptor.
      return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
    }

    @Override
    public synchronized void close() throws IOException {
      try {
        fd.close();
      } finally {
        super.close();
      }
    }
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen=true;
        length=length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen=false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with another input and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;
    // Absolute start offset of this input within the underlying file (for slices).
    protected final long off;
    // Absolute end offset (exclusive) of this input within the underlying file.
    protected final long end;

    /** Opens a stand-alone input over the whole file at {@code path}. */
    public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
      this.off = 0L;
      this.end = file.length;
    }

    /** Opens a slice [off, off+length) over an already-open shared descriptor. */
    public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = file;
      this.chunkSize = chunkSize;
      this.off = off;
      this.end = off + length;
      this.isClone = true; // well, we are sorta?
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len)
      throws IOException {
      // The descriptor may be shared by clones/slices, so all seeking and
      // reading happens under its monitor.
      synchronized (file) {
        long position = off + getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        if (position + len > end) {
          throw new IOException("read past EOF");
        }
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            // NOTE(review): read() returns -1 at EOF; the pre-check above only
            // guards against the length captured at open time, so if the file
            // shrank after open this does `total += -1` and the loop below
            // never terminates. Later variants add an explicit -1 check.
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM Users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
              "OutOfMemoryError likely caused by the Sun VM Bug described in "
              + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
              + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone) file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return end - off;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
Right
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen=true;
        length=length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen=false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with a clone and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;

    public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
      super(context);
      file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len)
      throws IOException {
      // The descriptor may be shared by clones, so seek + read is done under
      // its monitor.
      synchronized (file) {
        long position = getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            if (i == -1) {
              // EOF reached before len bytes were available.
              throw new IOException("read past EOF");
            }
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM Users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
              "OutOfMemoryError likely caused by the Sun VM Bug described in "
              + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
              + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone) file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return file.length;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen=true;
        length=length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen=false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with a clone and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;

    public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
      super(context);
      file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len)
      throws IOException {
      // The descriptor may be shared by clones, so seek + read is done under
      // its monitor.
      synchronized (file) {
        long position = getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            if (i == -1) {
              // EOF reached before len bytes were available.
              throw new IOException("read past EOF");
            }
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM Users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
              "OutOfMemoryError likely caused by the Sun VM Bug described in "
              + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
              + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone) file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return file.length;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
MergeMethods
package org.apache.lucene.store;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Opens a compound (.cfs) file as a nested Directory view. */
  @Override
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new SimpleFSCompoundFileDirectory(name, bufferSize);
  }

  /**
   * Compound-file view backed by a single shared {@link SimpleFSIndexInput.Descriptor};
   * every slice handed out by {@link #openInputSlice} reads through that one descriptor.
   */
  private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
    // Shared file descriptor for the whole compound file; closed in close().
    private SimpleFSIndexInput.Descriptor fd;

    public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
      super(SimpleFSDirectory.this, fileName, readBufferSize);
      IndexInput stream = null;
      try {
        final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
        fd = new SimpleFSIndexInput.Descriptor(f, "r");
        // Temporary stream over the whole file, used only to read the entry table.
        stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize, getReadChunkSize());
        initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
        stream.close();
      } catch (IOException e) {
        // throw our original exception, closing fd/stream and attaching any
        // secondary close failure to it
        IOUtils.closeSafely(e, fd, stream);
      }
    }

    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Slices share fd; the returned input marks itself as a clone so closing
      // a slice does not close the shared descriptor.
      return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
    }

    @Override
    public synchronized void close() throws IOException {
      try {
        fd.close();
      } finally {
        super.close();
      }
    }
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen = true;
        length = length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen = false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with another input and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;
    // Absolute start offset of this input within the underlying file (for slices).
    protected final long off;
    // Absolute end offset (exclusive) of this input within the underlying file.
    protected final long end;

    /** Opens a stand-alone input over the whole file at {@code path}. */
    public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
      // FIX: was super(bufferSize) — `bufferSize` is not a parameter of this
      // constructor and does not compile; this IOContext-based constructor must
      // delegate the context, as in the IOContext openInput path.
      super(context);
      this.file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
      this.off = 0L;
      this.end = file.length;
    }

    /** Opens a slice [off, off+length) over an already-open shared descriptor. */
    public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = file;
      this.chunkSize = chunkSize;
      this.off = off;
      this.end = off + length;
      // well, we are sorta?
      this.isClone = true;
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len) throws IOException {
      // The descriptor may be shared by clones/slices, so all seeking and
      // reading happens under its monitor.
      synchronized (file) {
        long position = off + getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        if (position + len > end) {
          throw new IOException("read past EOF");
        }
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            if (i == -1) {
              // FIX: read() returns -1 at EOF. The pre-check above only guards
              // against the length captured at open time; without this check a
              // file that shrank after open made the loop spin (total += -1)
              // and corrupted file.position.
              throw new IOException("read past EOF");
            }
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError("OutOfMemoryError likely caused by the Sun VM Bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone)
        file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return end - off;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput) super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
package org.apache.lucene.store;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
  /** Create a new SimpleFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public SimpleFSDirectory(File path) throws IOException {
    super(path, null);
  }

  /** Opens a compound (.cfs) file as a nested Directory view. */
  @Override
  public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
    return new SimpleFSCompoundFileDirectory(name, bufferSize);
  }

  /**
   * Compound-file view backed by a single shared {@link SimpleFSIndexInput.Descriptor};
   * every slice handed out by {@link #openInputSlice} reads through that one descriptor.
   */
  private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
    // Shared file descriptor for the whole compound file; closed in close().
    private SimpleFSIndexInput.Descriptor fd;

    public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
      super(SimpleFSDirectory.this, fileName, readBufferSize);
      IndexInput stream = null;
      try {
        final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
        fd = new SimpleFSIndexInput.Descriptor(f, "r");
        // Temporary stream over the whole file, used only to read the entry table.
        stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize, getReadChunkSize());
        initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
        stream.close();
      } catch (IOException e) {
        // throw our original exception, closing fd/stream and attaching any
        // secondary close failure to it
        IOUtils.closeSafely(e, fd, stream);
      }
    }

    @Override
    public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
      // Slices share fd; the returned input marks itself as a clone so closing
      // a slice does not close the shared descriptor.
      return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
    }

    @Override
    public synchronized void close() throws IOException {
      try {
        fd.close();
      } finally {
        super.close();
      }
    }
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    ensureOpen();
    return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
  }

  protected static class SimpleFSIndexInput extends BufferedIndexInput {
    /** RandomAccessFile that tracks its own cached position, fixed length, and open state. */
    protected static class Descriptor extends RandomAccessFile {
      // remember if the file is open, so that we don't try to close it
      // more than once
      protected volatile boolean isOpen;
      // Cached OS-level file pointer; kept in sync by readInternal.
      long position;
      // File length captured once at open time.
      final long length;

      public Descriptor(File file, String mode) throws IOException {
        super(file, mode);
        isOpen = true;
        length = length();
      }

      @Override
      public void close() throws IOException {
        if (isOpen) {
          isOpen = false;
          super.close();
        }
      }
    }

    protected final Descriptor file;
    // True when this input shares `file` with another input and must not close it.
    boolean isClone;
    // LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
    protected final int chunkSize;
    // Absolute start offset of this input within the underlying file (for slices).
    protected final long off;
    // Absolute end offset (exclusive) of this input within the underlying file.
    protected final long end;

    /** Opens a stand-alone input over the whole file at {@code path}. */
    public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
      // FIX: was super(bufferSize) — `bufferSize` is not a parameter of this
      // constructor and does not compile; this IOContext-based constructor must
      // delegate the context, as in the IOContext openInput path.
      super(context);
      this.file = new Descriptor(path, "r");
      this.chunkSize = chunkSize;
      this.off = 0L;
      this.end = file.length;
    }

    /** Opens a slice [off, off+length) over an already-open shared descriptor. */
    public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
      super(bufferSize);
      this.file = file;
      this.chunkSize = chunkSize;
      this.off = off;
      this.end = off + length;
      // well, we are sorta?
      this.isClone = true;
    }

    /** IndexInput methods */
    @Override
    protected void readInternal(byte[] b, int offset, int len) throws IOException {
      // The descriptor may be shared by clones/slices, so all seeking and
      // reading happens under its monitor.
      synchronized (file) {
        long position = off + getFilePointer();
        if (position != file.position) {
          file.seek(position);
          file.position = position;
        }
        int total = 0;
        if (position + len > end) {
          throw new IOException("read past EOF");
        }
        try {
          do {
            final int readLength;
            if (total + chunkSize > len) {
              readLength = len - total;
            } else {
              // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
              readLength = chunkSize;
            }
            final int i = file.read(b, offset + total, readLength);
            if (i == -1) {
              // FIX: read() returns -1 at EOF. The pre-check above only guards
              // against the length captured at open time; without this check a
              // file that shrank after open made the loop spin (total += -1)
              // and corrupted file.position.
              throw new IOException("read past EOF");
            }
            file.position += i;
            total += i;
          } while (total < len);
        } catch (OutOfMemoryError e) {
          // propagate OOM up and add a hint for 32bit VM users hitting the bug
          // with a large chunk size in the fast path.
          final OutOfMemoryError outOfMemoryError = new OutOfMemoryError("OutOfMemoryError likely caused by the Sun VM Bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")");
          outOfMemoryError.initCause(e);
          throw outOfMemoryError;
        }
      }
    }

    @Override
    public void close() throws IOException {
      // only close the file if this is not a clone
      if (!isClone)
        file.close();
    }

    @Override
    protected void seekInternal(long position) {
      // No-op: readInternal recomputes the absolute position on every call.
    }

    @Override
    public long length() {
      return end - off;
    }

    @Override
    public Object clone() {
      SimpleFSIndexInput clone = (SimpleFSIndexInput) super.clone();
      // Clones share the descriptor; only the original may close it.
      clone.isClone = true;
      return clone;
    }

    /** Method used for testing. Returns true if the underlying
     * file descriptor is valid.
     */
    boolean isFDValid() throws IOException {
      return file.getFD().valid();
    }

    @Override
    public void copyBytes(IndexOutput out, long numBytes) throws IOException {
      // Drain whatever is already buffered, then let the output pull the rest.
      numBytes -= flushBuffer(out, numBytes);
      // If out is FSIndexOutput, the copy will be optimized
      out.copyBytes(this, numBytes);
    }
  }
}
KeepBothMethods
package org.apache.lucene.store;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize, getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen = true;
length = length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen = false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
// well, we are sorta?
this.isClone = true;
}
/**
 * Opens {@code path} for reading with buffering derived from {@code context}.
 * The input spans the whole file.
 *
 * Bug fix: the original body never assigned the final fields {@code off} and
 * {@code end}, which is a compile error ("variable might not have been
 * initialized") and would leave length()/readInternal() without bounds.
 * A whole-file input starts at offset 0 and ends at the file's length.
 *
 * @param path      file to open read-only
 * @param context   IO context used by BufferedIndexInput to size the buffer
 * @param chunkSize maximum single-read length (LUCENE-1566 workaround)
 * @throws IOException if the file cannot be opened
 */
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
  super(context);
  this.file = new Descriptor(path, "r");
  this.chunkSize = chunkSize;
  this.off = 0L;
  this.end = file.length;
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len) throws IOException {
  // The Descriptor is shared (slices and clones reuse it), so seek + read
  // must be atomic with respect to other threads — hence the synchronization
  // this class is documented to suffer from.
  synchronized (file) {
    long position = off + getFilePointer();
    if (position != file.position) {
      file.seek(position);
      file.position = position;
    }
    int total = 0;
    if (position + len > end) {
      throw new IOException("read past EOF");
    }
    try {
      do {
        final int readLength;
        if (total + chunkSize > len) {
          readLength = len - total;
        } else {
          // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
          readLength = chunkSize;
        }
        final int i = file.read(b, offset + total, readLength);
        if (i == -1) {
          // Bug fix: RandomAccessFile.read returns -1 at EOF. Without this
          // check, total += -1 goes backwards and the loop never terminates
          // (and file.position drifts). Can happen if the file shrank after
          // 'end' was captured.
          throw new IOException("read past EOF");
        }
        file.position += i;
        total += i;
      } while (total < len);
    } catch (OutOfMemoryError e) {
      // Propagate OOM, adding a hint for 32-bit VM users hitting LUCENE-1566.
      final OutOfMemoryError outOfMemoryError = new OutOfMemoryError("OutOfMemoryError likely caused by the Sun VM Bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")");
      outOfMemoryError.initCause(e);
      throw outOfMemoryError;
    }
  }
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone)
file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput) super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
package org.apache.lucene.store;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize, getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen = true;
length = length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen = false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
// well, we are sorta?
this.isClone = true;
}
/**
 * Opens {@code path} for reading with buffering derived from {@code context}.
 * The input spans the whole file.
 *
 * Bug fix: the original body never assigned the final fields {@code off} and
 * {@code end}, which is a compile error ("variable might not have been
 * initialized") and would leave length()/readInternal() without bounds.
 * A whole-file input starts at offset 0 and ends at the file's length.
 *
 * @param path      file to open read-only
 * @param context   IO context used by BufferedIndexInput to size the buffer
 * @param chunkSize maximum single-read length (LUCENE-1566 workaround)
 * @throws IOException if the file cannot be opened
 */
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
  super(context);
  this.file = new Descriptor(path, "r");
  this.chunkSize = chunkSize;
  this.off = 0L;
  this.end = file.length;
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len) throws IOException {
  // The Descriptor is shared (slices and clones reuse it), so seek + read
  // must be atomic with respect to other threads — hence the synchronization
  // this class is documented to suffer from.
  synchronized (file) {
    long position = off + getFilePointer();
    if (position != file.position) {
      file.seek(position);
      file.position = position;
    }
    int total = 0;
    if (position + len > end) {
      throw new IOException("read past EOF");
    }
    try {
      do {
        final int readLength;
        if (total + chunkSize > len) {
          readLength = len - total;
        } else {
          // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
          readLength = chunkSize;
        }
        final int i = file.read(b, offset + total, readLength);
        if (i == -1) {
          // Bug fix: RandomAccessFile.read returns -1 at EOF. Without this
          // check, total += -1 goes backwards and the loop never terminates
          // (and file.position drifts). Can happen if the file shrank after
          // 'end' was captured.
          throw new IOException("read past EOF");
        }
        file.position += i;
        total += i;
      } while (total < len);
    } catch (OutOfMemoryError e) {
      // Propagate OOM, adding a hint for 32-bit VM users hitting LUCENE-1566.
      final OutOfMemoryError outOfMemoryError = new OutOfMemoryError("OutOfMemoryError likely caused by the Sun VM Bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")");
      outOfMemoryError.initCause(e);
      throw outOfMemoryError;
    }
  }
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone)
file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput) super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
Safe
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
// throw our original exception
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
<<<<<<< MINE
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
super(context);
file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
}
=======
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
>>>>>>> YOURS
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
this.isClone = true; // well, we are sorta?
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = off + getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
if (position + len > end) {
throw new IOException("read past EOF");
}
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.store.IOContext.Context;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
// throw our original exception
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
<<<<<<< MINE
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
super(context);
file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
}
=======
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
>>>>>>> YOURS
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
this.isClone = true; // well, we are sorta?
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = off + getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
if (position + len > end) {
throw new IOException("read past EOF");
}
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
Unstructured
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
<<<<<<< MINE
import org.apache.lucene.util.IOUtils;
=======
import org.apache.lucene.store.IOContext.Context;
>>>>>>> YOURS
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
// throw our original exception
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
<<<<<<< MINE
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
=======
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
super(context);
file = new Descriptor(path, "r");
>>>>>>> YOURS
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
this.isClone = true; // well, we are sorta?
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = off + getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
if (position + len > end) {
throw new IOException("read past EOF");
}
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.util.IOUtils;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), context, getReadChunkSize());
}
@Override
public CompoundFileDirectory openCompoundInput(String name, int bufferSize) throws IOException {
return new SimpleFSCompoundFileDirectory(name, bufferSize);
}
private final class SimpleFSCompoundFileDirectory extends CompoundFileDirectory {
private SimpleFSIndexInput.Descriptor fd;
public SimpleFSCompoundFileDirectory(String fileName, int readBufferSize) throws IOException {
super(SimpleFSDirectory.this, fileName, readBufferSize);
IndexInput stream = null;
try {
final File f = new File(SimpleFSDirectory.this.getDirectory(), fileName);
fd = new SimpleFSIndexInput.Descriptor(f, "r");
stream = new SimpleFSIndexInput(fd, 0, fd.length, readBufferSize,
getReadChunkSize());
initForRead(CompoundFileDirectory.readEntries(stream, SimpleFSDirectory.this, fileName));
stream.close();
} catch (IOException e) {
// throw our original exception
IOUtils.closeSafely(e, fd, stream);
}
}
@Override
public IndexInput openInputSlice(String id, long offset, long length, int readBufferSize) throws IOException {
return new SimpleFSIndexInput(fd, offset, length, readBufferSize, getReadChunkSize());
}
@Override
public synchronized void close() throws IOException {
try {
fd.close();
} finally {
super.close();
}
}
}
protected static class SimpleFSIndexInput extends BufferedIndexInput {
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
long position;
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
protected final Descriptor file;
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
protected final long off;
protected final long end;
<<<<<<< MINE
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = new Descriptor(path, "r");
=======
public SimpleFSIndexInput(File path, IOContext context, int chunkSize) throws IOException {
super(context);
file = new Descriptor(path, "r");
>>>>>>> YOURS
this.chunkSize = chunkSize;
this.off = 0L;
this.end = file.length;
}
public SimpleFSIndexInput(Descriptor file, long off, long length, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
this.file = file;
this.chunkSize = chunkSize;
this.off = off;
this.end = off + length;
this.isClone = true; // well, we are sorta?
}
/** IndexInput methods */
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = off + getFilePointer();
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
if (position + len > end) {
throw new IOException("read past EOF");
}
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a value smaller than the current chunk size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return end - off;
}
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
@Override
public void copyBytes(IndexOutput out, long numBytes) throws IOException {
numBytes -= flushBuffer(out, numBytes);
// If out is FSIndexOutput, the copy will be optimized
out.copyBytes(this, numBytes);
}
}
}
Diff Result
No diff
Case 38 - java_lucenesolr.rev_6e8e0_28cae..TestDocValues.java
Base
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
/**
 * Randomized round-trip tests for the low-level IndexDocValues
 * readers/writers (Bytes, Ints, Floats): values are written through a
 * Writer, then verified via both Source (random access) and ValuesEnum
 * (forward iteration / advance).
 */
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
// Writes random strings to even doc ids only (odd ids stay empty), then
// checks enum access, source access and — in SORTED mode — ord lookups
// for both present and absent values.
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes);
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 3 + random.nextInt(7);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// writer must release all tracked bytes once finished
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc);
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative result encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
// Round-trips longs across a range of magnitudes (maxV doubles every
// pass) plus explicit min/max boundary pairs.
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes);
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
// docs past the written range read back as 0
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
// Round-trips random floats/doubles at the given byte precision (4 or 8);
// delta is the tolerance for the enum comparisons.
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes);
final int NUM_VALUES = 777 + random.nextInt(777);
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random
.nextDouble();
values[i] = v;
w.add(i, v);
}
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
// Randomly exercise both access paths so each test run covers both.
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
/**
 * Randomized round-trip tests for the low-level IndexDocValues
 * readers/writers (Bytes, Ints, Floats): values are written through a
 * Writer, then verified via both Source (random access) and ValuesEnum
 * (forward iteration / advance).
 */
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
// Writes random strings to even doc ids only (odd ids stay empty), then
// checks enum access, source access and — in SORTED mode — ord lookups
// for both present and absent values.
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes);
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 3 + random.nextInt(7);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// writer must release all tracked bytes once finished
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc);
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative result encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
// Round-trips longs across a range of magnitudes (maxV doubles every
// pass) plus explicit min/max boundary pairs.
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes);
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
// docs past the written range read back as 0
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
// Round-trips random floats/doubles at the given byte precision (4 or 8);
// delta is the tolerance for the enum comparisons.
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes);
final int NUM_VALUES = 777 + random.nextInt(777);
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random
.nextDouble();
values[i] = v;
w.add(i, v);
}
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
// Randomly exercise both access paths so each test run covers both.
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}
Left
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
// STRAIGHT mode: bytes stored verbatim; exercised fixed- and variable-length.
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
// DEREF mode: duplicate values shared via references; fixed- and variable-length.
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
// SORTED mode: values deduped and ordered, enabling ord lookups; both lengths.
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
// Writes random strings to even doc ids only (odd ids stay empty), then
// verifies them through the enum, through the (sorted) source, and — in
// SORTED mode — via positive and negative ord lookups.
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes);
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// writer must release all tracked bytes once finished
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc);
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative result encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
// Writes boundary min/max pairs through a VAR_INTS writer and checks the
// reader reports the expected packed representation (the writer may fall
// back to FIXED_INTS_64 when the range cannot be packed smaller) and
// round-trips both values via source and enum.
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
{ Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
{ Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
{ -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
ValueType.VAR_INTS, };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
r.close();
dir.close();
}
}
/** Round-trips variable-length ints over the full 63-bit magnitude range. */
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
/** Round-trips fixed-width ints at each supported width (64/32/16/8 bits). */
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
/**
 * Writes a tiny FIXED_INTS_8 field and verifies the loaded Source exposes it
 * as a raw byte[] with identical contents.
 */
public void testGetInt8Array() throws IOException {
  byte[] sourceArray = new byte[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  byte[] loaded = ((byte[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_16 field and verifies the loaded Source exposes it
 * as a raw short[] with identical contents.
 */
public void testGetInt16Array() throws IOException {
  short[] sourceArray = new short[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  short[] loaded = ((short[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_64 field and verifies the loaded Source exposes it
 * as a raw long[] with identical contents.
 */
public void testGetInt64Array() throws IOException {
  long[] sourceArray = new long[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  long[] loaded = ((long[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_32 field and verifies the loaded Source exposes it
 * as a raw int[] with identical contents.
 */
public void testGetInt32Array() throws IOException {
  int[] sourceArray = new int[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  int[] loaded = ((int[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny 4-byte-precision Floats field and verifies the loaded Source
 * exposes it as a raw float[] with identical contents.
 */
public void testGetFloat32Array() throws IOException {
  float[] sourceArray = new float[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  float[] loaded = ((float[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny 8-byte-precision Floats field and verifies the loaded Source
 * exposes it as a raw double[] with identical contents.
 */
public void testGetFloat64Array() throws IOException {
  double[] sourceArray = new double[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  double[] loaded = ((double[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
  }
  r.close();
  dir.close();
}
/**
 * Round-trips NUM_VALUES random longs through an Ints writer/reader for the
 * given {@link ValueType}, doubling the allowed magnitude each outer iteration
 * up to {@code maxBit} bits. Verifies Source random access, nextDoc()
 * enumeration and advance(), including the zero-valued padding docs created by
 * finishing with more docs than values.
 */
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
// one full write/read cycle per bit width; maxV doubles each round
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
// note: % keeps the magnitude <= maxV but can yield negative values
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// finish with extra docs: trailing docs without a value must read as 0
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
// two iterations so the second pass exercises the cached source
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
// sequential enumeration via nextDoc()
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
// skipping enumeration via advance() with random strides
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
/** Round-trips 4-byte floats; small delta absorbs float->double widening. */
public void testFloats4() throws IOException {
  runTestFloats(4, 0.00001);
}
/**
 * Round-trips NUM_VALUES random floating point values through a Floats
 * writer/reader at the given byte precision. Verifies Source random access,
 * nextDoc() enumeration and advance(), including the zero-valued padding docs
 * created by finishing with more docs than values.
 *
 * @param precision bytes per stored value: 4 (float) or 8 (double)
 * @param delta allowed error when comparing values read back via the enum
 */
private void runTestFloats(int precision, double delta) throws IOException {
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", precision, trackBytes);
  // fix: stray empty statement (double semicolon) removed
  final int NUM_VALUES = 777 + random.nextInt(777);
  final double[] values = new double[NUM_VALUES];
  for (int i = 0; i < NUM_VALUES; i++) {
    final double v = precision == 4 ? random.nextFloat() : random
        .nextDouble();
    values[i] = v;
    w.add(i, v);
  }
  // finish with extra docs: trailing docs without a value must read as 0
  final int additionalValues = 1 + random.nextInt(10);
  w.finish(NUM_VALUES + additionalValues);
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues);
  // two iterations so the second pass exercises the cached source
  for (int iter = 0; iter < 2; iter++) {
    Source s = getSource(r);
    for (int i = 0; i < NUM_VALUES; i++) {
      assertEquals(values[i], s.getFloat(i), 0.0f);
    }
  }
  // sequential enumeration via nextDoc()
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
      assertEquals(i, fEnum.nextDoc());
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
    fEnum.close();
  }
  // skipping enumeration via advance() with random strides
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
      assertEquals(i, fEnum.advance(i));
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
    fEnum.close();
  }
  r.close();
  dir.close();
}
/** Round-trips 8-byte doubles; exact comparison (delta 0). */
public void testFloats8() throws IOException {
  runTestFloats(8, 0.0);
}
/** Randomly obtains a ValuesEnum either directly or via a Source, covering both paths. */
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.getEnum();
  }
  return getSource(values).getEnum();
}
/** Randomly uses eager load() or getSource() (which caches internally). */
private Source getSource(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.load();
  }
  return values.getSource();
}
/** Randomly uses eager loadSorted() or the cached sorted-source accessor. */
private SortedSource getSortedSource(IndexDocValues values,
    Comparator<BytesRef> comparator) throws IOException {
  if (random.nextBoolean()) {
    return values.loadSorted(comparator);
  }
  return values.getSortedSorted(comparator);
}
}
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
/** Exercises runTestBytes in STRAIGHT mode, fixed- and variable-length values. */
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
/** Exercises runTestBytes in DEREF mode, fixed- and variable-length values. */
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
/** Exercises runTestBytes in SORTED mode, fixed- and variable-length values. */
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
/**
 * Writes string values for the even docs 0..198 in the given {@link Bytes.Mode},
 * then verifies them via enum access, Source random access and - in SORTED
 * mode - ord-based lookups plus binary-search lookups of random strings that
 * may or may not be present.
 */
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
// SORTED mode needs a comparator; the other modes pass null
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes);
// only even docs 0..198 receive a value; docs 199..219 have none
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc);
// enum access; two passes so the second exercises any caching
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
// SORTED only: doc -> ord -> value must agree with the original value
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative ord encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
// NOTE(review): ord 1 here suggests ords are 1-based in SORTED mode - confirm
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
// clone 'before' because getByOrd reuses the shared bytesRef
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
/**
 * Writes extreme min/max pairs through a {@link ValueType#VAR_INTS} writer and
 * checks that the reader reports the expected concrete type for each pair (per
 * the parallel {@code expectedTypes} array) and that both values round-trip
 * exactly via Source and enum access.
 */
public void testVariableIntsLimits() throws IOException {
  long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
      { Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
      { Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
      { -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
  // expectedTypes[i] corresponds to minMax[i]
  ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
      ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
      ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
      ValueType.VAR_INTS, };
  for (int i = 0; i < minMax.length; i++) {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
    w.add(0, minMax[i][0]);
    w.add(1, minMax[i][1]);
    w.finish(2);
    // writer must have released all tracked bytes after finish()
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Ints.getValues(dir, "test", 2);
    Source source = getSource(r);
    assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
        expectedTypes[i], source.type());
    assertEquals(minMax[i][0], source.getInt(0));
    assertEquals(minMax[i][1], source.getInt(1));
    ValuesEnum iEnum = getEnum(r);
    assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
        expectedTypes[i], iEnum.type());
    assertEquals(0, iEnum.nextDoc());
    assertEquals(minMax[i][0], iEnum.intsRef.get());
    assertEquals(1, iEnum.nextDoc());
    assertEquals(minMax[i][1], iEnum.intsRef.get());
    assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
    // fix: close the enum, consistent with every other enum use in this class
    iEnum.close();
    r.close();
    dir.close();
  }
}
/** Round-trips variable-length ints over the full 63-bit magnitude range. */
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
/** Round-trips fixed-width ints at each supported width (64/32/16/8 bits). */
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
/**
 * Writes a tiny FIXED_INTS_8 field and verifies the loaded Source exposes it
 * as a raw byte[] with identical contents.
 */
public void testGetInt8Array() throws IOException {
  byte[] sourceArray = new byte[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  byte[] loaded = ((byte[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_16 field and verifies the loaded Source exposes it
 * as a raw short[] with identical contents.
 */
public void testGetInt16Array() throws IOException {
  short[] sourceArray = new short[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  short[] loaded = ((short[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_64 field and verifies the loaded Source exposes it
 * as a raw long[] with identical contents.
 */
public void testGetInt64Array() throws IOException {
  long[] sourceArray = new long[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  long[] loaded = ((long[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny FIXED_INTS_32 field and verifies the loaded Source exposes it
 * as a raw int[] with identical contents.
 */
public void testGetInt32Array() throws IOException {
  int[] sourceArray = new int[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  int[] loaded = ((int[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny 4-byte-precision Floats field and verifies the loaded Source
 * exposes it as a raw float[] with identical contents.
 */
public void testGetFloat32Array() throws IOException {
  float[] sourceArray = new float[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  float[] loaded = ((float[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
  }
  r.close();
  dir.close();
}
/**
 * Writes a tiny 8-byte-precision Floats field and verifies the loaded Source
 * exposes it as a raw double[] with identical contents.
 */
public void testGetFloat64Array() throws IOException {
  double[] sourceArray = new double[] {1,2,3};
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  double[] loaded = ((double[])source.getArray());
  // fix: expected value first, per JUnit convention (args were swapped)
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
  }
  r.close();
  dir.close();
}
/**
 * Round-trips NUM_VALUES random longs through an Ints writer/reader for the
 * given {@link ValueType}, doubling the allowed magnitude each outer iteration
 * up to {@code maxBit} bits. Verifies Source random access, nextDoc()
 * enumeration and advance(), including the zero-valued padding docs created by
 * finishing with more docs than values.
 */
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
// one full write/read cycle per bit width; maxV doubles each round
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
// note: % keeps the magnitude <= maxV but can yield negative values
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// finish with extra docs: trailing docs without a value must read as 0
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
// two iterations so the second pass exercises the cached source
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
// sequential enumeration via nextDoc()
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
// skipping enumeration via advance() with random strides
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
/** Round-trips 4-byte floats; small delta absorbs float->double widening. */
public void testFloats4() throws IOException {
  runTestFloats(4, 0.00001);
}
/**
 * Round-trips NUM_VALUES random floating point values through a Floats
 * writer/reader at the given byte precision. Verifies Source random access,
 * nextDoc() enumeration and advance(), including the zero-valued padding docs
 * created by finishing with more docs than values.
 *
 * @param precision bytes per stored value: 4 (float) or 8 (double)
 * @param delta allowed error when comparing values read back via the enum
 */
private void runTestFloats(int precision, double delta) throws IOException {
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", precision, trackBytes);
  // fix: stray empty statement (double semicolon) removed
  final int NUM_VALUES = 777 + random.nextInt(777);
  final double[] values = new double[NUM_VALUES];
  for (int i = 0; i < NUM_VALUES; i++) {
    final double v = precision == 4 ? random.nextFloat() : random
        .nextDouble();
    values[i] = v;
    w.add(i, v);
  }
  // finish with extra docs: trailing docs without a value must read as 0
  final int additionalValues = 1 + random.nextInt(10);
  w.finish(NUM_VALUES + additionalValues);
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues);
  // two iterations so the second pass exercises the cached source
  for (int iter = 0; iter < 2; iter++) {
    Source s = getSource(r);
    for (int i = 0; i < NUM_VALUES; i++) {
      assertEquals(values[i], s.getFloat(i), 0.0f);
    }
  }
  // sequential enumeration via nextDoc()
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
      assertEquals(i, fEnum.nextDoc());
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
    fEnum.close();
  }
  // skipping enumeration via advance() with random strides
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
      assertEquals(i, fEnum.advance(i));
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
    fEnum.close();
  }
  r.close();
  dir.close();
}
/** Round-trips 8-byte doubles; exact comparison (delta 0). */
public void testFloats8() throws IOException {
  runTestFloats(8, 0.0);
}
/** Randomly obtains a ValuesEnum either directly or via a Source, covering both paths. */
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.getEnum();
  }
  return getSource(values).getEnum();
}
/** Randomly uses eager load() or getSource() (which caches internally). */
private Source getSource(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.load();
  }
  return values.getSource();
}
/** Randomly uses eager loadSorted() or the cached sorted-source accessor. */
private SortedSource getSortedSource(IndexDocValues values,
    Comparator<BytesRef> comparator) throws IOException {
  if (random.nextBoolean()) {
    return values.loadSorted(comparator);
  }
  return values.getSortedSorted(comparator);
}
}
Right
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
/** Exercises runTestBytes in STRAIGHT mode, fixed- and variable-length values. */
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
/** Exercises runTestBytes in DEREF mode, fixed- and variable-length values. */
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
/** Exercises runTestBytes in SORTED mode, fixed- and variable-length values. */
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
/**
 * Writes string values for the even docs 0..198 in the given {@link Bytes.Mode},
 * then verifies them via enum access, Source random access and - in SORTED
 * mode - ord-based lookups plus binary-search lookups of random strings that
 * may or may not be present. This variant passes an {@code IOContext} to the
 * writer and reader factories.
 */
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
// SORTED mode needs a comparator; the other modes pass null
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
// only even docs 0..198 receive a value; docs 199..219 have none
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 3 + random.nextInt(7);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
// enum access; two passes so the second exercises any caching
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
// SORTED only: doc -> ord -> value must agree with the original value
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative ord encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
// NOTE(review): ord 1 here suggests ords are 1-based in SORTED mode - confirm
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
// clone 'before' because getByOrd reuses the shared bytesRef
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
/**
 * Round-trips random longs through an Ints writer/reader. Each pass pins docs
 * 0 and 1 to one of the extreme pairs in {@code maxMin}, fills the remaining
 * docs with random values of increasing magnitude (up to 63 bits), and checks
 * Source random access, nextDoc() enumeration and advance(), including the
 * zero-valued padding docs created by finishing with more docs than values.
 */
public void testInts() throws IOException {
// flattened (min, max) pairs; consumed two at a time below
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
// one full write/read cycle per bit width; maxV doubles each round
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
// docs 0 and 1 carry the extreme pair for this j
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
// note: % keeps the magnitude <= maxV but can yield negative values
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// finish with extra docs: trailing docs without a value must read as 0
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
// two iterations so the second pass exercises the cached source
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
// sequential enumeration via nextDoc()
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
// skipping enumeration via advance() with random strides
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
/**
 * Writes NUM_VALUES random floating point values at the given byte precision,
 * then reads them back three ways -- random access via {@code Source},
 * sequential access via {@code nextDoc()}, and skipping access via
 * {@code advance()} -- verifying each value within {@code delta} and that
 * trailing docs written without a value read back as 0.0.
 *
 * @param precision bytes per stored value: 4 (float) or 8 (double)
 * @param delta maximum allowed error when comparing values read back
 */
private void runTestFloats(int precision, double delta) throws IOException {
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
  // fixed: stray empty statement (";;") removed
  final int NUM_VALUES = 777 + random.nextInt(777);
  final double[] values = new double[NUM_VALUES];
  for (int i = 0; i < NUM_VALUES; i++) {
    final double v = precision == 4 ? random.nextFloat() : random.nextDouble();
    values[i] = v;
    w.add(i, v);
  }
  // Docs beyond the last added value must come back as 0.0.
  final int additionalValues = 1 + random.nextInt(10);
  w.finish(NUM_VALUES + additionalValues);
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
  // Random access; two iterations exercise the internal source cache.
  for (int iter = 0; iter < 2; iter++) {
    Source s = getSource(r);
    for (int i = 0; i < NUM_VALUES; i++) {
      assertEquals(values[i], s.getFloat(i), 0.0f);
    }
  }
  // Sequential access via nextDoc().
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
      assertEquals(i, fEnum.nextDoc());
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
    fEnum.close();
  }
  // Skipping access via advance() with random strides.
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
      assertEquals(i, fEnum.advance(i));
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
    fEnum.close();
  }
  r.close();
  dir.close();
}
/**
 * Round-trips double-precision (8 byte) float values; delta 0.0 because the
 * stored representation is exact.
 */
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
/**
 * Obtains a {@code ValuesEnum} for the given doc values, randomly choosing
 * between the reader's own enum and one created from a Source, so both code
 * paths get exercised across test runs.
 */
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.getEnum();
  }
  return getSource(values).getEnum();
}
/**
 * Obtains a {@code Source}, randomly choosing between an explicit load and
 * the cached accessor (getSource uses a cache internally).
 */
private Source getSource(IndexDocValues values) throws IOException {
  final boolean loadExplicitly = random.nextBoolean();
  if (loadExplicitly) {
    return values.load();
  }
  return values.getSource();
}
/**
 * Obtains a {@code SortedSource}, randomly choosing between an explicit load
 * and the cached accessor.
 * NOTE(review): the accessor is named {@code getSortedSorted} -- looks like a
 * doubled word in the API name; confirm against the IndexDocValues interface.
 */
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
/**
 * Randomized round-trip tests for the low level doc values writers and
 * readers: byte values (straight / deref / sorted, fixed and variable
 * length), int values and float values. Every test writes through a
 * {@code Writer}, reads back through {@code Source} and {@code ValuesEnum},
 * and verifies equality.
 */
public class TestDocValues extends LuceneTestCase {
  // TODO -- for sorted test, do our own Sort of the
  // values and verify it's identical

  public void testBytesStraight() throws IOException {
    runTestBytes(Bytes.Mode.STRAIGHT, true);
    runTestBytes(Bytes.Mode.STRAIGHT, false);
  }

  public void testBytesDeref() throws IOException {
    runTestBytes(Bytes.Mode.DEREF, true);
    runTestBytes(Bytes.Mode.DEREF, false);
  }

  public void testBytesSorted() throws IOException {
    runTestBytes(Bytes.Mode.SORTED, true);
    runTestBytes(Bytes.Mode.SORTED, false);
  }

  /**
   * Writes random unicode strings on the even documents (0, 2, 4, ...) out of
   * maxDoc=220, then verifies them via enum access, random access and -- for
   * SORTED mode -- ordinal lookups including binary-search misses.
   *
   * @param mode      bytes storage mode (STRAIGHT, DEREF or SORTED)
   * @param fixedSize whether all values share one byte length
   */
  public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
      throws IOException {
    final BytesRef bytesRef = new BytesRef();
    final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
        .getUTF8SortedAsUnicodeComparator() : null;
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
    int maxDoc = 220;
    final String[] values = new String[maxDoc];
    final int fixedLength = 3 + random.nextInt(7);
    for (int i = 0; i < 100; i++) {
      final String s;
      if (i > 0 && random.nextInt(5) <= 2) {
        // use prior value
        s = values[2 * random.nextInt(i)];
      } else {
        s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
      }
      values[2 * i] = s;
      UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
      w.add(2 * i, bytesRef);
    }
    w.finish(maxDoc);
    // writer must release every byte it accounted against trackBytes
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum bytesEnum = getEnum(r);
      assertNotNull("enum is null", bytesEnum);
      BytesRef ref = bytesEnum.bytes();
      for (int i = 0; i < 2; i++) {
        final int idx = 2 * i;
        assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
        String utf8String = ref.utf8ToString();
        assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
            + " lenRight: " + utf8String.length(), values[idx], utf8String);
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
      bytesEnum.close();
    }
    // Verify we can load source twice:
    for (int iter = 0; iter < 2; iter++) {
      Source s;
      IndexDocValues.SortedSource ss;
      if (mode == Bytes.Mode.SORTED) {
        s = ss = getSortedSource(r, comp);
      } else {
        s = getSource(r);
        ss = null;
      }
      for (int i = 0; i < 100; i++) {
        final int idx = 2 * i;
        assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
            bytesRef));
        assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
            .utf8ToString());
        if (ss != null) {
          assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
              bytesRef).utf8ToString());
          int ord = ss
              .getByValue(new BytesRef(values[idx]));
          assertTrue(ord >= 0);
          assertEquals(ss.ord(idx), ord);
        }
      }
      // Lookup random strings:
      if (mode == Bytes.Mode.SORTED) {
        final int numValues = ss.getValueCount();
        for (int i = 0; i < 1000; i++) {
          BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
          int ord = ss.getByValue(bytesValue);
          if (ord >= 0) {
            assertTrue(bytesValue
                .bytesEquals(ss.getByOrd(ord, bytesRef)));
            int count = 0;
            for (int k = 0; k < 100; k++) {
              if (bytesValue.utf8ToString().equals(values[2 * k])) {
                assertEquals(ss.ord(2 * k), ord);
                count++;
              }
            }
            assertTrue(count > 0);
          } else {
            // negative ord encodes the insertion point as (-ord)-1
            assert ord < 0;
            int insertIndex = (-ord)-1;
            if (insertIndex == 0) {
              // NOTE(review): ord 1 used as the first value suggests ord 0 is
              // reserved for docs without a value -- confirm against the
              // SortedSource contract.
              final BytesRef firstRef = ss.getByOrd(1, bytesRef);
              // random string was before our first
              assertTrue(firstRef.compareTo(bytesValue) > 0);
            } else if (insertIndex == numValues) {
              final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
              // random string was after our last
              assertTrue(lastRef.compareTo(bytesValue) < 0);
            } else {
              // random string fell between two stored values; clone "before"
              // since getByOrd reuses the shared bytesRef
              final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
                  .clone();
              BytesRef after = ss.getByOrd(insertIndex, bytesRef);
              assertTrue(comp.compare(before, bytesValue) < 0);
              assertTrue(comp.compare(bytesValue, after) < 0);
            }
          }
        }
      }
    }
    r.close();
    dir.close();
  }

  /**
   * Writes longs bounded by a growing number of bits (1..62), always seeding
   * docs 0 and 1 with extreme min/max pairs, then verifies Source access,
   * nextDoc() and advance(). Docs past the last written value must read as 0.
   */
  public void testInts() throws IOException {
    long[] maxMin = new long[] {
        Long.MIN_VALUE, Long.MAX_VALUE,
        1, Long.MAX_VALUE,
        0, Long.MAX_VALUE,
        -1, Long.MAX_VALUE,
        Long.MIN_VALUE, -1,
        random.nextInt(), random.nextInt() };
    for (int j = 0; j < maxMin.length; j+=2) {
      long maxV = 1;
      final int NUM_VALUES = 777 + random.nextInt(777);
      final long[] values = new long[NUM_VALUES];
      for (int rx = 1; rx < 63; rx++, maxV *= 2) {
        Directory dir = newDirectory();
        final AtomicLong trackBytes = new AtomicLong(0);
        Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
        values[0] = maxMin[j];
        w.add(0, values[0]);
        values[1] = maxMin[j+1];
        w.add(1, values[1]);
        for (int i = 2; i < NUM_VALUES; i++) {
          // nextLong() % (1+maxV) keeps |v| <= maxV (sign may be negative)
          final long v = random.nextLong() % (1 + maxV);
          values[i] = v;
          w.add(i, v);
        }
        final int additionalDocs = 1 + random.nextInt(9);
        w.finish(NUM_VALUES + additionalDocs);
        assertEquals(0, trackBytes.get());
        IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
        for (int iter = 0; iter < 2; iter++) {
          Source s = getSource(r);
          for (int i = 0; i < NUM_VALUES; i++) {
            final long v = s.getInt(i);
            assertEquals("index " + i, values[i], v);
          }
        }
        for (int iter = 0; iter < 2; iter++) {
          ValuesEnum iEnum = getEnum(r);
          LongsRef ints = iEnum.getInt();
          for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
            assertEquals(i, iEnum.nextDoc());
            if (i < NUM_VALUES) {
              assertEquals(values[i], ints.get());
            } else {
              assertEquals(0, ints.get());
            }
          }
          assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
          iEnum.close();
        }
        for (int iter = 0; iter < 2; iter++) {
          ValuesEnum iEnum = getEnum(r);
          LongsRef ints = iEnum.getInt();
          for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
            assertEquals(i, iEnum.advance(i));
            if (i < NUM_VALUES) {
              assertEquals(values[i], ints.get());
            } else {
              assertEquals(0, ints.get());
            }
          }
          assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
          iEnum.close();
        }
        r.close();
        dir.close();
      }
    }
  }

  public void testFloats4() throws IOException {
    runTestFloats(4, 0.00001);
  }

  /**
   * Float round-trip at the given byte precision (4 or 8); values are checked
   * via Source, nextDoc() and advance(), within {@code delta}.
   */
  private void runTestFloats(int precision, double delta) throws IOException {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
    // fixed: stray empty statement (";;") removed
    final int NUM_VALUES = 777 + random.nextInt(777);
    final double[] values = new double[NUM_VALUES];
    for (int i = 0; i < NUM_VALUES; i++) {
      final double v = precision == 4 ? random.nextFloat() : random
          .nextDouble();
      values[i] = v;
      w.add(i, v);
    }
    final int additionalValues = 1 + random.nextInt(10);
    w.finish(NUM_VALUES + additionalValues);
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      Source s = getSource(r);
      for (int i = 0; i < NUM_VALUES; i++) {
        assertEquals(values[i], s.getFloat(i), 0.0f);
      }
    }
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
        assertEquals(i, fEnum.nextDoc());
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
      fEnum.close();
    }
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
        assertEquals(i, fEnum.advance(i));
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
      fEnum.close();
    }
    r.close();
    dir.close();
  }

  public void testFloats8() throws IOException {
    runTestFloats(8, 0.0);
  }

  /** Randomly picks the reader's own enum or a Source-backed enum. */
  private ValuesEnum getEnum(IndexDocValues values) throws IOException {
    return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
  }

  /** Randomly picks an explicit load or the (cached) source accessor. */
  private Source getSource(IndexDocValues values) throws IOException {
    // getSource uses cache internally
    return random.nextBoolean() ? values.load() : values.getSource();
  }

  /** Sorted variant of {@link #getSource}. */
  private SortedSource getSortedSource(IndexDocValues values,
      Comparator<BytesRef> comparator) throws IOException {
    // getSortedSource uses cache internally
    return random.nextBoolean() ? values.loadSorted(comparator) : values
        .getSortedSorted(comparator);
  }
}
MergeMethods
package org.apache.lucene.index.values;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
/**
 * Randomized round-trip tests for the low level doc values writers and
 * readers: bytes (straight / deref / sorted), fixed and variable width ints
 * (including the typed array accessors), and floats.
 */
public class TestDocValues extends LuceneTestCase {
  // TODO -- for sorted test, do our own Sort of the
  // values and verify it's identical

  public void testBytesStraight() throws IOException {
    runTestBytes(Bytes.Mode.STRAIGHT, true);
    runTestBytes(Bytes.Mode.STRAIGHT, false);
  }

  public void testBytesDeref() throws IOException {
    runTestBytes(Bytes.Mode.DEREF, true);
    runTestBytes(Bytes.Mode.DEREF, false);
  }

  public void testBytesSorted() throws IOException {
    runTestBytes(Bytes.Mode.SORTED, true);
    runTestBytes(Bytes.Mode.SORTED, false);
  }

  /**
   * Writes random unicode strings on the even documents (0, 2, 4, ...) out of
   * maxDoc=220, then verifies them via enum access, random access and -- for
   * SORTED mode -- ordinal lookups including binary-search misses.
   *
   * @param mode      bytes storage mode (STRAIGHT, DEREF or SORTED)
   * @param fixedSize whether all values share one byte length
   */
  public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize) throws IOException {
    final BytesRef bytesRef = new BytesRef();
    final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef.getUTF8SortedAsUnicodeComparator() : null;
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
    int maxDoc = 220;
    final String[] values = new String[maxDoc];
    final int fixedLength = 1 + atLeast(50);
    for (int i = 0; i < 100; i++) {
      final String s;
      if (i > 0 && random.nextInt(5) <= 2) {
        // use prior value
        s = values[2 * random.nextInt(i)];
      } else {
        s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39));
      }
      values[2 * i] = s;
      UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
      w.add(2 * i, bytesRef);
    }
    w.finish(maxDoc);
    // writer must release every byte it accounted against trackBytes
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum bytesEnum = getEnum(r);
      assertNotNull("enum is null", bytesEnum);
      BytesRef ref = bytesEnum.bytes();
      for (int i = 0; i < 2; i++) {
        final int idx = 2 * i;
        assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
        String utf8String = ref.utf8ToString();
        assertEquals("doc: " + idx + " lenLeft: " + values[idx].length() + " lenRight: " + utf8String.length(), values[idx], utf8String);
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
      bytesEnum.close();
    }
    // Verify we can load source twice:
    for (int iter = 0; iter < 2; iter++) {
      Source s;
      IndexDocValues.SortedSource ss;
      if (mode == Bytes.Mode.SORTED) {
        s = ss = getSortedSource(r, comp);
      } else {
        s = getSource(r);
        ss = null;
      }
      for (int i = 0; i < 100; i++) {
        final int idx = 2 * i;
        assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx, bytesRef));
        assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef).utf8ToString());
        if (ss != null) {
          assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx), bytesRef).utf8ToString());
          int ord = ss.getByValue(new BytesRef(values[idx]));
          assertTrue(ord >= 0);
          assertEquals(ss.ord(idx), ord);
        }
      }
      // Lookup random strings:
      if (mode == Bytes.Mode.SORTED) {
        final int numValues = ss.getValueCount();
        for (int i = 0; i < 1000; i++) {
          BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39)));
          int ord = ss.getByValue(bytesValue);
          if (ord >= 0) {
            assertTrue(bytesValue.bytesEquals(ss.getByOrd(ord, bytesRef)));
            int count = 0;
            for (int k = 0; k < 100; k++) {
              if (bytesValue.utf8ToString().equals(values[2 * k])) {
                assertEquals(ss.ord(2 * k), ord);
                count++;
              }
            }
            assertTrue(count > 0);
          } else {
            // negative ord encodes the insertion point as (-ord)-1
            assert ord < 0;
            int insertIndex = (-ord) - 1;
            if (insertIndex == 0) {
              // NOTE(review): ord 1 used as the first value suggests ord 0 is
              // reserved for docs without a value -- confirm against the
              // SortedSource contract.
              final BytesRef firstRef = ss.getByOrd(1, bytesRef);
              // random string was before our first
              assertTrue(firstRef.compareTo(bytesValue) > 0);
            } else if (insertIndex == numValues) {
              final BytesRef lastRef = ss.getByOrd(numValues - 1, bytesRef);
              // random string was after our last
              assertTrue(lastRef.compareTo(bytesValue) < 0);
            } else {
              // random string fell between two stored values; clone "before"
              // since getByOrd reuses the shared bytesRef
              final BytesRef before = (BytesRef) ss.getByOrd(insertIndex - 1, bytesRef).clone();
              BytesRef after = ss.getByOrd(insertIndex, bytesRef);
              assertTrue(comp.compare(before, bytesValue) < 0);
              assertTrue(comp.compare(bytesValue, after) < 0);
            }
          }
        }
      }
    }
    r.close();
    dir.close();
  }

  /**
   * Verifies the VAR_INTS writer picks the expected encoding (FIXED_INTS_64
   * vs VAR_INTS) for extreme two-value ranges and round-trips both values.
   */
  public void testVariableIntsLimits() throws IOException {
    long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE }, { Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE }, { Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE }, { -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 } };
    ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS, ValueType.VAR_INTS };
    for (int i = 0; i < minMax.length; i++) {
      Directory dir = newDirectory();
      final AtomicLong trackBytes = new AtomicLong(0);
      Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
      w.add(0, minMax[i][0]);
      w.add(1, minMax[i][1]);
      w.finish(2);
      assertEquals(0, trackBytes.get());
      IndexDocValues r = Ints.getValues(dir, "test", 2);
      Source source = getSource(r);
      assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], source.type());
      assertEquals(minMax[i][0], source.getInt(0));
      assertEquals(minMax[i][1], source.getInt(1));
      ValuesEnum iEnum = getEnum(r);
      assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], iEnum.type());
      assertEquals(0, iEnum.nextDoc());
      assertEquals(minMax[i][0], iEnum.intsRef.get());
      assertEquals(1, iEnum.nextDoc());
      assertEquals(minMax[i][1], iEnum.intsRef.get());
      assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
      r.close();
      dir.close();
    }
  }

  public void testVInts() throws IOException {
    testInts(ValueType.VAR_INTS, 63);
  }

  public void testFixedInts() throws IOException {
    testInts(ValueType.FIXED_INTS_64, 63);
    testInts(ValueType.FIXED_INTS_32, 31);
    testInts(ValueType.FIXED_INTS_16, 15);
    testInts(ValueType.FIXED_INTS_8, 7);
  }

  /** Round-trips a small FIXED_INTS_8 field and checks the backing byte[]. */
  public void testGetInt8Array() throws IOException {
    byte[] sourceArray = new byte[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    byte[] loaded = ((byte[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }

  /** Round-trips a small FIXED_INTS_16 field and checks the backing short[]. */
  public void testGetInt16Array() throws IOException {
    short[] sourceArray = new short[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    short[] loaded = ((short[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }

  /** Round-trips a small FIXED_INTS_64 field and checks the backing long[]. */
  public void testGetInt64Array() throws IOException {
    long[] sourceArray = new long[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    long[] loaded = ((long[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }

  /** Round-trips a small FIXED_INTS_32 field and checks the backing int[]. */
  public void testGetInt32Array() throws IOException {
    int[] sourceArray = new int[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    int[] loaded = ((int[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }

  /** Round-trips a small 4-byte float field and checks the backing float[]. */
  public void testGetFloat32Array() throws IOException {
    float[] sourceArray = new float[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Floats.getValues(dir, "test", 3);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    float[] loaded = ((float[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
    }
    r.close();
    dir.close();
  }

  /** Round-trips a small 8-byte float field and checks the backing double[]. */
  public void testGetFloat64Array() throws IOException {
    double[] sourceArray = new double[] { 1, 2, 3 };
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Floats.getValues(dir, "test", 3);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    double[] loaded = ((double[]) source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
    }
    r.close();
    dir.close();
  }

  /**
   * Writes longs bounded by a growing number of bits, seeding docs 0 and 1
   * with extreme min/max pairs, then verifies Source, nextDoc() and advance().
   * NOTE(review): the {@code type} and {@code maxBit} parameters are currently
   * ignored -- the writer is created via the legacy boolean overload and the
   * bit loop is hard-coded to 63, which would overflow narrow fixed-int types
   * if wired through naively. Looks like a merge artifact; confirm intent
   * before wiring the parameters into getWriter/the loop bound.
   */
  private void testInts(ValueType type, int maxBit) throws IOException {
    long[] maxMin = new long[] { Long.MIN_VALUE, Long.MAX_VALUE, 1, Long.MAX_VALUE, 0, Long.MAX_VALUE, -1, Long.MAX_VALUE, Long.MIN_VALUE, -1, random.nextInt(), random.nextInt() };
    for (int j = 0; j < maxMin.length; j += 2) {
      long maxV = 1;
      final int NUM_VALUES = 777 + random.nextInt(777);
      final long[] values = new long[NUM_VALUES];
      for (int rx = 1; rx < 63; rx++, maxV *= 2) {
        Directory dir = newDirectory();
        final AtomicLong trackBytes = new AtomicLong(0);
        Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
        values[0] = maxMin[j];
        w.add(0, values[0]);
        values[1] = maxMin[j + 1];
        w.add(1, values[1]);
        for (int i = 2; i < NUM_VALUES; i++) {
          // nextLong() % (1+maxV) keeps |v| <= maxV (sign may be negative)
          final long v = random.nextLong() % (1 + maxV);
          values[i] = v;
          w.add(i, v);
        }
        final int additionalDocs = 1 + random.nextInt(9);
        w.finish(NUM_VALUES + additionalDocs);
        assertEquals(0, trackBytes.get());
        IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
        for (int iter = 0; iter < 2; iter++) {
          Source s = getSource(r);
          for (int i = 0; i < NUM_VALUES; i++) {
            final long v = s.getInt(i);
            assertEquals("index " + i, values[i], v);
          }
        }
        for (int iter = 0; iter < 2; iter++) {
          ValuesEnum iEnum = getEnum(r);
          LongsRef ints = iEnum.getInt();
          for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
            assertEquals(i, iEnum.nextDoc());
            if (i < NUM_VALUES) {
              assertEquals(values[i], ints.get());
            } else {
              assertEquals(0, ints.get());
            }
          }
          assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
          iEnum.close();
        }
        for (int iter = 0; iter < 2; iter++) {
          ValuesEnum iEnum = getEnum(r);
          LongsRef ints = iEnum.getInt();
          for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
            assertEquals(i, iEnum.advance(i));
            if (i < NUM_VALUES) {
              assertEquals(values[i], ints.get());
            } else {
              assertEquals(0, ints.get());
            }
          }
          assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
          iEnum.close();
        }
        r.close();
        dir.close();
      }
    }
  }

  public void testFloats4() throws IOException {
    runTestFloats(4, 0.00001);
  }

  /**
   * Float round-trip at the given byte precision (4 or 8); values are checked
   * via Source, nextDoc() and advance(), within {@code delta}.
   */
  private void runTestFloats(int precision, double delta) throws IOException {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
    // fixed: stray empty statement removed
    final int NUM_VALUES = 777 + random.nextInt(777);
    final double[] values = new double[NUM_VALUES];
    for (int i = 0; i < NUM_VALUES; i++) {
      final double v = precision == 4 ? random.nextFloat() : random.nextDouble();
      values[i] = v;
      w.add(i, v);
    }
    final int additionalValues = 1 + random.nextInt(10);
    w.finish(NUM_VALUES + additionalValues);
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      Source s = getSource(r);
      for (int i = 0; i < NUM_VALUES; i++) {
        assertEquals(values[i], s.getFloat(i), 0.0f);
      }
    }
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
        assertEquals(i, fEnum.nextDoc());
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
      fEnum.close();
    }
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
        assertEquals(i, fEnum.advance(i));
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
      fEnum.close();
    }
    r.close();
    dir.close();
  }

  public void testFloats8() throws IOException {
    runTestFloats(8, 0.0);
  }

  /** Randomly picks the reader's own enum or a Source-backed enum. */
  private ValuesEnum getEnum(IndexDocValues values) throws IOException {
    return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
  }

  /** Randomly picks an explicit load or the (cached) source accessor. */
  private Source getSource(IndexDocValues values) throws IOException {
    // getSource uses cache internally
    return random.nextBoolean() ? values.load() : values.getSource();
  }

  /** Sorted variant of {@link #getSource}. */
  private SortedSource getSortedSource(IndexDocValues values, Comparator<BytesRef> comparator) throws IOException {
    // getSortedSource uses cache internally
    return random.nextBoolean() ? values.loadSorted(comparator) : values.getSortedSorted(comparator);
  }
}
package org.apache.lucene.index.values;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
/** STRAIGHT byte storage, fixed- and variable-length values. */
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
/** DEREF (shared/de-duplicated) byte storage, fixed- and variable-length values. */
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
/** SORTED byte storage, fixed- and variable-length values. */
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
/**
 * Writes random unicode strings on the even documents (0, 2, 4, ...) out of
 * maxDoc=220, then verifies them via enum access, random Source access and --
 * for SORTED mode -- ordinal lookups including binary-search misses.
 *
 * @param mode      bytes storage mode (STRAIGHT, DEREF or SORTED)
 * @param fixedSize whether all values share one byte length
 */
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize) throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// writer must release every byte it accounted against trackBytes
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
// Enum access: spot-check the first docs, then confirm exhaustion.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length() + " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx, bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef).utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx), bytesRef).utf8ToString());
int ord = ss.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// negative ord encodes the insertion point as (-ord)-1
assert ord < 0;
int insertIndex = (-ord) - 1;
if (insertIndex == 0) {
// NOTE(review): ord 1 as the "first" value suggests ord 0 is reserved
// for docs without a value -- confirm against the SortedSource contract.
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues - 1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
// clone "before" because getByOrd reuses the shared bytesRef
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex - 1, bytesRef).clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
public void testVariableIntsLimits() throws IOException {
  // (min, max) pairs chosen to straddle the encoding limits of the
  // variable-int writer, paired with the ValueType the writer is expected
  // to select: pairs spanning (almost) the full 64-bit range must fall back
  // to FIXED_INTS_64, narrower ranges may use packed VAR_INTS.
  long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE }, { Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE }, { Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE }, { -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 } };
  ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS, ValueType.VAR_INTS };
  for (int i = 0; i < minMax.length; i++) {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
    w.add(0, minMax[i][0]);
    w.add(1, minMax[i][1]);
    w.finish(2);
    // the writer must release all tracked bytes once finished
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Ints.getValues(dir, "test", 2);
    Source source = getSource(r);
    assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], source.type());
    assertEquals(minMax[i][0], source.getInt(0));
    assertEquals(minMax[i][1], source.getInt(1));
    ValuesEnum iEnum = getEnum(r);
    assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], iEnum.type());
    assertEquals(0, iEnum.nextDoc());
    assertEquals(minMax[i][0], iEnum.intsRef.get());
    assertEquals(1, iEnum.nextDoc());
    assertEquals(minMax[i][1], iEnum.intsRef.get());
    assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
    // FIX: close the enum before releasing the reader; every other
    // enum-consuming test in this class closes its ValuesEnum.
    iEnum.close();
    r.close();
    dir.close();
  }
}
// Exercises the shared int round-trip harness with the variable-length
// encoding; 63 is the highest bit index iterated for 64-bit values.
public void testVInts() throws IOException {
  testInts(ValueType.VAR_INTS, 63);
}
// Exercises the shared int round-trip harness once per fixed-width
// encoding; the second argument is the highest usable bit index for each
// width (63 for 64-bit down to 7 for 8-bit).
public void testFixedInts() throws IOException {
  testInts(ValueType.FIXED_INTS_64, 63);
  testInts(ValueType.FIXED_INTS_32, 31);
  testInts(ValueType.FIXED_INTS_16, 15);
  testInts(ValueType.FIXED_INTS_8, 7);
}
// Writes three FIXED_INTS_8 values and verifies the Source exposes them
// as a raw byte[] via hasArray()/getArray().
public void testGetInt8Array() throws IOException {
  byte[] sourceArray = new byte[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  byte[] loaded = ((byte[]) source.getArray());
  // FIX: expected value goes first (JUnit convention) so a failure
  // message reports expected/actual the right way around.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
// Writes three FIXED_INTS_16 values and verifies the Source exposes them
// as a raw short[] via hasArray()/getArray().
public void testGetInt16Array() throws IOException {
  short[] sourceArray = new short[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  short[] loaded = ((short[]) source.getArray());
  // FIX: expected value first per JUnit convention.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
// Writes three FIXED_INTS_64 values and verifies the Source exposes them
// as a raw long[] via hasArray()/getArray().
public void testGetInt64Array() throws IOException {
  long[] sourceArray = new long[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  long[] loaded = ((long[]) source.getArray());
  // FIX: expected value first per JUnit convention.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
// Writes three FIXED_INTS_32 values and verifies the Source exposes them
// as a raw int[] via hasArray()/getArray().
public void testGetInt32Array() throws IOException {
  int[] sourceArray = new int[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  int[] loaded = ((int[]) source.getArray());
  // FIX: expected value first per JUnit convention.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
// Writes three 4-byte floats and verifies the Source exposes them as a
// raw float[] via hasArray()/getArray().
public void testGetFloat32Array() throws IOException {
  float[] sourceArray = new float[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  float[] loaded = ((float[]) source.getArray());
  // FIX: expected value first per JUnit convention.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
  }
  r.close();
  dir.close();
}
// Writes three 8-byte doubles and verifies the Source exposes them as a
// raw double[] via hasArray()/getArray().
public void testGetFloat64Array() throws IOException {
  double[] sourceArray = new double[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  double[] loaded = ((double[]) source.getArray());
  // FIX: expected value first per JUnit convention.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
  }
  r.close();
  dir.close();
}
// Round-trips ints through writer/reader: each outer pass seeds docs 0/1
// with an extreme (min, max) pair, fills the rest with random values whose
// magnitude grows by one bit per inner iteration, then verifies Source
// random access, a forward enum, and an advancing enum (docs past the last
// written value must read back as 0).
// NOTE(review): the 'type' parameter is not passed to the writer/reader --
// the overloads used here take a boolean + IOContext; presumably this
// revision derives the type from the values. Confirm against Ints.getWriter.
private void testInts(ValueType type, int maxBit) throws IOException {
  long[] maxMin = new long[] { Long.MIN_VALUE, Long.MAX_VALUE, 1, Long.MAX_VALUE, 0, Long.MAX_VALUE, -1, Long.MAX_VALUE, Long.MIN_VALUE, -1, random.nextInt(), random.nextInt() };
  for (int j = 0; j < maxMin.length; j += 2) {
    long maxV = 1;
    final int NUM_VALUES = 777 + random.nextInt(777);
    final long[] values = new long[NUM_VALUES];
    // FIX: honor the maxBit parameter instead of hard-coding 63 -- callers
    // pass 31/15/7 for the narrower fixed-int widths, and the sibling
    // implementation of this harness bounds the loop by maxBit.
    for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
      Directory dir = newDirectory();
      final AtomicLong trackBytes = new AtomicLong(0);
      Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
      values[0] = maxMin[j];
      w.add(0, values[0]);
      values[1] = maxMin[j + 1];
      w.add(1, values[1]);
      for (int i = 2; i < NUM_VALUES; i++) {
        final long v = random.nextLong() % (1 + maxV);
        values[i] = v;
        w.add(i, v);
      }
      final int additionalDocs = 1 + random.nextInt(9);
      w.finish(NUM_VALUES + additionalDocs);
      assertEquals(0, trackBytes.get());
      IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
      // Source random access -- run twice to cover the load/cache paths.
      for (int iter = 0; iter < 2; iter++) {
        Source s = getSource(r);
        for (int i = 0; i < NUM_VALUES; i++) {
          final long v = s.getInt(i);
          assertEquals("index " + i, values[i], v);
        }
      }
      // Sequential enum: written docs return their values, trailing docs 0.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
          assertEquals(i, iEnum.nextDoc());
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
        iEnum.close();
      }
      // Advancing enum with random strides.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
          assertEquals(i, iEnum.advance(i));
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
        iEnum.close();
      }
      r.close();
      dir.close();
    }
  }
}
// 4-byte (float) precision round-trip; a non-zero delta absorbs the
// double -> float -> double narrowing.
public void testFloats4() throws IOException {
  runTestFloats(4, 0.00001);
}
// Round-trips NUM_VALUES random floating-point values at the given byte
// precision (4 or 8) and verifies Source random access plus forward and
// advancing enums; docs past the last written value must read back as 0,
// and values beyond NUM_VALUES are never written (additionalValues only
// extends the doc count passed to finish()).
private void runTestFloats(int precision, double delta) throws IOException {
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
  // FIX: removed a stray empty statement (';') that followed this line.
  final int NUM_VALUES = 777 + random.nextInt(777);
  final double[] values = new double[NUM_VALUES];
  for (int i = 0; i < NUM_VALUES; i++) {
    // nextFloat() for 4-byte precision so the stored value round-trips exactly
    final double v = precision == 4 ? random.nextFloat() : random.nextDouble();
    values[i] = v;
    w.add(i, v);
  }
  final int additionalValues = 1 + random.nextInt(10);
  w.finish(NUM_VALUES + additionalValues);
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
  // Source random access -- run twice to cover the load/cache paths.
  for (int iter = 0; iter < 2; iter++) {
    Source s = getSource(r);
    for (int i = 0; i < NUM_VALUES; i++) {
      assertEquals(values[i], s.getFloat(i), 0.0f);
    }
  }
  // Sequential enum over written docs plus the zero-filled tail.
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
      assertEquals(i, fEnum.nextDoc());
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
    fEnum.close();
  }
  // Advancing enum with random strides.
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
      assertEquals(i, fEnum.advance(i));
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
    fEnum.close();
  }
  r.close();
  dir.close();
}
// 8-byte (double) precision round-trip; storage is lossless so the
// comparison delta is exactly 0.
public void testFloats8() throws IOException {
  runTestFloats(8, 0.0);
}
// Randomly picks one of the two enum entry points -- straight off the
// reader, or via a Source -- so both code paths get coverage across runs.
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.getEnum();
  }
  return getSource(values).getEnum();
}
// Randomly alternates between a fresh load() and the cached getSource()
// accessor so both paths get exercised across runs.
private Source getSource(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.load();
  }
  // getSource uses cache internally
  return values.getSource();
}
// Randomly alternates between a fresh loadSorted() and the cached sorted
// accessor so both paths get exercised across runs.
// NOTE(review): 'getSortedSorted' reads like a typo for 'getSorted' -- but
// it must match IndexDocValues' API at this revision; confirm before renaming.
private SortedSource getSortedSource(IndexDocValues values, Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values.getSortedSorted(comparator);
}
}
KeepBothMethods
package org.apache.lucene.index.values;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
// STRAIGHT byte storage, once with fixed-size and once with variable-size values.
public void testBytesStraight() throws IOException {
  runTestBytes(Bytes.Mode.STRAIGHT, true);
  runTestBytes(Bytes.Mode.STRAIGHT, false);
}
// DEREF (de-duplicated) byte storage, fixed- and variable-size variants.
public void testBytesDeref() throws IOException {
  runTestBytes(Bytes.Mode.DEREF, true);
  runTestBytes(Bytes.Mode.DEREF, false);
}
// SORTED byte storage (enables ord-based lookups), fixed- and variable-size.
public void testBytesSorted() throws IOException {
  runTestBytes(Bytes.Mode.SORTED, true);
  runTestBytes(Bytes.Mode.SORTED, false);
}
// Writes values only to even doc ids (0, 2, ..., 198; odd docs and docs
// 200-219 stay unwritten), then verifies enum advance, Source random
// access, and -- for SORTED mode -- ord-based and by-value lookups.
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize) throws IOException {
  final BytesRef bytesRef = new BytesRef();
  // SORTED mode needs a comparator for by-value lookups; other modes pass null.
  final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef.getUTF8SortedAsUnicodeComparator() : null;
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
  int maxDoc = 220;
  final String[] values = new String[maxDoc];
  final int fixedLength = 1 + atLeast(50);
  for (int i = 0; i < 100; i++) {
    final String s;
    if (i > 0 && random.nextInt(5) <= 2) {
      // use prior value
      s = values[2 * random.nextInt(i)];
    } else {
      s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39));
    }
    values[2 * i] = s;
    UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
    w.add(2 * i, bytesRef);
  }
  w.finish(maxDoc);
  // the writer must release all tracked bytes once finished
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
  // Enum: advancing to a written (even) doc lands on it and exposes its bytes.
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum bytesEnum = getEnum(r);
    assertNotNull("enum is null", bytesEnum);
    BytesRef ref = bytesEnum.bytes();
    for (int i = 0; i < 2; i++) {
      final int idx = 2 * i;
      assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
      String utf8String = ref.utf8ToString();
      assertEquals("doc: " + idx + " lenLeft: " + values[idx].length() + " lenRight: " + utf8String.length(), values[idx], utf8String);
    }
    // advancing to or past maxDoc exhausts the enum
    assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
    assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
    bytesEnum.close();
  }
  // Verify we can load source twice:
  for (int iter = 0; iter < 2; iter++) {
    Source s;
    IndexDocValues.SortedSource ss;
    if (mode == Bytes.Mode.SORTED) {
      s = ss = getSortedSource(r, comp);
    } else {
      s = getSource(r);
      ss = null;
    }
    for (int i = 0; i < 100; i++) {
      final int idx = 2 * i;
      assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx, bytesRef));
      assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef).utf8ToString());
      if (ss != null) {
        // ord lookup must round-trip: doc -> ord -> value, and value -> ord
        assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx), bytesRef).utf8ToString());
        int ord = ss.getByValue(new BytesRef(values[idx]));
        assertTrue(ord >= 0);
        assertEquals(ss.ord(idx), ord);
      }
    }
    // Lookup random strings:
    if (mode == Bytes.Mode.SORTED) {
      final int numValues = ss.getValueCount();
      for (int i = 0; i < 1000; i++) {
        BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39)));
        int ord = ss.getByValue(bytesValue);
        if (ord >= 0) {
          // found: the ord must map back to an equal value and to some written doc
          assertTrue(bytesValue.bytesEquals(ss.getByOrd(ord, bytesRef)));
          int count = 0;
          for (int k = 0; k < 100; k++) {
            if (bytesValue.utf8ToString().equals(values[2 * k])) {
              assertEquals(ss.ord(2 * k), ord);
              count++;
            }
          }
          assertTrue(count > 0);
        } else {
          // not found: (-ord)-1 is the insertion point, binary-search style
          assert ord < 0;
          int insertIndex = (-ord) - 1;
          if (insertIndex == 0) {
            final BytesRef firstRef = ss.getByOrd(1, bytesRef);
            // random string was before our first
            assertTrue(firstRef.compareTo(bytesValue) > 0);
          } else if (insertIndex == numValues) {
            final BytesRef lastRef = ss.getByOrd(numValues - 1, bytesRef);
            // random string was after our last
            assertTrue(lastRef.compareTo(bytesValue) < 0);
          } else {
            // clone 'before' because getByOrd reuses the shared bytesRef
            final BytesRef before = (BytesRef) ss.getByOrd(insertIndex - 1, bytesRef).clone();
            BytesRef after = ss.getByOrd(insertIndex, bytesRef);
            assertTrue(comp.compare(before, bytesValue) < 0);
            assertTrue(comp.compare(bytesValue, after) < 0);
          }
        }
      }
    }
  }
  r.close();
  dir.close();
}
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE }, { Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE }, { Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE }, { -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 } };
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS, ValueType.VAR_INTS };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
r.close();
dir.close();
}
}
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
public void testGetInt8Array() throws IOException {
byte[] sourceArray = new byte[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
byte[] loaded = ((byte[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt16Array() throws IOException {
short[] sourceArray = new short[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
short[] loaded = ((short[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt64Array() throws IOException {
long[] sourceArray = new long[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
long[] loaded = ((long[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt32Array() throws IOException {
int[] sourceArray = new int[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
int[] loaded = ((int[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetFloat32Array() throws IOException {
float[] sourceArray = new float[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
float[] loaded = ((float[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
}
r.close();
dir.close();
}
public void testGetFloat64Array() throws IOException {
double[] sourceArray = new double[] { 1, 2, 3 };
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
double[] loaded = ((double[]) source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
}
r.close();
dir.close();
}
// Round-trips NUM_VALUES random ints through a type-specific writer/reader,
// growing the random-value magnitude by one bit per outer iteration up to
// maxBit, then verifies Source random access, a forward enum, and an
// advancing enum (docs past the last written value read back as 0).
private void testInts(ValueType type, int maxBit) throws IOException {
  long maxV = 1;
  final int NUM_VALUES = 333 + random.nextInt(333);
  final long[] values = new long[NUM_VALUES];
  for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, type);
    for (int i = 0; i < NUM_VALUES; i++) {
      // bounded so the value fits the current bit budget
      final long v = random.nextLong() % (1 + maxV);
      values[i] = v;
      w.add(i, v);
    }
    final int additionalDocs = 1 + random.nextInt(9);
    w.finish(NUM_VALUES + additionalDocs);
    // the writer must release all tracked bytes once finished
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
    // Source random access -- run twice to cover the load/cache paths.
    for (int iter = 0; iter < 2; iter++) {
      Source s = getSource(r);
      assertEquals(type, s.type());
      for (int i = 0; i < NUM_VALUES; i++) {
        final long v = s.getInt(i);
        assertEquals("index " + i, values[i], v);
      }
    }
    // Sequential enum over written docs plus the zero-filled tail.
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum iEnum = getEnum(r);
      assertEquals(type, iEnum.type());
      LongsRef ints = iEnum.getInt();
      for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
        assertEquals(i, iEnum.nextDoc());
        if (i < NUM_VALUES) {
          assertEquals(values[i], ints.get());
        } else {
          assertEquals(0, ints.get());
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
      iEnum.close();
    }
    // Advancing enum with random strides.
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum iEnum = getEnum(r);
      assertEquals(type, iEnum.type());
      LongsRef ints = iEnum.getInt();
      for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
        assertEquals(i, iEnum.advance(i));
        if (i < NUM_VALUES) {
          assertEquals(values[i], ints.get());
        } else {
          assertEquals(0, ints.get());
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
      iEnum.close();
    }
    r.close();
    dir.close();
  }
}
// Like testInts(ValueType,int) but seeds docs 0/1 with extreme (min, max)
// pairs and uses the boolean/IOContext writer overload; iterates the full
// 63-bit range of random-value magnitudes.
public void testInts() throws IOException {
  long[] maxMin = new long[] { Long.MIN_VALUE, Long.MAX_VALUE, 1, Long.MAX_VALUE, 0, Long.MAX_VALUE, -1, Long.MAX_VALUE, Long.MIN_VALUE, -1, random.nextInt(), random.nextInt() };
  for (int j = 0; j < maxMin.length; j += 2) {
    long maxV = 1;
    final int NUM_VALUES = 777 + random.nextInt(777);
    final long[] values = new long[NUM_VALUES];
    for (int rx = 1; rx < 63; rx++, maxV *= 2) {
      Directory dir = newDirectory();
      final AtomicLong trackBytes = new AtomicLong(0);
      Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
      // docs 0 and 1 carry this pass's extreme pair; the rest are random
      values[0] = maxMin[j];
      w.add(0, values[0]);
      values[1] = maxMin[j + 1];
      w.add(1, values[1]);
      for (int i = 2; i < NUM_VALUES; i++) {
        final long v = random.nextLong() % (1 + maxV);
        values[i] = v;
        w.add(i, v);
      }
      final int additionalDocs = 1 + random.nextInt(9);
      w.finish(NUM_VALUES + additionalDocs);
      // the writer must release all tracked bytes once finished
      assertEquals(0, trackBytes.get());
      IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
      // Source random access -- run twice to cover the load/cache paths.
      for (int iter = 0; iter < 2; iter++) {
        Source s = getSource(r);
        for (int i = 0; i < NUM_VALUES; i++) {
          final long v = s.getInt(i);
          assertEquals("index " + i, values[i], v);
        }
      }
      // Sequential enum over written docs plus the zero-filled tail.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
          assertEquals(i, iEnum.nextDoc());
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
        iEnum.close();
      }
      // Advancing enum with random strides.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
          assertEquals(i, iEnum.advance(i));
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
        iEnum.close();
      }
      r.close();
      dir.close();
    }
  }
}
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
final int NUM_VALUES = 777 + random.nextInt(777);
;
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random.nextDouble();
values[i] = v;
w.add(i, v);
}
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
private SortedSource getSortedSource(IndexDocValues values, Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values.getSortedSorted(comparator);
}
}
package org.apache.lucene.index.values;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize) throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length() + " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx, bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef).utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx), bytesRef).utf8ToString());
int ord = ss.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize ? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
assert ord < 0;
int insertIndex = (-ord) - 1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues - 1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex - 1, bytesRef).clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
/**
 * Feeds boundary (min, max) pairs through the VAR_INTS writer and checks that
 * the reader reports the expected storage type and round-trips both values,
 * via random-access {@code Source} reads and via the doc-values enum.
 */
public void testVariableIntsLimits() throws IOException {
  final long[][] pairs = {
      { Long.MIN_VALUE, Long.MAX_VALUE },
      { Long.MIN_VALUE + 1, 1 },
      { -1, Long.MAX_VALUE },
      { Long.MIN_VALUE, -1 },
      { 1, Long.MAX_VALUE },
      { -1, Long.MAX_VALUE - 1 },
      { Long.MIN_VALUE + 2, 1 } };
  final ValueType[] wantType = {
      ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
      ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
      ValueType.VAR_INTS };
  for (int i = 0; i < pairs.length; i++) {
    final long lo = pairs[i][0];
    final long hi = pairs[i][1];
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
    w.add(0, lo);
    w.add(1, hi);
    w.finish(2);
    // finish() must have released all tracked temporary bytes.
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Ints.getValues(dir, "test", 2);
    Source source = getSource(r);
    assertEquals(i + " with min: " + lo + " max: " + hi, wantType[i], source.type());
    assertEquals(lo, source.getInt(0));
    assertEquals(hi, source.getInt(1));
    ValuesEnum iEnum = getEnum(r);
    assertEquals(i + " with min: " + lo + " max: " + hi, wantType[i], iEnum.type());
    assertEquals(0, iEnum.nextDoc());
    assertEquals(lo, iEnum.intsRef.get());
    assertEquals(1, iEnum.nextDoc());
    assertEquals(hi, iEnum.intsRef.get());
    assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
    r.close();
    dir.close();
  }
}
// Variable-width ints: run the shared harness over the full 63-bit value range.
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
// Fixed-width ints: each width is exercised up to one bit below its sign bit.
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
/**
 * Writes three FIXED_INTS_8 values and verifies the Source exposes them as a
 * primitive byte[] via {@code getArray()}.
 */
public void testGetInt8Array() throws IOException {
  byte[] sourceArray = new byte[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  byte[] loaded = ((byte[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes three FIXED_INTS_16 values and verifies the Source exposes them as a
 * primitive short[] via {@code getArray()}.
 */
public void testGetInt16Array() throws IOException {
  short[] sourceArray = new short[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  short[] loaded = ((short[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes three FIXED_INTS_64 values and verifies the Source exposes them as a
 * primitive long[] via {@code getArray()}.
 */
public void testGetInt64Array() throws IOException {
  long[] sourceArray = new long[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  long[] loaded = ((long[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes three FIXED_INTS_32 values and verifies the Source exposes them as a
 * primitive int[] via {@code getArray()}.
 */
public void testGetInt32Array() throws IOException {
  int[] sourceArray = new int[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, (long) sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  int[] loaded = ((int[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
  }
  r.close();
  dir.close();
}
/**
 * Writes three 4-byte float values and verifies the Source exposes them as a
 * primitive float[] via {@code getArray()}.
 */
public void testGetFloat32Array() throws IOException {
  float[] sourceArray = new float[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  float[] loaded = ((float[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
  }
  r.close();
  dir.close();
}
/**
 * Writes three 8-byte double values and verifies the Source exposes them as a
 * primitive double[] via {@code getArray()}.
 */
public void testGetFloat64Array() throws IOException {
  double[] sourceArray = new double[] { 1, 2, 3 };
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
  for (int i = 0; i < sourceArray.length; i++) {
    w.add(i, sourceArray[i]);
  }
  w.finish(sourceArray.length);
  IndexDocValues r = Floats.getValues(dir, "test", 3);
  Source source = r.getSource();
  assertTrue(source.hasArray());
  double[] loaded = ((double[]) source.getArray());
  // assertEquals takes (expected, actual): the written array is the expectation.
  assertEquals(sourceArray.length, loaded.length);
  for (int i = 0; i < loaded.length; i++) {
    assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
  }
  r.close();
  dir.close();
}
/**
 * Shared harness: round-trips random long values for the given {@link ValueType},
 * doubling the permitted value range one bit at a time up to maxBit, and checks
 * three read paths (Source random access, sequential enum, advancing enum).
 * Docs declared beyond NUM_VALUES are left unset and must read back as 0.
 *
 * @param type   the integer storage type under test
 * @param maxBit exclusive upper bound on the bit widths exercised
 */
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
// One writer/reader cycle per bit width; maxV doubles each pass.
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
// Magnitude bounded by the current bit width under test.
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// Declare extra docs past the last written value; they must default to 0.
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
// finish() must have released all tracked temporary bytes.
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
// Load twice to verify Source loading is repeatable.
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
// Sequential enumeration over every doc, including the unset tail.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
// Random-stride advance() over the same doc range.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
/**
 * Exercises the default packed-int writer with adversarial first/second doc
 * values drawn from flat (min, max) pairs — extremes plus two random ints —
 * followed by random values of increasing bit width. Verifies Source reads,
 * sequential enum reads and advancing enum reads; unset trailing docs must
 * read back as 0.
 */
public void testInts() throws IOException {
long[] maxMin = new long[] { Long.MIN_VALUE, Long.MAX_VALUE, 1, Long.MAX_VALUE, 0, Long.MAX_VALUE, -1, Long.MAX_VALUE, Long.MIN_VALUE, -1, random.nextInt(), random.nextInt() };
// Pairs are laid out flat: (maxMin[j], maxMin[j+1]) per outer iteration.
for (int j = 0; j < maxMin.length; j += 2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
// Seed docs 0 and 1 with the boundary pair under test.
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j + 1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// Declare extra docs past the last written value; they must default to 0.
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
// finish() must have released all tracked temporary bytes.
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
// Load twice to verify Source loading is repeatable.
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
// Sequential enumeration over every doc, including the unset tail.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
// Random-stride advance() over the same doc range.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
// 4-byte (float) precision: storage loses bits, so comparisons use an epsilon.
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
/**
 * Writes NUM_VALUES random floating-point values at the given precision
 * (4 = float, 8 = double) plus a few trailing unset docs, then checks the
 * Source, sequential-enum and advancing-enum read paths. Unset docs must
 * read back as 0.0.
 *
 * @param precision bytes per stored value: 4 or 8
 * @param delta comparison tolerance (float storage loses precision; double is exact)
 */
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
// (stray empty statement removed here)
final int NUM_VALUES = 777 + random.nextInt(777);
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random.nextDouble();
values[i] = v;
w.add(i, v);
}
// Declare extra docs past the last written value; they must default to 0.0.
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
// finish() must have released all tracked temporary bytes.
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
// Load twice to verify Source loading is repeatable.
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
// Sequential enumeration over every doc, including the unset tail.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
// Random-stride advance() over the same doc range.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
// 8-byte (double) precision: values must round-trip exactly, so delta is 0.
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
/** Randomly obtains a ValuesEnum either directly or through a Source. */
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.getEnum();
  }
  return getSource(values).getEnum();
}
/** Randomly loads a fresh Source or fetches the internally cached one. */
private Source getSource(IndexDocValues values) throws IOException {
  if (random.nextBoolean()) {
    return values.load();
  }
  return values.getSource();
}
/**
 * Randomly loads a fresh SortedSource or fetches the internally cached one.
 * NOTE(review): {@code getSortedSorted} looks like it may be a typo for a
 * {@code getSorted}-style accessor — confirm against the IndexDocValues API.
 */
private SortedSource getSortedSource(IndexDocValues values, Comparator<BytesRef> comparator) throws IOException {
  if (random.nextBoolean()) {
    return values.loadSorted(comparator);
  }
  return values.getSortedSorted(comparator);
}
}
Safe
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
// STRAIGHT mode stores each value inline; exercised with fixed and variable sizes.
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
// DEREF mode de-duplicates values behind references; fixed and variable sizes.
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
// SORTED mode additionally maintains ord/value ordering; fixed and variable sizes.
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
{ Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
{ Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
{ -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
ValueType.VAR_INTS, };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
r.close();
dir.close();
}
}
// Variable-width ints: run the shared harness over the full 63-bit value range.
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
// Fixed-width ints: each width is exercised up to one bit below its sign bit.
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
public void testGetInt8Array() throws IOException {
byte[] sourceArray = new byte[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
byte[] loaded = ((byte[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt16Array() throws IOException {
short[] sourceArray = new short[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
short[] loaded = ((short[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt64Array() throws IOException {
long[] sourceArray = new long[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
long[] loaded = ((long[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetInt32Array() throws IOException {
int[] sourceArray = new int[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
int[] loaded = ((int[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
public void testGetFloat32Array() throws IOException {
float[] sourceArray = new float[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
float[] loaded = ((float[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
}
r.close();
dir.close();
}
public void testGetFloat64Array() throws IOException {
double[] sourceArray = new double[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
double[] loaded = ((double[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
}
r.close();
dir.close();
}
<<<<<<< MINE
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
=======
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
>>>>>>> YOURS
// 4-byte (float) precision: storage loses bits, so comparisons use an epsilon.
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
final int NUM_VALUES = 777 + random.nextInt(777);;
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random
.nextDouble();
values[i] = v;
w.add(i, v);
}
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
// 8-byte (double) precision: values must round-trip exactly, so delta is 0.
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
// Randomly obtains a ValuesEnum either directly or through a Source.
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
// Randomly loads a fresh Source or fetches the internally cached one.
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
// Randomly loads a fresh SortedSource or fetches the internally cached one.
// NOTE(review): getSortedSorted looks like it may be a typo for a
// getSorted-style accessor — confirm against the IndexDocValues API.
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
// Round-trips byte values stored in STRAIGHT mode, for both fixed- and
// variable-length values.
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
// Round-trips byte values stored in DEREF mode, for both fixed- and
// variable-length values.
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
// Round-trips byte values stored in SORTED mode, for both fixed- and
// variable-length values; runTestBytes additionally exercises ord lookups.
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
/**
 * Writes random unicode strings to even doc ids (odd ids stay unset), then
 * verifies them through both the ValuesEnum and the Source views. For SORTED
 * mode it additionally checks ord-based lookup and the negative-insertion-point
 * contract of getByValue for strings that were not stored.
 */
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
// SORTED mode requires a comparator; the other modes pass null.
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
// Only 100 docs are written (ids 0,2,...,198); maxDoc is deliberately larger.
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// Writer must release all tracked bytes once finished.
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
// Two iterations verify the enum can be (re)obtained and re-consumed.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
// NOTE(review): only the first two stored docs (ids 0 and 2) are spot-checked
// here; the Source loop below covers all 100.
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
// Check every stored doc through the Source, and for SORTED also via ords.
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
// Found: the ord must map back to an equal value and to every doc
// that stored this exact string.
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// Not found: ord encodes the insertion point as -(insertIndex)-1,
// so neighbors must bracket the probe value in sort order.
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
// clone() because getByOrd reuses the shared bytesRef buffer.
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
/**
 * Writes extreme min/max long pairs with the VAR_INTS writer and asserts the
 * reader reports the expected concrete storage type (FIXED_INTS_64 when the
 * range cannot be packed, VAR_INTS otherwise) and returns the exact values.
 */
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
{ Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
{ Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
{ -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
// expectedTypes[i] is the storage type expected for minMax[i].
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
ValueType.VAR_INTS, };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
r.close();
dir.close();
}
}
// Packed VAR_INTS can hold up to 63 significant bits.
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
// Each fixed width gets values up to one bit less than its signed capacity.
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
}
// Writes three FIXED_INTS_8 values and verifies the reader's Source exposes
// them as a raw byte[] via getArray().
public void testGetInt8Array() throws IOException {
  final byte[] expected = new byte[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Ints.getWriter(directory, "test", bytesUsed, ValueType.FIXED_INTS_8);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, (long) expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Ints.getValues(directory, "test", expected.length);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  byte[] actual = (byte[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc]);
  }
  reader.close();
  directory.close();
}
// Writes three FIXED_INTS_16 values and verifies the reader's Source exposes
// them as a raw short[] via getArray().
public void testGetInt16Array() throws IOException {
  final short[] expected = new short[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Ints.getWriter(directory, "test", bytesUsed, ValueType.FIXED_INTS_16);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, (long) expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Ints.getValues(directory, "test", expected.length);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  short[] actual = (short[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc]);
  }
  reader.close();
  directory.close();
}
// Writes three FIXED_INTS_64 values and verifies the reader's Source exposes
// them as a raw long[] via getArray().
public void testGetInt64Array() throws IOException {
  final long[] expected = new long[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Ints.getWriter(directory, "test", bytesUsed, ValueType.FIXED_INTS_64);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Ints.getValues(directory, "test", expected.length);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  long[] actual = (long[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc]);
  }
  reader.close();
  directory.close();
}
// Writes three FIXED_INTS_32 values and verifies the reader's Source exposes
// them as a raw int[] via getArray().
public void testGetInt32Array() throws IOException {
  final int[] expected = new int[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Ints.getWriter(directory, "test", bytesUsed, ValueType.FIXED_INTS_32);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, (long) expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Ints.getValues(directory, "test", expected.length);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  int[] actual = (int[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc]);
  }
  reader.close();
  directory.close();
}
// Writes three 4-byte float values and verifies the reader's Source exposes
// them as a raw float[] via getArray().
public void testGetFloat32Array() throws IOException {
  final float[] expected = new float[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Floats.getWriter(directory, "test", 4, bytesUsed);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Floats.getValues(directory, "test", 3);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  float[] actual = (float[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc], 0.0f);
  }
  reader.close();
  directory.close();
}
// Writes three 8-byte float values and verifies the reader's Source exposes
// them as a raw double[] via getArray().
public void testGetFloat64Array() throws IOException {
  final double[] expected = new double[] {1, 2, 3};
  Directory directory = newDirectory();
  final AtomicLong bytesUsed = new AtomicLong(0);
  Writer writer = Floats.getWriter(directory, "test", 8, bytesUsed);
  for (int doc = 0; doc < expected.length; doc++) {
    writer.add(doc, expected[doc]);
  }
  writer.finish(expected.length);
  IndexDocValues reader = Floats.getValues(directory, "test", 3);
  Source source = reader.getSource();
  assertTrue(source.hasArray());
  double[] actual = (double[]) source.getArray();
  assertEquals(actual.length, expected.length);
  for (int doc = 0; doc < actual.length; doc++) {
    assertEquals("value didn't match at index " + doc, expected[doc], actual[doc], 0.0d);
  }
  reader.close();
  directory.close();
}
<<<<<<< MINE
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
}
=======
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
>>>>>>> YOURS
// 4-byte precision loses bits versus the stored double, hence the tolerance.
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
/**
 * Round-trips random floating point values at the given precision (4 = float,
 * 8 = double) and verifies them through the Source, nextDoc and advance views;
 * docs padded by finish() beyond the written range must read back as 0.0.
 *
 * Fix: removed a stray empty statement (doubled semicolon) after the
 * NUM_VALUES initializer.
 */
private void runTestFloats(int precision, double delta) throws IOException {
  Directory dir = newDirectory();
  final AtomicLong trackBytes = new AtomicLong(0);
  Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
  final int NUM_VALUES = 777 + random.nextInt(777);
  final double[] values = new double[NUM_VALUES];
  for (int i = 0; i < NUM_VALUES; i++) {
    // nextFloat() widens exactly to double, so precision-4 values survive
    // the float round-trip and can be compared below.
    final double v = precision == 4 ? random.nextFloat() : random
        .nextDouble();
    values[i] = v;
    w.add(i, v);
  }
  // finish() with extra docs exercises the padded / unset-doc path.
  final int additionalValues = 1 + random.nextInt(10);
  w.finish(NUM_VALUES + additionalValues);
  assertEquals(0, trackBytes.get());
  IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
  for (int iter = 0; iter < 2; iter++) {
    Source s = getSource(r);
    for (int i = 0; i < NUM_VALUES; i++) {
      assertEquals(values[i], s.getFloat(i), 0.0f);
    }
  }
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
      assertEquals(i, fEnum.nextDoc());
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
    fEnum.close();
  }
  for (int iter = 0; iter < 2; iter++) {
    ValuesEnum fEnum = getEnum(r);
    FloatsRef floats = fEnum.getFloat();
    // Random strides exercise advance() rather than nextDoc().
    for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
      assertEquals(i, fEnum.advance(i));
      if (i < NUM_VALUES) {
        assertEquals(values[i], floats.get(), delta);
      } else {
        assertEquals(0.0d, floats.get(), delta);
      }
    }
    assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
    fEnum.close();
  }
  r.close();
  dir.close();
}
// 8-byte precision stores doubles losslessly, so an exact (0.0) delta is used.
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
// Randomly obtains the enum either directly from the reader or via a Source,
// so both code paths get coverage across test runs.
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
// Randomly loads a fresh Source or takes the reader's (possibly cached) one.
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
// Randomly loads a fresh SortedSource or takes the reader's cached one.
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}
Unstructured
package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
// Round-trips byte values stored in STRAIGHT mode, for both fixed- and
// variable-length values.
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
// Round-trips byte values stored in DEREF mode, for both fixed- and
// variable-length values.
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
// Round-trips byte values stored in SORTED mode, for both fixed- and
// variable-length values; runTestBytes additionally exercises ord lookups.
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
/**
 * Writes random unicode strings to even doc ids (odd ids stay unset), then
 * verifies them through both the ValuesEnum and the Source views. For SORTED
 * mode it additionally checks ord-based lookup and the negative-insertion-point
 * contract of getByValue for strings that were not stored.
 */
public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
throws IOException {
final BytesRef bytesRef = new BytesRef();
// SORTED mode requires a comparator; the other modes pass null.
final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
.getUTF8SortedAsUnicodeComparator() : null;
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
int maxDoc = 220;
final String[] values = new String[maxDoc];
final int fixedLength = 1 + atLeast(50);
// Only 100 docs are written (ids 0,2,...,198); maxDoc is deliberately larger.
for (int i = 0; i < 100; i++) {
final String s;
if (i > 0 && random.nextInt(5) <= 2) {
// use prior value
s = values[2 * random.nextInt(i)];
} else {
s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
}
values[2 * i] = s;
UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
w.add(2 * i, bytesRef);
}
w.finish(maxDoc);
// Writer must release all tracked bytes once finished.
assertEquals(0, trackBytes.get());
IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
// Two iterations verify the enum can be (re)obtained and re-consumed.
for (int iter = 0; iter < 2; iter++) {
ValuesEnum bytesEnum = getEnum(r);
assertNotNull("enum is null", bytesEnum);
BytesRef ref = bytesEnum.bytes();
// NOTE(review): only the first two stored docs (ids 0 and 2) are spot-checked
// here; the Source loop below covers all 100.
for (int i = 0; i < 2; i++) {
final int idx = 2 * i;
assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
String utf8String = ref.utf8ToString();
assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
+ " lenRight: " + utf8String.length(), values[idx], utf8String);
}
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
bytesEnum.close();
}
// Verify we can load source twice:
for (int iter = 0; iter < 2; iter++) {
Source s;
IndexDocValues.SortedSource ss;
if (mode == Bytes.Mode.SORTED) {
s = ss = getSortedSource(r, comp);
} else {
s = getSource(r);
ss = null;
}
// Check every stored doc through the Source, and for SORTED also via ords.
for (int i = 0; i < 100; i++) {
final int idx = 2 * i;
assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
bytesRef));
assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
.utf8ToString());
if (ss != null) {
assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
bytesRef).utf8ToString());
int ord = ss
.getByValue(new BytesRef(values[idx]));
assertTrue(ord >= 0);
assertEquals(ss.ord(idx), ord);
}
}
// Lookup random strings:
if (mode == Bytes.Mode.SORTED) {
final int numValues = ss.getValueCount();
for (int i = 0; i < 1000; i++) {
BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
int ord = ss.getByValue(bytesValue);
if (ord >= 0) {
// Found: the ord must map back to an equal value and to every doc
// that stored this exact string.
assertTrue(bytesValue
.bytesEquals(ss.getByOrd(ord, bytesRef)));
int count = 0;
for (int k = 0; k < 100; k++) {
if (bytesValue.utf8ToString().equals(values[2 * k])) {
assertEquals(ss.ord(2 * k), ord);
count++;
}
}
assertTrue(count > 0);
} else {
// Not found: ord encodes the insertion point as -(insertIndex)-1,
// so neighbors must bracket the probe value in sort order.
assert ord < 0;
int insertIndex = (-ord)-1;
if (insertIndex == 0) {
final BytesRef firstRef = ss.getByOrd(1, bytesRef);
// random string was before our first
assertTrue(firstRef.compareTo(bytesValue) > 0);
} else if (insertIndex == numValues) {
final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
// random string was after our last
assertTrue(lastRef.compareTo(bytesValue) < 0);
} else {
// clone() because getByOrd reuses the shared bytesRef buffer.
final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
.clone();
BytesRef after = ss.getByOrd(insertIndex, bytesRef);
assertTrue(comp.compare(before, bytesValue) < 0);
assertTrue(comp.compare(bytesValue, after) < 0);
}
}
}
}
}
r.close();
dir.close();
}
<<<<<<< MINE
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
{ Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
{ Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
{ -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
ValueType.VAR_INTS, };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
=======
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
>>>>>>> YOURS
<<<<<<< MINE
r.close();
dir.close();
}
}
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
=======
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
>>>>>>> YOURS
}
// Verifies FIXED_INTS_8 values are exposed as a raw byte[] via Source.getArray().
public void testGetInt8Array() throws IOException {
byte[] sourceArray = new byte[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
byte[] loaded = ((byte[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
// Verifies FIXED_INTS_16 values are exposed as a raw short[] via Source.getArray().
public void testGetInt16Array() throws IOException {
short[] sourceArray = new short[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
short[] loaded = ((short[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
// Verifies FIXED_INTS_64 values are exposed as a raw long[] via Source.getArray().
public void testGetInt64Array() throws IOException {
long[] sourceArray = new long[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
long[] loaded = ((long[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
// Verifies FIXED_INTS_32 values are exposed as a raw int[] via Source.getArray().
public void testGetInt32Array() throws IOException {
int[] sourceArray = new int[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
Source source = r.getSource();
assertTrue(source.hasArray());
int[] loaded = ((int[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
}
r.close();
dir.close();
}
// Verifies 4-byte float values are exposed as a raw float[] via Source.getArray().
public void testGetFloat32Array() throws IOException {
float[] sourceArray = new float[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
float[] loaded = ((float[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
}
r.close();
dir.close();
}
// Verifies 8-byte float values are exposed as a raw double[] via Source.getArray().
public void testGetFloat64Array() throws IOException {
double[] sourceArray = new double[] {1,2,3};
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
for (int i = 0; i < sourceArray.length; i++) {
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
IndexDocValues r = Floats.getValues(dir, "test", 3);
Source source = r.getSource();
assertTrue(source.hasArray());
double[] loaded = ((double[])source.getArray());
assertEquals(loaded.length, sourceArray.length);
for (int i = 0; i < loaded.length; i++) {
assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
}
r.close();
dir.close();
}
/**
 * Round-trips random longs of increasing bit width (1..maxBit-1) through an
 * int writer of the given type, then verifies the values through the Source,
 * ValuesEnum.nextDoc and ValuesEnum.advance views; docs padded by finish()
 * beyond the written range must read back as 0.
 */
private void testInts(ValueType type, int maxBit) throws IOException {
long maxV = 1;
final int NUM_VALUES = 333 + random.nextInt(333);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, type);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
// finish() with extra docs exercises the padded / unset-doc path.
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
// Two iterations verify sources/enums can be re-obtained after use.
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
assertEquals(i, iEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
iEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum iEnum = getEnum(r);
assertEquals(type, iEnum.type());
LongsRef ints = iEnum.getInt();
// Random strides exercise advance() rather than nextDoc().
for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
assertEquals(i, iEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], ints.get());
} else {
assertEquals(0, ints.get());
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
iEnum.close();
}
r.close();
dir.close();
}
}
// 4-byte precision loses bits versus the stored double, hence the tolerance.
public void testFloats4() throws IOException {
runTestFloats(4, 0.00001);
}
private void runTestFloats(int precision, double delta) throws IOException {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
final int NUM_VALUES = 777 + random.nextInt(777);;
final double[] values = new double[NUM_VALUES];
for (int i = 0; i < NUM_VALUES; i++) {
final double v = precision == 4 ? random.nextFloat() : random
.nextDouble();
values[i] = v;
w.add(i, v);
}
final int additionalValues = 1 + random.nextInt(10);
w.finish(NUM_VALUES + additionalValues);
assertEquals(0, trackBytes.get());
IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
assertEquals(values[i], s.getFloat(i), 0.0f);
}
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
assertEquals(i, fEnum.nextDoc());
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
fEnum.close();
}
for (int iter = 0; iter < 2; iter++) {
ValuesEnum fEnum = getEnum(r);
FloatsRef floats = fEnum.getFloat();
for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
assertEquals(i, fEnum.advance(i));
if (i < NUM_VALUES) {
assertEquals(values[i], floats.get(), delta);
} else {
assertEquals(0.0d, floats.get(), delta);
}
}
assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
fEnum.close();
}
r.close();
dir.close();
}
public void testFloats8() throws IOException {
runTestFloats(8, 0.0);
}
private ValuesEnum getEnum(IndexDocValues values) throws IOException {
return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
}
private Source getSource(IndexDocValues values) throws IOException {
// getSource uses cache internally
return random.nextBoolean() ? values.load() : values.getSource();
}
private SortedSource getSortedSource(IndexDocValues values,
Comparator<BytesRef> comparator) throws IOException {
// getSortedSource uses cache internally
return random.nextBoolean() ? values.loadSorted(comparator) : values
.getSortedSorted(comparator);
}
}package org.apache.lucene.index.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocValues extends LuceneTestCase {
// TODO -- for sorted test, do our own Sort of the
// values and verify it's identical
public void testBytesStraight() throws IOException {
runTestBytes(Bytes.Mode.STRAIGHT, true);
runTestBytes(Bytes.Mode.STRAIGHT, false);
}
public void testBytesDeref() throws IOException {
runTestBytes(Bytes.Mode.DEREF, true);
runTestBytes(Bytes.Mode.DEREF, false);
}
public void testBytesSorted() throws IOException {
runTestBytes(Bytes.Mode.SORTED, true);
runTestBytes(Bytes.Mode.SORTED, false);
}
  /**
   * Writes ~100 random UTF-8 strings at even doc ids through a {@code Bytes}
   * writer in the given mode, then verifies them via enum access and via
   * (sorted) Source lookups, including ordinal and insertion-point checks for
   * SORTED mode.
   *
   * @param mode      bytes storage mode (STRAIGHT/DEREF/SORTED)
   * @param fixedSize whether all values share one byte length
   */
  public void runTestBytes(final Bytes.Mode mode, final boolean fixedSize)
      throws IOException {
    final BytesRef bytesRef = new BytesRef();
    // SORTED mode needs a comparator; other modes pass null.
    final Comparator<BytesRef> comp = mode == Bytes.Mode.SORTED ? BytesRef
        .getUTF8SortedAsUnicodeComparator() : null;
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Bytes.getWriter(dir, "test", mode, comp, fixedSize, trackBytes, newIOContext(random));
    int maxDoc = 220;
    // Only even slots 0..198 are populated; odd docs stay valueless.
    final String[] values = new String[maxDoc];
    final int fixedLength = 1 + atLeast(50);
    for (int i = 0; i < 100; i++) {
      final String s;
      if (i > 0 && random.nextInt(5) <= 2) {
        // use prior value
        s = values[2 * random.nextInt(i)];
      } else {
        s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
      }
      values[2 * i] = s;
      UnicodeUtil.UTF16toUTF8(s, 0, s.length(), bytesRef);
      w.add(2 * i, bytesRef);
    }
    w.finish(maxDoc);
    // Writer must release all tracked bytes once finished.
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum bytesEnum = getEnum(r);
      assertNotNull("enum is null", bytesEnum);
      BytesRef ref = bytesEnum.bytes();
      // NOTE(review): only the first two populated docs are checked via the
      // enum here; the Source loop below covers all 100 — confirm the "< 2"
      // bound is intentional.
      for (int i = 0; i < 2; i++) {
        final int idx = 2 * i;
        assertEquals("doc: " + idx, idx, bytesEnum.advance(idx));
        String utf8String = ref.utf8ToString();
        assertEquals("doc: " + idx + " lenLeft: " + values[idx].length()
            + " lenRight: " + utf8String.length(), values[idx], utf8String);
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc));
      assertEquals(ValuesEnum.NO_MORE_DOCS, bytesEnum.advance(maxDoc + 1));
      bytesEnum.close();
    }
    // Verify we can load source twice:
    for (int iter = 0; iter < 2; iter++) {
      Source s;
      IndexDocValues.SortedSource ss;
      if (mode == Bytes.Mode.SORTED) {
        s = ss = getSortedSource(r, comp);
      } else {
        s = getSource(r);
        ss = null;
      }
      for (int i = 0; i < 100; i++) {
        final int idx = 2 * i;
        assertNotNull("doc " + idx + "; value=" + values[idx], s.getBytes(idx,
            bytesRef));
        assertEquals("doc " + idx, values[idx], s.getBytes(idx, bytesRef)
            .utf8ToString());
        if (ss != null) {
          // Ordinal round-trip: ord(doc) -> value, and value -> same ord.
          assertEquals("doc " + idx, values[idx], ss.getByOrd(ss.ord(idx),
              bytesRef).utf8ToString());
          int ord = ss
              .getByValue(new BytesRef(values[idx]));
          assertTrue(ord >= 0);
          assertEquals(ss.ord(idx), ord);
        }
      }
      // Lookup random strings:
      if (mode == Bytes.Mode.SORTED) {
        final int numValues = ss.getValueCount();
        for (int i = 0; i < 1000; i++) {
          BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
          int ord = ss.getByValue(bytesValue);
          if (ord >= 0) {
            // Present: ord must map back to an equal value, and at least one
            // written doc must carry that value with the same ord.
            assertTrue(bytesValue
                .bytesEquals(ss.getByOrd(ord, bytesRef)));
            int count = 0;
            for (int k = 0; k < 100; k++) {
              if (bytesValue.utf8ToString().equals(values[2 * k])) {
                assertEquals(ss.ord(2 * k), ord);
                count++;
              }
            }
            assertTrue(count > 0);
          } else {
            // Absent: negative ord encodes the insertion point, checked
            // against the neighboring stored values.
            assert ord < 0;
            int insertIndex = (-ord)-1;
            if (insertIndex == 0) {
              final BytesRef firstRef = ss.getByOrd(1, bytesRef);
              // random string was before our first
              assertTrue(firstRef.compareTo(bytesValue) > 0);
            } else if (insertIndex == numValues) {
              final BytesRef lastRef = ss.getByOrd(numValues-1, bytesRef);
              // random string was after our last
              assertTrue(lastRef.compareTo(bytesValue) < 0);
            } else {
              // Clone "before" since getByOrd reuses the shared bytesRef.
              final BytesRef before = (BytesRef) ss.getByOrd(insertIndex-1, bytesRef)
                  .clone();
              BytesRef after = ss.getByOrd(insertIndex, bytesRef);
              assertTrue(comp.compare(before, bytesValue) < 0);
              assertTrue(comp.compare(bytesValue, after) < 0);
            }
          }
        }
      }
    }
    r.close();
    dir.close();
  }
<<<<<<< MINE
public void testVariableIntsLimits() throws IOException {
long[][] minMax = new long[][] { { Long.MIN_VALUE, Long.MAX_VALUE },
{ Long.MIN_VALUE + 1, 1 }, { -1, Long.MAX_VALUE },
{ Long.MIN_VALUE, -1 }, { 1, Long.MAX_VALUE },
{ -1, Long.MAX_VALUE - 1 }, { Long.MIN_VALUE + 2, 1 }, };
ValueType[] expectedTypes = new ValueType[] { ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.FIXED_INTS_64,
ValueType.FIXED_INTS_64, ValueType.VAR_INTS, ValueType.VAR_INTS,
ValueType.VAR_INTS, };
for (int i = 0; i < minMax.length; i++) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.VAR_INTS);
w.add(0, minMax[i][0]);
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
IndexDocValues r = Ints.getValues(dir, "test", 2);
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
assertEquals(minMax[i][0], source.getInt(0));
assertEquals(minMax[i][1], source.getInt(1));
ValuesEnum iEnum = getEnum(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], iEnum.type());
assertEquals(0, iEnum.nextDoc());
assertEquals(minMax[i][0], iEnum.intsRef.get());
assertEquals(1, iEnum.nextDoc());
assertEquals(minMax[i][1], iEnum.intsRef.get());
assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
=======
public void testInts() throws IOException {
long[] maxMin = new long[] {
Long.MIN_VALUE, Long.MAX_VALUE,
1, Long.MAX_VALUE,
0, Long.MAX_VALUE,
-1, Long.MAX_VALUE,
Long.MIN_VALUE, -1,
random.nextInt(), random.nextInt() };
for (int j = 0; j < maxMin.length; j+=2) {
long maxV = 1;
final int NUM_VALUES = 777 + random.nextInt(777);
final long[] values = new long[NUM_VALUES];
for (int rx = 1; rx < 63; rx++, maxV *= 2) {
Directory dir = newDirectory();
final AtomicLong trackBytes = new AtomicLong(0);
Writer w = Ints.getWriter(dir, "test", false, trackBytes, newIOContext(random));
values[0] = maxMin[j];
w.add(0, values[0]);
values[1] = maxMin[j+1];
w.add(1, values[1]);
for (int i = 2; i < NUM_VALUES; i++) {
final long v = random.nextLong() % (1 + maxV);
values[i] = v;
w.add(i, v);
}
final int additionalDocs = 1 + random.nextInt(9);
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
>>>>>>> YOURS
<<<<<<< MINE
r.close();
dir.close();
}
}
public void testVInts() throws IOException {
testInts(ValueType.VAR_INTS, 63);
}
public void testFixedInts() throws IOException {
testInts(ValueType.FIXED_INTS_64, 63);
testInts(ValueType.FIXED_INTS_32, 31);
testInts(ValueType.FIXED_INTS_16, 15);
testInts(ValueType.FIXED_INTS_8, 7);
=======
IndexDocValues r = Ints.getValues(dir, "test", false, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
for (int i = 0; i < NUM_VALUES; i++) {
final long v = s.getInt(i);
assertEquals("index " + i, values[i], v);
}
}
>>>>>>> YOURS
}
  /** FIXED_INTS_8 Source must expose its backing byte[] via getArray(). */
  public void testGetInt8Array() throws IOException {
    byte[] sourceArray = new byte[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_8);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    byte[] loaded = ((byte[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }
  /** FIXED_INTS_16 Source must expose its backing short[] via getArray(). */
  public void testGetInt16Array() throws IOException {
    short[] sourceArray = new short[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_16);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    short[] loaded = ((short[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }
  /** FIXED_INTS_64 Source must expose its backing long[] via getArray(). */
  public void testGetInt64Array() throws IOException {
    long[] sourceArray = new long[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_64);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    long[] loaded = ((long[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }
  /** FIXED_INTS_32 Source must expose its backing int[] via getArray(). */
  public void testGetInt32Array() throws IOException {
    int[] sourceArray = new int[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Ints.getWriter(dir, "test", trackBytes, ValueType.FIXED_INTS_32);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, (long) sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    int[] loaded = ((int[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i]);
    }
    r.close();
    dir.close();
  }
  /** 4-byte Floats Source must expose its backing float[] via getArray(). */
  public void testGetFloat32Array() throws IOException {
    float[] sourceArray = new float[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", 4, trackBytes);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Floats.getValues(dir, "test", 3);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    float[] loaded = ((float[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0f);
    }
    r.close();
    dir.close();
  }
  /** 8-byte Floats Source must expose its backing double[] via getArray(). */
  public void testGetFloat64Array() throws IOException {
    double[] sourceArray = new double[] {1,2,3};
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", 8, trackBytes);
    for (int i = 0; i < sourceArray.length; i++) {
      w.add(i, sourceArray[i]);
    }
    w.finish(sourceArray.length);
    IndexDocValues r = Floats.getValues(dir, "test", 3);
    Source source = r.getSource();
    assertTrue(source.hasArray());
    double[] loaded = ((double[])source.getArray());
    assertEquals(loaded.length, sourceArray.length);
    for (int i = 0; i < loaded.length; i++) {
      assertEquals("value didn't match at index " + i, sourceArray[i], loaded[i], 0.0d);
    }
    r.close();
    dir.close();
  }
  /**
   * Round-trips random longs for the given ValueType across bit widths up to
   * {@code maxBit}, verifying via Source access, sequential enum, and
   * advancing enum. (Duplicate copy of the method defined earlier in this
   * concatenated dump.)
   */
  private void testInts(ValueType type, int maxBit) throws IOException {
    long maxV = 1;
    final int NUM_VALUES = 333 + random.nextInt(333);
    final long[] values = new long[NUM_VALUES];
    // One writer/reader cycle per bit width; maxV doubles each round.
    for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
      Directory dir = newDirectory();
      final AtomicLong trackBytes = new AtomicLong(0);
      Writer w = Ints.getWriter(dir, "test", trackBytes, type);
      for (int i = 0; i < NUM_VALUES; i++) {
        final long v = random.nextLong() % (1 + maxV);
        values[i] = v;
        w.add(i, v);
      }
      // Trailing valueless docs must read back as 0.
      final int additionalDocs = 1 + random.nextInt(9);
      w.finish(NUM_VALUES + additionalDocs);
      assertEquals(0, trackBytes.get());
      IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs);
      // Second iteration exercises the source cache.
      for (int iter = 0; iter < 2; iter++) {
        Source s = getSource(r);
        assertEquals(type, s.type());
        for (int i = 0; i < NUM_VALUES; i++) {
          final long v = s.getInt(i);
          assertEquals("index " + i, values[i], v);
        }
      }
      // Sequential enum pass.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        assertEquals(type, iEnum.type());
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i++) {
          assertEquals(i, iEnum.nextDoc());
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.nextDoc());
        iEnum.close();
      }
      // Advancing enum pass with random strides.
      for (int iter = 0; iter < 2; iter++) {
        ValuesEnum iEnum = getEnum(r);
        assertEquals(type, iEnum.type());
        LongsRef ints = iEnum.getInt();
        for (int i = 0; i < NUM_VALUES + additionalDocs; i += 1 + random.nextInt(25)) {
          assertEquals(i, iEnum.advance(i));
          if (i < NUM_VALUES) {
            assertEquals(values[i], ints.get());
          } else {
            assertEquals(0, ints.get());
          }
        }
        assertEquals(ValuesEnum.NO_MORE_DOCS, iEnum.advance(NUM_VALUES + additionalDocs));
        iEnum.close();
      }
      r.close();
      dir.close();
    }
  }
  /** 4-byte float precision: values survive only approximately (delta 1e-5). */
  public void testFloats4() throws IOException {
    runTestFloats(4, 0.00001);
  }
  /**
   * Round-trips random doubles through a Floats writer/reader at the given
   * precision; verifies via Source, sequential enum, and advancing enum.
   * (Duplicate copy of the method defined earlier in this concatenated dump.)
   */
  private void runTestFloats(int precision, double delta) throws IOException {
    Directory dir = newDirectory();
    final AtomicLong trackBytes = new AtomicLong(0);
    Writer w = Floats.getWriter(dir, "test", precision, trackBytes, newIOContext(random));
    final int NUM_VALUES = 777 + random.nextInt(777);;
    final double[] values = new double[NUM_VALUES];
    for (int i = 0; i < NUM_VALUES; i++) {
      // 4-byte precision only gets float-range inputs so it can round-trip.
      final double v = precision == 4 ? random.nextFloat() : random
          .nextDouble();
      values[i] = v;
      w.add(i, v);
    }
    final int additionalValues = 1 + random.nextInt(10);
    w.finish(NUM_VALUES + additionalValues);
    assertEquals(0, trackBytes.get());
    IndexDocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random));
    for (int iter = 0; iter < 2; iter++) {
      Source s = getSource(r);
      for (int i = 0; i < NUM_VALUES; i++) {
        assertEquals(values[i], s.getFloat(i), 0.0f);
      }
    }
    // Sequential enum over all docs including the valueless tail.
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i++) {
        assertEquals(i, fEnum.nextDoc());
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.nextDoc());
      fEnum.close();
    }
    // Advancing enum with random strides.
    for (int iter = 0; iter < 2; iter++) {
      ValuesEnum fEnum = getEnum(r);
      FloatsRef floats = fEnum.getFloat();
      for (int i = 0; i < NUM_VALUES + additionalValues; i += 1 + random.nextInt(25)) {
        assertEquals(i, fEnum.advance(i));
        if (i < NUM_VALUES) {
          assertEquals(values[i], floats.get(), delta);
        } else {
          assertEquals(0.0d, floats.get(), delta);
        }
      }
      assertEquals(ValuesEnum.NO_MORE_DOCS, fEnum.advance(NUM_VALUES + additionalValues));
      fEnum.close();
    }
    r.close();
    dir.close();
  }
  /** 8-byte float precision: values must round-trip exactly (delta 0). */
  public void testFloats8() throws IOException {
    runTestFloats(8, 0.0);
  }
  /** Randomly obtains the enum either directly or via a (possibly cached) Source. */
  private ValuesEnum getEnum(IndexDocValues values) throws IOException {
    return random.nextBoolean() ? values.getEnum() : getSource(values).getEnum();
  }
  /** Randomly alternates between the loading and the caching Source accessor. */
  private Source getSource(IndexDocValues values) throws IOException {
    // getSource uses cache internally
    return random.nextBoolean() ? values.load() : values.getSource();
  }
  /** Randomly alternates between the loading and the caching sorted accessor. */
  private SortedSource getSortedSource(IndexDocValues values,
      Comparator<BytesRef> comparator) throws IOException {
    // getSortedSource uses cache internally
    return random.nextBoolean() ? values.loadSorted(comparator) : values
        .getSortedSorted(comparator);
  }
}
Diff Result
No diff
Case 39 - java_lucenesolr.rev_c100f_9af97..TestConfigSets.java
Base
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import java.io.File;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
/**
 * Tests that CoreContainer resolves named config sets from a configurable
 * base directory: successful lookup, failure on a missing set, and picking up
 * config-set changes on core reload.
 */
public class TestConfigSets extends SolrTestCaseJ4 {
  // Restores any system properties (e.g. "configsets") mutated by a test.
  @Rule
  public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
  // solr.xml with configSetBaseDir driven by the "configsets" system property.
  public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
  /**
   * Builds a loaded CoreContainer rooted under TEMP_DIR/testName with the
   * given config-set base directory. Caller is responsible for shutdown().
   */
  public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
    File testDirectory = new File(TEMP_DIR, testName);
    testDirectory.mkdirs();
    System.setProperty("configsets", configSetsBaseDir);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    return container;
  }
  /** Creating a core against an existing config set must succeed. */
  @Test
  public void testConfigSetServiceFindsConfigSets() {
    CoreContainer container = null;
    try {
      container = setupContainer("findsConfigSets", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
      assertThat(core1.getCoreDescriptor().getName(), is("core1"));
      assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
      core1.close();
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Creating a core against a missing config set must fail, naming the set. */
  @Test
  public void testNonExistentConfigSetThrowsException() {
    CoreContainer container = null;
    try {
      container = setupContainer("badConfigSet", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
      fail("Expected core creation to fail");
    }
    catch (Exception e) {
      // The root cause message must mention the missing config set name.
      Throwable wrappedException = getWrappedException(e);
      assertThat(wrappedException.getMessage(), containsString("nonexistent"));
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Reloading a core must pick up changes made to its config set on disk. */
  @Test
  public void testConfigSetOnCoreReload() throws IOException {
    File testDirectory = new File(TEMP_DIR, "core-reload");
    testDirectory.mkdirs();
    // Copy the config sets to a writable location so we can modify them below.
    File configSetsDir = new File(testDirectory, "configsets");
    FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
    String csd = configSetsDir.getAbsolutePath();
    System.setProperty("configsets", csd);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    // We initially don't have a /get handler defined
    SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
    container.register(core, false);
    assertThat("No /get handler should be defined in the initial configuration",
        core.getRequestHandler("/get"), is(nullValue()));
    // Now copy in a config with a /get handler and reload
    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
        new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
    container.reload("core1");
    core = container.getCore("core1");
    assertThat("A /get handler should be defined in the reloaded configuration",
        core.getRequestHandler("/get"), is(notNullValue()));
    core.close();
    container.shutdown();
  }
}
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import java.io.File;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
/**
 * Config-set resolution tests for CoreContainer (duplicate copy in this
 * concatenated dump; identical to the Base copy above, rooted at TEMP_DIR).
 */
public class TestConfigSets extends SolrTestCaseJ4 {
  // Restores system properties (e.g. "configsets") mutated by a test.
  @Rule
  public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
  // solr.xml with configSetBaseDir driven by the "configsets" system property.
  public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
  /** Builds a loaded CoreContainer; caller must shutdown(). */
  public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
    File testDirectory = new File(TEMP_DIR, testName);
    testDirectory.mkdirs();
    System.setProperty("configsets", configSetsBaseDir);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    return container;
  }
  /** Creating a core against an existing config set must succeed. */
  @Test
  public void testConfigSetServiceFindsConfigSets() {
    CoreContainer container = null;
    try {
      container = setupContainer("findsConfigSets", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
      assertThat(core1.getCoreDescriptor().getName(), is("core1"));
      assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
      core1.close();
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Creating a core against a missing config set must fail, naming the set. */
  @Test
  public void testNonExistentConfigSetThrowsException() {
    CoreContainer container = null;
    try {
      container = setupContainer("badConfigSet", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
      fail("Expected core creation to fail");
    }
    catch (Exception e) {
      Throwable wrappedException = getWrappedException(e);
      assertThat(wrappedException.getMessage(), containsString("nonexistent"));
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Reloading a core must pick up changes made to its config set on disk. */
  @Test
  public void testConfigSetOnCoreReload() throws IOException {
    File testDirectory = new File(TEMP_DIR, "core-reload");
    testDirectory.mkdirs();
    // Copy the config sets to a writable location so we can modify them below.
    File configSetsDir = new File(testDirectory, "configsets");
    FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
    String csd = configSetsDir.getAbsolutePath();
    System.setProperty("configsets", csd);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    // We initially don't have a /get handler defined
    SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
    container.register(core, false);
    assertThat("No /get handler should be defined in the initial configuration",
        core.getRequestHandler("/get"), is(nullValue()));
    // Now copy in a config with a /get handler and reload
    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
        new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
    container.reload("core1");
    core = container.getCore("core1");
    assertThat("A /get handler should be defined in the reloaded configuration",
        core.getRequestHandler("/get"), is(notNullValue()));
    core.close();
    container.shutdown();
  }
}
Left
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import java.io.File;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
/**
 * Config-set resolution tests for CoreContainer ("Left" copy in this
 * concatenated dump; differs from the Base copies only by rooting test
 * directories at dataDir instead of TEMP_DIR).
 */
public class TestConfigSets extends SolrTestCaseJ4 {
  // Restores system properties (e.g. "configsets") mutated by a test.
  @Rule
  public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
  // solr.xml with configSetBaseDir driven by the "configsets" system property.
  public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
  /** Builds a loaded CoreContainer rooted under dataDir; caller must shutdown(). */
  public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
    File testDirectory = new File(dataDir, testName);
    testDirectory.mkdirs();
    System.setProperty("configsets", configSetsBaseDir);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    return container;
  }
  /** Creating a core against an existing config set must succeed. */
  @Test
  public void testConfigSetServiceFindsConfigSets() {
    CoreContainer container = null;
    try {
      container = setupContainer("findsConfigSets", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
      assertThat(core1.getCoreDescriptor().getName(), is("core1"));
      assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
      core1.close();
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Creating a core against a missing config set must fail, naming the set. */
  @Test
  public void testNonExistentConfigSetThrowsException() {
    CoreContainer container = null;
    try {
      container = setupContainer("badConfigSet", getFile("solr/configsets").getAbsolutePath());
      String testDirectory = container.getResourceLoader().getInstanceDir();
      container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
      fail("Expected core creation to fail");
    }
    catch (Exception e) {
      Throwable wrappedException = getWrappedException(e);
      assertThat(wrappedException.getMessage(), containsString("nonexistent"));
    }
    finally {
      if (container != null)
        container.shutdown();
    }
  }
  /** Reloading a core must pick up changes made to its config set on disk. */
  @Test
  public void testConfigSetOnCoreReload() throws IOException {
    File testDirectory = new File(dataDir, "core-reload");
    testDirectory.mkdirs();
    // Copy the config sets to a writable location so we can modify them below.
    File configSetsDir = new File(testDirectory, "configsets");
    FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
    String csd = configSetsDir.getAbsolutePath();
    System.setProperty("configsets", csd);
    SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
    CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
    container.load();
    // We initially don't have a /get handler defined
    SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
    container.register(core, false);
    assertThat("No /get handler should be defined in the initial configuration",
        core.getRequestHandler("/get"), is(nullValue()));
    // Now copy in a config with a /get handler and reload
    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
        new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
    container.reload("core1");
    core = container.getCore("core1");
    assertThat("A /get handler should be defined in the reloaded configuration",
        core.getRequestHandler("/get"), is(notNullValue()));
    core.close();
    container.shutdown();
  }
}
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import java.io.File;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer("findsConfigSets", getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer("badConfigSet", getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
File testDirectory = new File(dataDir, "core-reload");
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
Right
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
File testDirectory = new File(initCoreDataDir, "core-reload");
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
File testDirectory = new File(initCoreDataDir, "core-reload");
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
MergeMethods
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
KeepBothMethods
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
Safe
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
<<<<<<< MINE
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
=======
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
>>>>>>> YOURS
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
package org.apache.solr.core;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
<<<<<<< MINE
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
=======
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
>>>>>>> YOURS
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
Unstructured
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
<<<<<<< MINE
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
=======
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
>>>>>>> YOURS
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.junit.internal.matchers.StringContains.containsString;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
public class TestConfigSets extends SolrTestCaseJ4 {
@Rule
public TestRule testRule = RuleChain.outerRule(new SystemPropertiesRestoreRule());
public static String solrxml = "<solr><str name=\"configSetBaseDir\">${configsets:configsets}</str></solr>";
<<<<<<< MINE
public CoreContainer setupContainer(String testName, String configSetsBaseDir) {
File testDirectory = new File(dataDir, testName);
testDirectory.mkdirs();
=======
public CoreContainer setupContainer(String configSetsBaseDir) {
File testDirectory = createTempDir();
>>>>>>> YOURS
System.setProperty("configsets", configSetsBaseDir);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
return container;
}
@Test
public void testConfigSetServiceFindsConfigSets() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
SolrCore core1 = container.create("core1", testDirectory + "/core1", "configSet", "configset-2");
assertThat(core1.getCoreDescriptor().getName(), is("core1"));
assertThat(core1.getDataDir(), is(testDirectory + "/core1" + File.separator + "data" + File.separator));
core1.close();
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testNonExistentConfigSetThrowsException() {
CoreContainer container = null;
try {
container = setupContainer(getFile("solr/configsets").getAbsolutePath());
String testDirectory = container.getResourceLoader().getInstanceDir();
container.create("core1", testDirectory + "/core1", "configSet", "nonexistent");
fail("Expected core creation to fail");
}
catch (Exception e) {
Throwable wrappedException = getWrappedException(e);
assertThat(wrappedException.getMessage(), containsString("nonexistent"));
}
finally {
if (container != null)
container.shutdown();
}
}
@Test
public void testConfigSetOnCoreReload() throws IOException {
<<<<<<< MINE
File testDirectory = new File(dataDir, "core-reload");
=======
File testDirectory = new File(initCoreDataDir, "core-reload");
>>>>>>> YOURS
testDirectory.mkdirs();
File configSetsDir = new File(testDirectory, "configsets");
FileUtils.copyDirectory(getFile("solr/configsets"), configSetsDir);
String csd = configSetsDir.getAbsolutePath();
System.setProperty("configsets", csd);
SolrResourceLoader loader = new SolrResourceLoader(testDirectory.getAbsolutePath());
CoreContainer container = new CoreContainer(loader, ConfigSolr.fromString(loader, solrxml));
container.load();
// We initially don't have a /get handler defined
SolrCore core = container.create("core1", testDirectory + "/core", "configSet", "configset-2");
container.register(core, false);
assertThat("No /get handler should be defined in the initial configuration",
core.getRequestHandler("/get"), is(nullValue()));
// Now copy in a config with a /get handler and reload
FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-withgethandler.xml"),
new File(new File(configSetsDir, "configset-2/conf"), "solrconfig.xml"));
container.reload("core1");
core = container.getCore("core1");
assertThat("A /get handler should be defined in the reloaded configuration",
core.getRequestHandler("/get"), is(notNullValue()));
core.close();
container.shutdown();
}
}
Diff Result
No diff
Case 40 - java_lucenesolr.rev_dc62b_aff97..DocValuesWriterBase.java
Base
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final boolean fasterButMoreRam;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, true);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, fasterButMoreRam);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final boolean fasterButMoreRam;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, true);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, fasterButMoreRam);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
Left
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final boolean fasterButMoreRam;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, true);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, fasterButMoreRam);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final boolean fasterButMoreRam;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, true);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, fasterButMoreRam);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
Right
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
MergeMethods
package org.apache.lucene.codecs.lucene40.values;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
// javadoc
import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType, PerDocProducerBase.docValuesId(segmentName, field.number), getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
// javadoc
import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType, PerDocProducerBase.docValuesId(segmentName, field.number), getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
KeepBothMethods
package org.apache.lucene.codecs.lucene40.values;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
// javadoc
import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType, PerDocProducerBase.docValuesId(segmentName, field.number), getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
// javadoc
import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType, PerDocProducerBase.docValuesId(segmentName, field.number), getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
Safe
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
<<<<<<< MINE
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
=======
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
>>>>>>> YOURS
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param fasterButMoreRam whether packed ints for docvalues should be optimized for speed by rounding up the bytes
* used for a value to either 8, 16, 32 or 64 bytes. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
*/
<<<<<<< MINE
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
=======
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.fasterButMoreRam = fasterButMoreRam;
}
>>>>>>> YOURS
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
Unstructured
package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
<<<<<<< MINE
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
=======
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
>>>>>>> YOURS
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}package org.apache.lucene.codecs.lucene40.values;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.PerDocProducerBase;
import org.apache.lucene.codecs.PerDocConsumer;
import org.apache.lucene.codecs.lucene40.values.Writer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.DocValues.Type; // javadoc
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.packed.PackedInts;
/**
* Abstract base class for PerDocConsumer implementations
*
* @lucene.experimental
*/
public abstract class DocValuesWriterBase extends PerDocConsumer {
protected final String segmentName;
private final Counter bytesUsed;
protected final IOContext context;
private final float acceptableOverheadRatio;
/**
* Filename extension for index files
*/
public static final String INDEX_EXTENSION = "idx";
/**
* Filename extension for data files.
*/
public static final String DATA_EXTENSION = "dat";
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
*/
protected DocValuesWriterBase(PerDocWriteState state) {
this(state, PackedInts.FAST);
}
/**
* @param state The state to initiate a {@link PerDocConsumer} instance
* @param acceptableOverheadRatio
* how to trade space for speed. This option is only applicable for
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
<<<<<<< MINE
protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
this.segmentName = state.segmentInfo.name;
=======
protected DocValuesWriterBase(PerDocWriteState state, float acceptableOverheadRatio) {
this.segmentName = state.segmentName;
>>>>>>> YOURS
this.bytesUsed = state.bytesUsed;
this.context = state.context;
this.acceptableOverheadRatio = acceptableOverheadRatio;
}
protected abstract Directory getDirectory() throws IOException;
@Override
public void close() throws IOException {
}
@Override
public DocValuesConsumer addValuesField(Type valueType, FieldInfo field) throws IOException {
return Writer.create(valueType,
PerDocProducerBase.docValuesId(segmentName, field.number),
getDirectory(), getComparator(), bytesUsed, context, acceptableOverheadRatio);
}
public Comparator<BytesRef> getComparator() throws IOException {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}
Diff Result
No diff
Case 41 - java_netty.rev_326b8_6d39c..SpdySessionHandler.java
Base
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
implements ChannelDownstreamHandler {
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
/**
 * Creates a new session handler.
 *
 * @param server {@code true} if and only if this session handler should
 *               handle the server endpoint of the connection.
 *               {@code false} if and only if this session handler should
 *               handle the client endpoint of the connection.
 */
public SpdySessionHandler(boolean server) {
    // The implicit superclass constructor suffices; the explicit super()
    // call was redundant and has been removed.
    this.server = server;
}
/**
 * Validates every inbound SPDY frame against the per-stream session state
 * before handing it to the rest of the pipeline.  Protocol violations are
 * answered with a stream error (RST_STREAM) or a session error (GOAWAY)
 * and the offending frame is not propagated upstream.
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
        throws Exception {
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which does not exist,
         * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
         * close the connection immediately.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may terminate the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives data on a stream which has already been torn down,
         * it must ignore the data received after the teardown.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (spdySession.isRemoteSideClosed(streamID)) {
            // After we have sent GOAWAY, late data frames are expected; stay silent.
            if (!sentGoAwayFrame) {
                issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdyDataFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
         * increasing, it must issue a session error with the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() ||
            !isRemoteInitiatedID(streamID) ||
            spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID < lastGoodStreamID) {
            issueSessionError(ctx, e.getChannel(), e.getRemoteAddress());
            return;
        }
        // Try to accept the stream
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() ||
            isRemoteInitiatedID(streamID) ||
            spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        spdySession.receivedReply(streamID);
        if (spdySynReplyFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send additional
         * frames on that stream.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        /*
         * Only concerned with MAX_CONCURRENT_STREAMS
         */
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        updateConcurrentStreams(spdySettingsFrame, true);
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo the peer's PING straight back; do not propagate it upstream.
            Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
    }
    super.messageReceived(ctx, e);
}
/**
 * Intercepts outbound events: channel close/disconnect/unbind requests are
 * turned into a graceful GOAWAY-based shutdown, and outbound SPDY frames are
 * checked against local stream state before being written.  Invalid writes
 * fail the event's future with a protocol exception instead of being sent.
 */
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
        throws Exception {
    if (evt instanceof ChannelStateEvent) {
        ChannelStateEvent e = (ChannelStateEvent) evt;
        switch (e.getState()) {
        case OPEN:
        case CONNECTED:
        case BOUND:
            // A FALSE or null value signals a close/disconnect/unbind request.
            if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
                sendGoAwayFrame(ctx, e);
                return;
            }
        }
    }
    if (!(evt instanceof MessageEvent)) {
        ctx.sendDownstream(evt);
        return;
    }
    MessageEvent e = (MessageEvent) evt;
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Cannot send data on a stream whose local side is already closed.
        if (spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        // For a locally-initiated stream the closed sides are mirrored
        // relative to the inbound (upstream) case.
        boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
        boolean localSideClosed = spdySynStreamFrame.isLast();
        if (!acceptStream(spdySynStreamFrame.getStreamID(), remoteSideClosed, localSideClosed)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Replies are only valid on remote-initiated, locally-open streams.
        if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        updateConcurrentStreams(spdySettingsFrame, false);
    } else if (msg instanceof SpdyPingFrame) {
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        // Locally-sent PINGs must carry our own ID parity.
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            e.getFuture().setFailure(new IllegalArgumentException(
                    "invalid PING ID: " + spdyPingFrame.getID()));
            return;
        }
        pings.getAndIncrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        // Should send a CLOSE ChannelStateEvent
        e.getFuture().setFailure(PROTOCOL_EXCEPTION);
        return;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        if (spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
    }
    ctx.sendDownstream(evt);
}
/*
* Error Handling
*/
/**
 * Terminates the whole session: sends a GOAWAY frame and closes the channel
 * once the write completes.
 */
private void issueSessionError(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress) {
    sendGoAwayFrame(ctx, channel, remoteAddress).addListener(ChannelFutureListener.CLOSE);
}
/**
 * Answers a protocol violation on a single stream: drops the stream from the
 * session and writes a RST_STREAM frame with the given status back to the
 * peer.  Only called in the upstream direction, in response to an incoming
 * MessageEvent.
 */
private void issueStreamError(
        ChannelHandlerContext ctx, MessageEvent e, int streamID, SpdyStreamStatus status) {
    // Remove local bookkeeping first so no further frames go out on this stream.
    removeStream(streamID);
    SpdyRstStreamFrame rstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(e.getChannel()), rstStreamFrame, e.getRemoteAddress());
}
/*
* Helper functions
*/
/**
 * Returns {@code true} if the given stream/ping ID was allocated by the
 * remote endpoint: a server treats client-parity IDs as remote and vice versa.
 */
private boolean isRemoteInitiatedID(int ID) {
    // Remote-initiated exactly when the ID's server/client parity differs
    // from our own role (logical XOR of the two booleans).
    return server != SpdyCodecUtil.isServerID(ID);
}
/**
 * Records a MAX_CONCURRENT_STREAMS setting from either endpoint and
 * recomputes the effective session limit.
 *
 * @param settings the SETTINGS frame carrying the new value
 * @param remote   {@code true} if the setting came from the remote endpoint
 */
private synchronized void updateConcurrentStreams(SpdySettingsFrame settings, boolean remote) {
    int newConcurrentStreams = settings.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // A value of 0 means "no limit advertised"; otherwise the effective limit
    // is the stricter (smaller) of the two sides.
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
/**
 * Attempts to register a new stream with the session bookkeeping.
 *
 * @param streamID         ID of the stream to open
 * @param remoteSideClosed whether the remote half starts out closed
 * @param localSideClosed  whether the local half starts out closed
 * @return {@code true} if the stream was accepted; {@code false} if the
 *         session is shutting down or the concurrent-stream limit is reached
 */
private synchronized boolean acceptStream(
        int streamID, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // maxConcurrentStreams == 0 means no limit was negotiated.
    if ((maxConcurrentStreams != 0) &&
        (spdySession.numActiveStreams() >= maxConcurrentStreams)) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    if (isRemoteInitiatedID(streamID)) {
        // Track the highest remote stream accepted; echoed back in GOAWAY.
        lastGoodStreamID = streamID;
    }
    return true;
}
/**
 * Closes one direction of the given stream.  When a session close is pending
 * and this was the last active stream, the pending close future is completed.
 *
 * @param streamID the stream whose half is being closed
 * @param remote   {@code true} closes the remote side, {@code false} the local side
 */
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
/**
 * Removes the stream from the session entirely; completes a pending session
 * close once no active streams remain.
 */
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
/**
 * Initiates session shutdown in response to a channel close/disconnect/unbind
 * request: sends a GOAWAY frame (if not already sent) and defers the actual
 * channel close until all active streams have finished.
 */
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
    // Avoid NotYetConnectedException
    if (!e.getChannel().isConnected()) {
        ctx.sendDownstream(e);
        return;
    }
    ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null);
    if (spdySession.noActiveStreams()) {
        // Nothing in flight: close as soon as the GOAWAY write completes.
        future.addListener(new ClosingChannelFutureListener(ctx, e));
    } else {
        // Streams still active: close once the last one finishes
        // (halfCloseStream/removeStream complete this future).
        closeSessionFuture = Channels.future(e.getChannel());
        closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    }
}
/**
 * Writes a GOAWAY frame carrying the last accepted remote stream ID, at most
 * once per session.
 *
 * @return the write future for the GOAWAY frame, or an already-succeeded
 *         future if one was sent previously
 */
private synchronized ChannelFuture sendGoAwayFrame(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress) {
    if (sentGoAwayFrame) {
        // Already sent; report success without writing again.
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture future = Channels.future(channel);
    Channels.write(ctx, future, new DefaultSpdyGoAwayFrame(lastGoodStreamID));
    return future;
}
/**
 * Closes the channel after the GOAWAY write finishes.  If the channel already
 * died underneath the write, the close is effectively done and the original
 * future is simply completed.
 */
private static final class ClosingChannelFutureListener implements ChannelFutureListener {

    private final ChannelHandlerContext ctx;
    private final ChannelStateEvent e;

    ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
        this.ctx = ctx;
        this.e = e;
    }

    public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
        if (sentGoAwayFuture.getCause() instanceof ClosedChannelException) {
            e.getFuture().setSuccess();
        } else {
            Channels.close(ctx, e.getFuture());
        }
    }
}
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
implements ChannelDownstreamHandler {
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
/**
* Creates a new session handler.
*
* @param server {@code true} if and only if this session handler should
* handle the server endpoint of the connection.
* {@code false} if and only if this session handler should
* handle the client endpoint of the connection.
*/
public SpdySessionHandler(boolean server) {
super();
this.server = server;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which does not exist,
* it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the receiver should
* close the connection immediately.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may terminate the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives data on a stream which has already been torn down,
* it must ignore the data received after the teardown.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
if (spdySession.isRemoteSideClosed(streamID)) {
if (!sentGoAwayFrame) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdyDataFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
* increasing, it must issue a session error with the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increassing
if (streamID < lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress());
return;
}
// Try to accept the stream
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code PROTOCOL_ERROR.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
spdySession.receivedReply(streamID);
if (spdySynReplyFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send additional
* frames on that stream.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
/*
* Only concerned with MAX_CONCURRENT_STREAMS
*/
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
updateConcurrentStreams(spdySettingsFrame, true);
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not inforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
}
super.messageReceived(ctx, e);
}
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(spdySynStreamFrame.getStreamID(), remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
updateConcurrentStreams(spdySettingsFrame, false);
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Should send a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
}
ctx.sendDownstream(evt);
}
/*
* Error Handling
*/
private void issueSessionError(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress) {
ChannelFuture future = sendGoAwayFrame(ctx, channel, remoteAddress);
future.addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
private void issueStreamError(
ChannelHandlerContext ctx, MessageEvent e, int streamID, SpdyStreamStatus status) {
removeStream(streamID);
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
Channels.write(ctx, Channels.future(e.getChannel()), spdyRstStreamFrame, e.getRemoteAddress());
}
/*
* Helper functions
*/
/**
 * Returns {@code true} if the given stream/ping ID was allocated by the
 * remote endpoint: a server treats client-parity IDs as remote and vice versa.
 */
private boolean isRemoteInitiatedID(int ID) {
    // Remote-initiated exactly when the ID's server/client parity differs
    // from our own role (logical XOR of the two booleans).
    return server != SpdyCodecUtil.isServerID(ID);
}
/**
 * Records a MAX_CONCURRENT_STREAMS setting from either endpoint and
 * recomputes the effective session limit.
 *
 * @param settings the SETTINGS frame carrying the new value
 * @param remote   {@code true} if the setting came from the remote endpoint
 */
private synchronized void updateConcurrentStreams(SpdySettingsFrame settings, boolean remote) {
    int newConcurrentStreams = settings.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // A value of 0 means "no limit advertised"; otherwise the effective limit
    // is the stricter (smaller) of the two sides.
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
private synchronized boolean acceptStream(
int streamID, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
if ((maxConcurrentStreams != 0) &&
(spdySession.numActiveStreams() >= maxConcurrentStreams)) {
return false;
}
spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
/**
 * Closes one direction of the given stream.  When a session close is pending
 * and this was the last active stream, the pending close future is completed.
 *
 * @param streamID the stream whose half is being closed
 * @param remote   {@code true} closes the remote side, {@code false} the local side
 */
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
/**
 * Removes the stream from the session entirely; completes a pending session
 * close once no active streams remain.
 */
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
/**
 * Writes a GOAWAY frame carrying the last accepted remote stream ID, at most
 * once per session.
 *
 * @return the write future for the GOAWAY frame, or an already-succeeded
 *         future if one was sent previously
 */
private synchronized ChannelFuture sendGoAwayFrame(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress) {
    if (sentGoAwayFrame) {
        // Already sent; report success without writing again.
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture future = Channels.future(channel);
    Channels.write(ctx, future, new DefaultSpdyGoAwayFrame(lastGoodStreamID));
    return future;
}
private static final class ClosingChannelFutureListener implements ChannelFutureListener {
private final ChannelHandlerContext ctx;
private final ChannelStateEvent e;
ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
this.ctx = ctx;
this.e = e;
}
public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
if (!(sentGoAwayFuture.getCause() instanceof ClosedChannelException)) {
Channels.close(ctx, e.getFuture());
} else {
e.getFuture().setSuccess();
}
}
}
}
Left
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
        implements ChannelDownstreamHandler {

    // Shared stateless exception used to fail writes that violate the SPDY protocol.
    private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();

    // Per-connection stream state: active streams, reply tracking, window sizes,
    // and queued (pending) data writes for stalled streams.
    private final SpdySession spdySession = new SpdySession();

    // Highest remote-initiated Stream-ID accepted so far; reported in GOAWAY frames.
    private volatile int lastGoodStreamID;

    // SETTINGS_MAX_CONCURRENT_STREAMS values seen from each side. The effective
    // limit (maxConcurrentStreams) is derived in updateConcurrentStreams();
    // a value of 0 means "no limit".
    private volatile int remoteConcurrentStreams;
    private volatile int localConcurrentStreams;
    private volatile int maxConcurrentStreams;

    private static final int DEFAULT_WINDOW_SIZE = 64 * 1024; // 64 KB default initial window size
    private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
    private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;

    // Guards send-window accounting and the pending-write queue so that window
    // updates and partial-frame writes are applied atomically per stream.
    private final Object flowControlLock = new Object();

    // Count of locally initiated PING frames still awaiting a reply.
    private final AtomicInteger pings = new AtomicInteger();

    private volatile boolean sentGoAwayFrame;
    private volatile boolean receivedGoAwayFrame;

    // Set when a graceful close is in progress; completed once no streams remain active.
    private volatile ChannelFuture closeSessionFuture;

    // true if this endpoint is the server side of the connection.
    private final boolean server;
    // Per-stream flow control was introduced in SPDY/3; disabled for earlier versions.
    private final boolean flowControl;

    /**
     * Creates a new session handler.
     *
     * @param version the protocol version
     * @param server {@code true} if and only if this session handler should
     * handle the server endpoint of the connection.
     * {@code false} if and only if this session handler should
     * handle the client endpoint of the connection.
     */
    public SpdySessionHandler(int version, boolean server) {
        super();
        if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
            throw new IllegalArgumentException(
                    "unsupported version: " + version);
        }
        this.server = server;
        this.flowControl = version >= 3;
    }

    /**
     * Validates incoming SPDY frames against the protocol's per-frame-type
     * requirements (and flow control for SPDY/3+), issuing stream or session
     * errors as needed; valid frames are propagated upstream.
     */
    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
            throws Exception {
        Object msg = e.getMessage();
        if (msg instanceof SpdyDataFrame) {

            /*
             * SPDY Data frame processing requirements:
             *
             * If an endpoint receives a data frame for a Stream-ID which is not open
             * and the endpoint has not sent a GOAWAY frame, it must issue a stream error
             * with the error code INVALID_STREAM for the Stream-ID.
             *
             * If an endpoint which created the stream receives a data frame before receiving
             * a SYN_REPLY on that stream, it is a protocol error, and the recipient must
             * issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
             *
             * If an endpoint receives multiple data frames for invalid Stream-IDs,
             * it may close the session.
             *
             * If an endpoint refuses a stream it must ignore any data frames for that stream.
             *
             * If an endpoint receives a data frame after the stream is half-closed from the
             * sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
             *
             * If an endpoint receives a data frame after the stream is closed, it must send
             * a RST_STREAM frame with the status PROTOCOL_ERROR.
             */

            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
            int streamID = spdyDataFrame.getStreamID();

            // Check if we received a data frame for a Stream-ID which is not open
            if (!spdySession.isActiveStream(streamID)) {
                // A Stream-ID at or below lastGoodStreamID was once valid, so the
                // stream is closed (PROTOCOL_ERROR) rather than unknown (INVALID_STREAM).
                if (streamID <= lastGoodStreamID) {
                    issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                } else if (!sentGoAwayFrame) {
                    issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
                }
                return;
            }

            // Check if we received a data frame for a stream which is half-closed
            if (spdySession.isRemoteSideClosed(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
                return;
            }

            // Check if we received a data frame before receiving a SYN_REPLY
            if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            /*
             * SPDY Data frame flow control processing requirements:
             *
             * Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
             */

            if (flowControl) {
                // Update receive window size
                int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
                int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);

                // Window size can become negative if we sent a SETTINGS frame that reduces the
                // size of the transfer window after the peer has written data frames.
                // The value is bounded by the amount by which the SETTINGS frame decreased
                // the window. This difference is stored for the session when writing the
                // SETTINGS frame and is cleared once we send a WINDOW_UPDATE frame.
                if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
                    issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                    return;
                }

                // Window size became negative due to sender writing frame before receiving SETTINGS
                // Send data frames upstream in initialReceiveWindowSize chunks
                if (newWindowSize < 0) {
                    // readSlice advances the reader index, so the remainder of the frame
                    // (<= initialReceiveWindowSize) falls through to super.messageReceived below.
                    while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
                        SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                        partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
                        Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
                    }
                }

                // Send a WINDOW_UPDATE frame if less than half the window size remains
                if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
                    deltaWindowSize = initialReceiveWindowSize - newWindowSize;
                    spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
                    SpdyWindowUpdateFrame spdyWindowUpdateFrame =
                            new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
                    Channels.write(
                            ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
                }
            }

            // Close the remote side of the stream if this is the last frame
            if (spdyDataFrame.isLast()) {
                halfCloseStream(streamID, true);
            }

        } else if (msg instanceof SpdySynStreamFrame) {

            /*
             * SPDY SYN_STREAM frame processing requirements:
             *
             * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
             * any previously received SYN_STREAM, it must issue a session error with
             * the status PROTOCOL_ERROR.
             *
             * If an endpoint receives multiple SYN_STREAM frames with the same active
             * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
             *
             * The recipient can reject a stream by sending a stream error with the
             * status code REFUSED_STREAM.
             */

            SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
            int streamID = spdySynStreamFrame.getStreamID();

            // Check if we received a valid SYN_STREAM frame
            if (spdySynStreamFrame.isInvalid() ||
                !isRemoteInitiatedID(streamID) ||
                spdySession.isActiveStream(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            // Stream-IDs must be monotonically increasing
            if (streamID <= lastGoodStreamID) {
                issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
                return;
            }

            // Try to accept the stream
            byte priority = spdySynStreamFrame.getPriority();
            boolean remoteSideClosed = spdySynStreamFrame.isLast();
            boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
            if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
                return;
            }

        } else if (msg instanceof SpdySynReplyFrame) {

            /*
             * SPDY SYN_REPLY frame processing requirements:
             *
             * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
             * it must issue a stream error with the status code STREAM_IN_USE.
             */

            SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
            int streamID = spdySynReplyFrame.getStreamID();

            // Check if we received a valid SYN_REPLY frame
            if (spdySynReplyFrame.isInvalid() ||
                isRemoteInitiatedID(streamID) ||
                spdySession.isRemoteSideClosed(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
                return;
            }

            // Check if we have received multiple frames for the same Stream-ID
            if (spdySession.hasReceivedReply(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
                return;
            }

            spdySession.receivedReply(streamID);

            // Close the remote side of the stream if this is the last frame
            if (spdySynReplyFrame.isLast()) {
                halfCloseStream(streamID, true);
            }

        } else if (msg instanceof SpdyRstStreamFrame) {

            /*
             * SPDY RST_STREAM frame processing requirements:
             *
             * After receiving a RST_STREAM on a stream, the receiver must not send
             * additional frames on that stream.
             *
             * An endpoint must not send a RST_STREAM in response to a RST_STREAM.
             */

            SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
            removeStream(spdyRstStreamFrame.getStreamID());

        } else if (msg instanceof SpdySettingsFrame) {

            SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;

            int newConcurrentStreams =
                spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
            if (newConcurrentStreams >= 0) {
                // true: this value was advertised by the remote endpoint
                updateConcurrentStreams(newConcurrentStreams, true);
            }

            // Persistence flags are inconsistent with the use of SETTINGS to communicate
            // the initial window size. Remove flags from the sender requesting that the
            // value be persisted. Remove values that the sender indicates are persisted.
            if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
                spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            }
            spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);

            if (flowControl) {
                int newInitialWindowSize =
                    spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
                if (newInitialWindowSize >= 0) {
                    updateInitialSendWindowSize(newInitialWindowSize);
                }
            }

        } else if (msg instanceof SpdyPingFrame) {

            /*
             * SPDY PING frame processing requirements:
             *
             * Receivers of a PING frame should send an identical frame to the sender
             * as soon as possible.
             *
             * Receivers of a PING frame must ignore frames that it did not initiate
             */

            SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;

            if (isRemoteInitiatedID(spdyPingFrame.getID())) {
                // Echo the remote endpoint's PING straight back downstream.
                Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
                return;
            }

            // Note: only checks that there are outstanding pings since uniqueness is not enforced
            if (pings.get() == 0) {
                return;
            }
            pings.getAndDecrement();

        } else if (msg instanceof SpdyGoAwayFrame) {

            receivedGoAwayFrame = true;

        } else if (msg instanceof SpdyHeadersFrame) {

            SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
            int streamID = spdyHeadersFrame.getStreamID();

            // Check if we received a valid HEADERS frame
            if (spdyHeadersFrame.isInvalid()) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            if (spdySession.isRemoteSideClosed(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
                return;
            }

            // Close the remote side of the stream if this is the last frame
            if (spdyHeadersFrame.isLast()) {
                halfCloseStream(streamID, true);
            }

        } else if (msg instanceof SpdyWindowUpdateFrame) {

            /*
             * SPDY WINDOW_UPDATE frame processing requirements:
             *
             * Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
             * must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
             *
             * Sender should ignore all WINDOW_UPDATE frames associated with a stream
             * after sending the last frame for the stream.
             */

            if (flowControl) {
                SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
                int streamID = spdyWindowUpdateFrame.getStreamID();
                int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();

                // Ignore frames for half-closed streams
                if (spdySession.isLocalSideClosed(streamID)) {
                    return;
                }

                // Check for numerical overflow
                if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
                    issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                    return;
                }

                updateSendWindowSize(ctx, streamID, deltaWindowSize);
            }
            // WINDOW_UPDATE frames are consumed here; never propagated upstream.
            return;
        }

        super.messageReceived(ctx, e);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
            throws Exception {
        Throwable cause = e.getCause();
        // A SpdyProtocolException from the pipeline terminates the whole session.
        if (cause instanceof SpdyProtocolException) {
            issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
        }

        super.exceptionCaught(ctx, e);
    }

    /**
     * Intercepts outbound frames to enforce stream state and (for SPDY/3+)
     * send-side flow control, and turns channel-close state events into a
     * graceful GOAWAY sequence.
     */
    public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
            throws Exception {
        if (evt instanceof ChannelStateEvent) {
            ChannelStateEvent e = (ChannelStateEvent) evt;

            switch (e.getState()) {
            case OPEN:
            case CONNECTED:
            case BOUND:

                /*
                 * SPDY connection requirements:
                 *
                 * When either endpoint closes the transport-level connection,
                 * it must first send a GOAWAY frame.
                 */
                // A FALSE or null value means close/disconnect/unbind was requested.
                if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
                    sendGoAwayFrame(ctx, e);
                    return;
                }
            }
        }

        if (!(evt instanceof MessageEvent)) {
            ctx.sendDownstream(evt);
            return;
        }

        MessageEvent e = (MessageEvent) evt;
        Object msg = e.getMessage();

        if (msg instanceof SpdyDataFrame) {

            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
            final int streamID = spdyDataFrame.getStreamID();

            // Frames must not be sent on half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                e.getFuture().setFailure(PROTOCOL_EXCEPTION);
                return;
            }

            /*
             * SPDY Data frame flow control processing requirements:
             *
             * Sender must not send a data frame with data length greater
             * than the transfer window size.
             *
             * After sending each data frame, the sender decrements its
             * transfer window size by the amount of data transmitted.
             *
             * When the window size becomes less than or equal to 0, the
             * sender must pause transmitting data frames.
             */

            if (flowControl) {
                synchronized (flowControlLock) {
                    int dataLength = spdyDataFrame.getData().readableBytes();
                    int sendWindowSize = spdySession.getSendWindowSize(streamID);

                    if (sendWindowSize >= dataLength) {
                        // Window size is large enough to send entire data frame
                        spdySession.updateSendWindowSize(streamID, -1 * dataLength);

                        // The transfer window size is pre-decremented when sending a data frame downstream.
                        // Close the stream on write failures that leaves the transfer window in a corrupt state.
                        final SocketAddress remoteAddress = e.getRemoteAddress();
                        final ChannelHandlerContext context = ctx;
                        e.getFuture().addListener(new ChannelFutureListener() {
                            public void operationComplete(ChannelFuture future) throws Exception {
                                if (!future.isSuccess()) {
                                    issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                                }
                            }
                        });

                    } else if (sendWindowSize > 0) {
                        // Stream is not stalled but we cannot send the entire frame
                        spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);

                        // Create a partial data frame whose length is the current window size
                        SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                        partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));

                        // Enqueue the remaining data (will be the first frame queued)
                        spdySession.putPendingWrite(streamID, e);

                        ChannelFuture writeFuture = Channels.future(e.getChannel());

                        // The transfer window size is pre-decremented when sending a data frame downstream.
                        // Close the stream on write failures that leaves the transfer window in a corrupt state.
                        // NOTE(review): the listener is attached to the pending write's future, not to
                        // writeFuture, so a failure of the partial write itself does not trigger a
                        // stream error here — confirm this is intended.
                        final SocketAddress remoteAddress = e.getRemoteAddress();
                        final ChannelHandlerContext context = ctx;
                        e.getFuture().addListener(new ChannelFutureListener() {
                            public void operationComplete(ChannelFuture future) throws Exception {
                                if (!future.isSuccess()) {
                                    issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                                }
                            }
                        });

                        Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
                        return;

                    } else {
                        // Stream is stalled -- enqueue Data frame and return
                        spdySession.putPendingWrite(streamID, e);
                        return;
                    }
                }
            }

            // Close the local side of the stream if this is the last frame
            if (spdyDataFrame.isLast()) {
                halfCloseStream(streamID, false);
            }

        } else if (msg instanceof SpdySynStreamFrame) {

            SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
            int streamID = spdySynStreamFrame.getStreamID();

            // Locally initiated streams must use locally owned Stream-IDs.
            if (isRemoteInitiatedID(streamID)) {
                e.getFuture().setFailure(PROTOCOL_EXCEPTION);
                return;
            }

            byte priority = spdySynStreamFrame.getPriority();
            // For an outgoing SYN_STREAM the closed-side flags are mirrored
            // relative to the receive path: unidirectional closes the remote side.
            boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
            boolean localSideClosed = spdySynStreamFrame.isLast();
            if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
                e.getFuture().setFailure(PROTOCOL_EXCEPTION);
                return;
            }

        } else if (msg instanceof SpdySynReplyFrame) {

            SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
            int streamID = spdySynReplyFrame.getStreamID();

            // Frames must not be sent on half-closed streams
            if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
                e.getFuture().setFailure(PROTOCOL_EXCEPTION);
                return;
            }

            // Close the local side of the stream if this is the last frame
            if (spdySynReplyFrame.isLast()) {
                halfCloseStream(streamID, false);
            }

        } else if (msg instanceof SpdyRstStreamFrame) {

            SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
            removeStream(spdyRstStreamFrame.getStreamID());

        } else if (msg instanceof SpdySettingsFrame) {

            SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;

            int newConcurrentStreams =
                spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
            if (newConcurrentStreams >= 0) {
                // false: this value is advertised by the local endpoint
                updateConcurrentStreams(newConcurrentStreams, false);
            }

            // Persistence flags are inconsistent with the use of SETTINGS to communicate
            // the initial window size. Remove flags from the sender requesting that the
            // value be persisted. Remove values that the sender indicates are persisted.
            if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
                spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            }
            spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);

            if (flowControl) {
                int newInitialWindowSize =
                    spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
                if (newInitialWindowSize >= 0) {
                    updateInitialReceiveWindowSize(newInitialWindowSize);
                }
            }

        } else if (msg instanceof SpdyPingFrame) {

            SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
            // Locally initiated PINGs must use locally owned IDs.
            if (isRemoteInitiatedID(spdyPingFrame.getID())) {
                e.getFuture().setFailure(new IllegalArgumentException(
                            "invalid PING ID: " + spdyPingFrame.getID()));
                return;
            }
            pings.getAndIncrement();

        } else if (msg instanceof SpdyGoAwayFrame) {

            // Why is this being sent? Intercept it and fail the write.
            // Should have sent a CLOSE ChannelStateEvent
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;

        } else if (msg instanceof SpdyHeadersFrame) {

            SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
            int streamID = spdyHeadersFrame.getStreamID();

            // Frames must not be sent on half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                e.getFuture().setFailure(PROTOCOL_EXCEPTION);
                return;
            }

            // Close the local side of the stream if this is the last frame
            if (spdyHeadersFrame.isLast()) {
                halfCloseStream(streamID, false);
            }

        } else if (msg instanceof SpdyWindowUpdateFrame) {

            // Why is this being sent? Intercept it and fail the write.
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }

        ctx.sendDownstream(evt);
    }

    /*
     * SPDY Session Error Handling:
     *
     * When a session error occurs, the endpoint encountering the error must first
     * send a GOAWAY frame with the Stream-ID of the most recently received stream
     * from the remote endpoint, and the error code for why the session is terminating.
     *
     * After sending the GOAWAY frame, the endpoint must close the TCP connection.
     */
    private void issueSessionError(
            ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {

        ChannelFuture future = sendGoAwayFrame(ctx, channel, remoteAddress, status);
        future.addListener(ChannelFutureListener.CLOSE);
    }

    /*
     * SPDY Stream Error Handling:
     *
     * Upon a stream error, the endpoint must send a RST_STREAM frame which contains
     * the Stream-ID for the stream where the error occurred and the error status which
     * caused the error.
     *
     * After sending the RST_STREAM, the stream is closed to the sending endpoint.
     *
     * Note: this is only called by the worker thread
     */
    private void issueStreamError(
            ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {

        // Capture before removeStream() so the upstream notification reflects
        // whether the remote side was still open when the error occurred.
        boolean fireMessageReceived = !spdySession.isRemoteSideClosed(streamID);
        removeStream(streamID);

        SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
        Channels.write(ctx, Channels.future(ctx.getChannel()), spdyRstStreamFrame, remoteAddress);
        if (fireMessageReceived) {
            Channels.fireMessageReceived(ctx, spdyRstStreamFrame, remoteAddress);
        }
    }

    /*
     * Helper functions
     */

    // A Stream-ID is remote-initiated when its owner (server vs. client, per
    // SpdyCodecUtil.isServerID) is the opposite of this endpoint's role.
    private boolean isRemoteInitiatedID(int ID) {
        boolean serverID = SpdyCodecUtil.isServerID(ID);
        return server && !serverID || !server && serverID;
    }

    // Recomputes maxConcurrentStreams as the minimum of the local and remote
    // limits, treating 0 as "no limit" on either side.
    private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
        if (remote) {
            remoteConcurrentStreams = newConcurrentStreams;
        } else {
            localConcurrentStreams = newConcurrentStreams;
        }
        if (localConcurrentStreams == remoteConcurrentStreams) {
            maxConcurrentStreams = localConcurrentStreams;
            return;
        }
        if (localConcurrentStreams == 0) {
            maxConcurrentStreams = remoteConcurrentStreams;
            return;
        }
        if (remoteConcurrentStreams == 0) {
            maxConcurrentStreams = localConcurrentStreams;
            return;
        }
        if (localConcurrentStreams > remoteConcurrentStreams) {
            maxConcurrentStreams = remoteConcurrentStreams;
        } else {
            maxConcurrentStreams = localConcurrentStreams;
        }
    }

    // need to synchronize to prevent new streams from being created while updating active streams
    private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
        int deltaWindowSize = newInitialWindowSize - initialSendWindowSize;
        initialSendWindowSize = newInitialWindowSize;
        // Apply the delta to every active stream's send window.
        for (Integer StreamID: spdySession.getActiveStreams()) {
            spdySession.updateSendWindowSize(StreamID.intValue(), deltaWindowSize);
        }
    }

    // need to synchronize to prevent new streams from being created while updating active streams
    private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
        int deltaWindowSize = newInitialWindowSize - initialReceiveWindowSize;
        initialReceiveWindowSize = newInitialWindowSize;
        spdySession.updateAllReceiveWindowSizes(deltaWindowSize);
    }

    // need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
    //
    // Registers a new stream with the session; returns false if the stream must
    // be refused (GOAWAY exchanged, or the concurrent-stream limit is reached).
    private synchronized boolean acceptStream(
            int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
        // Cannot initiate any new streams after receiving or sending GOAWAY
        if (receivedGoAwayFrame || sentGoAwayFrame) {
            return false;
        }
        int maxConcurrentStreams = this.maxConcurrentStreams; // read volatile once
        if (maxConcurrentStreams != 0 &&
           spdySession.numActiveStreams() >= maxConcurrentStreams) {
            return false;
        }
        spdySession.acceptStream(
                streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
        if (isRemoteInitiatedID(streamID)) {
            lastGoodStreamID = streamID;
        }
        return true;
    }

    // Closes one direction of a stream and, if a session close is pending and no
    // streams remain active, completes the close future.
    private void halfCloseStream(int streamID, boolean remote) {
        if (remote) {
            spdySession.closeRemoteSide(streamID);
        } else {
            spdySession.closeLocalSide(streamID);
        }
        if (closeSessionFuture != null && spdySession.noActiveStreams()) {
            closeSessionFuture.setSuccess();
        }
    }

    private void removeStream(int streamID) {
        spdySession.removeStream(streamID);
        if (closeSessionFuture != null && spdySession.noActiveStreams()) {
            closeSessionFuture.setSuccess();
        }
    }

    // Applies a WINDOW_UPDATE to a stream's send window and drains as many
    // pending (stalled) data frames as the new window allows, splitting the
    // last one into a partial frame if it does not fit entirely.
    private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
        synchronized (flowControlLock) {
            int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);

            while (newWindowSize > 0) {
                // Check if we have unblocked a stalled stream
                MessageEvent e = spdySession.getPendingWrite(streamID);
                if (e == null) {
                    break;
                }

                SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
                int dataFrameSize = spdyDataFrame.getData().readableBytes();

                if (newWindowSize >= dataFrameSize) {
                    // Window size is large enough to send entire data frame
                    spdySession.removePendingWrite(streamID);
                    newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);

                    // The transfer window size is pre-decremented when sending a data frame downstream.
                    // Close the stream on write failures that leaves the transfer window in a corrupt state.
                    final SocketAddress remoteAddress = e.getRemoteAddress();
                    final ChannelHandlerContext context = ctx;
                    e.getFuture().addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                            }
                        }
                    });

                    // Close the local side of the stream if this is the last frame
                    if (spdyDataFrame.isLast()) {
                        halfCloseStream(streamID, false);
                    }

                    Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
                } else {
                    // We can send a partial frame
                    spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);

                    // Create a partial data frame whose length is the current window size
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));

                    ChannelFuture writeFuture = Channels.future(e.getChannel());

                    // The transfer window size is pre-decremented when sending a data frame downstream.
                    // Close the stream on write failures that leaves the transfer window in a corrupt state.
                    // NOTE(review): as in handleDownstream, the error listener is attached to the
                    // pending write's future rather than writeFuture — confirm intended.
                    final SocketAddress remoteAddress = e.getRemoteAddress();
                    final ChannelHandlerContext context = ctx;
                    e.getFuture().addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                            }
                        }
                    });

                    Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);

                    // The window is exhausted; exit the drain loop.
                    newWindowSize = 0;
                }
            }
        }
    }

    // Handles a close/disconnect/unbind request: send GOAWAY first, then close
    // the channel once all streams have finished.
    private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
        // Avoid NotYetConnectedException
        if (!e.getChannel().isConnected()) {
            ctx.sendDownstream(e);
            return;
        }

        ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
        if (spdySession.noActiveStreams()) {
            future.addListener(new ClosingChannelFutureListener(ctx, e));
        } else {
            // Defer the close until the last active stream completes
            // (see halfCloseStream / removeStream).
            closeSessionFuture = Channels.future(e.getChannel());
            closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
        }
    }

    // Sends a GOAWAY frame at most once per session; subsequent calls return an
    // already-succeeded future.
    private synchronized ChannelFuture sendGoAwayFrame(
            ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
        if (!sentGoAwayFrame) {
            sentGoAwayFrame = true;
            SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamID, status);
            ChannelFuture future = Channels.future(channel);
            Channels.write(ctx, future, spdyGoAwayFrame, remoteAddress);
            return future;
        }
        return Channels.succeededFuture(channel);
    }

    // Forwards the original close/disconnect/unbind event once the GOAWAY write
    // (or session drain) completes; skips the close if the channel already closed.
    private static final class ClosingChannelFutureListener implements ChannelFutureListener {
        private final ChannelHandlerContext ctx;
        private final ChannelStateEvent e;

        ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
            this.ctx = ctx;
            this.e = e;
        }

        public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
            if (!(sentGoAwayFuture.getCause() instanceof ClosedChannelException)) {
                Channels.close(ctx, e.getFuture());
            } else {
                e.getFuture().setSuccess();
            }
        }
    }
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
implements ChannelDownstreamHandler {
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024; // 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
private final Object flowControlLock = new Object();
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
private final boolean flowControl;
/**
* Creates a new session handler.
*
* @param version the protocol version
* @param server {@code true} if and only if this session handler should
* handle the server endpoint of the connection.
* {@code false} if and only if this session handler should
* handle the client endpoint of the connection.
*/
public SpdySessionHandler(int version, boolean server) {
super();
if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
throw new IllegalArgumentException(
"unsupported version: " + version);
}
this.server = server;
this.flowControl = version >= 3;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which is not open
* and the endpoint has not sent a GOAWAY frame, it must issue a stream error
* with the error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the recipient must
* issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may close the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives a data frame after the stream is half-closed from the
* sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
*
* If an endpoint receives a data frame after the stream is closed, it must send
* a RST_STREAM frame with the status PROTOCOL_ERROR.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
if (!spdySession.isActiveStream(streamID)) {
if (streamID <= lastGoodStreamID) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
} else if (!sentGoAwayFrame) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame for a stream which is half-closed
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
*/
if (flowControl) {
// Update receive window size
int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
// Window size can become negative if we sent a SETTINGS frame that reduces the
// size of the transfer window after the peer has written data frames.
// The value is bounded by the length that SETTINGS frame decrease the window.
// This difference is stored for the session when writing the SETTINGS frame
// and is cleared once we send a WINDOW_UPDATE frame.
if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
// Window size became negative due to sender writing frame before receiving SETTINGS
// Send data frames upstream in initialReceiveWindowSize chunks
if (newWindowSize < 0) {
while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
}
}
// Send a WINDOW_UPDATE frame if less than half the window size remains
if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
deltaWindowSize = initialReceiveWindowSize - newWindowSize;
spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
SpdyWindowUpdateFrame spdyWindowUpdateFrame =
new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
Channels.write(
ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
}
}
// Close the remote side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
* any previously received SYN_STREAM, it must issue a session error with
* the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*
* The recipient can reject a stream by sending a stream error with the
* status code REFUSED_STREAM.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increasing
if (streamID <= lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
return;
}
// Try to accept the stream
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code STREAM_IN_USE.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
return;
}
spdySession.receivedReply(streamID);
// Close the remote side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send
* additional frames on that stream.
*
* An endpoint must not send a RST_STREAM in response to a RST_STREAM.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, true);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialSendWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not enforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Close the remote side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
/*
* SPDY WINDOW_UPDATE frame processing requirements:
*
* Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
* must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
*
* Sender should ignore all WINDOW_UPDATE frames associated with a stream
* after sending the last frame for the stream.
*/
if (flowControl) {
SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
int streamID = spdyWindowUpdateFrame.getStreamID();
int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
// Ignore frames for half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
return;
}
// Check for numerical overflow
if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
updateSendWindowSize(ctx, streamID, deltaWindowSize);
}
return;
}
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
        throws Exception {
    // A SPDY protocol violation is a session error: send GOAWAY and close.
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
/**
 * Intercepts every downstream (outbound) event, enforcing SPDY stream state
 * and per-stream flow control before frames reach the encoder.
 *
 * A channel close/disconnect/unbind is translated into a GOAWAY frame first;
 * outbound data frames are trimmed or queued to respect the send window.
 */
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
// A FALSE/null value on OPEN/CONNECTED/BOUND means "close/disconnect/unbind".
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
// Non-message events pass through untouched.
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Sender must not send a data frame with data length greater
* than the transfer window size.
*
* After sending each data frame, the sender decrements its
* transfer window size by the amount of data transmitted.
*
* When the window size becomes less than or equal to 0, the
* sender must pause transmitting data frames.
*/
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Only the partial frame is written now; the rest waits for WINDOW_UPDATE.
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Locally initiated streams must use our endpoint's ID parity.
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
// Note: closed-side flags are mirrored relative to the inbound case.
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
// Outbound SETTINGS changes the window applied to streams the peer opens.
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
// We may only initiate PINGs with our own ID parity.
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
/*
 * SPDY Session Error Handling:
 *
 * When a session error occurs, the endpoint encountering the error must first
 * send a GOAWAY frame with the Stream-ID of the most recently received stream
 * from the remote endpoint, and the error code for why the session is terminating.
 *
 * After sending the GOAWAY frame, the endpoint must close the TCP connection.
 */
private void issueSessionError(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Close the connection once the GOAWAY frame has been written out.
    sendGoAwayFrame(ctx, channel, remoteAddress, status).addListener(ChannelFutureListener.CLOSE);
}
/*
 * SPDY Stream Error Handling:
 *
 * Upon a stream error, the endpoint must send a RST_STREAM frame which contains
 * the Stream-ID for the stream where the error occurred and the error status which
 * caused the error.
 *
 * After sending the RST_STREAM, the stream is closed to the sending endpoint.
 *
 * Note: this is only called by the worker thread
 */
private void issueStreamError(
        ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Capture the remote side's state *before* tearing the stream down, so the
    // RST_STREAM is only surfaced upstream when the remote side was still open.
    boolean notifyUpstream = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);
    SpdyRstStreamFrame rstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(ctx.getChannel()), rstStreamFrame, remoteAddress);
    if (notifyUpstream) {
        Channels.fireMessageReceived(ctx, rstStreamFrame, remoteAddress);
    }
}
/*
 * Helper functions
 */
private boolean isRemoteInitiatedID(int ID) {
    // Servers use even Stream-IDs and clients odd ones, so an ID is
    // remote-initiated exactly when its parity differs from our endpoint type.
    return server != SpdyCodecUtil.isServerID(ID);
}
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // Effective limit: 0 means "unlimited" on either side, so when one side is
    // unlimited the other side's value wins; otherwise the smaller value wins.
    if (localConcurrentStreams == remoteConcurrentStreams) {
        maxConcurrentStreams = localConcurrentStreams;
    } else if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    // Apply the change retroactively to every active stream's send window.
    for (int activeStreamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(activeStreamID, delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    // The session applies the delta to every active stream's receive window.
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
private synchronized boolean acceptStream(
        int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    int limit = this.maxConcurrentStreams; // read volatile once; 0 == unlimited
    if (limit != 0 && spdySession.numActiveStreams() >= limit) {
        return false;
    }
    spdySession.acceptStream(
        streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
    // Track the highest remote-initiated ID for use in GOAWAY frames.
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
private void halfCloseStream(int streamID, boolean remote) {
    // Close whichever side of the stream finished sending.
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    // If a graceful session close is pending and no streams remain active,
    // complete the close future now.
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
private void removeStream(int streamID) {
    // Fully tear down the stream's session state.
    spdySession.removeStream(streamID);
    // A pending graceful close completes once the last stream is gone.
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
/**
 * Grows the stream's send window by {@code deltaWindowSize} and drains as many
 * pending (stalled) data frames as the enlarged window allows, writing whole
 * frames when they fit and a partial slice when only part of a frame fits.
 */
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
// The pending write stays queued; only the slice is written now.
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
// Window is exhausted; exit the drain loop.
newWindowSize = 0;
}
}
}
}
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
    // Avoid NotYetConnectedException
    if (!e.getChannel().isConnected()) {
        ctx.sendDownstream(e);
        return;
    }
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
    if (spdySession.noActiveStreams()) {
        // No streams in flight: close as soon as the GOAWAY has been written.
        goAwayFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    } else {
        // Defer the close until all active streams have completed.
        closeSessionFuture = Channels.future(e.getChannel());
        closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    }
}
private synchronized ChannelFuture sendGoAwayFrame(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Only the first caller actually emits a GOAWAY; later calls are no-ops.
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture future = Channels.future(channel);
    Channels.write(ctx, future, new DefaultSpdyGoAwayFrame(lastGoodStreamID, status), remoteAddress);
    return future;
}
/**
 * Closes the channel once the GOAWAY write completes; if the write failed
 * because the channel was already closed, just completes the original future.
 */
private static final class ClosingChannelFutureListener implements ChannelFutureListener {
    private final ChannelHandlerContext ctx;
    private final ChannelStateEvent e;
    ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
        this.ctx = ctx;
        this.e = e;
    }
    public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
        if (sentGoAwayFuture.getCause() instanceof ClosedChannelException) {
            // The connection is already gone; nothing left to close.
            e.getFuture().setSuccess();
        } else {
            Channels.close(ctx, e.getFuture());
        }
    }
}
}
Right
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
// Shared sentinel used to fail writes that violate the SPDY protocol.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-connection stream state (active streams, half-closed sides, received replies).
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated Stream-ID accepted so far.
private volatile int lastGoodStreamID;
// Concurrent-stream limits advertised by each side; 0 means unlimited.
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
// Count of PINGs we initiated that are still awaiting an echo.
private final AtomicInteger pings = new AtomicInteger();
// GOAWAY bookkeeping: once either flag is set, no new streams may be created.
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Completed when a pending graceful close has drained all active streams.
private volatile ChannelFuture closeSessionFuture;
// true if this handler manages the server endpoint of the connection.
private final boolean server;
/**
 * Creates a new session handler.
 *
 * @param server {@code true} if and only if this session handler should
 *        handle the server endpoint of the connection;
 *        {@code false} for the client endpoint
 */
public SpdySessionHandler(boolean server) {
    this.server = server;
}
/**
 * Outbound messages are queued; {@code flush} drains the queue through the
 * SPDY stream state machine before passing frames further downstream.
 */
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(
ChannelOutboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
/**
 * Inbound messages are queued; {@code inboundBufferUpdated} drains the queue
 * through the SPDY stream state machine before forwarding frames upstream.
 */
@Override
public ChannelBufferHolder<Object> newInboundBuffer(
ChannelInboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
        throws Exception {
    // Drain every queued inbound message through the SPDY state machine,
    // then let the next handler know its buffer may have grown.
    Queue<Object> in = ctx.in().messageBuffer();
    for (Object msg; (msg = in.poll()) != null;) {
        handleInboundMessage(ctx, msg);
    }
    ctx.fireInboundBufferUpdated();
}
/**
 * Validates a single inbound SPDY frame against the session's stream state,
 * issuing stream/session errors for protocol violations; frames that pass
 * validation are forwarded to the next inbound buffer.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which does not exist,
* it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the receiver should
* close the connection immediately.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may terminate the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives data on a stream which has already been torn down,
* it must ignore the data received after the teardown.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
if (spdySession.isRemoteSideClosed(streamID)) {
// After sending GOAWAY, late data frames are silently ignored.
if (!sentGoAwayFrame) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdyDataFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
* increasing, it must issue a session error with the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increasing
if (streamID < lastGoodStreamID) {
issueSessionError(ctx);
return;
}
// Try to accept the stream
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code PROTOCOL_ERROR.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
spdySession.receivedReply(streamID);
if (spdySynReplyFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send additional
* frames on that stream.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
/*
* Only concerned with MAX_CONCURRENT_STREAMS
*/
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
updateConcurrentStreams(spdySettingsFrame, true);
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
// Echo the peer's PING straight back; do not forward it upstream.
ctx.write(spdyPingFrame);
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not enforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// No new streams may be created once the peer has announced shutdown.
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
}
// Frame passed validation; hand it to the next inbound handler.
ctx.nextIn().messageBuffer().add(msg);
}
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
        final ChannelFuture future) throws Exception {
    // Per the SPDY spec, a GOAWAY frame must precede closing the transport;
    // disconnect only after the GOAWAY write completes.
    sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayFuture) throws Exception {
            ctx.disconnect(future);
        }
    });
}
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx,
        final ChannelFuture future) throws Exception {
    // Per the SPDY spec, a GOAWAY frame must precede closing the transport;
    // close only after the GOAWAY write completes.
    sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayFuture) throws Exception {
            ctx.close(future);
        }
    });
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx,
        ChannelFuture future) throws Exception {
    // Drain every queued outbound message through the SPDY state machine
    // before propagating the flush.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    for (Object msg; (msg = pending.poll()) != null;) {
        handleOutboundMessage(ctx, msg);
    }
    ctx.flush(future);
}
/**
 * Validates one outbound frame against the session state before it is moved
 * to the outbound buffer. Locally invalid frames are answered with a stream
 * error (RST_STREAM) or surfaced as an exception instead of being written.
 */
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
        throws Exception {
    if (msg instanceof SpdyDataFrame) {
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Frames must not be sent on a stream we already half-closed locally.
        if (spdySession.isLocalSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdyDataFrame.isLast()) {
            // Last data frame: close the local side of the stream.
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Locally initiated stream: unidirectional closes the remote side,
        // a last-frame flag closes the local side immediately.
        boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
        boolean localSideClosed = spdySynStreamFrame.isLast();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // A reply may only answer a remote-initiated stream that is still open.
        if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
            ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
            return;
        }
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        // Sending RST_STREAM tears down the stream on our side as well.
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        // remote=false: these are the limits we announce to the peer.
        updateConcurrentStreams(spdySettingsFrame, false);
    } else if (msg instanceof SpdyPingFrame) {
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        // We must not originate a PING whose ID parity belongs to the peer.
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            ctx.fireExceptionCaught(new IllegalArgumentException(
                    "invalid PING ID: " + spdyPingFrame.getID()));
            return;
        }
        // Track the outstanding PING so the inbound reply is recognized.
        pings.getAndIncrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        // Application code must close the channel instead of writing GOAWAY
        // directly; the handler sends GOAWAY itself on close/disconnect.
        // Should send a CLOSE ChannelStateEvent
        ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
        return;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        if (spdySession.isLocalSideClosed(streamID)) {
            ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
            return;
        }
    }
    // Frame passed all checks: queue it for the actual write.
    ctx.out().messageBuffer().add(msg);
}
/*
* Error Handling
*/
// Session-level error: send a GOAWAY frame (at most once) and close the
// connection as soon as the frame has been written.
private void issueSessionError(ChannelHandlerContext ctx) {
    sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
}
/**
 * Stream-level error: tears the stream out of the session bookkeeping and
 * answers the peer with a RST_STREAM frame carrying {@code status}.
 * Only invoked while processing inbound frames.
 */
private void issueStreamError(
        ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
    removeStream(streamID);
    ctx.write(new DefaultSpdyRstStreamFrame(streamID, status));
}
/*
* Helper functions
*/
// A stream ID belongs to the remote peer exactly when its server/client
// parity differs from our own endpoint type (logical XOR of the two flags).
private boolean isRemoteInitiatedID(int ID) {
    return server != SpdyCodecUtil.isServerID(ID);
}
/**
 * Records a newly announced MAX_CONCURRENT_STREAMS value ({@code remote}
 * selects which side announced it) and recomputes the effective limit:
 * the smaller of the two announced values, where 0 means "unlimited".
 * Synchronized so concurrent SETTINGS updates see a consistent triple.
 */
private synchronized void updateConcurrentStreams(SpdySettingsFrame settings, boolean remote) {
    int newConcurrentStreams = settings.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
/**
 * Attempts to register a new stream in the session. Returns {@code false}
 * when the stream must be refused. Synchronized because it reads
 * {@code sentGoAwayFrame} and writes {@code lastGoodStreamID}.
 */
private synchronized boolean acceptStream(
        int streamID, boolean remoteSideClosed, boolean localSideClosed) {
    // No new streams after a GOAWAY has been exchanged in either direction.
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Enforce the negotiated concurrency limit (0 means unlimited).
    boolean limited = maxConcurrentStreams != 0;
    if (limited && spdySession.numActiveStreams() >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
// Closes one direction of the given stream. When a session close is pending
// and this was the last active stream, completes the close future.
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
// Fully removes a stream from the session. When a session close is pending
// and this was the last active stream, completes the close future.
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
/**
 * Writes a GOAWAY frame at most once per session; subsequent calls return
 * an already-succeeded future without writing anything. Synchronized so the
 * sent flag is checked and set atomically.
 */
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Manages streams within a SPDY session.
 * <p>
 * Tracks per-stream open/half-closed state, enforces the negotiated
 * MAX_CONCURRENT_STREAMS limit, answers remote PING frames, and performs
 * the GOAWAY handshake when the channel is closed or disconnected.
 */
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {

    private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();

    // Per-stream session state (active streams, half-closed sides, replies).
    private final SpdySession spdySession = new SpdySession();
    // Highest remote-initiated stream ID accepted so far (echoed in GOAWAY).
    private volatile int lastGoodStreamID;

    // MAX_CONCURRENT_STREAMS announced by each side; 0 means unlimited.
    private volatile int remoteConcurrentStreams;
    private volatile int localConcurrentStreams;
    private volatile int maxConcurrentStreams;

    // Count of locally initiated PINGs still awaiting a reply.
    private final AtomicInteger pings = new AtomicInteger();

    private volatile boolean sentGoAwayFrame;
    private volatile boolean receivedGoAwayFrame;

    // Completed once all streams are gone; never assigned in this class,
    // so it is only relevant when set via a subclass or a later revision.
    private volatile ChannelFuture closeSessionFuture;

    // true if this endpoint is the server side of the connection.
    private final boolean server;

    /**
     * Creates a new session handler.
     *
     * @param server {@code true} if and only if this session handler should
     *        handle the server endpoint of the connection.
     *        {@code false} if and only if this session handler should
     *        handle the client endpoint of the connection.
     */
    public SpdySessionHandler(boolean server) {
        super();
        this.server = server;
    }

    @Override
    public ChannelBufferHolder<Object> newOutboundBuffer(
            ChannelOutboundHandlerContext<Object> ctx) throws Exception {
        return ChannelBufferHolders.messageBuffer();
    }

    @Override
    public ChannelBufferHolder<Object> newInboundBuffer(
            ChannelInboundHandlerContext<Object> ctx) throws Exception {
        return ChannelBufferHolders.messageBuffer();
    }

    /** Drains every buffered inbound frame through the session state machine. */
    @Override
    public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
            throws Exception {
        Queue<Object> in = ctx.in().messageBuffer();
        for (;;) {
            Object msg = in.poll();
            if (msg == null) {
                break;
            }
            handleInboundMessage(ctx, msg);
        }
        ctx.fireInboundBufferUpdated();
    }

    /**
     * Validates one inbound frame against the session state. Acceptable
     * frames are forwarded to the next inbound buffer; invalid ones are
     * answered with a stream error (RST_STREAM) or session error (GOAWAY).
     */
    private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
            throws Exception {
        if (msg instanceof SpdyDataFrame) {
            /*
             * SPDY Data frame processing requirements:
             *
             * If an endpoint receives a data frame for a Stream-ID which does not exist,
             * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
             *
             * If an endpoint which created the stream receives a data frame before receiving
             * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
             * close the connection immediately.
             *
             * If an endpoint receives multiple data frames for invalid Stream-IDs,
             * it may terminate the session.
             *
             * If an endpoint refuses a stream it must ignore any data frames for that stream.
             *
             * If an endpoint receives data on a stream which has already been torn down,
             * it must ignore the data received after the teardown.
             */
            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
            int streamID = spdyDataFrame.getStreamID();

            // Check if we received a data frame for a Stream-ID which is not open.
            // After we have sent a GOAWAY, such frames are silently ignored.
            if (spdySession.isRemoteSideClosed(streamID)) {
                if (!sentGoAwayFrame) {
                    issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
                }
                return;
            }

            // Check if we received a data frame before receiving a SYN_REPLY
            if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            if (spdyDataFrame.isLast()) {
                // Close remote side of stream
                halfCloseStream(streamID, true);
            }
        } else if (msg instanceof SpdySynStreamFrame) {
            /*
             * SPDY SYN_STREAM frame processing requirements:
             *
             * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
             * increasing, it must issue a session error with the status PROTOCOL_ERROR.
             *
             * If an endpoint receives multiple SYN_STREAM frames with the same active
             * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
             */
            SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
            int streamID = spdySynStreamFrame.getStreamID();

            // Check if we received a valid SYN_STREAM frame
            if (spdySynStreamFrame.isInvalid() ||
                !isRemoteInitiatedID(streamID) ||
                spdySession.isActiveStream(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            // Stream-IDs must be monotonically increasing.
            // FIX: was 'streamID < lastGoodStreamID'. Reusing the last accepted
            // stream ID is not an increase either; with '<' a no-longer-active
            // stream's ID could be accepted a second time.
            if (streamID <= lastGoodStreamID) {
                issueSessionError(ctx);
                return;
            }

            // Try to accept the stream
            boolean remoteSideClosed = spdySynStreamFrame.isLast();
            boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
            if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
                return;
            }
        } else if (msg instanceof SpdySynReplyFrame) {
            /*
             * SPDY SYN_REPLY frame processing requirements:
             *
             * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
             * it must issue a stream error with the status code PROTOCOL_ERROR.
             */
            SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
            int streamID = spdySynReplyFrame.getStreamID();

            // Check if we received a valid SYN_REPLY frame
            if (spdySynReplyFrame.isInvalid() ||
                isRemoteInitiatedID(streamID) ||
                spdySession.isRemoteSideClosed(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
                return;
            }

            // Check if we have received multiple frames for the same Stream-ID
            if (spdySession.hasReceivedReply(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            spdySession.receivedReply(streamID);
            if (spdySynReplyFrame.isLast()) {
                // Close remote side of stream
                halfCloseStream(streamID, true);
            }
        } else if (msg instanceof SpdyRstStreamFrame) {
            /*
             * SPDY RST_STREAM frame processing requirements:
             *
             * After receiving a RST_STREAM on a stream, the receiver must not send additional
             * frames on that stream.
             */
            SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
            removeStream(spdyRstStreamFrame.getStreamID());
        } else if (msg instanceof SpdySettingsFrame) {
            /*
             * Only concerned with MAX_CONCURRENT_STREAMS
             */
            SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
            updateConcurrentStreams(spdySettingsFrame, true);
        } else if (msg instanceof SpdyPingFrame) {
            /*
             * SPDY PING frame processing requirements:
             *
             * Receivers of a PING frame should send an identical frame to the sender
             * as soon as possible.
             *
             * Receivers of a PING frame must ignore frames that it did not initiate
             */
            SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
            if (isRemoteInitiatedID(spdyPingFrame.getID())) {
                // Echo the peer's PING back immediately.
                ctx.write(spdyPingFrame);
                return;
            }
            // Note: only checks that there are outstanding pings since uniqueness is not enforced
            if (pings.get() == 0) {
                return;
            }
            pings.getAndDecrement();
        } else if (msg instanceof SpdyGoAwayFrame) {
            receivedGoAwayFrame = true;
        } else if (msg instanceof SpdyHeadersFrame) {
            SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
            int streamID = spdyHeadersFrame.getStreamID();

            // Check if we received a valid HEADERS frame
            if (spdyHeadersFrame.isInvalid()) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }

            if (spdySession.isRemoteSideClosed(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
                return;
            }
        }
        // Frame passed all checks: hand it to the next inbound buffer.
        ctx.nextIn().messageBuffer().add(msg);
    }

    /** Sends the mandatory GOAWAY frame, then propagates the disconnect. */
    @Override
    public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
            final ChannelFuture future) throws Exception {
        sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f)
                    throws Exception {
                ctx.disconnect(future);
            }
        });
    }

    /** Sends the mandatory GOAWAY frame, then propagates the close. */
    @Override
    public void close(final ChannelOutboundHandlerContext<Object> ctx,
            final ChannelFuture future) throws Exception {
        sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f)
                    throws Exception {
                ctx.close(future);
            }
        });
    }

    /** Drains every queued outbound frame through the session state machine. */
    @Override
    public void flush(ChannelOutboundHandlerContext<Object> ctx,
            ChannelFuture future) throws Exception {
        Queue<Object> in = ctx.prevOut().messageBuffer();
        for (;;) {
            Object msg = in.poll();
            if (msg == null) {
                break;
            }
            handleOutboundMessage(ctx, msg);
        }
        ctx.flush(future);
    }

    /**
     * Validates one outbound frame against the session state before it is
     * moved to the outbound buffer. Locally invalid frames raise a stream
     * error or an exception instead of being written.
     */
    private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
            throws Exception {
        if (msg instanceof SpdyDataFrame) {
            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
            int streamID = spdyDataFrame.getStreamID();
            // Frames must not be sent on a locally half-closed stream.
            if (spdySession.isLocalSideClosed(streamID)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }
            if (spdyDataFrame.isLast()) {
                halfCloseStream(streamID, false);
            }
        } else if (msg instanceof SpdySynStreamFrame) {
            SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
            int streamID = spdySynStreamFrame.getStreamID();
            boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
            boolean localSideClosed = spdySynStreamFrame.isLast();
            if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
                return;
            }
        } else if (msg instanceof SpdySynReplyFrame) {
            SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
            int streamID = spdySynReplyFrame.getStreamID();
            // A reply may only answer a remote-initiated stream that is open.
            if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
                ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
                return;
            }
            if (spdySynReplyFrame.isLast()) {
                halfCloseStream(streamID, false);
            }
        } else if (msg instanceof SpdyRstStreamFrame) {
            SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
            removeStream(spdyRstStreamFrame.getStreamID());
        } else if (msg instanceof SpdySettingsFrame) {
            SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
            updateConcurrentStreams(spdySettingsFrame, false);
        } else if (msg instanceof SpdyPingFrame) {
            SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
            // We must not originate a PING whose ID parity belongs to the peer.
            if (isRemoteInitiatedID(spdyPingFrame.getID())) {
                ctx.fireExceptionCaught(new IllegalArgumentException(
                        "invalid PING ID: " + spdyPingFrame.getID()));
                return;
            }
            pings.getAndIncrement();
        } else if (msg instanceof SpdyGoAwayFrame) {
            // Application code must close the channel instead of writing GOAWAY;
            // this handler sends GOAWAY itself on close/disconnect.
            // Should send a CLOSE ChannelStateEvent
            ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
            return;
        } else if (msg instanceof SpdyHeadersFrame) {
            SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
            int streamID = spdyHeadersFrame.getStreamID();
            if (spdySession.isLocalSideClosed(streamID)) {
                ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
                return;
            }
        }
        ctx.out().messageBuffer().add(msg);
    }

    /*
     * Error Handling
     */

    // Session-level error: send GOAWAY (at most once) and close the channel
    // once the frame has been written.
    private void issueSessionError(ChannelHandlerContext ctx) {
        sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
    }

    // Stream-level error: remove the stream and answer with RST_STREAM.
    // Only called in the inbound (upstream) direction.
    private void issueStreamError(
            ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
        removeStream(streamID);
        SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
        ctx.write(spdyRstStreamFrame);
    }

    /*
     * Helper functions
     */

    // A stream ID belongs to the remote peer exactly when its server/client
    // parity differs from our own endpoint type.
    private boolean isRemoteInitiatedID(int ID) {
        boolean serverID = SpdyCodecUtil.isServerID(ID);
        return server && !serverID || !server && serverID;
    }

    /**
     * Records a newly announced MAX_CONCURRENT_STREAMS value and recomputes
     * the effective limit: the smaller of the two announced values, where 0
     * means "unlimited". Synchronized for a consistent view of the triple.
     */
    private synchronized void updateConcurrentStreams(SpdySettingsFrame settings, boolean remote) {
        int newConcurrentStreams = settings.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (remote) {
            remoteConcurrentStreams = newConcurrentStreams;
        } else {
            localConcurrentStreams = newConcurrentStreams;
        }
        if (localConcurrentStreams == remoteConcurrentStreams) {
            maxConcurrentStreams = localConcurrentStreams;
            return;
        }
        if (localConcurrentStreams == 0) {
            maxConcurrentStreams = remoteConcurrentStreams;
            return;
        }
        if (remoteConcurrentStreams == 0) {
            maxConcurrentStreams = localConcurrentStreams;
            return;
        }
        if (localConcurrentStreams > remoteConcurrentStreams) {
            maxConcurrentStreams = remoteConcurrentStreams;
        } else {
            maxConcurrentStreams = localConcurrentStreams;
        }
    }

    // Need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID.
    private synchronized boolean acceptStream(
            int streamID, boolean remoteSideClosed, boolean localSideClosed) {
        // Cannot initiate any new streams after receiving or sending GOAWAY
        if (receivedGoAwayFrame || sentGoAwayFrame) {
            return false;
        }
        // Enforce the negotiated concurrency limit (0 means unlimited).
        if (maxConcurrentStreams != 0 &&
            spdySession.numActiveStreams() >= maxConcurrentStreams) {
            return false;
        }
        spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
        if (isRemoteInitiatedID(streamID)) {
            lastGoodStreamID = streamID;
        }
        return true;
    }

    // Closes one direction of the stream; completes the pending session-close
    // future when the last active stream is gone.
    private void halfCloseStream(int streamID, boolean remote) {
        if (remote) {
            spdySession.closeRemoteSide(streamID);
        } else {
            spdySession.closeLocalSide(streamID);
        }
        if (closeSessionFuture != null && spdySession.noActiveStreams()) {
            closeSessionFuture.setSuccess();
        }
    }

    // Fully removes a stream; completes the pending session-close future when
    // the last active stream is gone.
    private void removeStream(int streamID) {
        spdySession.removeStream(streamID);
        if (closeSessionFuture != null && spdySession.noActiveStreams()) {
            closeSessionFuture.setSuccess();
        }
    }

    /**
     * Writes a GOAWAY frame at most once per session; subsequent calls return
     * an already-succeeded future. Synchronized so the sent flag is checked
     * and set atomically.
     */
    private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
        if (!sentGoAwayFrame) {
            sentGoAwayFrame = true;
            return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
        }
        return ctx.newSucceededFuture();
    }
}
// ===== MergeMethods variant below: merge-conflict artifact, not compilable as-is =====
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
// 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
private final Object flowControlLock = new Object();
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
private final boolean flowControl;
/**
 * Creates a new session handler.
 *
 * @param version the protocol version
 * @param server  {@code true} if and only if this session handler should
 *                handle the server endpoint of the connection;
 *                {@code false} for the client endpoint
 * @throws IllegalArgumentException if {@code version} is outside the
 *         supported SPDY version range
 */
public SpdySessionHandler(int version, boolean server) {
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException("unsupported version: " + version);
    }
    this.server = server;
    // Per-stream flow control was introduced in SPDY/3.
    flowControl = version >= 3;
}
/**
 * Validates one inbound frame against the session state, including the
 * SPDY/3 flow-control accounting, and forwards acceptable frames upstream.
 *
 * NOTE(review): this method is a merge artifact and does not compile as-is:
 * it redeclares the {@code msg} parameter, references an undefined netty-3
 * {@code MessageEvent e}, and calls netty-3 {@code Channels} statics and
 * 4-argument error helpers whose definitions are not visible here. The
 * intended netty-4 equivalents must be confirmed before this variant is used.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg) throws Exception {
    // NOTE(review): duplicate declaration of 'msg' and undefined 'e' —
    // leftover from the netty-3 messageReceived(ctx, e) signature.
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which is not open
         * and the endpoint has not sent a GOAWAY frame, it must issue a stream error
         * with the error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the recipient must
         * issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may close the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives a data frame after the stream is half-closed from the
         * sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
         *
         * If an endpoint receives a data frame after the stream is closed, it must send
         * a RST_STREAM frame with the status PROTOCOL_ERROR.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (!spdySession.isActiveStream(streamID)) {
            if (streamID <= lastGoodStreamID) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            } else if (!sentGoAwayFrame) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame for a stream which is half-closed
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (flowControl) {
            // Update receive window size (debit the payload length).
            int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
            int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
            // The lower bound records a tolerated deficit after shrinking the
            // window, and is cleared once we send a WINDOW_UPDATE frame.
            if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            // Send data frames upstream in initialReceiveWindowSize chunks
            if (newWindowSize < 0) {
                while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
                    // NOTE(review): netty-3 Channels API — needs porting to this pipeline model.
                    Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
                }
            }
            // Send a WINDOW_UPDATE frame if less than half the window size remains
            if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
                deltaWindowSize = initialReceiveWindowSize - newWindowSize;
                spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
                SpdyWindowUpdateFrame spdyWindowUpdateFrame = new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
                // NOTE(review): netty-3 Channels API — needs porting to this pipeline model.
                Channels.write(ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
            }
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
         * any previously received SYN_STREAM, it must issue a session error with
         * the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         *
         * The recipient can reject a stream by sending a stream error with the
         * status code REFUSED_STREAM.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID <= lastGoodStreamID) {
            issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
            return;
        }
        // Try to accept the stream
        byte priority = spdySynStreamFrame.getPriority();
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code STREAM_IN_USE.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
            return;
        }
        spdySession.receivedReply(streamID);
        // Close the remote side of the stream if this is the last frame
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send
         * additional frames on that stream.
         *
         * An endpoint must not send a RST_STREAM in response to a RST_STREAM.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (newConcurrentStreams >= 0) {
            updateConcurrentStreams(newConcurrentStreams, true);
        }
        // The window-size setting must not have its persisted flag honored;
        // value be persisted. Remove values that the sender indicates are persisted.
        if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
            spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
        }
        spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
        if (flowControl) {
            int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            if (newInitialWindowSize >= 0) {
                updateInitialSendWindowSize(newInitialWindowSize);
            }
        }
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // NOTE(review): netty-3 Channels API — needs porting to this pipeline model.
            Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyHeadersFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyWindowUpdateFrame) {
        if (flowControl) {
            SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
            int streamID = spdyWindowUpdateFrame.getStreamID();
            int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
            // Ignore frames for half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                return;
            }
            // Check for numerical overflow
            if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            updateSendWindowSize(ctx, streamID, deltaWindowSize);
        }
        return;
    }
    // NOTE(review): netty-3 upcall — has no equivalent on ChannelHandlerAdapter here.
    super.messageReceived(ctx, e);
}
/**
 * Escalates any {@link SpdyProtocolException} raised in the pipeline to a
 * session error (GOAWAY with PROTOCOL_ERROR) before propagating the event.
 *
 * NOTE(review): merge artifact — uses the netty-3 {@code ExceptionEvent}
 * signature and a 4-argument {@code issueSessionError} overload whose
 * definition is not visible in this file; confirm the intended API.
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
    Throwable cause = e.getCause();
    if (cause instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
/**
 * Dispatches a single outbound SPDY frame, enforcing stream state and
 * (for SPDY/3+) send-side flow control before passing frames downstream.
 *
 * NOTE(review): the body still references the Netty 3 event model ("evt",
 * "MessageEvent", "Channels", "ctx.sendDownstream") while the new signature
 * takes a plain "msg", and "Object msg = e.getMessage();" re-declares the
 * parameter. This looks like a mid-port snapshot and cannot compile as-is;
 * confirm against the intended refactoring before relying on it.
 */
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg) throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch(e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
// NOTE(review): the error listener is attached to the original event's
// future, but the partial frame is written with the fresh writeFuture,
// whose failure is never observed here. Verify the listener should not
// be on writeFuture instead.
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Locally-created streams must use our own ID parity
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// This endpoint does not allow the initial-window-size setting to
// be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
// Outgoing pings must use our own ID parity; count them so that
// inbound ping replies can be matched against outstanding pings.
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
private void issueSessionError(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Send the GOAWAY and, once it has been flushed, close the connection.
    sendGoAwayFrame(ctx, channel, remoteAddress, status).addListener(ChannelFutureListener.CLOSE);
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
private void issueStreamError(ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Record whether the remote side was still open *before* tearing the
    // stream down; the RST_STREAM is only surfaced upstream in that case.
    boolean notifyUpstream = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);
    SpdyRstStreamFrame rstFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(ctx.getChannel()), rstFrame, remoteAddress);
    if (notifyUpstream) {
        Channels.fireMessageReceived(ctx, rstFrame, remoteAddress);
    }
}
@Override
// Outbound side buffers whole message objects; they are drained by flush()
// through handleOutboundMessage().
public ChannelBufferHolder<Object> newOutboundBuffer(ChannelOutboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
// Inbound side buffers whole message objects; they are drained by
// inboundBufferUpdated() through handleInboundMessage().
public ChannelBufferHolder<Object> newInboundBuffer(ChannelInboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Drain every buffered inbound frame through the SPDY state machine,
    // then let the next handler know its buffer may have new content.
    Queue<Object> inbound = ctx.in().messageBuffer();
    Object frame;
    while ((frame = inbound.poll()) != null) {
        handleInboundMessage(ctx, frame);
    }
    ctx.fireInboundBufferUpdated();
}
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // SPDY requires a GOAWAY before tearing down the transport; only
    // disconnect once the GOAWAY write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.disconnect(future);
        }
    });
}
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // SPDY requires a GOAWAY before closing the connection; defer the
    // actual close until the GOAWAY write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.close(future);
        }
    });
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx, ChannelFuture future) throws Exception {
    // Run every queued outbound frame through the SPDY state machine before
    // flushing whatever made it downstream.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object frame;
    while ((frame = pending.poll()) != null) {
        handleOutboundMessage(ctx, frame);
    }
    ctx.flush(future);
}
/*
* Error Handling
*/
// Session error handling (new-API variant): send a GOAWAY, then close the
// channel once the GOAWAY write completes.
private void issueSessionError(ChannelHandlerContext ctx) {
sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
// Stream error handling (new-API variant): tear the stream down locally and
// send a RST_STREAM with the given status to the remote endpoint.
// Unlike the 4-arg overload above, this one never fires the frame upstream.
private void issueStreamError(ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
removeStream(streamID);
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
ctx.write(spdyRstStreamFrame);
}
/*
* Helper functions
*/
// An ID was initiated by the remote peer exactly when its server/client
// parity differs from our own role (server IDs and client IDs use
// different parities per SpdyCodecUtil.isServerID).
private boolean isRemoteInitiatedID(int id) {
    return server != SpdyCodecUtil.isServerID(id);
}
// Records a new concurrent-stream limit (remote- or locally-announced) and
// recomputes the effective maximum: 0 means "no limit set", otherwise the
// smaller of the two announced limits wins.
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else if (localConcurrentStreams > remoteConcurrentStreams) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else {
        maxConcurrentStreams = localConcurrentStreams;
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
// Applies a newly-announced initial send window size: every active stream's
// send window is adjusted by the delta between the old and new defaults.
// synchronized to prevent new streams from being created while iterating.
// Fix: loop variable renamed StreamID -> streamID (Java lowerCamelCase) and
// the explicit Integer.intValue() unboxing replaced by auto-unboxing.
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    int deltaWindowSize = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (int streamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(streamID, deltaWindowSize);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
// Applies a newly-announced initial receive window size by shifting every
// stream's receive window by the delta (delegated to SpdySession).
// synchronized so no stream can be created mid-update.
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
int deltaWindowSize = newInitialWindowSize - initialReceiveWindowSize;
initialReceiveWindowSize = newInitialWindowSize;
spdySession.updateAllReceiveWindowSizes(deltaWindowSize);
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
// Attempts to register a new stream; returns false if the session is going
// away or the concurrent-stream limit has been reached. Remote-initiated
// stream IDs advance lastGoodStreamID (reported in GOAWAY frames).
// synchronized: guards sentGoAwayFrame and lastGoodStreamID.
private synchronized boolean acceptStream(int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        // No new streams after a GOAWAY has been sent or received.
        return false;
    }
    boolean limited = maxConcurrentStreams != 0
            && spdySession.numActiveStreams() >= maxConcurrentStreams;
    if (limited) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
// Closes one direction of a stream. If a session close is pending and this
// was the last active stream, completes the close future so the connection
// can finish shutting down.
private void halfCloseStream(int streamID, boolean remote) {
if (remote) {
spdySession.closeRemoteSide(streamID);
} else {
spdySession.closeLocalSide(streamID);
}
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
// Fully removes a stream from the session. If a session close is pending
// and this was the last active stream, completes the close future.
private void removeStream(int streamID) {
spdySession.removeStream(streamID);
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
// Grows the stream's send window by deltaWindowSize and drains as many
// queued (stalled) data frames as the enlarged window permits, sending
// partial frames when only part of a pending frame fits. All window
// bookkeeping happens under flowControlLock.
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
// NOTE(review): the failure listener is attached to the pending write's
// future rather than writeFuture (which carries the partial write), so a
// failed partial write is never observed here, and the same listener is
// re-added each time another partial chunk of this write is sent. Verify
// whether writeFuture.addListener(...) was intended.
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
newWindowSize = 0;
}
}
}
}
// Initiates an orderly session shutdown triggered by a close/disconnect
// ChannelStateEvent: sends GOAWAY(OK) and either forwards the original close
// event immediately (no active streams) or parks it behind closeSessionFuture
// until the last stream finishes (completed in halfCloseStream/removeStream).
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
// Writes a single GOAWAY frame carrying lastGoodStreamID and the given
// status. At most one GOAWAY is ever sent per session; repeat calls return
// an already-succeeded future. synchronized: guards sentGoAwayFrame.
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    SpdyGoAwayFrame goAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamID, status);
    ChannelFuture writeFuture = Channels.future(channel);
    Channels.write(ctx, writeFuture, goAwayFrame, remoteAddress);
    return writeFuture;
}
// New-API variant: writes a single GOAWAY frame carrying lastGoodStreamID.
// Repeat calls are no-ops that return a succeeded future.
// synchronized: guards sentGoAwayFrame.
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
// Shared exception instance used to fail writes that violate the protocol.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-connection stream state (open streams, windows, pending writes).
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated stream ID accepted so far; reported in GOAWAY.
private volatile int lastGoodStreamID;
// Concurrent-stream limits: announced by the remote peer, announced locally,
// and the effective maximum derived from both (see updateConcurrentStreams).
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
// 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
// Guards all send-window bookkeeping and pending-write draining.
private final Object flowControlLock = new Object();
// Number of locally-initiated PINGs awaiting a reply.
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Completed when the last active stream closes during session shutdown.
private volatile ChannelFuture closeSessionFuture;
// True when this handler plays the server endpoint of the connection.
private final boolean server;
// Flow control is only enforced for SPDY/3 and later (set in constructor).
private final boolean flowControl;
/**
* Creates a new session handler.
*
* @param version the protocol version
* @param server {@code true} if and only if this session handler should
* handle the server endpoint of the connection.
* {@code false} if and only if this session handler should
* handle the client endpoint of the connection.
*/
public SpdySessionHandler(int version, boolean server) {
    // (redundant explicit super() call removed -- it is implicit in Java)
    // Reject protocol versions outside the range supported by the codec.
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException("unsupported version: " + version);
    }
    this.server = server;
    // Flow control was introduced in SPDY/3; earlier versions never gate
    // data frames on window sizes.
    this.flowControl = version >= 3;
}
/*
 * NOTE(review): this comment block was an orphaned copy of the constructor's
 * javadoc (it documented a "server" parameter the following method does not
 * have). Replaced with a description of the method it actually precedes.
 *
 * handleInboundMessage: processes one inbound SPDY frame, enforcing the
 * session and stream rules for each frame type.
 */
/**
 * Dispatches a single inbound SPDY frame, enforcing stream liveness,
 * half-closed state, receive-side flow control, SETTINGS handling, PING
 * echo, and GOAWAY bookkeeping for each frame type.
 *
 * NOTE(review): the body still references the Netty 3 event model -- the
 * local "Object msg = e.getMessage();" re-declares the parameter, and "e",
 * "Channels" and "super.messageReceived" are not defined under the new
 * signature. Looks like a mid-port snapshot; it cannot compile as-is.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg) throws Exception {
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which is not open
* and the endpoint has not sent a GOAWAY frame, it must issue a stream error
* with the error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the recipient must
* issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may close the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives a data frame after the stream is half-closed from the
* sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
*
* If an endpoint receives a data frame after the stream is closed, it must send
* a RST_STREAM frame with the status PROTOCOL_ERROR.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
if (!spdySession.isActiveStream(streamID)) {
if (streamID <= lastGoodStreamID) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
} else if (!sentGoAwayFrame) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame for a stream which is half-closed
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (flowControl) {
// Update receive window size
int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
// The window may temporarily go negative (tracked against the lower
// bound below) and is cleared once we send a WINDOW_UPDATE frame.
if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
// Send data frames upstream in initialReceiveWindowSize chunks
if (newWindowSize < 0) {
while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
}
}
// Send a WINDOW_UPDATE frame if less than half the window size remains
if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
deltaWindowSize = initialReceiveWindowSize - newWindowSize;
spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
SpdyWindowUpdateFrame spdyWindowUpdateFrame = new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
Channels.write(ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
}
}
// Close the remote side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
* any previously received SYN_STREAM, it must issue a session error with
* the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*
* The recipient can reject a stream by sending a stream error with the
* status code REFUSED_STREAM.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increasing
if (streamID <= lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
return;
}
// Try to accept the stream
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code STREAM_IN_USE.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
return;
}
spdySession.receivedReply(streamID);
// Close the remote side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send
* additional frames on that stream.
*
* An endpoint must not send a RST_STREAM in response to a RST_STREAM.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, true);
}
// This endpoint does not allow the initial-window-size setting to
// be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialSendWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not enforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Remember the peer is going away: acceptStream refuses new streams.
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Close the remote side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
if (flowControl) {
SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
int streamID = spdyWindowUpdateFrame.getStreamID();
int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
// Ignore frames for half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
return;
}
// Check for numerical overflow
if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
updateSendWindowSize(ctx, streamID, deltaWindowSize);
}
return;
}
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
    // A SPDY protocol violation terminates the entire session: send GOAWAY
    // with PROTOCOL_ERROR and close. Other exceptions just propagate.
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg) throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch(e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
private void issueSessionError(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Announce the error with a GOAWAY frame, then close the connection
    // once that write completes.
    sendGoAwayFrame(ctx, channel, remoteAddress, status).addListener(ChannelFutureListener.CLOSE);
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
private void issueStreamError(ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Capture whether the remote side was still open BEFORE removing the
    // stream, because removeStream() discards that per-stream state.
    boolean fireMessageReceived = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);
    SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    // Tell the peer the stream is being reset.
    Channels.write(ctx, Channels.future(ctx.getChannel()), spdyRstStreamFrame, remoteAddress);
    // Also surface the RST_STREAM upstream so local handlers observe the reset,
    // but only if the remote side had not already closed the stream.
    if (fireMessageReceived) {
        Channels.fireMessageReceived(ctx, spdyRstStreamFrame, remoteAddress);
    }
}
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(ChannelOutboundHandlerContext<Object> ctx) throws Exception {
    // Outbound SPDY frames are queued as discrete messages.
    final ChannelBufferHolder<Object> holder = ChannelBufferHolders.messageBuffer();
    return holder;
}
@Override
public ChannelBufferHolder<Object> newInboundBuffer(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Inbound SPDY frames are queued as discrete messages.
    final ChannelBufferHolder<Object> holder = ChannelBufferHolders.messageBuffer();
    return holder;
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Drain every queued inbound message through the SPDY state machine,
    // then propagate the buffer-updated notification.
    final Queue<Object> inbound = ctx.in().messageBuffer();
    Object msg;
    while ((msg = inbound.poll()) != null) {
        handleInboundMessage(ctx, msg);
    }
    ctx.fireInboundBufferUpdated();
}
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // SPDY requires a GOAWAY frame before tearing down the transport;
    // only disconnect once that write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayDone) throws Exception {
            ctx.disconnect(future);
        }
    });
}
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // SPDY requires a GOAWAY frame before closing the connection;
    // only close once that write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayDone) throws Exception {
            ctx.close(future);
        }
    });
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx, ChannelFuture future) throws Exception {
    // Run every pending outbound message through the SPDY state machine
    // before flushing the remainder downstream.
    final Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object msg;
    while ((msg = pending.poll()) != null) {
        handleOutboundMessage(ctx, msg);
    }
    ctx.flush(future);
}
/*
* Error Handling
*/
private void issueSessionError(ChannelHandlerContext ctx) {
    // A session error ends the connection: GOAWAY first, then close.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
private void issueStreamError(ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
    // Drop all local state for the stream, then notify the peer.
    removeStream(streamID);
    ctx.write(new DefaultSpdyRstStreamFrame(streamID, status));
}
/*
* Helper functions
*/
/**
 * Returns {@code true} if the given stream/ping ID was allocated by the
 * remote endpoint: a server treats client-allocated IDs as remote, and a
 * client treats server-allocated IDs as remote.
 */
private boolean isRemoteInitiatedID(int ID) {
    // (server && !serverID) || (!server && serverID) is simply inequality (XOR).
    return server != SpdyCodecUtil.isServerID(ID);
}
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    // Record the new limit for whichever endpoint announced it.
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // The effective limit is the stricter of the two values, where 0 means
    // "no limit": if one side imposes no limit, the other side's value wins.
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else if (localConcurrentStreams > remoteConcurrentStreams) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else {
        maxConcurrentStreams = localConcurrentStreams;
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Apply the difference between old and new defaults to every live stream.
    final int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (Integer streamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(streamID.intValue(), delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    // Shift every stream's receive window by the change in the default size.
    final int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
// Returns true if the stream was registered, false if it must be refused.
// NOTE(review): the priority parameter is currently unused here — confirm
// whether it should influence acceptance or be recorded on the session.
private synchronized boolean acceptStream(int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Refuse the stream when the concurrent-stream limit is reached
    // (maxConcurrentStreams == 0 means unlimited).
    if (maxConcurrentStreams != 0 && spdySession.numActiveStreams() >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    // Track the highest remote stream ID accepted; it is echoed back in
    // GOAWAY frames as the "last good" stream.
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
private void halfCloseStream(int streamID, boolean remote) {
    // Close the requested direction of the stream.
    if (!remote) {
        spdySession.closeLocalSide(streamID);
    } else {
        spdySession.closeRemoteSide(streamID);
    }
    // If a graceful session shutdown is pending and this was the last
    // active stream, complete the close-session future now.
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    // Nothing more to do unless a graceful shutdown is pending.
    if (closeSessionFuture == null) {
        return;
    }
    // Complete the pending shutdown once the last stream is gone.
    if (spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
/**
 * Credits the stream's send window by {@code deltaWindowSize} and drains as
 * many pending (previously stalled) data frames as the new window allows,
 * sending a partial frame when only part of the next frame fits.
 * All window accounting is done under {@code flowControlLock}.
 */
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
    synchronized (flowControlLock) {
        int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
        while (newWindowSize > 0) {
            // Check if we have unblocked a stalled stream
            MessageEvent e = spdySession.getPendingWrite(streamID);
            if (e == null) {
                break;
            }
            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
            int dataFrameSize = spdyDataFrame.getData().readableBytes();
            if (newWindowSize >= dataFrameSize) {
                // Window size is large enough to send entire data frame
                spdySession.removePendingWrite(streamID);
                newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
                // The transfer window size is pre-decremented when sending a data frame downstream.
                // Close the stream on write failures that leaves the transfer window in a corrupt state.
                final SocketAddress remoteAddress = e.getRemoteAddress();
                final ChannelHandlerContext context = ctx;
                e.getFuture().addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (!future.isSuccess()) {
                            issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                        }
                    }
                });
                // Close the local side of the stream if this is the last frame
                if (spdyDataFrame.isLast()) {
                    halfCloseStream(streamID, false);
                }
                // Reuse the original event's future so the caller that queued
                // the frame observes the eventual write result.
                Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
            } else {
                // We can send a partial frame
                spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
                // Create a partial data frame whose length is the current window size
                SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
                ChannelFuture writeFuture = Channels.future(e.getChannel());
                // The transfer window size is pre-decremented when sending a data frame downstream.
                // Close the stream on write failures that leaves the transfer window in a corrupt state.
                final SocketAddress remoteAddress = e.getRemoteAddress();
                final ChannelHandlerContext context = ctx;
                e.getFuture().addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (!future.isSuccess()) {
                            issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                        }
                    }
                });
                Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
                // Window exhausted; the remainder stays queued for the next update.
                newWindowSize = 0;
            }
        }
    }
}
// Initiates a graceful session shutdown for a close/disconnect/unbind
// request: send GOAWAY, then close once all active streams have finished.
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
    // Avoid NotYetConnectedException
    if (!e.getChannel().isConnected()) {
        ctx.sendDownstream(e);
        return;
    }
    ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
    if (spdySession.noActiveStreams()) {
        // No streams left: close as soon as the GOAWAY write completes.
        future.addListener(new ClosingChannelFutureListener(ctx, e));
    } else {
        // Streams still active: defer the close until the last stream ends.
        // closeSessionFuture is completed by halfCloseStream()/removeStream().
        closeSessionFuture = Channels.future(e.getChannel());
        closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    }
}
// Writes at most one GOAWAY frame per session (guarded by sentGoAwayFrame);
// subsequent calls return an already-succeeded future without writing.
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    if (!sentGoAwayFrame) {
        sentGoAwayFrame = true;
        // Echo the last accepted remote stream ID so the peer knows which
        // streams were processed.
        SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamID, status);
        ChannelFuture future = Channels.future(channel);
        Channels.write(ctx, future, spdyGoAwayFrame, remoteAddress);
        return future;
    }
    return Channels.succeededFuture(channel);
}
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    // Only one GOAWAY is ever sent per session; later calls are no-ops.
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
/* KeepBothMethods */
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
// 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
private final Object flowControlLock = new Object();
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
private final boolean flowControl;
/**
* Creates a new session handler.
*
* @param version the protocol version
* @param server {@code true} if and only if this session handler should
* handle the server endpoint of the connection.
* {@code false} if and only if this session handler should
* handle the client endpoint of the connection.
*/
/**
 * Creates a new session handler.
 *
 * @param version the SPDY protocol version
 * @param server  {@code true} if and only if this session handler should
 *                handle the server endpoint of the connection;
 *                {@code false} for the client endpoint
 * @throws IllegalArgumentException if {@code version} is outside the
 *         supported range
 */
public SpdySessionHandler(int version, boolean server) {
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException("unsupported version: " + version);
    }
    this.server = server;
    // Per-stream flow control was introduced in SPDY/3.
    this.flowControl = version >= 3;
}
/**
 * Handles an upstream SPDY frame, enforcing the per-frame-type session and
 * stream rules of the SPDY protocol before passing the event further
 * upstream.
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which is not open
         * and the endpoint has not sent a GOAWAY frame, it must issue a stream error
         * with the error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the recipient must
         * issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may close the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives a data frame after the stream is half-closed from the
         * sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
         *
         * If an endpoint receives a data frame after the stream is closed, it must send
         * a RST_STREAM frame with the status PROTOCOL_ERROR.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (!spdySession.isActiveStream(streamID)) {
            // IDs at or below lastGoodStreamID were once open -> PROTOCOL_ERROR;
            // higher unknown IDs get INVALID_STREAM unless we already sent GOAWAY.
            if (streamID <= lastGoodStreamID) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            } else if (!sentGoAwayFrame) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame for a stream which is half-closed
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (flowControl) {
            // Update receive window size
            int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
            int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
            // The window may temporarily dip below zero by a bounded amount;
            // exceeding that lower bound is a flow-control violation.
            if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            // Send data frames upstream in initialReceiveWindowSize chunks
            // (readSlice advances the reader index, so this loop terminates).
            if (newWindowSize < 0) {
                while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
                    Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
                }
            }
            // Send a WINDOW_UPDATE frame if less than half the window size remains
            if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
                deltaWindowSize = initialReceiveWindowSize - newWindowSize;
                spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
                SpdyWindowUpdateFrame spdyWindowUpdateFrame = new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
                Channels.write(ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
            }
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
         * any previously received SYN_STREAM, it must issue a session error with
         * the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         *
         * The recipient can reject a stream by sending a stream error with the
         * status code REFUSED_STREAM.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID <= lastGoodStreamID) {
            issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
            return;
        }
        // Try to accept the stream
        byte priority = spdySynStreamFrame.getPriority();
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code STREAM_IN_USE.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
            return;
        }
        spdySession.receivedReply(streamID);
        // Close the remote side of the stream if this is the last frame
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send
         * additional frames on that stream.
         *
         * An endpoint must not send a RST_STREAM in response to a RST_STREAM.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (newConcurrentStreams >= 0) {
            // Limit announced by the remote endpoint.
            updateConcurrentStreams(newConcurrentStreams, true);
        }
        // This endpoint never asks that the initial window size
        // value be persisted. Remove values that the sender indicates are persisted.
        if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
            spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
        }
        spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
        if (flowControl) {
            int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            if (newInitialWindowSize >= 0) {
                // The remote's window-size setting governs how much we may send.
                updateInitialSendWindowSize(newInitialWindowSize);
            }
        }
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate.
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo the remote's ping straight back.
            Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        // No new streams may be initiated after this (see acceptStream()).
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyHeadersFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyWindowUpdateFrame) {
        if (flowControl) {
            SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
            int streamID = spdyWindowUpdateFrame.getStreamID();
            int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
            // Ignore frames for half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                return;
            }
            // Check for numerical overflow
            if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            updateSendWindowSize(ctx, streamID, deltaWindowSize);
        }
        // WINDOW_UPDATE frames are consumed here and never sent upstream.
        return;
    }
    super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
    // A SPDY protocol violation is a session error: send GOAWAY and close.
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
/**
 * Applies SPDY frame rules to outbound (downstream) events before forwarding
 * them: turns close/disconnect/unbind requests into a graceful GOAWAY,
 * rejects writes on half-closed streams, and applies send-window flow
 * control to data frames.
 */
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt) throws Exception {
    if (evt instanceof ChannelStateEvent) {
        ChannelStateEvent e = (ChannelStateEvent) evt;
        switch (e.getState()) {
        case OPEN:
        case CONNECTED:
        case BOUND:
            /*
             * SPDY connection requirements:
             *
             * When either endpoint closes the transport-level connection,
             * it must first send a GOAWAY frame.
             */
            // A FALSE/null value means "close this channel".
            if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
                sendGoAwayFrame(ctx, e);
                return;
            }
        }
    }
    if (!(evt instanceof MessageEvent)) {
        ctx.sendDownstream(evt);
        return;
    }
    MessageEvent e = (MessageEvent) evt;
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        final int streamID = spdyDataFrame.getStreamID();
        // Frames must not be sent on half-closed streams
        if (spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        if (flowControl) {
            synchronized (flowControlLock) {
                int dataLength = spdyDataFrame.getData().readableBytes();
                int sendWindowSize = spdySession.getSendWindowSize(streamID);
                if (sendWindowSize >= dataLength) {
                    // Window size is large enough to send entire data frame
                    spdySession.updateSendWindowSize(streamID, -1 * dataLength);
                    // The transfer window size is pre-decremented when sending a data frame downstream.
                    // Close the stream on write failures that leaves the transfer window in a corrupt state.
                    final SocketAddress remoteAddress = e.getRemoteAddress();
                    final ChannelHandlerContext context = ctx;
                    e.getFuture().addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                            }
                        }
                    });
                } else if (sendWindowSize > 0) {
                    // Stream is not stalled but we cannot send the entire frame
                    spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
                    // Create a partial data frame whose length is the current window size
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
                    // Enqueue the remaining data (will be the first frame queued)
                    spdySession.putPendingWrite(streamID, e);
                    ChannelFuture writeFuture = Channels.future(e.getChannel());
                    // The transfer window size is pre-decremented when sending a data frame downstream.
                    // Close the stream on write failures that leaves the transfer window in a corrupt state.
                    final SocketAddress remoteAddress = e.getRemoteAddress();
                    final ChannelHandlerContext context = ctx;
                    e.getFuture().addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                            }
                        }
                    });
                    Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
                    return;
                } else {
                    // Stream is stalled -- enqueue Data frame and return
                    spdySession.putPendingWrite(streamID, e);
                    return;
                }
            }
        }
        // Close the local side of the stream if this is the last frame
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Locally-initiated streams must not use a remote-allocated stream ID.
        if (isRemoteInitiatedID(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        byte priority = spdySynStreamFrame.getPriority();
        boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
        boolean localSideClosed = spdySynStreamFrame.isLast();
        if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Frames must not be sent on half-closed streams
        if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        // Close the local side of the stream if this is the last frame
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (newConcurrentStreams >= 0) {
            // Limit announced by the local endpoint.
            updateConcurrentStreams(newConcurrentStreams, false);
        }
        // This endpoint never asks that the initial window size
        // value be persisted. Remove values that the sender indicates are persisted.
        if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
            spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
        }
        spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
        if (flowControl) {
            int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            if (newInitialWindowSize >= 0) {
                // The window size we announce governs how much we may receive.
                updateInitialReceiveWindowSize(newInitialWindowSize);
            }
        }
    } else if (msg instanceof SpdyPingFrame) {
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        // Locally-initiated pings must carry a locally-allocated ID.
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            e.getFuture().setFailure(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
            return;
        }
        pings.getAndIncrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        // Why is this being sent? Intercept it and fail the write.
        // Should have sent a CLOSE ChannelStateEvent
        e.getFuture().setFailure(PROTOCOL_EXCEPTION);
        return;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Frames must not be sent on half-closed streams
        if (spdySession.isLocalSideClosed(streamID)) {
            e.getFuture().setFailure(PROTOCOL_EXCEPTION);
            return;
        }
        // Close the local side of the stream if this is the last frame
        if (spdyHeadersFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdyWindowUpdateFrame) {
        // Why is this being sent? Intercept it and fail the write.
        e.getFuture().setFailure(PROTOCOL_EXCEPTION);
        return;
    }
    ctx.sendDownstream(evt);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
private void issueSessionError(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Announce the error with a GOAWAY frame, then close the connection
    // once that write completes.
    sendGoAwayFrame(ctx, channel, remoteAddress, status).addListener(ChannelFutureListener.CLOSE);
}
/*
 * SPDY Stream Error Handling:
 *
 * Upon a stream error, the endpoint must send a RST_STREAM frame which contains
 * the Stream-ID for the stream where the error occurred and the error status which
 * caused the error.
 *
 * After sending the RST_STREAM, the stream is closed to the sending endpoint.
 *
 * Note: this is only called by the worker thread
 */
private void issueStreamError(ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Capture whether the remote side was still open *before* tearing the stream
    // down, so we know whether to also surface the RST_STREAM upstream.
    boolean notifyUpstream = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);
    SpdyRstStreamFrame rstFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(ctx.getChannel()), rstFrame, remoteAddress);
    if (notifyUpstream) {
        Channels.fireMessageReceived(ctx, rstFrame, remoteAddress);
    }
}
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(ChannelOutboundHandlerContext<Object> ctx) throws Exception {
    // SPDY frames are processed as discrete messages, so the outbound side
    // buffers whole message objects rather than raw bytes.
    return ChannelBufferHolders.messageBuffer();
}
@Override
public ChannelBufferHolder<Object> newInboundBuffer(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Mirror of the outbound side: inbound SPDY frames are buffered as
    // discrete message objects.
    return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Drain every frame the decoder queued and run each through session management.
    Queue<Object> queue = ctx.in().messageBuffer();
    Object frame;
    while ((frame = queue.poll()) != null) {
        handleInboundMessage(ctx, frame);
    }
    ctx.fireInboundBufferUpdated();
}
/**
 * Applies SPDY session-management rules to one inbound frame. Frames that pass
 * validation are forwarded to the next inbound buffer; invalid frames trigger a
 * stream error (RST_STREAM) or session error (GOAWAY) and are dropped.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg) throws Exception {
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which does not exist,
         * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
         * close the connection immediately.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may terminate the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives data on a stream which has already been torn down,
         * it must ignore the data received after the teardown.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (spdySession.isRemoteSideClosed(streamID)) {
            // After sending GOAWAY we silently ignore data for unknown streams
            // instead of issuing further stream errors.
            if (!sentGoAwayFrame) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdyDataFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
         * increasing, it must issue a session error with the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        // NOTE(review): the older handler used `<=` here; with `<`, a reused
        // Stream-ID equal to lastGoodStreamID is not treated as a session
        // error — confirm this relaxation is intended.
        if (streamID < lastGoodStreamID) {
            issueSessionError(ctx);
            return;
        }
        // Try to accept the stream
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        spdySession.receivedReply(streamID);
        if (spdySynReplyFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send additional
         * frames on that stream.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        /*
         * Only concerned with MAX_CONCURRENT_STREAMS
         */
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        // NOTE(review): the only updateConcurrentStreams overload visible in
        // this file takes (int, boolean) — confirm an overload accepting a
        // SpdySettingsFrame exists elsewhere in the class.
        updateConcurrentStreams(spdySettingsFrame, true);
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo the peer-initiated PING straight back.
            ctx.write(spdyPingFrame);
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
    }
    // Frame survived validation: hand it to the next inbound buffer.
    ctx.nextIn().messageBuffer().add(msg);
}
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // A GOAWAY frame must precede the transport-level disconnect.
    ChannelFutureListener disconnectWhenDone = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.disconnect(future);
        }
    };
    sendGoAwayFrame(ctx).addListener(disconnectWhenDone);
}
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // A GOAWAY frame must precede closing the channel.
    ChannelFutureListener closeWhenDone = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.close(future);
        }
    };
    sendGoAwayFrame(ctx).addListener(closeWhenDone);
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx, ChannelFuture future) throws Exception {
    // Drain every frame queued by upstream handlers and run each through
    // session management before flushing downstream.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object frame;
    while ((frame = pending.poll()) != null) {
        handleOutboundMessage(ctx, frame);
    }
    ctx.flush(future);
}
/**
 * Applies SPDY session-management rules to one outbound frame. Frames that pass
 * validation are placed in the outbound message buffer; violations either issue
 * a stream error or surface a protocol exception to the pipeline.
 */
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg) throws Exception {
    if (msg instanceof SpdyDataFrame) {
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Frames must not be sent on half-closed streams.
        if (spdySession.isLocalSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Close the local side of the stream if this is the last frame.
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Note the inversion relative to inbound: for a locally-initiated
        // stream, unidirectional means the remote side is closed.
        boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
        boolean localSideClosed = spdySynStreamFrame.isLast();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // A reply may only be sent on a remote-initiated, locally-open stream.
        if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
            ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
            return;
        }
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, false);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        // NOTE(review): the only updateConcurrentStreams overload visible in
        // this file takes (int, boolean) — confirm an overload accepting a
        // SpdySettingsFrame exists elsewhere in the class.
        updateConcurrentStreams(spdySettingsFrame, false);
    } else if (msg instanceof SpdyPingFrame) {
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        // Locally-sent PINGs must carry a locally-initiated ID.
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            ctx.fireExceptionCaught(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
            return;
        }
        pings.getAndIncrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        // Should send a CLOSE ChannelStateEvent
        ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
        return;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Frames must not be sent on half-closed streams.
        if (spdySession.isLocalSideClosed(streamID)) {
            ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
            return;
        }
    }
    // Frame survived validation: queue it for the actual write.
    ctx.out().messageBuffer().add(msg);
}
/*
 * Error Handling
 */
// Send a GOAWAY frame describing the session failure, then close the channel
// once the write completes.
private void issueSessionError(ChannelHandlerContext ctx) {
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
private void issueStreamError(ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
    // Tear the stream down first so no further frames are accepted on it.
    removeStream(streamID);
    ctx.write(new DefaultSpdyRstStreamFrame(streamID, status));
}
/*
 * Helper functions
 */
private boolean isRemoteInitiatedID(int ID) {
    // An ID was initiated by the peer exactly when its server/client parity
    // differs from our own role (XOR of the two flags).
    return server != SpdyCodecUtil.isServerID(ID);
}
/**
 * Records the concurrent-stream limit announced by one side and recomputes the
 * effective session limit: a value of zero means "unlimited" for that side, so
 * the effective limit is the other side's value; otherwise it is the smaller
 * of the two.
 */
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Every active stream's send window shifts by the difference between the
    // new and the old initial window size.
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (int activeStreamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(activeStreamID, delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    // Shift every stream's receive window by the change in the initial size.
    int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
private synchronized boolean acceptStream(int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    // No new streams may be created once a GOAWAY has been sent or received.
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Snapshot the volatile limit once so the check and comparison agree.
    int limit = this.maxConcurrentStreams;
    if (limit != 0 && spdySession.numActiveStreams() >= limit) {
        return false;
    }
    spdySession.acceptStream(streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
private synchronized boolean acceptStream(int streamID, boolean remoteSideClosed, boolean localSideClosed) {
    // No new streams may be created once a GOAWAY has been sent or received.
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Snapshot the volatile limit once so the check and comparison agree.
    int limit = this.maxConcurrentStreams;
    if (limit != 0 && spdySession.numActiveStreams() >= limit) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
/**
 * Closes one side of a stream; if that leaves the session with no active
 * streams and a session close is pending, completes the close future.
 */
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    // Snapshot the volatile future; it is only ever set once (null -> future).
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
/**
 * Removes a stream entirely; if that leaves the session with no active
 * streams and a session close is pending, completes the close future.
 */
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    // Snapshot the volatile future; it is only ever set once (null -> future).
    ChannelFuture sessionClose = closeSessionFuture;
    if (sessionClose != null && spdySession.noActiveStreams()) {
        sessionClose.setSuccess();
    }
}
/**
 * Credits {@code deltaWindowSize} to a stream's send window and drains any
 * writes that were stalled waiting for window, sending whole frames while the
 * window allows and a partial frame when it does not.
 */
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
    synchronized (flowControlLock) {
        int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
        while (newWindowSize > 0) {
            // Check if we have unblocked a stalled stream
            MessageEvent e = spdySession.getPendingWrite(streamID);
            if (e == null) {
                break;
            }
            SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
            int dataFrameSize = spdyDataFrame.getData().readableBytes();
            if (newWindowSize >= dataFrameSize) {
                // Window size is large enough to send entire data frame
                spdySession.removePendingWrite(streamID);
                newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
                // The transfer window size is pre-decremented when sending a data frame downstream.
                // Close the stream on write failures that leaves the transfer window in a corrupt state.
                final SocketAddress remoteAddress = e.getRemoteAddress();
                final ChannelHandlerContext context = ctx;
                e.getFuture().addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (!future.isSuccess()) {
                            issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                        }
                    }
                });
                // Close the local side of the stream if this is the last frame
                if (spdyDataFrame.isLast()) {
                    halfCloseStream(streamID, false);
                }
                Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
            } else {
                // We can send a partial frame
                spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
                // Create a partial data frame whose length is the current window size
                SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
                // The remainder stays queued as the pending write; the original
                // event's future completes only when the full frame is sent.
                ChannelFuture writeFuture = Channels.future(e.getChannel());
                // The transfer window size is pre-decremented when sending a data frame downstream.
                // Close the stream on write failures that leaves the transfer window in a corrupt state.
                final SocketAddress remoteAddress = e.getRemoteAddress();
                final ChannelHandlerContext context = ctx;
                e.getFuture().addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (!future.isSuccess()) {
                            issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
                        }
                    }
                });
                Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
                // Window is exhausted; exit the drain loop.
                newWindowSize = 0;
            }
        }
    }
}
/**
 * Sends a GOAWAY frame as part of channel shutdown. The close/disconnect event
 * is propagated immediately if no streams are active, otherwise deferred until
 * the last active stream completes.
 */
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
    // Avoid NotYetConnectedException
    if (!e.getChannel().isConnected()) {
        ctx.sendDownstream(e);
        return;
    }
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
    if (spdySession.noActiveStreams()) {
        // Nothing in flight: propagate the shutdown as soon as the GOAWAY is written.
        goAwayFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    } else {
        // Streams still active: defer the shutdown until the last one finishes.
        closeSessionFuture = Channels.future(e.getChannel());
        closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
    }
}
/**
 * Writes a GOAWAY frame at most once per session; subsequent calls return an
 * already-succeeded future. Synchronized to guard sentGoAwayFrame and
 * lastGoodStreamID.
 */
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture writeFuture = Channels.future(channel);
    Channels.write(ctx, writeFuture, new DefaultSpdyGoAwayFrame(lastGoodStreamID, status), remoteAddress);
    return writeFuture;
}
/**
 * Writes a GOAWAY frame at most once per session; subsequent calls return an
 * already-succeeded future. Synchronized to guard sentGoAwayFrame and
 * lastGoodStreamID.
 */
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
// Shared sentinel exception for generic SPDY protocol violations.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-session bookkeeping of active streams and their half-close state.
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated Stream-ID accepted so far (reported in GOAWAY).
private volatile int lastGoodStreamID;
// Concurrent-stream limits announced by each side; 0 means no limit set.
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
// Effective session limit derived from the two values above.
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
// 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
// Guards the window-size drain logic in the flow-control paths.
private final Object flowControlLock = new Object();
// Number of locally-initiated PINGs still awaiting a reply.
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Set when a shutdown is deferred until all active streams finish.
private volatile ChannelFuture closeSessionFuture;
// True when this handler plays the server endpoint of the connection.
private final boolean server;
// True for SPDY/3+, which introduced per-stream flow control.
private final boolean flowControl;
/**
 * Creates a new session handler.
 *
 * @param version the protocol version
 * @param server  {@code true} if and only if this session handler should
 *                handle the server endpoint of the connection;
 *                {@code false} if and only if this session handler should
 *                handle the client endpoint of the connection
 * @throws IllegalArgumentException if {@code version} is outside the
 *                supported range
 */
public SpdySessionHandler(int version, boolean server) {
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException("unsupported version: " + version);
    }
    this.server = server;
    // Per-stream flow control was introduced in SPDY/3.
    this.flowControl = version >= 3;
}
/**
 * Applies SPDY session-management rules to one upstream (inbound) message
 * event. Valid frames are propagated to the next upstream handler; protocol
 * violations are answered with a RST_STREAM (stream error) or GOAWAY
 * (session error) and dropped.
 *
 * (NOTE(review): the original javadoc here duplicated the constructor's
 * "Creates a new session handler" text; rewritten to describe this method.)
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which is not open
         * and the endpoint has not sent a GOAWAY frame, it must issue a stream error
         * with the error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the recipient must
         * issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may close the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives a data frame after the stream is half-closed from the
         * sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
         *
         * If an endpoint receives a data frame after the stream is closed, it must send
         * a RST_STREAM frame with the status PROTOCOL_ERROR.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (!spdySession.isActiveStream(streamID)) {
            if (streamID <= lastGoodStreamID) {
                // Stream was previously accepted and has since been closed.
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            } else if (!sentGoAwayFrame) {
                // Stream was never opened (and we have not yet sent GOAWAY).
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame for a stream which is half-closed
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (flowControl) {
            // Update receive window size
            int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
            int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
            // The window may go negative when we shrink the initial size; the
            // deficit is cleared once we send a WINDOW_UPDATE frame. Falling
            // below the tracked lower bound means the sender overran its window.
            if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            // Send data frames upstream in initialReceiveWindowSize chunks
            if (newWindowSize < 0) {
                while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
                    Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
                }
            }
            // Send a WINDOW_UPDATE frame if less than half the window size remains
            if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
                deltaWindowSize = initialReceiveWindowSize - newWindowSize;
                spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
                SpdyWindowUpdateFrame spdyWindowUpdateFrame = new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
                Channels.write(ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
            }
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
         * any previously received SYN_STREAM, it must issue a session error with
         * the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         *
         * The recipient can reject a stream by sending a stream error with the
         * status code REFUSED_STREAM.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID <= lastGoodStreamID) {
            issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
            return;
        }
        // Try to accept the stream
        byte priority = spdySynStreamFrame.getPriority();
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code STREAM_IN_USE.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
            return;
        }
        spdySession.receivedReply(streamID);
        // Close the remote side of the stream if this is the last frame
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send
         * additional frames on that stream.
         *
         * An endpoint must not send a RST_STREAM in response to a RST_STREAM.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (newConcurrentStreams >= 0) {
            updateConcurrentStreams(newConcurrentStreams, true);
        }
        // This endpoint does not require the initial-window-size setting
        // value be persisted. Remove values that the sender indicates are persisted.
        if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
            spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
        }
        spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
        if (flowControl) {
            int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            if (newInitialWindowSize >= 0) {
                updateInitialSendWindowSize(newInitialWindowSize);
            }
        }
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo the peer-initiated PING straight back.
            Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyHeadersFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyWindowUpdateFrame) {
        if (flowControl) {
            SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
            int streamID = spdyWindowUpdateFrame.getStreamID();
            int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
            // Ignore frames for half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                return;
            }
            // Check for numerical overflow
            if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            updateSendWindowSize(ctx, streamID, deltaWindowSize);
        }
        // WINDOW_UPDATE frames are consumed here and never propagated upstream.
        return;
    }
    // Frame survived validation: propagate it to the next upstream handler.
    super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
    // A SPDY protocol violation terminates the whole session with GOAWAY + close.
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt) throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch(e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
private void issueSessionError(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Write the GOAWAY, then close the connection once that write completes.
    sendGoAwayFrame(ctx, channel, remoteAddress, status)
            .addListener(ChannelFutureListener.CLOSE);
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
private void issueStreamError(ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Capture whether the remote side was still open *before* tearing the stream
    // down, so we know whether to also surface the RST_STREAM upstream.
    boolean notifyUpstream = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);
    SpdyRstStreamFrame rstFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(ctx.getChannel()), rstFrame, remoteAddress);
    if (notifyUpstream) {
        Channels.fireMessageReceived(ctx, rstFrame, remoteAddress);
    }
}
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(ChannelOutboundHandlerContext<Object> ctx) throws Exception {
// Outbound traffic is handled frame-by-frame, so use a message buffer rather than a byte buffer.
return ChannelBufferHolders.messageBuffer();
}
@Override
public ChannelBufferHolder<Object> newInboundBuffer(ChannelInboundHandlerContext<Object> ctx) throws Exception {
// Inbound traffic is handled frame-by-frame, so use a message buffer rather than a byte buffer.
return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Drain every queued inbound message, applying SPDY session-state checks
    // to each one, then notify the next handler.
    Queue<Object> inbound = ctx.in().messageBuffer();
    Object msg;
    while ((msg = inbound.poll()) != null) {
        handleInboundMessage(ctx, msg);
    }
    ctx.fireInboundBufferUpdated();
}
/**
 * Validates a single inbound SPDY frame against the session/stream state and,
 * if it survives validation, forwards it into the next inbound buffer.
 * Invalid frames are answered with a RST_STREAM (stream error) or GOAWAY
 * (session error) and are not forwarded.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg) throws Exception {
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which does not exist,
         * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
         * close the connection immediately.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may terminate the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives data on a stream which has already been torn down,
         * it must ignore the data received after the teardown.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (spdySession.isRemoteSideClosed(streamID)) {
            if (!sentGoAwayFrame) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdyDataFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
         * increasing, it must issue a session error with the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() || !isRemoteInitiatedID(streamID) || spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing.
        // BUG FIX: use <= rather than < — a SYN_STREAM that reuses the most
        // recently accepted Stream-ID (e.g. one that was since removed) must
        // also be rejected as a session error.
        if (streamID <= lastGoodStreamID) {
            issueSessionError(ctx);
            return;
        }
        // Try to accept the stream
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() || isRemoteInitiatedID(streamID) || spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        spdySession.receivedReply(streamID);
        if (spdySynReplyFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send additional
         * frames on that stream.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        /*
         * Only concerned with MAX_CONCURRENT_STREAMS
         */
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        updateConcurrentStreams(spdySettingsFrame, true);
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo a remote-initiated PING straight back and do not forward it.
            ctx.write(spdyPingFrame);
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
    }
    // Frame passed validation (or is of an unhandled type): forward it.
    ctx.nextIn().messageBuffer().add(msg);
}
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // Announce session shutdown with a GOAWAY frame, then perform the
    // actual disconnect once the GOAWAY write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.disconnect(future);
        }
    });
}
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx, final ChannelFuture future) throws Exception {
    // Announce session shutdown with a GOAWAY frame, then close the channel
    // once the GOAWAY write has completed.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.close(future);
        }
    });
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx, ChannelFuture future) throws Exception {
    // Validate and forward every queued outbound message, then flush downstream.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object msg;
    while ((msg = pending.poll()) != null) {
        handleOutboundMessage(ctx, msg);
    }
    ctx.flush(future);
}
// Validates a single outbound SPDY frame against the session/stream state and,
// if it survives validation, forwards it into the outbound buffer. Invalid
// frames trigger a stream error or an exceptionCaught event and are dropped.
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg) throws Exception {
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Data frames must not be sent on streams that are half-closed locally.
if (spdySession.isLocalSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// A last data frame half-closes our side of the stream.
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// On the sending side: unidirectional closes the remote side; last closes ours.
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// SYN_REPLY may only answer a remote-initiated stream that we have not closed.
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
// Sending RST_STREAM tears the stream down on our side too.
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
// remote = false: these SETTINGS describe our own (local) limits.
updateConcurrentStreams(spdySettingsFrame, false);
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
// We must not originate a PING with an ID from the remote endpoint's space.
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.fireExceptionCaught(new IllegalArgumentException("invalid PING ID: " + spdyPingFrame.getID()));
return;
}
// Track the outstanding PING so the matching echo can be recognized.
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Should send a CLOSE ChannelStateEvent
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// HEADERS must not be sent on streams that are half-closed locally.
if (spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
}
// Frame passed validation (or is of an unhandled type): forward it.
ctx.out().messageBuffer().add(msg);
}
/*
* Error Handling
*/
private void issueSessionError(ChannelHandlerContext ctx) {
    // Write a GOAWAY frame, then close the channel once the write completes.
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
private void issueStreamError(ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
    // Tear down local stream bookkeeping first, then tell the peer why.
    removeStream(streamID);
    ctx.write(new DefaultSpdyRstStreamFrame(streamID, status));
}
/*
* Helper functions
*/
/*
 * Returns whether the given stream/ping ID was initiated by the remote endpoint:
 * true exactly when the ID's "server-ness" differs from ours.
 */
private boolean isRemoteInitiatedID(int ID) {
    // (server && !serverID) || (!server && serverID) is simply XOR.
    return server != SpdyCodecUtil.isServerID(ID);
}
/*
 * Records a new concurrent-stream limit (remote or local) and recomputes the
 * effective maximum. A value of 0 means "no limit on that side", in which case
 * the other side's value wins; otherwise the smaller of the two applies.
 */
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // Snapshot the volatile fields once, then combine them.
    int local = localConcurrentStreams;
    int remoteLimit = remoteConcurrentStreams;
    if (local == 0) {
        maxConcurrentStreams = remoteLimit;
    } else if (remoteLimit == 0) {
        maxConcurrentStreams = local;
    } else {
        maxConcurrentStreams = Math.min(local, remoteLimit);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Apply the difference between the old and new initial sizes to every
    // currently active stream's send window.
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (Integer streamId : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(streamId.intValue(), delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    // Apply the difference between the old and new initial sizes to every
    // stream's receive window in one call.
    int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
// Attempts to register a new stream (with priority and window sizes).
// Returns false when the stream must be refused: after GOAWAY in either
// direction, or when the concurrent-stream limit is reached.
private synchronized boolean acceptStream(int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
// read volatile once
int maxConcurrentStreams = this.maxConcurrentStreams;
if (maxConcurrentStreams != 0 && spdySession.numActiveStreams() >= maxConcurrentStreams) {
return false;
}
spdySession.acceptStream(streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
// Track the highest remote-initiated stream ID accepted so far (used by GOAWAY).
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
/*
 * Attempts to register a new stream (without priority/window-size information).
 * Returns false when the stream must be refused: after GOAWAY in either
 * direction, or when the concurrent-stream limit is reached.
 */
private synchronized boolean acceptStream(int streamID, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Read the volatile once so both uses in the comparison see the same value
    // (consistent with the 4-argument acceptStream overload).
    int maxConcurrentStreams = this.maxConcurrentStreams;
    if (maxConcurrentStreams != 0 && spdySession.numActiveStreams() >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    // Track the highest remote-initiated stream ID accepted so far (used by GOAWAY).
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
private void halfCloseStream(int streamID, boolean remote) {
    // Close the requested side of the stream.
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    // If a session close is pending and no streams remain, complete it.
    ChannelFuture pendingClose = closeSessionFuture;
    if (pendingClose != null && spdySession.noActiveStreams()) {
        pendingClose.setSuccess();
    }
}
// Removes the stream from the session entirely and, if a session close is
// pending and no streams remain, completes the pending close future.
private void removeStream(int streamID) {
spdySession.removeStream(streamID);
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
// Credits deltaWindowSize to the stream's send window and drains as many
// pending (stalled) data frames as the new window allows, writing full frames
// where possible and a partial frame when only part of the window remains.
// Runs under flowControlLock to keep window accounting and the pending-write
// queue consistent.
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
// Note: the original event stays queued; readSlice consumed newWindowSize
// bytes, so the pending frame now holds only the unsent remainder.
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
// Window is exhausted; exit the drain loop.
newWindowSize = 0;
}
}
}
}
// Sends a GOAWAY frame in response to a close/disconnect ChannelStateEvent.
// If streams are still active, the event is deferred via closeSessionFuture
// until the last stream completes; otherwise it fires when the GOAWAY write
// completes.
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
// Defer the close until all active streams have finished.
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    // Only ever emit a single GOAWAY per session; later calls are no-ops.
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture future = Channels.future(channel);
    Channels.write(ctx, future, new DefaultSpdyGoAwayFrame(lastGoodStreamID, status), remoteAddress);
    return future;
}
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    // Only ever emit a single GOAWAY per session; later calls are no-ops.
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
Safe
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object>
{
// Shared exception instance used to fail writes that violate the protocol.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-connection stream state (open streams, half-closed sides, windows, pending writes).
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated stream ID accepted so far; reported in GOAWAY frames.
private volatile int lastGoodStreamID;
// Concurrent-stream limits: remote's advertised limit, our local limit, and the
// effective maximum derived from both (0 means unlimited).
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
// 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
// Guards flow-control window accounting and the pending-write queue.
private final Object flowControlLock = new Object();
// Count of locally-initiated PINGs still awaiting an echo from the peer.
private final AtomicInteger pings = new AtomicInteger();
// GOAWAY state: once either flag is set, no new streams may be created.
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Set when a close is deferred until all active streams finish; null otherwise.
private volatile ChannelFuture closeSessionFuture;
// True when this handler plays the server endpoint of the connection.
private final boolean server;
// True when session flow control is enabled (SPDY version >= 3, per the constructor).
private final boolean flowControl;
/**
* Creates a new session handler.
*
* @param version the protocol version
* @param server {@code true} if and only if this session handler should
* handle the server endpoint of the connection.
* {@code false} if and only if this session handler should
* handle the client endpoint of the connection.
*/
public SpdySessionHandler(int version, boolean server) {
    // (removed a redundant explicit super() call)
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException(
                "unsupported version: " + version);
    }
    this.server = server;
    // Session flow control applies from SPDY/3 onward.
    this.flowControl = version >= 3;
}
/**
 * Handles an inbound SPDY frame: enforces per-stream and session state rules
 * before forwarding the message upstream.
 *
 * NOTE(review): the previous Javadoc here was a copy-paste of the
 * constructor's documentation ("Creates a new session handler") and did not
 * describe this method.
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which is not open
* and the endpoint has not sent a GOAWAY frame, it must issue a stream error
* with the error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the recipient must
* issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may close the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives a data frame after the stream is half-closed from the
* sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
*
* If an endpoint receives a data frame after the stream is closed, it must send
* a RST_STREAM frame with the status PROTOCOL_ERROR.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
if (!spdySession.isActiveStream(streamID)) {
if (streamID <= lastGoodStreamID) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
} else if (!sentGoAwayFrame) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame for a stream which is half-closed
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
*/
if (flowControl) {
// Update receive window size
int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
// Window size can become negative if we sent a SETTINGS frame that reduces the
// size of the transfer window after the peer has written data frames.
// The value is bounded by the length that SETTINGS frame decrease the window.
// This difference is stored for the session when writing the SETTINGS frame
// and is cleared once we send a WINDOW_UPDATE frame.
if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
// Window size became negative due to sender writing frame before receiving SETTINGS
// Send data frames upstream in initialReceiveWindowSize chunks
if (newWindowSize < 0) {
while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
}
}
// Send a WINDOW_UPDATE frame if less than half the window size remains
if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
deltaWindowSize = initialReceiveWindowSize - newWindowSize;
spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
SpdyWindowUpdateFrame spdyWindowUpdateFrame =
new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
Channels.write(
ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
}
}
// Close the remote side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
* any previously received SYN_STREAM, it must issue a session error with
* the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*
* The recipient can reject a stream by sending a stream error with the
* status code REFUSED_STREAM.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increasing
if (streamID <= lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
return;
}
// Try to accept the stream
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code STREAM_IN_USE.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
return;
}
spdySession.receivedReply(streamID);
// Close the remote side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send
* additional frames on that stream.
*
* An endpoint must not send a RST_STREAM in response to a RST_STREAM.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, true);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialSendWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not enforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Close the remote side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
/*
* SPDY WINDOW_UPDATE frame processing requirements:
*
* Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
* must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
*
* Sender should ignore all WINDOW_UPDATE frames associated with a stream
* after sending the last frame for the stream.
*/
if (flowControl) {
SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
int streamID = spdyWindowUpdateFrame.getStreamID();
int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
// Ignore frames for half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
return;
}
// Check for numerical overflow
if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
updateSendWindowSize(ctx, streamID, deltaWindowSize);
}
return;
}
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
        throws Exception {
    // A SPDY protocol violation is unrecoverable for the whole connection:
    // issue a session error (GOAWAY then close) before propagating the event.
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
<<<<<<< MINE
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
updateConcurrentStreams(spdySettingsFrame, false);
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.fireExceptionCaught(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Should send a CLOSE ChannelStateEvent
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
}
ctx.out().messageBuffer().add(msg);
}
=======
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Sender must not send a data frame with data length greater
* than the transfer window size.
*
* After sending each data frame, the sender decrements its
* transfer window size by the amount of data transmitted.
*
* When the window size becomes less than or equal to 0, the
* sender must pause transmitting data frames.
*/
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
>>>>>>> YOURS
/*
 * SPDY Session Error Handling:
 *
 * When a session error occurs, the endpoint encountering the error must first
 * send a GOAWAY frame with the Stream-ID of the most recently received stream
 * from the remote endpoint, and the error code for why the session is terminating.
 *
 * After sending the GOAWAY frame, the endpoint must close the TCP connection.
 */
// Sends a GOAWAY frame carrying the given status and, once that write
// completes, closes the channel (ChannelFutureListener.CLOSE), per the rules
// quoted above. remoteAddress may be null (e.g. when called from exceptionCaught).
private void issueSessionError(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
ChannelFuture future = sendGoAwayFrame(ctx, channel, remoteAddress, status);
future.addListener(ChannelFutureListener.CLOSE);
}
/*
 * SPDY Stream Error Handling:
 *
 * Upon a stream error, the endpoint must send a RST_STREAM frame which contains
 * the Stream-ID for the stream where the error occurred and the error status which
 * caused the error.
 *
 * After sending the RST_STREAM, the stream is closed to the sending endpoint.
 *
 * Note: this is only called by the worker thread
 */
private void issueStreamError(
ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
// Capture the remote-side state BEFORE removeStream() discards the stream's
// bookkeeping; the RST_STREAM is surfaced upstream only if the remote side
// was still open when the error occurred.
boolean fireMessageReceived = !spdySession.isRemoteSideClosed(streamID);
removeStream(streamID);
// Tell the peer the stream is dead, then (possibly) notify local handlers.
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
Channels.write(ctx, Channels.future(ctx.getChannel()), spdyRstStreamFrame, remoteAddress);
if (fireMessageReceived) {
Channels.fireMessageReceived(ctx, spdyRstStreamFrame, remoteAddress);
}
}
// Outbound SPDY frames are buffered as discrete messages (not bytes);
// flush() drains this buffer through handleOutboundMessage().
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(
ChannelOutboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
// Inbound SPDY frames are buffered as discrete messages (not bytes);
// inboundBufferUpdated() drains this buffer through handleInboundMessage().
@Override
public ChannelBufferHolder<Object> newInboundBuffer(
ChannelInboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
        throws Exception {
    // Drain every queued inbound frame through the session-state checks,
    // then propagate the buffer-updated notification to the next handler.
    Queue<Object> inbound = ctx.in().messageBuffer();
    Object msg;
    while ((msg = inbound.poll()) != null) {
        handleInboundMessage(ctx, msg);
    }
    ctx.fireInboundBufferUpdated();
}
/**
 * Validates a single inbound SPDY frame against the session state and, if it
 * is acceptable, forwards it to the next handler's inbound message buffer.
 * Invalid frames trigger a stream error (RST_STREAM) or a session error
 * (GOAWAY + close) and are not forwarded.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
/*
 * SPDY Data frame processing requirements:
 *
 * If an endpoint receives a data frame for a Stream-ID which does not exist,
 * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
 *
 * If an endpoint which created the stream receives a data frame before receiving
 * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
 * close the connection immediately.
 *
 * If an endpoint receives multiple data frames for invalid Stream-IDs,
 * it may terminate the session.
 *
 * If an endpoint refuses a stream it must ignore any data frames for that stream.
 *
 * If an endpoint receives data on a stream which has already been torn down,
 * it must ignore the data received after the teardown.
 */
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open.
// Once we have sent a GOAWAY, late data frames are silently ignored.
if (spdySession.isRemoteSideClosed(streamID)) {
if (!sentGoAwayFrame) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
}
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdyDataFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
 * SPDY SYN_STREAM frame processing requirements:
 *
 * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
 * increasing, it must issue a session error with the status PROTOCOL_ERROR.
 *
 * If an endpoint receives multiple SYN_STREAM frames with the same active
 * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
 */
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame: it must be well-formed,
// remote-initiated, and not duplicate an already-active stream.
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
// Stream-IDs must be monotonically increasing
if (streamID < lastGoodStreamID) {
issueSessionError(ctx);
return;
}
// Try to accept the stream
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
 * SPDY SYN_REPLY frame processing requirements:
 *
 * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
 * it must issue a stream error with the status code PROTOCOL_ERROR.
 */
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame: it must be well-formed,
// answer a stream WE initiated, and the remote side must still be open.
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
spdySession.receivedReply(streamID);
if (spdySynReplyFrame.isLast()) {
// Close remote side of stream
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
 * SPDY RST_STREAM frame processing requirements:
 *
 * After receiving a RST_STREAM on a stream, the receiver must not send additional
 * frames on that stream.
 */
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
/*
 * Only concerned with MAX_CONCURRENT_STREAMS
 */
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
// NOTE(review): no (SpdySettingsFrame, boolean) overload of
// updateConcurrentStreams is visible in this file — confirm it exists.
updateConcurrentStreams(spdySettingsFrame, true);
} else if (msg instanceof SpdyPingFrame) {
/*
 * SPDY PING frame processing requirements:
 *
 * Receivers of a PING frame should send an identical frame to the sender
 * as soon as possible.
 *
 * Receivers of a PING frame must ignore frames that it did not initiate
 */
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
// Echo the peer's PING straight back; do not forward it upstream.
ctx.write(spdyPingFrame);
return;
}
// Note: only checks that there are outstanding pings since uniqueness is not enforced
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// No new streams may be accepted after this point (see acceptStream).
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
return;
}
}
// Frame passed all checks — hand it to the next inbound handler.
ctx.nextIn().messageBuffer().add(msg);
}
// Graceful disconnect: a GOAWAY frame is written first, and the actual
// disconnect is performed only after that write completes.
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
final ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
ctx.disconnect(future);
}
});
}
// Graceful close: a GOAWAY frame is written first, and the channel is closed
// only after that write completes.
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx,
final ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
ctx.close(future);
}
});
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx,
        ChannelFuture future) throws Exception {
    // Run every frame queued by the previous handler through the outbound
    // session-state checks before completing the flush downstream.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object frame;
    while ((frame = pending.poll()) != null) {
        handleOutboundMessage(ctx, frame);
    }
    ctx.flush(future);
}
/*
 * Error Handling
 */
// Session error (new-API variant): send a GOAWAY frame and close the channel
// once the write completes.
private void issueSessionError(ChannelHandlerContext ctx) {
sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
// The stream's bookkeeping is discarded before the RST_STREAM is written,
// so no further frames can be attributed to it.
private void issueStreamError(
ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
removeStream(streamID);
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
ctx.write(spdyRstStreamFrame);
}
/*
 * Helper functions
 */
// An ID is remote-initiated exactly when SpdyCodecUtil classifies it
// differently from this endpoint's own role — i.e. the XOR of the two flags.
private boolean isRemoteInitiatedID(int ID) {
    return server != SpdyCodecUtil.isServerID(ID);
}
// Records the concurrent-stream limit advertised by one side and recomputes
// the effective limit. A value of 0 acts as "no limit" for that side, so the
// effective limit is the smaller non-zero value, or 0 when neither side
// imposes one.
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Adjust every active stream's send window by the difference between the
    // new and the previous initial window size.
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (int activeStreamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(activeStreamID, delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
// Applies the delta between the new and old initial receive window size to
// every active stream's receive window in one call.
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
int deltaWindowSize = newInitialWindowSize - initialReceiveWindowSize;
initialReceiveWindowSize = newInitialWindowSize;
spdySession.updateAllReceiveWindowSizes(deltaWindowSize);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
// Attempts to open a new stream (with priority and flow-control windows).
// Returns false if a GOAWAY was sent/received or the concurrent-stream limit
// is reached; otherwise registers the stream and, for remote-initiated IDs,
// advances lastGoodStreamID (reported in any future GOAWAY).
private synchronized boolean acceptStream(
int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
int maxConcurrentStreams = this.maxConcurrentStreams; // read volatile once
if (maxConcurrentStreams != 0 &&
spdySession.numActiveStreams() >= maxConcurrentStreams) {
return false;
}
spdySession.acceptStream(
streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
// Overload without priority/window arguments; same acceptance rules as the
// full overload above: reject after GOAWAY or when the stream limit is hit.
private synchronized boolean acceptStream(
int streamID, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
if (maxConcurrentStreams != 0 &&
spdySession.numActiveStreams() >= maxConcurrentStreams) {
return false;
}
spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
// Closes one side (remote or local) of a stream. If a graceful session close
// is pending and this was the last active stream, completes the close future.
private void halfCloseStream(int streamID, boolean remote) {
if (remote) {
spdySession.closeRemoteSide(streamID);
} else {
spdySession.closeLocalSide(streamID);
}
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
// Fully tears down a stream. If a graceful session close is pending and this
// was the last active stream, completes the close future.
private void removeStream(int streamID) {
spdySession.removeStream(streamID);
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
/**
 * Credits a stream's send window with deltaWindowSize and drains as many
 * pending (flow-control-stalled) writes as the new window allows. Full
 * pending frames are dequeued and written; when the window only covers part
 * of a frame, a partial data frame is written and the remainder stays queued.
 *
 * NOTE(review): this method uses the legacy MessageEvent/Channels API while
 * the surrounding handler uses the new ChannelHandlerAdapter API — confirm
 * which API this file is meant to target after the merge.
 */
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
// Window exhausted — stop draining; the remainder stays queued.
newWindowSize = 0;
}
}
}
}
// Merge resolved in favor of the new ChannelHandlerAdapter API: the legacy
// sendGoAwayFrame(ChannelHandlerContext, ChannelStateEvent) overload was
// removed; graceful shutdown now goes through disconnect()/close(), which
// call sendGoAwayFrame(ctx) below.
// Writes a GOAWAY frame at most once per session; later calls simply return
// an already-succeeded future. Synchronized so only one caller can observe
// sentGoAwayFrame == false.
private synchronized ChannelFuture sendGoAwayFrame(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture future = Channels.future(channel);
    Channels.write(ctx, future, new DefaultSpdyGoAwayFrame(lastGoodStreamID, status), remoteAddress);
    return future;
}
// New-API variant: writes a GOAWAY frame at most once per session; later
// calls return an already-succeeded future.
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
if (!sentGoAwayFrame) {
sentGoAwayFrame = true;
return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
return ctx.newSucceededFuture();
}
}
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Manages streams within a SPDY session.
*/
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object>
{
// Singleton exception reused for every SPDY protocol violation.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-connection stream bookkeeping (open/half-closed state, windows, pending writes).
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated Stream-ID accepted so far; reported in GOAWAY frames.
private volatile int lastGoodStreamID;
// Concurrent-stream limits advertised by each side; 0 means "no limit".
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
// Effective limit derived from the two values above (see updateConcurrentStreams).
private volatile int maxConcurrentStreams;
// 64 KB default initial window size
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024;
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
// Guards flow-control window bookkeeping and the pending-write queues.
private final Object flowControlLock = new Object();
// Outstanding locally initiated PINGs awaiting a reply from the peer.
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Set when a graceful close is pending; completed once no streams remain active.
private volatile ChannelFuture closeSessionFuture;
// True when this handler plays the server endpoint of the connection.
private final boolean server;
// Flow control is enabled for SPDY version >= 3 (see constructor).
private final boolean flowControl;
/**
 * Creates a new session handler.
 *
 * @param version the SPDY protocol version; flow control is enabled for
 *        version 3 and above
 * @param server  {@code true} if and only if this session handler should
 *        handle the server endpoint of the connection,
 *        {@code false} if and only if this session handler should
 *        handle the client endpoint of the connection
 * @throws IllegalArgumentException if {@code version} is outside the
 *         supported SPDY version range
 */
public SpdySessionHandler(int version, boolean server) {
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException("unsupported version: " + version);
    }
    this.server = server;
    this.flowControl = version >= 3;
}
/**
 * Handles an upstream SPDY frame event, enforcing the SPDY session-state
 * requirements for each frame type before letting the event continue
 * upstream. (Previous javadoc here was a stale copy of the constructor's.)
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
        throws Exception {
    Object msg = e.getMessage();
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which is not open
         * and the endpoint has not sent a GOAWAY frame, it must issue a stream error
         * with the error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the recipient must
         * issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may close the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives a data frame after the stream is half-closed from the
         * sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
         *
         * If an endpoint receives a data frame after the stream is closed, it must send
         * a RST_STREAM frame with the status PROTOCOL_ERROR.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (!spdySession.isActiveStream(streamID)) {
            // A previously-seen (now closed) stream is a protocol error; an
            // unknown stream is INVALID_STREAM unless we already sent GOAWAY.
            if (streamID <= lastGoodStreamID) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            } else if (!sentGoAwayFrame) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame for a stream which is half-closed
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        /*
         * SPDY Data frame flow control processing requirements:
         *
         * Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
         */
        if (flowControl) {
            // Update receive window size (delta is negative: data consumed).
            int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
            int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
            // Window size can become negative if we sent a SETTINGS frame that reduces the
            // size of the transfer window after the peer has written data frames.
            // The value is bounded by the length that SETTINGS frame decrease the window.
            // This difference is stored for the session when writing the SETTINGS frame
            // and is cleared once we send a WINDOW_UPDATE frame.
            if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            // Window size became negative due to sender writing frame before receiving SETTINGS.
            // Send data frames upstream in initialReceiveWindowSize chunks.
            if (newWindowSize < 0) {
                while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
                    SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
                    partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
                    Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
                }
            }
            // Send a WINDOW_UPDATE frame if less than half the window size remains
            // (never in response to the last frame of a stream).
            if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
                deltaWindowSize = initialReceiveWindowSize - newWindowSize;
                spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
                SpdyWindowUpdateFrame spdyWindowUpdateFrame =
                        new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
                Channels.write(
                        ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
            }
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyDataFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
         * any previously received SYN_STREAM, it must issue a session error with
         * the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         *
         * The recipient can reject a stream by sending a stream error with the
         * status code REFUSED_STREAM.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() ||
            !isRemoteInitiatedID(streamID) ||
            spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID <= lastGoodStreamID) {
            issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
            return;
        }
        // Try to accept the stream; for a remote-initiated stream the last flag
        // closes the remote side and the unidirectional flag closes the local side.
        byte priority = spdySynStreamFrame.getPriority();
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code STREAM_IN_USE.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() ||
            isRemoteInitiatedID(streamID) ||
            spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
            return;
        }
        spdySession.receivedReply(streamID);
        // Close the remote side of the stream if this is the last frame
        if (spdySynReplyFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send
         * additional frames on that stream.
         *
         * An endpoint must not send a RST_STREAM in response to a RST_STREAM.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        int newConcurrentStreams =
                spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
        if (newConcurrentStreams >= 0) {
            updateConcurrentStreams(newConcurrentStreams, true);
        }
        // Persistence flags are inconsistent with the use of SETTINGS to communicate
        // the initial window size. Remove flags from the sender requesting that the
        // value be persisted. Remove values that the sender indicates are persisted.
        if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
            spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
        }
        spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
        if (flowControl) {
            int newInitialWindowSize =
                    spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
            if (newInitialWindowSize >= 0) {
                updateInitialSendWindowSize(newInitialWindowSize);
            }
        }
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate.
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo a remote-initiated PING straight back.
            Channels.write(ctx, Channels.future(e.getChannel()), spdyPingFrame, e.getRemoteAddress());
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Close the remote side of the stream if this is the last frame
        if (spdyHeadersFrame.isLast()) {
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyWindowUpdateFrame) {
        /*
         * SPDY WINDOW_UPDATE frame processing requirements:
         *
         * Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
         * must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
         *
         * Sender should ignore all WINDOW_UPDATE frames associated with a stream
         * after sending the last frame for the stream.
         */
        if (flowControl) {
            SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
            int streamID = spdyWindowUpdateFrame.getStreamID();
            int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
            // Ignore frames for half-closed streams
            if (spdySession.isLocalSideClosed(streamID)) {
                return;
            }
            // Check for numerical overflow
            if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
                issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
                return;
            }
            updateSendWindowSize(ctx, streamID, deltaWindowSize);
        }
        // WINDOW_UPDATE frames are consumed here and never propagated upstream.
        return;
    }
    super.messageReceived(ctx, e);
}
/**
 * Treats a {@link SpdyProtocolException} as an unrecoverable session error:
 * a GOAWAY frame is sent and the connection is closed. All exceptions are
 * still propagated to the next handler.
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
        throws Exception {
    if (e.getCause() instanceof SpdyProtocolException) {
        issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
    }
    super.exceptionCaught(ctx, e);
}
<<<<<<< MINE
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
updateConcurrentStreams(spdySettingsFrame, false);
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.fireExceptionCaught(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Should send a CLOSE ChannelStateEvent
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
if (spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
}
ctx.out().messageBuffer().add(msg);
}
=======
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
}
}
if (!(evt instanceof MessageEvent)) {
ctx.sendDownstream(evt);
return;
}
MessageEvent e = (MessageEvent) evt;
Object msg = e.getMessage();
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Sender must not send a data frame with data length greater
* than the transfer window size.
*
* After sending each data frame, the sender decrements its
* transfer window size by the amount of data transmitted.
*
* When the window size becomes less than or equal to 0, the
* sender must pause transmitting data frames.
*/
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
e.getFuture().setFailure(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.sendDownstream(evt);
}
>>>>>>> YOURS
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
// Legacy-API session error: write a GOAWAY frame, then close the TCP
// connection as soon as the write completes.
private void issueSessionError(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    sendGoAwayFrame(ctx, channel, remoteAddress, status)
            .addListener(ChannelFutureListener.CLOSE);
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
// Legacy-API stream error: tear down the stream, write a RST_STREAM with the
// given status, and — if the remote side was still open when the error was
// raised — also fire the RST_STREAM upstream so handlers above see it.
private void issueStreamError(
        ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
    // Capture remote-side state before removeStream() discards it.
    boolean notifyUpstream = !spdySession.isRemoteSideClosed(streamID);
    removeStream(streamID);

    SpdyRstStreamFrame rstFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    Channels.write(ctx, Channels.future(ctx.getChannel()), rstFrame, remoteAddress);
    if (notifyUpstream) {
        Channels.fireMessageReceived(ctx, rstFrame, remoteAddress);
    }
}
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(
        ChannelOutboundHandlerContext<Object> ctx) throws Exception {
    // Outbound frames are buffered as discrete messages (drained by flush()).
    return ChannelBufferHolders.messageBuffer();
}
@Override
public ChannelBufferHolder<Object> newInboundBuffer(
        ChannelInboundHandlerContext<Object> ctx) throws Exception {
    // Inbound frames are buffered as discrete messages (drained by inboundBufferUpdated()).
    return ChannelBufferHolders.messageBuffer();
}
/**
 * Drains every queued inbound message, runs each through session-state
 * validation, then notifies the next handler that its buffer was updated.
 */
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
        throws Exception {
    Queue<Object> inbound = ctx.in().messageBuffer();
    Object msg;
    while ((msg = inbound.poll()) != null) {
        handleInboundMessage(ctx, msg);
    }
    ctx.fireInboundBufferUpdated();
}
/**
 * Validates a single inbound SPDY frame against the session state and, when
 * acceptable, forwards it to the next handler's inbound buffer. Invalid
 * frames trigger a stream or session error and are not forwarded.
 */
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
        throws Exception {
    if (msg instanceof SpdyDataFrame) {
        /*
         * SPDY Data frame processing requirements:
         *
         * If an endpoint receives a data frame for a Stream-ID which does not exist,
         * it must return a RST_STREAM with error code INVALID_STREAM for the Stream-ID.
         *
         * If an endpoint which created the stream receives a data frame before receiving
         * a SYN_REPLY on that stream, it is a protocol error, and the receiver should
         * close the connection immediately.
         *
         * If an endpoint receives multiple data frames for invalid Stream-IDs,
         * it may terminate the session.
         *
         * If an endpoint refuses a stream it must ignore any data frames for that stream.
         *
         * If an endpoint receives data on a stream which has already been torn down,
         * it must ignore the data received after the teardown.
         */
        SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
        int streamID = spdyDataFrame.getStreamID();
        // Check if we received a data frame for a Stream-ID which is not open
        if (spdySession.isRemoteSideClosed(streamID)) {
            if (!sentGoAwayFrame) {
                issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            }
            return;
        }
        // Check if we received a data frame before receiving a SYN_REPLY
        if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdyDataFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdySynStreamFrame) {
        /*
         * SPDY SYN_STREAM frame processing requirements:
         *
         * If an endpoint receives a SYN_STREAM with a Stream-ID that is not monotonically
         * increasing, it must issue a session error with the status PROTOCOL_ERROR.
         *
         * If an endpoint receives multiple SYN_STREAM frames with the same active
         * Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
        int streamID = spdySynStreamFrame.getStreamID();
        // Check if we received a valid SYN_STREAM frame
        if (spdySynStreamFrame.isInvalid() ||
            !isRemoteInitiatedID(streamID) ||
            spdySession.isActiveStream(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        // Stream-IDs must be monotonically increasing
        if (streamID < lastGoodStreamID) {
            issueSessionError(ctx);
            return;
        }
        // Try to accept the stream; for a remote-initiated stream the last flag
        // closes the remote side and the unidirectional flag closes the local side.
        boolean remoteSideClosed = spdySynStreamFrame.isLast();
        boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
        if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
            return;
        }
    } else if (msg instanceof SpdySynReplyFrame) {
        /*
         * SPDY SYN_REPLY frame processing requirements:
         *
         * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
         * it must issue a stream error with the status code PROTOCOL_ERROR.
         */
        SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
        int streamID = spdySynReplyFrame.getStreamID();
        // Check if we received a valid SYN_REPLY frame
        if (spdySynReplyFrame.isInvalid() ||
            isRemoteInitiatedID(streamID) ||
            spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
        // Check if we have received multiple frames for the same Stream-ID
        if (spdySession.hasReceivedReply(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        spdySession.receivedReply(streamID);
        if (spdySynReplyFrame.isLast()) {
            // Close remote side of stream
            halfCloseStream(streamID, true);
        }
    } else if (msg instanceof SpdyRstStreamFrame) {
        /*
         * SPDY RST_STREAM frame processing requirements:
         *
         * After receiving a RST_STREAM on a stream, the receiver must not send additional
         * frames on that stream.
         */
        SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
        removeStream(spdyRstStreamFrame.getStreamID());
    } else if (msg instanceof SpdySettingsFrame) {
        /*
         * Only concerned with MAX_CONCURRENT_STREAMS
         */
        SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
        updateConcurrentStreams(spdySettingsFrame, true);
    } else if (msg instanceof SpdyPingFrame) {
        /*
         * SPDY PING frame processing requirements:
         *
         * Receivers of a PING frame should send an identical frame to the sender
         * as soon as possible.
         *
         * Receivers of a PING frame must ignore frames that it did not initiate.
         */
        SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
        if (isRemoteInitiatedID(spdyPingFrame.getID())) {
            // Echo the remote-initiated PING straight back; do not forward it.
            ctx.write(spdyPingFrame);
            return;
        }
        // Note: only checks that there are outstanding pings since uniqueness is not enforced
        if (pings.get() == 0) {
            return;
        }
        pings.getAndDecrement();
    } else if (msg instanceof SpdyGoAwayFrame) {
        receivedGoAwayFrame = true;
    } else if (msg instanceof SpdyHeadersFrame) {
        SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
        int streamID = spdyHeadersFrame.getStreamID();
        // Check if we received a valid HEADERS frame
        if (spdyHeadersFrame.isInvalid()) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
            return;
        }
        if (spdySession.isRemoteSideClosed(streamID)) {
            issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
            return;
        }
    }
    ctx.nextIn().messageBuffer().add(msg);
}
/**
 * Sends a GOAWAY frame before the transport-level disconnect, completing the
 * disconnect only after the GOAWAY write finishes.
 */
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
        final ChannelFuture future) throws Exception {
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayDone) throws Exception {
            ctx.disconnect(future);
        }
    });
}
/**
 * Sends a GOAWAY frame before closing the connection, completing the close
 * only after the GOAWAY write finishes.
 */
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx,
        final ChannelFuture future) throws Exception {
    ChannelFuture goAwayFuture = sendGoAwayFrame(ctx);
    goAwayFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture goAwayDone) throws Exception {
            ctx.close(future);
        }
    });
}
/**
 * Drains every pending outbound message through per-frame session-state
 * validation, then flushes downstream.
 */
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx,
        ChannelFuture future) throws Exception {
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object msg;
    while ((msg = pending.poll()) != null) {
        handleOutboundMessage(ctx, msg);
    }
    ctx.flush(future);
}
/*
* Error Handling
*/
// Session error (new-API path): send a GOAWAY frame, then close the
// connection once the write completes.
private void issueSessionError(ChannelHandlerContext ctx) {
    sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
}
// Send a RST_STREAM frame in response to an incoming MessageEvent
// Only called in the upstream direction
private void issueStreamError(
        ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
    // Tear the stream down before the RST_STREAM is written so no further
    // frames are accepted for it.
    removeStream(streamID);
    SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
    ctx.write(spdyRstStreamFrame);
}
/*
* Helper functions
*/
// An ID is remote-initiated exactly when its server/client classification
// (per SpdyCodecUtil.isServerID) differs from our own endpoint role — i.e.
// the XOR of the two flags.
private boolean isRemoteInitiatedID(int ID) {
    return server != SpdyCodecUtil.isServerID(ID);
}
/**
 * Records one side's SETTINGS_MAX_CONCURRENT_STREAMS value and recomputes the
 * effective session limit. A value of 0 means "no limit" (see acceptStream),
 * so it only prevails when both sides report 0; otherwise the stricter
 * (smaller) non-zero limit wins.
 */
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    if (localConcurrentStreams == 0) {
        maxConcurrentStreams = remoteConcurrentStreams;
    } else if (remoteConcurrentStreams == 0) {
        maxConcurrentStreams = localConcurrentStreams;
    } else {
        maxConcurrentStreams = Math.min(localConcurrentStreams, remoteConcurrentStreams);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Re-base every active stream's send window by the change in the initial size.
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (Integer activeStreamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(activeStreamID.intValue(), delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    // Re-base all receive windows by the change in the initial size.
    int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
private synchronized boolean acceptStream(
        int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    int maxConcurrentStreams = this.maxConcurrentStreams; // read volatile once
    // A limit of 0 means the number of concurrent streams is unbounded.
    if (maxConcurrentStreams != 0 &&
        spdySession.numActiveStreams() >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(
            streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
    // Track the highest remote-initiated Stream-ID for GOAWAY / validity checks.
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
// need to synchronize accesses to sentGoAwayFrame and lastGoodStreamID
/**
 * Attempts to register a new stream (variant without priority/window-size
 * parameters). Returns {@code false} when a GOAWAY frame has been sent or
 * received, or when the concurrent-stream limit is reached.
 */
private synchronized boolean acceptStream(
        int streamID, boolean remoteSideClosed, boolean localSideClosed) {
    // Cannot initiate any new streams after receiving or sending GOAWAY
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    // Snapshot the volatile once so both the zero-check and the comparison use
    // the same value (matches the sibling overload; the field can be updated
    // concurrently by updateConcurrentStreams, which is not synchronized).
    int maxConcurrentStreams = this.maxConcurrentStreams;
    // A limit of 0 means the number of concurrent streams is unbounded.
    if (maxConcurrentStreams != 0 &&
        spdySession.numActiveStreams() >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(streamID, remoteSideClosed, localSideClosed);
    // Track the highest remote-initiated Stream-ID for GOAWAY / validity checks.
    if (isRemoteInitiatedID(streamID)) {
        lastGoodStreamID = streamID;
    }
    return true;
}
/**
 * Closes one half of a stream (the remote side when {@code remote} is true,
 * otherwise the local side) and completes a pending session close once no
 * active streams remain.
 */
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
// Drops all session state for the stream; completes a pending session close
// once no active streams remain.
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    if (closeSessionFuture != null && spdySession.noActiveStreams()) {
        closeSessionFuture.setSuccess();
    }
}
// Credits the stream's send window by deltaWindowSize and drains as many queued
// (stalled) writes as the new window allows. All window bookkeeping and pending-write
// access runs under flowControlLock.
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
// NOTE(review): the failure listener is attached to e.getFuture() while the partial
// write below uses writeFuture -- confirm which future should carry the listener.
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// The remaining data stays queued as the pending write; the window is exhausted,
// so the drain loop exits.
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
newWindowSize = 0;
}
}
}
}
<<<<<<< MINE
=======
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
>>>>>>> YOURS
// Writes a GOAWAY frame carrying the given status exactly once per session; subsequent
// calls return an already-succeeded future.
private synchronized ChannelFuture sendGoAwayFrame(
        ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
    if (sentGoAwayFrame) {
        return Channels.succeededFuture(channel);
    }
    sentGoAwayFrame = true;
    ChannelFuture writeFuture = Channels.future(channel);
    Channels.write(ctx, writeFuture, new DefaultSpdyGoAwayFrame(lastGoodStreamID, status), remoteAddress);
    return writeFuture;
}
// Writes a GOAWAY frame exactly once per session; subsequent calls return an
// already-succeeded future.
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
    if (sentGoAwayFrame) {
        return ctx.newSucceededFuture();
    }
    sentGoAwayFrame = true;
    return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
}
}
Unstructured
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
<<<<<<< MINE
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
=======
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
>>>>>>> YOURS
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
<<<<<<< MINE
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
=======
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
>>>>>>> YOURS
/**
* Manages streams within a SPDY session.
*/
<<<<<<< MINE
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
implements ChannelDownstreamHandler {
=======
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
>>>>>>> YOURS
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
private final SpdySession spdySession = new SpdySession();
private volatile int lastGoodStreamID;
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024; // 64 KB default initial window size
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
private final Object flowControlLock = new Object();
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
private volatile ChannelFuture closeSessionFuture;
private final boolean server;
private final boolean flowControl;
/**
 * Creates a new session handler.
 *
 * @param version the protocol version
 * @param server {@code true} if and only if this session handler should
 *        handle the server endpoint of the connection.
 *        {@code false} if and only if this session handler should
 *        handle the client endpoint of the connection.
 * @throws IllegalArgumentException if {@code version} is outside the supported
 *         SPDY version range
 */
public SpdySessionHandler(int version, boolean server) {
    // (redundant explicit super() call removed -- the implicit call is identical)
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException(
                "unsupported version: " + version);
    }
    this.server = server;
    // Per-stream flow control exists from SPDY/3 onward.
    this.flowControl = version >= 3;
}
// Outbound frames are queued as messages so flush() can drain them through
// handleOutboundMessage before they reach the wire.
@Override
public ChannelBufferHolder<Object> newOutboundBuffer(
ChannelOutboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
// Inbound frames are queued as messages so inboundBufferUpdated() can drain them
// through handleInboundMessage.
@Override
public ChannelBufferHolder<Object> newInboundBuffer(
ChannelInboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
// Drains every queued inbound frame through the per-frame handler, then notifies the
// next handler in the pipeline.
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
        throws Exception {
    Queue<Object> in = ctx.in().messageBuffer();
    Object frame;
    while ((frame = in.poll()) != null) {
        handleInboundMessage(ctx, frame);
    }
    ctx.fireInboundBufferUpdated();
}
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which is not open
* and the endpoint has not sent a GOAWAY frame, it must issue a stream error
* with the error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the recipient must
* issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may close the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives a data frame after the stream is half-closed from the
* sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
*
* If an endpoint receives a data frame after the stream is closed, it must send
* a RST_STREAM frame with the status PROTOCOL_ERROR.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
<<<<<<< MINE
if (!spdySession.isActiveStream(streamID)) {
if (streamID <= lastGoodStreamID) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
} else if (!sentGoAwayFrame) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
if (spdySession.isRemoteSideClosed(streamID)) {
if (!sentGoAwayFrame) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
}
return;
}
// Check if we received a data frame for a stream which is half-closed
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
*/
if (flowControl) {
// Update receive window size
int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
// Window size can become negative if we sent a SETTINGS frame that reduces the
// size of the transfer window after the peer has written data frames.
// The value is bounded by the length that SETTINGS frame decrease the window.
// This difference is stored for the session when writing the SETTINGS frame
// and is cleared once we send a WINDOW_UPDATE frame.
if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
// Window size became negative due to sender writing frame before receiving SETTINGS
// Send data frames upstream in initialReceiveWindowSize chunks
if (newWindowSize < 0) {
while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
}
}
// Send a WINDOW_UPDATE frame if less than half the window size remains
if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
deltaWindowSize = initialReceiveWindowSize - newWindowSize;
spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
SpdyWindowUpdateFrame spdyWindowUpdateFrame =
new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
Channels.write(
ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
}
}
// Close the remote side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
* any previously received SYN_STREAM, it must issue a session error with
* the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*
* The recipient can reject a stream by sending a stream error with the
* status code REFUSED_STREAM.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
<<<<<<< MINE
// Stream-IDs must be monotonically increasing
if (streamID <= lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
=======
// Stream-IDs must be monotonically increassing
if (streamID < lastGoodStreamID) {
issueSessionError(ctx);
>>>>>>> YOURS
return;
}
// Try to accept the stream
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
<<<<<<< MINE
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
=======
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
>>>>>>> YOURS
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code STREAM_IN_USE.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
spdySession.receivedReply(streamID);
// Close the remote side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send
* additional frames on that stream.
*
* An endpoint must not send a RST_STREAM in response to a RST_STREAM.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, true);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialSendWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.write(spdyPingFrame);
return;
}
<<<<<<< MINE
// Note: only checks that there are outstanding pings since uniqueness is not enforced
=======
// Note: only checks that there are outstanding pings since uniqueness is not inforced
>>>>>>> YOURS
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
return;
}
// Close the remote side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
/*
* SPDY WINDOW_UPDATE frame processing requirements:
*
* Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
* must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
*
* Sender should ignore all WINDOW_UPDATE frames associated with a stream
* after sending the last frame for the stream.
*/
if (flowControl) {
SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
int streamID = spdyWindowUpdateFrame.getStreamID();
int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
// Ignore frames for half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
return;
}
// Check for numerical overflow
if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
updateSendWindowSize(ctx, streamID, deltaWindowSize);
}
return;
}
<<<<<<< MINE
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
throws Exception {
Throwable cause = e.getCause();
if (cause instanceof SpdyProtocolException) {
issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
}
super.exceptionCaught(ctx, e);
=======
ctx.nextIn().messageBuffer().add(msg);
>>>>>>> YOURS
}
<<<<<<< MINE
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
=======
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
final ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
ctx.disconnect(future);
>>>>>>> YOURS
}
});
}
// A GOAWAY frame must precede the transport-level close; the close proceeds once the
// GOAWAY write completes.
@Override
public void close(final ChannelOutboundHandlerContext<Object> ctx,
        final ChannelFuture future) throws Exception {
    ChannelFuture goAwayWritten = sendGoAwayFrame(ctx);
    goAwayWritten.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) throws Exception {
            ctx.close(future);
        }
    });
}
// Runs every frame queued by the previous outbound handler through the per-frame
// handler, then flushes whatever was accepted into this handler's outbound buffer.
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx,
        ChannelFuture future) throws Exception {
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object frame;
    while ((frame = pending.poll()) != null) {
        handleOutboundMessage(ctx, frame);
    }
    ctx.flush(future);
}
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Sender must not send a data frame with data length greater
* than the transfer window size.
*
* After sending each data frame, the sender decrements its
* transfer window size by the amount of data transmitted.
*
* When the window size becomes less than or equal to 0, the
* sender must pause transmitting data frames.
*/
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
<<<<<<< MINE
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
=======
>>>>>>> YOURS
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
<<<<<<< MINE
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
=======
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.fireExceptionCaught(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
<<<<<<< MINE
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
=======
// Should send a CLOSE ChannelStateEvent
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
>>>>>>> YOURS
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.out().messageBuffer().add(msg);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
<<<<<<< MINE
private void issueSessionError(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
ChannelFuture future = sendGoAwayFrame(ctx, channel, remoteAddress, status);
future.addListener(ChannelFutureListener.CLOSE);
=======
private void issueSessionError(ChannelHandlerContext ctx) {
sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
>>>>>>> YOURS
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
private void issueStreamError(
<<<<<<< MINE
ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
=======
ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
>>>>>>> YOURS
boolean fireMessageReceived = !spdySession.isRemoteSideClosed(streamID);
removeStream(streamID);
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
<<<<<<< MINE
Channels.write(ctx, Channels.future(ctx.getChannel()), spdyRstStreamFrame, remoteAddress);
if (fireMessageReceived) {
Channels.fireMessageReceived(ctx, spdyRstStreamFrame, remoteAddress);
}
=======
ctx.write(spdyRstStreamFrame);
>>>>>>> YOURS
}
/*
* Helper functions
*/
/**
 * Returns {@code true} if the given stream ID was initiated by the remote endpoint:
 * a server-side session treats client IDs as remote, and vice versa.
 */
private boolean isRemoteInitiatedID(int ID) {
    // Original expression `server && !serverID || !server && serverID` is XOR;
    // `!=` on booleans says the same thing directly.
    return server != SpdyCodecUtil.isServerID(ID);
}
// Records one endpoint's SETTINGS_MAX_CONCURRENT_STREAMS value and recomputes the
// effective session limit: 0 means "unlimited", so the limit is the other side's
// value when one side is unlimited, and the smaller value otherwise.
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    int local = localConcurrentStreams;
    int peer = remoteConcurrentStreams;
    if (local == 0) {
        maxConcurrentStreams = peer;
    } else if (peer == 0) {
        maxConcurrentStreams = local;
    } else {
        maxConcurrentStreams = Math.min(local, peer);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
// Applies a changed SETTINGS_INITIAL_WINDOW_SIZE from the peer: records the new
// initial send window and shifts every active stream's send window by the difference.
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    int deltaWindowSize = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (Integer streamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(streamID.intValue(), deltaWindowSize);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
// Applies a locally advertised SETTINGS_INITIAL_WINDOW_SIZE: records the new initial
// receive window and shifts every active stream's receive window by the difference.
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
int deltaWindowSize = newInitialWindowSize - initialReceiveWindowSize;
initialReceiveWindowSize = newInitialWindowSize;
spdySession.updateAllReceiveWindowSizes(deltaWindowSize);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
private synchronized boolean acceptStream(
int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
<<<<<<< MINE
int maxConcurrentStreams = this.maxConcurrentStreams; // read volatile once
=======
>>>>>>> YOURS
if (maxConcurrentStreams != 0 &&
spdySession.numActiveStreams() >= maxConcurrentStreams) {
return false;
}
spdySession.acceptStream(
streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
// Closes one direction of the given stream and, if a session close is pending and no
// streams remain active, completes the close future.
private void halfCloseStream(int streamID, boolean remote) {
    if (remote) {
        spdySession.closeRemoteSide(streamID);
    } else {
        spdySession.closeLocalSide(streamID);
    }
    // Snapshot the volatile once: reading it twice (null-check, then setSuccess) could
    // NPE if another thread cleared it between the two reads.
    ChannelFuture closeFuture = closeSessionFuture;
    if (closeFuture != null && spdySession.noActiveStreams()) {
        closeFuture.setSuccess();
    }
}
/** Drops all session state for the given stream and completes a pending session close if idle. */
private void removeStream(int streamID) {
    spdySession.removeStream(streamID);
    // A session close that was deferred until all streams finished can now complete.
    ChannelFuture pendingClose = closeSessionFuture;
    if (pendingClose != null && spdySession.noActiveStreams()) {
        pendingClose.setSuccess();
    }
}
// Applies a WINDOW_UPDATE (or SETTINGS-driven) delta to a stream's transfer window
// and drains any data frames that were queued while the stream was stalled.
// NOTE(review): this is the MessageEvent-based (pre-port) flow-control path.
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
// Keep draining queued writes while transfer window capacity remains.
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
// The original event stays queued holding the unread remainder of the data.
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
// Window is exhausted after the partial write; stop draining.
newWindowSize = 0;
}
}
}
}
<<<<<<< MINE
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
private synchronized ChannelFuture sendGoAwayFrame(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
=======
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
>>>>>>> YOURS
if (!sentGoAwayFrame) {
sentGoAwayFrame = true;
<<<<<<< MINE
SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamID, status);
ChannelFuture future = Channels.future(channel);
Channels.write(ctx, future, spdyGoAwayFrame, remoteAddress);
return future;
=======
return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
>>>>>>> YOURS
}
<<<<<<< MINE
return Channels.succeededFuture(channel);
}
private static final class ClosingChannelFutureListener implements ChannelFutureListener {
private final ChannelHandlerContext ctx;
private final ChannelStateEvent e;
ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
this.ctx = ctx;
this.e = e;
}
public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
if (!(sentGoAwayFuture.getCause() instanceof ClosedChannelException)) {
Channels.close(ctx, e.getFuture());
} else {
e.getFuture().setSuccess();
}
}
=======
return ctx.newSucceededFuture();
>>>>>>> YOURS
}
}/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.spdy;
import static io.netty.handler.codec.spdy.SpdyCodecUtil.*;

import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;

import io.netty.channel.Channel;
import io.netty.channel.ChannelBufferHolder;
import io.netty.channel.ChannelBufferHolders;
import io.netty.channel.ChannelDownstreamHandler;
import io.netty.channel.ChannelEvent;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerContext;
import io.netty.channel.ChannelOutboundHandlerContext;
import io.netty.channel.ChannelStateEvent;
import io.netty.channel.Channels;
import io.netty.channel.ExceptionEvent;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
/**
* Manages streams within a SPDY session.
*/
<<<<<<< MINE
public class SpdySessionHandler extends SimpleChannelUpstreamHandler
implements ChannelDownstreamHandler {
=======
public class SpdySessionHandler extends ChannelHandlerAdapter<Object, Object> {
>>>>>>> YOURS
// Shared sentinel thrown/fired for illegal frame usage on the session.
private static final SpdyProtocolException PROTOCOL_EXCEPTION = new SpdyProtocolException();
// Per-stream state (open/half-closed, windows, pending writes) for this session.
private final SpdySession spdySession = new SpdySession();
// Highest remote-initiated Stream-ID accepted so far; reported in GOAWAY frames.
private volatile int lastGoodStreamID;
// Concurrent-stream limits announced via SETTINGS (0 means unlimited).
private volatile int remoteConcurrentStreams;
private volatile int localConcurrentStreams;
// Effective limit derived from the two values above.
private volatile int maxConcurrentStreams;
private static final int DEFAULT_WINDOW_SIZE = 64 * 1024; // 64 KB default initial window size
// Default per-stream transfer windows, adjustable via SETTINGS_INITIAL_WINDOW_SIZE.
private volatile int initialSendWindowSize = DEFAULT_WINDOW_SIZE;
private volatile int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE;
// Guards all flow-control window bookkeeping.
private final Object flowControlLock = new Object();
// Count of locally-initiated PINGs awaiting a reply.
private final AtomicInteger pings = new AtomicInteger();
private volatile boolean sentGoAwayFrame;
private volatile boolean receivedGoAwayFrame;
// Non-null while a session close is deferred until all streams complete.
private volatile ChannelFuture closeSessionFuture;
// True when this handler manages the server endpoint of the connection.
private final boolean server;
// Flow control applies to SPDY version >= 3 (see constructor).
private final boolean flowControl;
/**
 * Creates a new session handler.
 *
 * @param version the SPDY protocol version
 * @param server  {@code true} if this handler manages the server endpoint of
 *                the connection, {@code false} for the client endpoint
 * @throws IllegalArgumentException if the version is out of the supported range
 */
public SpdySessionHandler(int version, boolean server) {
    if (version < SPDY_MIN_VERSION || version > SPDY_MAX_VERSION) {
        throw new IllegalArgumentException(
                "unsupported version: " + version);
    }
    this.server = server;
    // Flow control was introduced with SPDY protocol version 3.
    flowControl = version >= 3;
}
@Override
// Frames travel through this handler as discrete messages, so use a message buffer.
public ChannelBufferHolder<Object> newOutboundBuffer(
ChannelOutboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
// Inbound side is likewise message-based (decoded SPDY frames).
public ChannelBufferHolder<Object> newInboundBuffer(
ChannelInboundHandlerContext<Object> ctx) throws Exception {
return ChannelBufferHolders.messageBuffer();
}
@Override
public void inboundBufferUpdated(ChannelInboundHandlerContext<Object> ctx)
        throws Exception {
    // Run every decoded frame through the session-state checks before
    // propagating the buffer-updated event.
    Queue<Object> in = ctx.in().messageBuffer();
    Object frame;
    while ((frame = in.poll()) != null) {
        handleInboundMessage(ctx, frame);
    }
    ctx.fireInboundBufferUpdated();
}
private void handleInboundMessage(ChannelInboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
/*
* SPDY Data frame processing requirements:
*
* If an endpoint receives a data frame for a Stream-ID which is not open
* and the endpoint has not sent a GOAWAY frame, it must issue a stream error
* with the error code INVALID_STREAM for the Stream-ID.
*
* If an endpoint which created the stream receives a data frame before receiving
* a SYN_REPLY on that stream, it is a protocol error, and the recipient must
* issue a stream error with the status code PROTOCOL_ERROR for the Stream-ID.
*
* If an endpoint receives multiple data frames for invalid Stream-IDs,
* it may close the session.
*
* If an endpoint refuses a stream it must ignore any data frames for that stream.
*
* If an endpoint receives a data frame after the stream is half-closed from the
* sender, it must send a RST_STREAM frame with the status STREAM_ALREADY_CLOSED.
*
* If an endpoint receives a data frame after the stream is closed, it must send
* a RST_STREAM frame with the status PROTOCOL_ERROR.
*/
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
int streamID = spdyDataFrame.getStreamID();
// Check if we received a data frame for a Stream-ID which is not open
<<<<<<< MINE
if (!spdySession.isActiveStream(streamID)) {
if (streamID <= lastGoodStreamID) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
} else if (!sentGoAwayFrame) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
if (spdySession.isRemoteSideClosed(streamID)) {
if (!sentGoAwayFrame) {
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
}
return;
}
// Check if we received a data frame for a stream which is half-closed
if (spdySession.isRemoteSideClosed(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_ALREADY_CLOSED);
return;
}
// Check if we received a data frame before receiving a SYN_REPLY
if (!isRemoteInitiatedID(streamID) && !spdySession.hasReceivedReply(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame.
*/
if (flowControl) {
// Update receive window size
int deltaWindowSize = -1 * spdyDataFrame.getData().readableBytes();
int newWindowSize = spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
// Window size can become negative if we sent a SETTINGS frame that reduces the
// size of the transfer window after the peer has written data frames.
// The value is bounded by the length that SETTINGS frame decrease the window.
// This difference is stored for the session when writing the SETTINGS frame
// and is cleared once we send a WINDOW_UPDATE frame.
if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamID)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
// Window size became negative due to sender writing frame before receiving SETTINGS
// Send data frames upstream in initialReceiveWindowSize chunks
if (newWindowSize < 0) {
while (spdyDataFrame.getData().readableBytes() > initialReceiveWindowSize) {
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(initialReceiveWindowSize));
Channels.fireMessageReceived(ctx, partialDataFrame, e.getRemoteAddress());
}
}
// Send a WINDOW_UPDATE frame if less than half the window size remains
if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) {
deltaWindowSize = initialReceiveWindowSize - newWindowSize;
spdySession.updateReceiveWindowSize(streamID, deltaWindowSize);
SpdyWindowUpdateFrame spdyWindowUpdateFrame =
new DefaultSpdyWindowUpdateFrame(streamID, deltaWindowSize);
Channels.write(
ctx, Channels.future(e.getChannel()), spdyWindowUpdateFrame, e.getRemoteAddress());
}
}
// Close the remote side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdySynStreamFrame) {
/*
* SPDY SYN_STREAM frame processing requirements:
*
* If an endpoint receives a SYN_STREAM with a Stream-ID that is less than
* any previously received SYN_STREAM, it must issue a session error with
* the status PROTOCOL_ERROR.
*
* If an endpoint receives multiple SYN_STREAM frames with the same active
* Stream-ID, it must issue a stream error with the status code PROTOCOL_ERROR.
*
* The recipient can reject a stream by sending a stream error with the
* status code REFUSED_STREAM.
*/
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
// Check if we received a valid SYN_STREAM frame
if (spdySynStreamFrame.isInvalid() ||
!isRemoteInitiatedID(streamID) ||
spdySession.isActiveStream(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
<<<<<<< MINE
// Stream-IDs must be monotonically increasing
if (streamID <= lastGoodStreamID) {
issueSessionError(ctx, e.getChannel(), e.getRemoteAddress(), SpdySessionStatus.PROTOCOL_ERROR);
=======
// Stream-IDs must be monotonically increassing
if (streamID < lastGoodStreamID) {
issueSessionError(ctx);
>>>>>>> YOURS
return;
}
// Try to accept the stream
byte priority = spdySynStreamFrame.getPriority();
boolean remoteSideClosed = spdySynStreamFrame.isLast();
boolean localSideClosed = spdySynStreamFrame.isUnidirectional();
<<<<<<< MINE
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.REFUSED_STREAM);
=======
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.REFUSED_STREAM);
>>>>>>> YOURS
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
/*
* SPDY SYN_REPLY frame processing requirements:
*
* If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID
* it must issue a stream error with the status code STREAM_IN_USE.
*/
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Check if we received a valid SYN_REPLY frame
if (spdySynReplyFrame.isInvalid() ||
isRemoteInitiatedID(streamID) ||
spdySession.isRemoteSideClosed(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
return;
}
// Check if we have received multiple frames for the same Stream-ID
if (spdySession.hasReceivedReply(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.STREAM_IN_USE);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
spdySession.receivedReply(streamID);
// Close the remote side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyRstStreamFrame) {
/*
* SPDY RST_STREAM frame processing requirements:
*
* After receiving a RST_STREAM on a stream, the receiver must not send
* additional frames on that stream.
*
* An endpoint must not send a RST_STREAM in response to a RST_STREAM.
*/
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, true);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialSendWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
/*
* SPDY PING frame processing requirements:
*
* Receivers of a PING frame should send an identical frame to the sender
* as soon as possible.
*
* Receivers of a PING frame must ignore frames that it did not initiate
*/
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.write(spdyPingFrame);
return;
}
<<<<<<< MINE
// Note: only checks that there are outstanding pings since uniqueness is not enforced
=======
// Note: only checks that there are outstanding pings since uniqueness is not inforced
>>>>>>> YOURS
if (pings.get() == 0) {
return;
}
pings.getAndDecrement();
} else if (msg instanceof SpdyGoAwayFrame) {
receivedGoAwayFrame = true;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Check if we received a valid HEADERS frame
if (spdyHeadersFrame.isInvalid()) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.PROTOCOL_ERROR);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
if (spdySession.isRemoteSideClosed(streamID)) {
<<<<<<< MINE
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.INVALID_STREAM);
=======
issueStreamError(ctx, streamID, SpdyStreamStatus.INVALID_STREAM);
>>>>>>> YOURS
return;
}
// Close the remote side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, true);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
/*
* SPDY WINDOW_UPDATE frame processing requirements:
*
* Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31
* must send a RST_STREAM with the status code FLOW_CONTROL_ERROR.
*
* Sender should ignore all WINDOW_UPDATE frames associated with a stream
* after sending the last frame for the stream.
*/
if (flowControl) {
SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg;
int streamID = spdyWindowUpdateFrame.getStreamID();
int deltaWindowSize = spdyWindowUpdateFrame.getDeltaWindowSize();
// Ignore frames for half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
return;
}
// Check for numerical overflow
if (spdySession.getSendWindowSize(streamID) > Integer.MAX_VALUE - deltaWindowSize) {
issueStreamError(ctx, e.getRemoteAddress(), streamID, SpdyStreamStatus.FLOW_CONTROL_ERROR);
return;
}
updateSendWindowSize(ctx, streamID, deltaWindowSize);
}
return;
}
<<<<<<< MINE
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
throws Exception {
Throwable cause = e.getCause();
if (cause instanceof SpdyProtocolException) {
issueSessionError(ctx, e.getChannel(), null, SpdySessionStatus.PROTOCOL_ERROR);
}
super.exceptionCaught(ctx, e);
=======
ctx.nextIn().messageBuffer().add(msg);
>>>>>>> YOURS
}
<<<<<<< MINE
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt)
throws Exception {
if (evt instanceof ChannelStateEvent) {
ChannelStateEvent e = (ChannelStateEvent) evt;
switch (e.getState()) {
case OPEN:
case CONNECTED:
case BOUND:
/*
* SPDY connection requirements:
*
* When either endpoint closes the transport-level connection,
* it must first send a GOAWAY frame.
*/
if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
sendGoAwayFrame(ctx, e);
return;
}
=======
@Override
public void disconnect(final ChannelOutboundHandlerContext<Object> ctx,
final ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
ctx.disconnect(future);
>>>>>>> YOURS
}
});
}
@Override
// SPDY requires a GOAWAY frame to be sent before the transport-level
// connection is torn down; complete the close only afterwards.
public void close(final ChannelOutboundHandlerContext<Object> ctx,
final ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
ctx.close(future);
}
});
}
@Override
public void flush(ChannelOutboundHandlerContext<Object> ctx,
        ChannelFuture future) throws Exception {
    // Validate and account for every queued outbound frame before the
    // flush continues downstream.
    Queue<Object> pending = ctx.prevOut().messageBuffer();
    Object frame;
    while ((frame = pending.poll()) != null) {
        handleOutboundMessage(ctx, frame);
    }
    ctx.flush(future);
}
private void handleOutboundMessage(ChannelOutboundHandlerContext<Object> ctx, Object msg)
throws Exception {
if (msg instanceof SpdyDataFrame) {
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg;
final int streamID = spdyDataFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
return;
}
/*
* SPDY Data frame flow control processing requirements:
*
* Sender must not send a data frame with data length greater
* than the transfer window size.
*
* After sending each data frame, the sender decrements its
* transfer window size by the amount of data transmitted.
*
* When the window size becomes less than or equal to 0, the
* sender must pause transmitting data frames.
*/
if (flowControl) {
synchronized (flowControlLock) {
int dataLength = spdyDataFrame.getData().readableBytes();
int sendWindowSize = spdySession.getSendWindowSize(streamID);
if (sendWindowSize >= dataLength) {
// Window size is large enough to send entire data frame
spdySession.updateSendWindowSize(streamID, -1 * dataLength);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
} else if (sendWindowSize > 0) {
// Stream is not stalled but we cannot send the entire frame
spdySession.updateSendWindowSize(streamID, -1 * sendWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(sendWindowSize));
// Enqueue the remaining data (will be the first frame queued)
spdySession.putPendingWrite(streamID, e);
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
return;
} else {
// Stream is stalled -- enqueue Data frame and return
spdySession.putPendingWrite(streamID, e);
return;
}
}
}
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdySynStreamFrame) {
SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg;
int streamID = spdySynStreamFrame.getStreamID();
<<<<<<< MINE
if (isRemoteInitiatedID(streamID)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
byte priority = spdySynStreamFrame.getPriority();
=======
>>>>>>> YOURS
boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional();
boolean localSideClosed = spdySynStreamFrame.isLast();
<<<<<<< MINE
if (!acceptStream(streamID, priority, remoteSideClosed, localSideClosed)) {
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
=======
if (!acceptStream(streamID, remoteSideClosed, localSideClosed)) {
issueStreamError(ctx, streamID, SpdyStreamStatus.PROTOCOL_ERROR);
>>>>>>> YOURS
return;
}
} else if (msg instanceof SpdySynReplyFrame) {
SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg;
int streamID = spdySynReplyFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (!isRemoteInitiatedID(streamID) || spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdySynReplyFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyRstStreamFrame) {
SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg;
removeStream(spdyRstStreamFrame.getStreamID());
} else if (msg instanceof SpdySettingsFrame) {
SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg;
int newConcurrentStreams =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS);
if (newConcurrentStreams >= 0) {
updateConcurrentStreams(newConcurrentStreams, false);
}
// Persistence flag are inconsistent with the use of SETTINGS to communicate
// the initial window size. Remove flags from the sender requesting that the
// value be persisted. Remove values that the sender indicates are persisted.
if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) {
spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
}
spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false);
if (flowControl) {
int newInitialWindowSize =
spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE);
if (newInitialWindowSize >= 0) {
updateInitialReceiveWindowSize(newInitialWindowSize);
}
}
} else if (msg instanceof SpdyPingFrame) {
SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg;
if (isRemoteInitiatedID(spdyPingFrame.getID())) {
ctx.fireExceptionCaught(new IllegalArgumentException(
"invalid PING ID: " + spdyPingFrame.getID()));
return;
}
pings.getAndIncrement();
} else if (msg instanceof SpdyGoAwayFrame) {
<<<<<<< MINE
// Why is this being sent? Intercept it and fail the write.
// Should have sent a CLOSE ChannelStateEvent
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
=======
// Should send a CLOSE ChannelStateEvent
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
>>>>>>> YOURS
return;
} else if (msg instanceof SpdyHeadersFrame) {
SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg;
int streamID = spdyHeadersFrame.getStreamID();
// Frames must not be sent on half-closed streams
if (spdySession.isLocalSideClosed(streamID)) {
ctx.fireExceptionCaught(PROTOCOL_EXCEPTION);
return;
}
// Close the local side of the stream if this is the last frame
if (spdyHeadersFrame.isLast()) {
halfCloseStream(streamID, false);
}
} else if (msg instanceof SpdyWindowUpdateFrame) {
// Why is this being sent? Intercept it and fail the write.
e.getFuture().setFailure(PROTOCOL_EXCEPTION);
return;
}
ctx.out().messageBuffer().add(msg);
}
/*
* SPDY Session Error Handling:
*
* When a session error occurs, the endpoint encountering the error must first
* send a GOAWAY frame with the Stream-ID of the most recently received stream
* from the remote endpoint, and the error code for why the session is terminating.
*
* After sending the GOAWAY frame, the endpoint must close the TCP connection.
*/
<<<<<<< MINE
private void issueSessionError(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
ChannelFuture future = sendGoAwayFrame(ctx, channel, remoteAddress, status);
future.addListener(ChannelFutureListener.CLOSE);
=======
private void issueSessionError(ChannelHandlerContext ctx) {
sendGoAwayFrame(ctx).addListener(ChannelFutureListener.CLOSE);
>>>>>>> YOURS
}
/*
* SPDY Stream Error Handling:
*
* Upon a stream error, the endpoint must send a RST_STREAM frame which contains
* the Stream-ID for the stream where the error occurred and the error status which
* caused the error.
*
* After sending the RST_STREAM, the stream is closed to the sending endpoint.
*
* Note: this is only called by the worker thread
*/
private void issueStreamError(
<<<<<<< MINE
ChannelHandlerContext ctx, SocketAddress remoteAddress, int streamID, SpdyStreamStatus status) {
=======
ChannelHandlerContext ctx, int streamID, SpdyStreamStatus status) {
>>>>>>> YOURS
boolean fireMessageReceived = !spdySession.isRemoteSideClosed(streamID);
removeStream(streamID);
SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamID, status);
<<<<<<< MINE
Channels.write(ctx, Channels.future(ctx.getChannel()), spdyRstStreamFrame, remoteAddress);
if (fireMessageReceived) {
Channels.fireMessageReceived(ctx, spdyRstStreamFrame, remoteAddress);
}
=======
ctx.write(spdyRstStreamFrame);
>>>>>>> YOURS
}
/*
* Helper functions
*/
/** Returns {@code true} if the given Stream-ID was originated by the remote endpoint. */
private boolean isRemoteInitiatedID(int ID) {
    // Remote-initiated exactly when the ID's server/client parity differs from ours.
    return server != SpdyCodecUtil.isServerID(ID);
}
/** Records a SETTINGS_MAX_CONCURRENT_STREAMS update and recomputes the effective limit. */
private void updateConcurrentStreams(int newConcurrentStreams, boolean remote) {
    // Record the new limit for whichever side announced it.
    if (remote) {
        remoteConcurrentStreams = newConcurrentStreams;
    } else {
        localConcurrentStreams = newConcurrentStreams;
    }
    // Effective limit is the smaller non-zero value; zero means unlimited.
    int local = localConcurrentStreams;
    int peer = remoteConcurrentStreams;
    if (local == peer) {
        maxConcurrentStreams = local;
    } else if (local == 0) {
        maxConcurrentStreams = peer;
    } else if (peer == 0) {
        maxConcurrentStreams = local;
    } else {
        maxConcurrentStreams = Math.min(local, peer);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialSendWindowSize(int newInitialWindowSize) {
    // Apply the difference between the new and old default to every open stream.
    int delta = newInitialWindowSize - initialSendWindowSize;
    initialSendWindowSize = newInitialWindowSize;
    for (Integer streamID : spdySession.getActiveStreams()) {
        spdySession.updateSendWindowSize(streamID.intValue(), delta);
    }
}
// need to synchronize to prevent new streams from being created while updating active streams
private synchronized void updateInitialReceiveWindowSize(int newInitialWindowSize) {
    // The session applies the delta to all per-stream receive windows in one call.
    int delta = newInitialWindowSize - initialReceiveWindowSize;
    initialReceiveWindowSize = newInitialWindowSize;
    spdySession.updateAllReceiveWindowSizes(delta);
}
// need to synchronize accesses to sentGoAwayFrame, lastGoodStreamID, and initial window sizes
private synchronized boolean acceptStream(
int streamID, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
// Cannot initiate any new streams after receiving or sending GOAWAY
if (receivedGoAwayFrame || sentGoAwayFrame) {
return false;
}
<<<<<<< MINE
int maxConcurrentStreams = this.maxConcurrentStreams; // read volatile once
=======
>>>>>>> YOURS
if (maxConcurrentStreams != 0 &&
spdySession.numActiveStreams() >= maxConcurrentStreams) {
return false;
}
spdySession.acceptStream(
streamID, priority, remoteSideClosed, localSideClosed, initialSendWindowSize, initialReceiveWindowSize);
if (isRemoteInitiatedID(streamID)) {
lastGoodStreamID = streamID;
}
return true;
}
private void halfCloseStream(int streamID, boolean remote) {
if (remote) {
spdySession.closeRemoteSide(streamID);
} else {
spdySession.closeLocalSide(streamID);
}
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
private void removeStream(int streamID) {
spdySession.removeStream(streamID);
if (closeSessionFuture != null && spdySession.noActiveStreams()) {
closeSessionFuture.setSuccess();
}
}
private void updateSendWindowSize(ChannelHandlerContext ctx, final int streamID, int deltaWindowSize) {
synchronized (flowControlLock) {
int newWindowSize = spdySession.updateSendWindowSize(streamID, deltaWindowSize);
while (newWindowSize > 0) {
// Check if we have unblocked a stalled stream
MessageEvent e = spdySession.getPendingWrite(streamID);
if (e == null) {
break;
}
SpdyDataFrame spdyDataFrame = (SpdyDataFrame) e.getMessage();
int dataFrameSize = spdyDataFrame.getData().readableBytes();
if (newWindowSize >= dataFrameSize) {
// Window size is large enough to send entire data frame
spdySession.removePendingWrite(streamID);
newWindowSize = spdySession.updateSendWindowSize(streamID, -1 * dataFrameSize);
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
// Close the local side of the stream if this is the last frame
if (spdyDataFrame.isLast()) {
halfCloseStream(streamID, false);
}
Channels.write(ctx, e.getFuture(), spdyDataFrame, e.getRemoteAddress());
} else {
// We can send a partial frame
spdySession.updateSendWindowSize(streamID, -1 * newWindowSize);
// Create a partial data frame whose length is the current window size
SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame(streamID);
partialDataFrame.setData(spdyDataFrame.getData().readSlice(newWindowSize));
ChannelFuture writeFuture = Channels.future(e.getChannel());
// The transfer window size is pre-decremented when sending a data frame downstream.
// Close the stream on write failures that leaves the transfer window in a corrupt state.
final SocketAddress remoteAddress = e.getRemoteAddress();
final ChannelHandlerContext context = ctx;
e.getFuture().addListener(new ChannelFutureListener() {
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
issueStreamError(context, remoteAddress, streamID, SpdyStreamStatus.INTERNAL_ERROR);
}
}
});
Channels.write(ctx, writeFuture, partialDataFrame, remoteAddress);
newWindowSize = 0;
}
}
}
}
<<<<<<< MINE
private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Avoid NotYetConnectedException
if (!e.getChannel().isConnected()) {
ctx.sendDownstream(e);
return;
}
ChannelFuture future = sendGoAwayFrame(ctx, e.getChannel(), null, SpdySessionStatus.OK);
if (spdySession.noActiveStreams()) {
future.addListener(new ClosingChannelFutureListener(ctx, e));
} else {
closeSessionFuture = Channels.future(e.getChannel());
closeSessionFuture.addListener(new ClosingChannelFutureListener(ctx, e));
}
}
private synchronized ChannelFuture sendGoAwayFrame(
ChannelHandlerContext ctx, Channel channel, SocketAddress remoteAddress, SpdySessionStatus status) {
=======
private synchronized ChannelFuture sendGoAwayFrame(ChannelHandlerContext ctx) {
>>>>>>> YOURS
if (!sentGoAwayFrame) {
sentGoAwayFrame = true;
<<<<<<< MINE
SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamID, status);
ChannelFuture future = Channels.future(channel);
Channels.write(ctx, future, spdyGoAwayFrame, remoteAddress);
return future;
=======
return ctx.write(new DefaultSpdyGoAwayFrame(lastGoodStreamID));
>>>>>>> YOURS
}
<<<<<<< MINE
return Channels.succeededFuture(channel);
}
private static final class ClosingChannelFutureListener implements ChannelFutureListener {
private final ChannelHandlerContext ctx;
private final ChannelStateEvent e;
ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelStateEvent e) {
this.ctx = ctx;
this.e = e;
}
public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception {
if (!(sentGoAwayFuture.getCause() instanceof ClosedChannelException)) {
Channels.close(ctx, e.getFuture());
} else {
e.getFuture().setSuccess();
}
}
=======
return ctx.newSucceededFuture();
>>>>>>> YOURS
}
}
Diff Result
No diff
Case 42 - java_ogplatform.rev_00bce_fcf35..HibernatePositionMaster.java
Base
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioId;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(PortfolioId portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<PortfolioId> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<PortfolioId> getPortfolioIds(final InstantProvider now) {
return (Set<PortfolioId>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<PortfolioId> portfolioIds = new HashSet<PortfolioId>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(PortfolioId.of("h8/" + dbPortfolio.getId()));
}
return portfolioIds;
}
});
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioId;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(PortfolioId portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<PortfolioId> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<PortfolioId> getPortfolioIds(final InstantProvider now) {
return (Set<PortfolioId>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<PortfolioId> portfolioIds = new HashSet<PortfolioId>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(PortfolioId.of("h8/" + dbPortfolio.getId()));
}
return portfolioIds;
}
});
}
}
Left
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
// Spring Hibernate template; configured via setSessionFactory and required before any query.
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
/**
 * Sets the Hibernate session factory used for all database access.
 * @param sessionFactory the session factory, not null
 */
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
/**
 * Returns the Hibernate template, or null if no session factory has been set yet.
 */
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
/**
 * Spring lifecycle hook: verifies a session factory was injected.
 * @throws IllegalStateException if setSessionFactory was never called
 */
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
/**
 * Immutable Position value object assembled from database beans.
 * Note getSecurity() always returns null - only the security key bundle is populated here.
 */
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
// Securities are not resolved by this master; callers must resolve via getSecurityKey().
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a PositionBean plus its identifier associations (as of 'now') into a Position.
 */
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
/**
 * Recursively populates a node with its child nodes and positions as of the given instant.
 * Issues one query per node level - see the TODO above about bulk fetching.
 */
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
/**
 * Retrieves a portfolio node, with its full subtree, as of the given instant.
 * Returns null if the identity key scheme is wrong or the node does not exist.
 */
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
/**
 * Retrieves a position as of the given instant.
 * Returns null if the identity key scheme is wrong or the position does not exist.
 */
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
/**
 * Retrieves a portfolio (with its fully loaded node tree) as of the given instant.
 * Returns null if no portfolio exists for the identifier at that time.
 * @throws IllegalArgumentException if the identifier is not in the "h8" scheme
 */
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns identifiers (scheme "h8") of all portfolios present at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
Right
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioId;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
// Spring Hibernate template; configured via setSessionFactory and required before any query.
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
/**
 * Sets the Hibernate session factory used for all database access.
 * @param sessionFactory the session factory, not null
 */
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
/**
 * Returns the Hibernate template, or null if no session factory has been set yet.
 */
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
/**
 * Spring lifecycle hook: verifies a session factory was injected.
 * @throws IllegalStateException if setSessionFactory was never called
 */
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
/**
 * Immutable Position value object assembled from database beans.
 * Note getSecurity() always returns null - only the security key bundle is populated here.
 */
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
// Securities are not resolved by this master; callers must resolve via getSecurityKey().
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a PositionBean plus its identifier associations (as of 'now') into a Position.
 */
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
/**
 * Recursively populates a node with its child nodes and positions as of the given instant.
 * Issues one query per node level - see the TODO above about bulk fetching.
 */
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
/**
 * Retrieves a portfolio node, with its full subtree, as of the given instant.
 * Returns null if the identity key scheme is wrong or the node does not exist.
 */
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
/**
 * Retrieves a position as of the given instant.
 * Returns null if the identity key scheme is wrong or the position does not exist.
 */
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(PortfolioId portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
/**
 * Retrieves a portfolio (with its fully loaded node tree) as of the given instant.
 * Portfolio ids produced by this master are of the form "h8/&lt;database identifier&gt;".
 * Returns null if no portfolio exists for the id at that time.
 * @throws IllegalArgumentException if the id does not carry the "h8/" prefix
 */
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<PortfolioId> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns the ids ("h8/" prefixed) of all portfolios present at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<PortfolioId> getPortfolioIds(final InstantProvider now) {
return (Set<PortfolioId>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<PortfolioId> portfolioIds = new HashSet<PortfolioId>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(PortfolioId.of("h8/" + dbPortfolio.getId()));
}
return portfolioIds;
}
});
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioId;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
// Spring Hibernate template; configured via setSessionFactory and required before any query.
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
/**
 * Sets the Hibernate session factory used for all database access.
 * @param sessionFactory the session factory, not null
 */
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
/**
 * Returns the Hibernate template, or null if no session factory has been set yet.
 */
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
/**
 * Spring lifecycle hook: verifies a session factory was injected.
 * @throws IllegalStateException if setSessionFactory was never called
 */
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
/**
 * Immutable Position value object assembled from database beans.
 * Note getSecurity() always returns null - only the security key bundle is populated here.
 */
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
// Securities are not resolved by this master; callers must resolve via getSecurityKey().
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a PositionBean plus its identifier associations (as of 'now') into a Position.
 */
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
/**
 * Recursively populates a node with its child nodes and positions as of the given instant.
 * Issues one query per node level - see the TODO above about bulk fetching.
 */
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
/**
 * Retrieves a portfolio node, with its full subtree, as of the given instant.
 * Returns null if the identity key scheme is wrong or the node does not exist.
 */
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
/**
 * Retrieves a position as of the given instant.
 * Returns null if the identity key scheme is wrong or the position does not exist.
 */
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(PortfolioId portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
/**
 * Retrieves a portfolio (with its fully loaded node tree) as of the given instant.
 * Portfolio ids produced by this master are of the form "h8/&lt;database identifier&gt;".
 * Returns null if no portfolio exists for the id at that time.
 * @throws IllegalArgumentException if the id does not carry the "h8/" prefix
 */
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<PortfolioId> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns the ids ("h8/" prefixed) of all portfolios present at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<PortfolioId> getPortfolioIds(final InstantProvider now) {
return (Set<PortfolioId>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<PortfolioId> portfolioIds = new HashSet<PortfolioId>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(PortfolioId.of("h8/" + dbPortfolio.getId()));
}
return portfolioIds;
}
});
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
// Spring Hibernate template; configured via setSessionFactory and required before any query.
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster() {
}
/**
 * Sets the Hibernate session factory used for all database access.
 * @param sessionFactory the session factory, not null
 */
public void setSessionFactory(final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate(sessionFactory);
}
/**
 * Returns the Hibernate template, or null if no session factory has been set yet.
 */
protected HibernateTemplate getHibernateTemplate() {
return _hibernateTemplate;
}
/**
 * Spring lifecycle hook: verifies a session factory was injected.
 * @throws IllegalStateException if setSessionFactory was never called
 */
@Override
public void afterPropertiesSet() throws Exception {
if (getHibernateTemplate() == null) {
throw new IllegalStateException("sessionFactory not set");
}
}
/**
 * Immutable Position value object assembled from database beans.
 * Note getSecurity() always returns null - only the security key bundle is populated here.
 */
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl(final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier(POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
// Securities are not resolved by this master; callers must resolve via getSecurityKey().
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a PositionBean plus its identifier associations (as of 'now') into a Position.
 */
private Position positionBeanToPosition(final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition(now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier>(assocBeans.size());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add(assocBean.getDomainSpecificIdentifier());
}
return new PositionImpl(position.getIdentifier(), position.getQuantity(), new IdentifierBundle(dsids));
}
/**
 * Recursively populates a node with its child nodes and positions as of the given instant.
 * Issues one query per node level - see the TODO above about bulk fetching.
 */
private void loadPortfolioNodeChildren(final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey(bean.getIdentifier());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor(now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl(child.getName());
loadPortfolioNodeChildren(session, now, childNode, child);
node.addChildNode(childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode(now, bean)) {
node.addPosition(positionBeanToPosition(session, now, position));
}
}
/**
 * Retrieves a portfolio node, with its full subtree, as of the given instant.
 * Returns null if the identity key scheme is wrong or the node does not exist.
 */
public PortfolioNode getPortfolioNode(final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme().equals(PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
return null;
}
return (PortfolioNode) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
s_logger.info("retrieve {}", identityKey.getValue());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier(now, identityKey.getValue());
if (bean == null) {
s_logger.debug("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl(bean.getName());
loadPortfolioNodeChildren(positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPortfolioNode(TimeSource.system().instant(), identityKey);
}
/**
 * Retrieves a position as of the given instant.
 * Returns null if the identity key scheme is wrong or the position does not exist.
 */
public Position getPosition(final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme().equals(Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
return null;
}
return (Position) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier(now, identityKey.getValue());
if (bean == null) {
s_logger.debug("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition(positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
// Convenience overload: resolve against the current system time.
return getPosition(TimeSource.system().instant(), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
/**
 * Retrieves a portfolio (with its fully loaded node tree) as of the given instant.
 * Returns null if no portfolio exists for the identifier at that time.
 * FIX: this class uses Identifier for portfolio ids (see getPortfolioIds below, which
 * builds new Identifier("h8", ...)), but the previous body called portfolioId.getId()
 * with an "h8/" string prefix - an API that belongs to the PortfolioId type and does
 * not exist on Identifier. Restored the Identifier-based scheme check and getValue()
 * lookup used by the other Identifier-based variant of this class. The check is done
 * before opening a Hibernate session, consistent with getPortfolioNode/getPosition.
 * @throws IllegalArgumentException if the identifier is not in the "h8" scheme
 */
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns identifiers (scheme "h8") of all portfolios present at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
 * A Hibernate database backed implementation of a PositionMaster.
 * <p>
 * Portfolio identifiers produced and accepted by this master use the "h8" scheme
 * (the full id has the form {@code h8/<databaseId>}); portfolio nodes and positions
 * use the identity key schemes declared on {@code PortfolioNode} and {@code Position}.
 * All lookups are time-based: the no-argument overloads query at the current instant.
 */
public class HibernatePositionMaster implements PositionMaster, InitializingBean {

  /** Logger. */
  private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);

  /** Hibernate template; null until a session factory is injected. */
  private HibernateTemplate _hibernateTemplate;

  /**
   * Creates an unconfigured instance; {@link #setSessionFactory} must be called before use.
   */
  public HibernatePositionMaster() {
  }

  /**
   * Injects the Hibernate session factory used for all database access.
   *
   * @param sessionFactory  the session factory, not null
   */
  public void setSessionFactory(final SessionFactory sessionFactory) {
    ArgumentChecker.notNull(sessionFactory, "sessionFactory");
    _hibernateTemplate = new HibernateTemplate(sessionFactory);
  }

  /**
   * Gets the Hibernate template.
   *
   * @return the template, null if no session factory has been set
   */
  protected HibernateTemplate getHibernateTemplate() {
    return _hibernateTemplate;
  }

  /**
   * Validates configuration after Spring property injection.
   *
   * @throws IllegalStateException if no session factory was provided
   */
  @Override
  public void afterPropertiesSet() throws Exception {
    if (getHibernateTemplate() == null) {
      throw new IllegalStateException("sessionFactory not set");
    }
  }

  /**
   * Immutable Position implementation backed directly by database values.
   * The security is never resolved, so {@link #getSecurity} always returns null.
   */
  private static class PositionImpl implements Position {
    private final Identifier _identityKey;
    private final BigDecimal _quantity;
    private final IdentifierBundle _securityKey;

    private PositionImpl(final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
      _identityKey = new Identifier(POSITION_IDENTITY_KEY_SCHEME, identifier);
      _quantity = quantity;
      _securityKey = securityKey;
    }

    @Override
    public BigDecimal getQuantity() {
      return _quantity;
    }

    @Override
    public Security getSecurity() {
      // securities are not resolved by this master
      return null;
    }

    @Override
    public IdentifierBundle getSecurityKey() {
      return _securityKey;
    }

    @Override
    public Identifier getIdentityKey() {
      return _identityKey;
    }
  }

  // TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node

  /**
   * Converts a database position bean to an engine Position, resolving the
   * identifier associations valid at the given instant.
   */
  private Position positionBeanToPosition(final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
    final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition(now, position);
    final Collection<Identifier> dsids = new ArrayList<Identifier>(assocBeans.size());
    for (IdentifierAssociationBean assocBean : assocBeans) {
      dsids.add(assocBean.getDomainSpecificIdentifier());
    }
    return new PositionImpl(position.getIdentifier(), position.getQuantity(), new IdentifierBundle(dsids));
  }

  /**
   * Recursively populates a node with the child nodes and positions that
   * existed at the given instant.
   */
  private void loadPortfolioNodeChildren(final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
    node.setIdentityKey(bean.getIdentifier());
    for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor(now, bean)) {
      final PortfolioNodeImpl childNode = new PortfolioNodeImpl(child.getName());
      loadPortfolioNodeChildren(session, now, childNode, child);
      node.addChildNode(childNode);
    }
    for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode(now, bean)) {
      node.addPosition(positionBeanToPosition(session, now, position));
    }
  }

  /**
   * Looks up a portfolio node by identity key as of the given instant.
   *
   * @param now  the instant to query at, not null
   * @param identityKey  the node identity key, not null
   * @return the node with all descendants loaded, null if the key is not in
   *         the portfolio node scheme or no node exists
   */
  public PortfolioNode getPortfolioNode(final InstantProvider now, final Identifier identityKey) {
    if (!identityKey.getScheme().equals(PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
      s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
      return null;
    }
    return (PortfolioNode) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        s_logger.info("retrieve {}", identityKey.getValue());
        final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier(now, identityKey.getValue());
        if (bean == null) {
          s_logger.debug("bean not found for {} at {}", identityKey, now);
          return null;
        }
        final PortfolioNodeImpl node = new PortfolioNodeImpl(bean.getName());
        loadPortfolioNodeChildren(positionMasterSession, now, node, bean);
        return node;
      }
    });
  }

  /**
   * Looks up a portfolio node by identity key at the current instant.
   */
  @Override
  public PortfolioNode getPortfolioNode(final Identifier identityKey) {
    return getPortfolioNode(TimeSource.system().instant(), identityKey);
  }

  /**
   * Looks up a position by identity key as of the given instant.
   *
   * @param now  the instant to query at, not null
   * @param identityKey  the position identity key, not null
   * @return the position, null if the key is not in the position scheme or
   *         no position exists
   */
  public Position getPosition(final InstantProvider now, final Identifier identityKey) {
    if (!identityKey.getScheme().equals(Position.POSITION_IDENTITY_KEY_SCHEME)) {
      s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
      return null;
    }
    return (Position) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier(now, identityKey.getValue());
        if (bean == null) {
          s_logger.debug("bean not found for {} at {}", identityKey, now);
          return null;
        }
        return positionBeanToPosition(positionMasterSession, now, bean);
      }
    });
  }

  /**
   * Looks up a position by identity key at the current instant.
   */
  @Override
  public Position getPosition(final Identifier identityKey) {
    return getPosition(TimeSource.system().instant(), identityKey);
  }

  //-------------------------------------------------------------------------
  /**
   * Looks up a portfolio at the current instant.
   */
  @Override
  public Portfolio getPortfolio(Identifier portfolioId) {
    return getPortfolio(TimeSource.system().instant(), portfolioId);
  }

  //-------------------------------------------------------------------------
  /**
   * Looks up a portfolio as of the given instant.
   *
   * @param now  the instant to query at, not null
   * @param portfolioId  the portfolio id, which must start with "h8/", not null
   * @return the portfolio with its full node tree loaded, null if not found
   * @throws IllegalArgumentException if the id is not in the "h8/" form
   */
  public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
    // was: startsWith(...) == false — replaced with the idiomatic negation
    if (!portfolioId.getId().startsWith("h8/")) {
      throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
    }
    return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        // strip the "h8/" prefix to recover the database identifier
        final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
        if (dbPortfolio == null) {
          s_logger.debug("portfolio {} not found at {}", portfolioId, now);
          return null;
        }
        final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
        final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
        loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
        return portfolio;
      }
    });
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the identifiers of all portfolios at the current instant.
   */
  @Override
  public Set<Identifier> getPortfolioIds() {
    return getPortfolioIds(TimeSource.system().instant());
  }

  /**
   * Gets the identifiers of all portfolios that existed at the given instant,
   * each in the "h8" scheme.
   */
  @SuppressWarnings("unchecked")
  public Set<Identifier> getPortfolioIds(final InstantProvider now) {
    return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
        final Set<Identifier> portfolioIds = new HashSet<Identifier>();
        for (PortfolioBean dbPortfolio : dbPortfolios) {
          portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
        }
        return portfolioIds;
      }
    });
  }
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
 * A Hibernate database backed implementation of a PositionMaster.
 * <p>
 * Portfolio identifiers use the "h8" scheme. Portfolio nodes and positions use
 * the identity key schemes declared on {@code PortfolioNode} and {@code Position}.
 * All lookups are time-based: the no-argument overloads query at the current instant.
 */
public class HibernatePositionMaster implements PositionMaster, InitializingBean {

  /** Logger. */
  private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);

  /** Hibernate template; null until a session factory is injected. */
  private HibernateTemplate _hibernateTemplate;

  /**
   * Creates an unconfigured instance; {@link #setSessionFactory} must be called before use.
   */
  public HibernatePositionMaster() {
  }

  /**
   * Injects the Hibernate session factory used for all database access.
   *
   * @param sessionFactory  the session factory, not null
   */
  public void setSessionFactory(final SessionFactory sessionFactory) {
    ArgumentChecker.notNull(sessionFactory, "sessionFactory");
    _hibernateTemplate = new HibernateTemplate(sessionFactory);
  }

  /**
   * Gets the Hibernate template.
   *
   * @return the template, null if no session factory has been set
   */
  protected HibernateTemplate getHibernateTemplate() {
    return _hibernateTemplate;
  }

  /**
   * Validates configuration after Spring property injection.
   *
   * @throws IllegalStateException if no session factory was provided
   */
  @Override
  public void afterPropertiesSet() throws Exception {
    if (getHibernateTemplate() == null) {
      throw new IllegalStateException("sessionFactory not set");
    }
  }

  /**
   * Immutable Position implementation backed directly by database values.
   * The security is never resolved, so {@link #getSecurity} always returns null.
   */
  private static class PositionImpl implements Position {
    private final Identifier _identityKey;
    private final BigDecimal _quantity;
    private final IdentifierBundle _securityKey;

    private PositionImpl(final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
      _identityKey = new Identifier(POSITION_IDENTITY_KEY_SCHEME, identifier);
      _quantity = quantity;
      _securityKey = securityKey;
    }

    @Override
    public BigDecimal getQuantity() {
      return _quantity;
    }

    @Override
    public Security getSecurity() {
      // securities are not resolved by this master
      return null;
    }

    @Override
    public IdentifierBundle getSecurityKey() {
      return _securityKey;
    }

    @Override
    public Identifier getIdentityKey() {
      return _identityKey;
    }
  }

  // TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node

  /**
   * Converts a database position bean to an engine Position, resolving the
   * identifier associations valid at the given instant.
   */
  private Position positionBeanToPosition(final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
    final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition(now, position);
    final Collection<Identifier> dsids = new ArrayList<Identifier>(assocBeans.size());
    for (IdentifierAssociationBean assocBean : assocBeans) {
      dsids.add(assocBean.getDomainSpecificIdentifier());
    }
    return new PositionImpl(position.getIdentifier(), position.getQuantity(), new IdentifierBundle(dsids));
  }

  /**
   * Recursively populates a node with the child nodes and positions that
   * existed at the given instant.
   */
  private void loadPortfolioNodeChildren(final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
    node.setIdentityKey(bean.getIdentifier());
    for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor(now, bean)) {
      final PortfolioNodeImpl childNode = new PortfolioNodeImpl(child.getName());
      loadPortfolioNodeChildren(session, now, childNode, child);
      node.addChildNode(childNode);
    }
    for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode(now, bean)) {
      node.addPosition(positionBeanToPosition(session, now, position));
    }
  }

  /**
   * Looks up a portfolio node by identity key as of the given instant.
   *
   * @param now  the instant to query at, not null
   * @param identityKey  the node identity key, not null
   * @return the node with all descendants loaded, null if the key is not in
   *         the portfolio node scheme or no node exists
   */
  public PortfolioNode getPortfolioNode(final InstantProvider now, final Identifier identityKey) {
    if (!identityKey.getScheme().equals(PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
      s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
      return null;
    }
    return (PortfolioNode) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        s_logger.info("retrieve {}", identityKey.getValue());
        final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier(now, identityKey.getValue());
        if (bean == null) {
          s_logger.debug("bean not found for {} at {}", identityKey, now);
          return null;
        }
        final PortfolioNodeImpl node = new PortfolioNodeImpl(bean.getName());
        loadPortfolioNodeChildren(positionMasterSession, now, node, bean);
        return node;
      }
    });
  }

  /**
   * Looks up a portfolio node by identity key at the current instant.
   */
  @Override
  public PortfolioNode getPortfolioNode(final Identifier identityKey) {
    return getPortfolioNode(TimeSource.system().instant(), identityKey);
  }

  /**
   * Looks up a position by identity key as of the given instant.
   *
   * @param now  the instant to query at, not null
   * @param identityKey  the position identity key, not null
   * @return the position, null if the key is not in the position scheme or
   *         no position exists
   */
  public Position getPosition(final InstantProvider now, final Identifier identityKey) {
    if (!identityKey.getScheme().equals(Position.POSITION_IDENTITY_KEY_SCHEME)) {
      s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
      return null;
    }
    return (Position) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier(now, identityKey.getValue());
        if (bean == null) {
          s_logger.debug("bean not found for {} at {}", identityKey, now);
          return null;
        }
        return positionBeanToPosition(positionMasterSession, now, bean);
      }
    });
  }

  /**
   * Looks up a position by identity key at the current instant.
   */
  @Override
  public Position getPosition(final Identifier identityKey) {
    return getPosition(TimeSource.system().instant(), identityKey);
  }

  //-------------------------------------------------------------------------
  /**
   * Looks up a portfolio at the current instant.
   */
  @Override
  public Portfolio getPortfolio(Identifier portfolioId) {
    return getPortfolio(TimeSource.system().instant(), portfolioId);
  }

  /**
   * Looks up a portfolio as of the given instant, identified by an Identifier
   * in the "h8" scheme.
   *
   * @param now  the instant to query at, not null
   * @param portfolioId  the portfolio identifier in the "h8" scheme, not null
   * @return the portfolio with its full node tree loaded, null if not found
   * @throws IllegalArgumentException if the identifier is not in the "h8" scheme
   */
  public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
    // validate the scheme before opening a session, consistent with
    // getPortfolioNode/getPosition (was previously inside the callback)
    if (portfolioId.isNotScheme("h8")) {
      throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
    }
    return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
        if (dbPortfolio == null) {
          s_logger.debug("portfolio {} not found at {}", portfolioId, now);
          return null;
        }
        final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
        final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
        loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
        return portfolio;
      }
    });
  }

  //-------------------------------------------------------------------------
  /**
   * Looks up a portfolio as of the given instant, identified by a PortfolioId
   * whose string form starts with "h8/".
   * <p>
   * NOTE(review): {@code PortfolioId} is not imported in this file — confirm the
   * intended type; this overload duplicates the Identifier-based one above with a
   * different id convention and should probably be consolidated.
   *
   * @param now  the instant to query at, not null
   * @param portfolioId  the portfolio id, which must start with "h8/", not null
   * @return the portfolio with its full node tree loaded, null if not found
   * @throws IllegalArgumentException if the id is not in the "h8/" form
   */
  public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
    // was: startsWith(...) == false — replaced with the idiomatic negation
    if (!portfolioId.getId().startsWith("h8/")) {
      throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
    }
    return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        // strip the "h8/" prefix to recover the database identifier
        final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
        if (dbPortfolio == null) {
          s_logger.debug("portfolio {} not found at {}", portfolioId, now);
          return null;
        }
        final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
        final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
        loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
        return portfolio;
      }
    });
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the identifiers of all portfolios at the current instant.
   */
  @Override
  public Set<Identifier> getPortfolioIds() {
    return getPortfolioIds(TimeSource.system().instant());
  }

  /**
   * Gets the identifiers of all portfolios that existed at the given instant,
   * each in the "h8" scheme.
   */
  @SuppressWarnings("unchecked")
  public Set<Identifier> getPortfolioIds(final InstantProvider now) {
    return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
      @Override
      public Object doInHibernate(final Session session) throws HibernateException, SQLException {
        final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
        final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
        final Set<Identifier> portfolioIds = new HashSet<Identifier>();
        for (PortfolioBean dbPortfolio : dbPortfolios) {
          portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
        }
        return portfolioIds;
      }
    });
  }
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster() {
}
public void setSessionFactory(final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate(sessionFactory);
}
protected HibernateTemplate getHibernateTemplate() {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet() throws Exception {
if (getHibernateTemplate() == null) {
throw new IllegalStateException("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl(final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier(POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition(final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition(now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier>(assocBeans.size());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add(assocBean.getDomainSpecificIdentifier());
}
return new PositionImpl(position.getIdentifier(), position.getQuantity(), new IdentifierBundle(dsids));
}
private void loadPortfolioNodeChildren(final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey(bean.getIdentifier());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor(now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl(child.getName());
loadPortfolioNodeChildren(session, now, childNode, child);
node.addChildNode(childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode(now, bean)) {
node.addPosition(positionBeanToPosition(session, now, position));
}
}
public PortfolioNode getPortfolioNode(final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme().equals(PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
return null;
}
return (PortfolioNode) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
s_logger.info("retrieve {}", identityKey.getValue());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier(now, identityKey.getValue());
if (bean == null) {
s_logger.debug("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl(bean.getName());
loadPortfolioNodeChildren(positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode(TimeSource.system().instant(), identityKey);
}
public Position getPosition(final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme().equals(Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
return null;
}
return (Position) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier(now, identityKey.getValue());
if (bean == null) {
s_logger.debug("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition(positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition(TimeSource.system().instant(), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
Safe
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
/**
 * Creates an unconfigured instance; the session factory is injected
 * afterwards via setSessionFactory().
 */
public HibernatePositionMaster() {
}
/**
 * Injects the Hibernate session factory used for all database access.
 *
 * @param sessionFactory  the session factory, not null
 */
public void setSessionFactory(final SessionFactory sessionFactory) {
  ArgumentChecker.notNull(sessionFactory, "sessionFactory");
  _hibernateTemplate = new HibernateTemplate(sessionFactory);
}
/**
 * Gets the Hibernate template.
 *
 * @return the template, null if no session factory has been set
 */
protected HibernateTemplate getHibernateTemplate() {
  return _hibernateTemplate;
}
/**
 * Validates configuration after Spring property injection.
 *
 * @throws IllegalStateException if no session factory was provided
 */
@Override
public void afterPropertiesSet() throws Exception {
  if (getHibernateTemplate() == null) {
    throw new IllegalStateException("sessionFactory not set");
  }
}
/**
 * Immutable Position implementation backed directly by database values.
 * The security is never resolved, so getSecurity() always returns null.
 */
private static class PositionImpl implements Position {

  private final Identifier _identityKey;
  private final BigDecimal _quantity;
  private final IdentifierBundle _securityKey;

  private PositionImpl(final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
    _identityKey = new Identifier(POSITION_IDENTITY_KEY_SCHEME, identifier);
    _quantity = quantity;
    _securityKey = securityKey;
  }

  @Override
  public BigDecimal getQuantity() {
    return _quantity;
  }

  @Override
  public Security getSecurity() {
    // securities are not resolved by this master
    return null;
  }

  @Override
  public IdentifierBundle getSecurityKey() {
    return _securityKey;
  }

  @Override
  public Identifier getIdentityKey() {
    return _identityKey;
  }
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a database position bean to an engine Position, resolving the
 * identifier associations valid at the given instant.
 */
private Position positionBeanToPosition(final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
  final Collection<IdentifierAssociationBean> associations = session.getIdentifierAssociationBeanByPosition(now, position);
  final Collection<Identifier> identifiers = new ArrayList<Identifier>(associations.size());
  for (final IdentifierAssociationBean association : associations) {
    identifiers.add(association.getDomainSpecificIdentifier());
  }
  return new PositionImpl(position.getIdentifier(), position.getQuantity(), new IdentifierBundle(identifiers));
}
/**
 * Recursively populates a node with the child nodes and positions that
 * existed at the given instant.
 */
private void loadPortfolioNodeChildren(final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
  node.setIdentityKey(bean.getIdentifier());
  for (final PortfolioNodeBean childBean : session.getPortfolioNodeBeanByImmediateAncestor(now, bean)) {
    final PortfolioNodeImpl child = new PortfolioNodeImpl(childBean.getName());
    loadPortfolioNodeChildren(session, now, child, childBean);
    node.addChildNode(child);
  }
  for (final PositionBean positionBean : session.getPositionBeanByImmediatePortfolioNode(now, bean)) {
    node.addPosition(positionBeanToPosition(session, now, positionBean));
  }
}
/**
 * Looks up a portfolio node by identity key as of the given instant.
 *
 * @param now  the instant to query at, not null
 * @param identityKey  the node identity key, not null
 * @return the node with all descendants loaded, null if the key is not in
 *         the portfolio node scheme or no node exists
 */
public PortfolioNode getPortfolioNode(final InstantProvider now, final Identifier identityKey) {
  if (!identityKey.getScheme().equals(PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
    s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
    return null;
  }
  return (PortfolioNode) getHibernateTemplate().execute(new HibernateCallback() {
    @Override
    public Object doInHibernate(final Session session) throws HibernateException, SQLException {
      final PositionMasterSession masterSession = new PositionMasterSession(session);
      s_logger.info("retrieve {}", identityKey.getValue());
      final PortfolioNodeBean nodeBean = masterSession.getPortfolioNodeBeanByIdentifier(now, identityKey.getValue());
      if (nodeBean == null) {
        s_logger.debug("bean not found for {} at {}", identityKey, now);
        return null;
      }
      final PortfolioNodeImpl result = new PortfolioNodeImpl(nodeBean.getName());
      loadPortfolioNodeChildren(masterSession, now, result, nodeBean);
      return result;
    }
  });
}
/**
 * Looks up a portfolio node by identity key at the current instant.
 */
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
  final InstantProvider now = TimeSource.system().instant();
  return getPortfolioNode(now, identityKey);
}
/**
 * Looks up a position by identity key as of the given instant.
 *
 * @param now  the instant to query at, not null
 * @param identityKey  the position identity key, not null
 * @return the position, null if the key is not in the position scheme or
 *         no position exists
 */
public Position getPosition(final InstantProvider now, final Identifier identityKey) {
  if (!identityKey.getScheme().equals(Position.POSITION_IDENTITY_KEY_SCHEME)) {
    s_logger.debug("rejecting invalid identity key domain '{}'", identityKey.getScheme());
    return null;
  }
  return (Position) getHibernateTemplate().execute(new HibernateCallback() {
    @Override
    public Object doInHibernate(final Session session) throws HibernateException, SQLException {
      final PositionMasterSession masterSession = new PositionMasterSession(session);
      final PositionBean positionBean = masterSession.getPositionBeanByIdentifier(now, identityKey.getValue());
      if (positionBean == null) {
        s_logger.debug("bean not found for {} at {}", identityKey, now);
        return null;
      }
      return positionBeanToPosition(masterSession, now, positionBean);
    }
  });
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
// Convenience overload evaluated at the current system instant.
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
=======
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
// Convenience overload evaluated at the current system instant.
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns the identifiers (scheme "h8") of all portfolios known at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
=======
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
Unstructured
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
// Default constructor for Spring bean instantiation; sessionFactory is injected separately.
public HibernatePositionMaster () {
}
/**
 * Sets the Hibernate session factory used for all database access.
 */
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
// Fails fast at startup if the session factory was never injected.
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
/** Immutable, database-detached Position implementation returned by this master. */
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
// Security resolution is not performed here; callers resolve via getSecurityKey().
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
/**
 * Converts a database position bean into a detached {@link Position}, collecting
 * the domain-specific identifiers associated with it as of {@code now}.
 */
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
/**
 * Recursively populates {@code node} from {@code bean} as of {@code now}: sets the
 * identity key, builds each immediate child subtree depth-first and attaches the
 * positions held directly at this node.
 */
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
/**
 * Retrieves a portfolio node (with its whole subtree) as of the given instant.
 * Returns null when the identifier scheme is not the portfolio-node scheme, or
 * when no node exists for the identifier at that instant.
 */
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
// Convenience overload evaluated at the current system instant.
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
/**
 * Retrieves a position as of the given instant.
 * Returns null when the identifier scheme is not the position scheme, or when
 * no position exists for the identifier at that instant.
 */
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
// Convenience overload evaluated at the current system instant.
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
// Convenience overload evaluated at the current system instant.
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
<<<<<<< MINE
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
=======
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
>>>>>>> YOURS
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
<<<<<<< MINE
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
=======
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
>>>>>>> YOURS
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
// Convenience overload evaluated at the current system instant.
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
/**
 * Returns the identifiers (scheme "h8") of all portfolios known at the given instant.
 */
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.position.db;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.time.InstantProvider;
import javax.time.TimeSource;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.HibernateTemplate;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioImpl;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.position.PortfolioNodeImpl;
import com.opengamma.engine.position.Position;
import com.opengamma.engine.position.PositionMaster;
import com.opengamma.engine.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.util.ArgumentChecker;
/**
* A Hibernate database backed implementation of a PositionMaster.
*/
public class HibernatePositionMaster implements PositionMaster, InitializingBean {
private static final Logger s_logger = LoggerFactory.getLogger(HibernatePositionMaster.class);
private HibernateTemplate _hibernateTemplate;
public HibernatePositionMaster () {
}
public void setSessionFactory (final SessionFactory sessionFactory) {
ArgumentChecker.notNull(sessionFactory, "sessionFactory");
_hibernateTemplate = new HibernateTemplate (sessionFactory);
}
protected HibernateTemplate getHibernateTemplate () {
return _hibernateTemplate;
}
@Override
public void afterPropertiesSet () throws Exception {
if (getHibernateTemplate () == null) {
throw new IllegalStateException ("sessionFactory not set");
}
}
private static class PositionImpl implements Position {
private final Identifier _identityKey;
private final BigDecimal _quantity;
private final IdentifierBundle _securityKey;
private PositionImpl (final String identifier, final BigDecimal quantity, final IdentifierBundle securityKey) {
_identityKey = new Identifier (POSITION_IDENTITY_KEY_SCHEME, identifier);
_quantity = quantity;
_securityKey = securityKey;
}
@Override
public BigDecimal getQuantity() {
return _quantity;
}
@Override
public Security getSecurity() {
return null;
}
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
@Override
public Identifier getIdentityKey() {
return _identityKey;
}
}
// TODO this is a slow way of constructing the Node graph - there are a number of recursive queries. One of the bulk fetches could be used and the graph built up from the information in each node
private Position positionBeanToPosition (final PositionMasterSession session, final InstantProvider now, final PositionBean position) {
final Collection<IdentifierAssociationBean> assocBeans = session.getIdentifierAssociationBeanByPosition (now, position);
final Collection<Identifier> dsids = new ArrayList<Identifier> (assocBeans.size ());
for (IdentifierAssociationBean assocBean : assocBeans) {
dsids.add (assocBean.getDomainSpecificIdentifier ());
}
return new PositionImpl (position.getIdentifier (), position.getQuantity (), new IdentifierBundle (dsids));
}
private void loadPortfolioNodeChildren (final PositionMasterSession session, final InstantProvider now, final PortfolioNodeImpl node, final PortfolioNodeBean bean) {
node.setIdentityKey (bean.getIdentifier ());
for (PortfolioNodeBean child : session.getPortfolioNodeBeanByImmediateAncestor (now, bean)) {
final PortfolioNodeImpl childNode = new PortfolioNodeImpl (child.getName ());
loadPortfolioNodeChildren (session, now, childNode, child);
node.addChildNode (childNode);
}
for (final PositionBean position : session.getPositionBeanByImmediatePortfolioNode (now, bean)) {
node.addPosition (positionBeanToPosition (session, now, position));
}
}
public PortfolioNode getPortfolioNode (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (PortfolioNode.PORTFOLIO_NODE_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (PortfolioNode)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
s_logger.info ("retrieve {}", identityKey.getValue ());
final PortfolioNodeBean bean = positionMasterSession.getPortfolioNodeBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
final PortfolioNodeImpl node = new PortfolioNodeImpl (bean.getName ());
loadPortfolioNodeChildren (positionMasterSession, now, node, bean);
return node;
}
});
}
@Override
public PortfolioNode getPortfolioNode(final Identifier identityKey) {
return getPortfolioNode (TimeSource.system ().instant (), identityKey);
}
public Position getPosition (final InstantProvider now, final Identifier identityKey) {
if (!identityKey.getScheme ().equals (Position.POSITION_IDENTITY_KEY_SCHEME)) {
s_logger.debug ("rejecting invalid identity key domain '{}'", identityKey.getScheme ());
return null;
}
return (Position)getHibernateTemplate ().execute (new HibernateCallback () {
@Override
public Object doInHibernate (final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession (session);
final PositionBean bean = positionMasterSession.getPositionBeanByIdentifier (now, identityKey.getValue ());
if (bean == null) {
s_logger.debug ("bean not found for {} at {}", identityKey, now);
return null;
}
return positionBeanToPosition (positionMasterSession, now, bean);
}
});
}
@Override
public Position getPosition(final Identifier identityKey) {
return getPosition (TimeSource.system ().instant (), identityKey);
}
//-------------------------------------------------------------------------
@Override
public Portfolio getPortfolio(Identifier portfolioId) {
return getPortfolio(TimeSource.system().instant(), portfolioId);
}
<<<<<<< MINE
public Portfolio getPortfolio(final InstantProvider now, final Identifier portfolioId) {
=======
public Portfolio getPortfolio(final InstantProvider now, final PortfolioId portfolioId) {
if (portfolioId.getId().startsWith("h8/") == false) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
>>>>>>> YOURS
return (Portfolio) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
<<<<<<< MINE
if (portfolioId.isNotScheme("h8")) {
throw new IllegalArgumentException("Invalid portfolio id for Hibernate: " + portfolioId);
}
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getValue());
=======
final PortfolioBean dbPortfolio = positionMasterSession.getPortfolioBeanByIdentifier(now, portfolioId.getId().substring(3));
>>>>>>> YOURS
if (dbPortfolio == null) {
s_logger.debug("portfolio {} not found at {}", portfolioId, now);
return null;
}
final PortfolioNodeImpl rootNode = new PortfolioNodeImpl();
final PortfolioImpl portfolio = new PortfolioImpl(portfolioId, dbPortfolio.getName(), rootNode);
loadPortfolioNodeChildren(positionMasterSession, now, rootNode, dbPortfolio.getRoot());
return portfolio;
}
});
}
//-------------------------------------------------------------------------
@Override
public Set<Identifier> getPortfolioIds() {
return getPortfolioIds(TimeSource.system().instant());
}
@SuppressWarnings("unchecked")
public Set<Identifier> getPortfolioIds(final InstantProvider now) {
return (Set<Identifier>) getHibernateTemplate().execute(new HibernateCallback() {
@Override
public Object doInHibernate(final Session session) throws HibernateException, SQLException {
final PositionMasterSession positionMasterSession = new PositionMasterSession(session);
final Collection<PortfolioBean> dbPortfolios = positionMasterSession.getAllPortfolioBeans(now);
final Set<Identifier> portfolioIds = new HashSet<Identifier>();
for (PortfolioBean dbPortfolio : dbPortfolios) {
portfolioIds.add(new Identifier("h8", dbPortfolio.getIdentifier()));
}
return portfolioIds;
}
});
}
}
Diff Result
No diff
Case 43 - java_ogplatform.rev_038e2_aae4b..SingleComputationCycle.java
Base
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.View;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final View _view;
// Valuation instant for this cycle, derived from the epoch-millis constructor argument.
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
// State:
/** Current state of the cycle */
private enum State {
CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
// Guards _executedNodes and _failedNodes: read lock for queries, write lock for updates.
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
// One computation cache per calculation configuration name; populated by createAllCaches().
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration =
new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Creates a computation cycle for the given view at the given valuation time.
 *
 * @param view the view this cycle computes, not null
 * @param valuationTime the valuation instant, milliseconds since the epoch
 */
public SingleComputationCycle(View view, long valuationTime) {
ArgumentChecker.notNull(view, "view");
_view = view;
_valuationTime = Instant.ofEpochMillis(valuationTime);
_resultModel = new ViewComputationResultModelImpl();
_resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
_dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
// A cycle starts life in CREATED; prepareInputs() performs the first transition.
_state = State.CREATED;
}
/**
 * @return the view this cycle computes
 */
public View getView() {
return _view;
}
/**
 * @return the valuation instant for this cycle
 */
public Instant getValuationTime() {
return _valuationTime;
}
/**
 * @return the viewName
 */
public String getViewName() {
return getView().getDefinition().getName();
}
/**
 * @return the processingContext
 */
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
 * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getStartTime() {
return _startTime;
}
/**
 * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getEndTime() {
return _endTime;
}
/**
 * @return How many nanoseconds the cycle took
 */
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
 * @return the resultModel
 */
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
/**
 * @param calcConfigName the calculation configuration name
 * @return the computation cache for that configuration, or null if none was created
 */
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
 * @return the viewDefinition
 */
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
/**
 * @return the executor used to run this cycle's dependency graphs
 */
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
/**
 * @return an unmodifiable view of the per-configuration computation caches
 */
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
/**
 * @return the names of all calculation configurations in the view
 */
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
/**
 * Transitions the cycle from CREATED to INPUTS_PREPARED: records the start
 * time, creates one computation cache per calculation configuration, and
 * copies every required live data value from the snapshot provider into all
 * caches. Requirements with no snapshot value are collected and logged as
 * missing.
 *
 * @throws IllegalStateException if the cycle is not in the CREATED state
 */
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
// Live data is shared across configurations, so write it to every cache.
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
/**
 * Renders the given missing live data specifications as a human-readable
 * string, one bracketed entry per specification of the form
 * "[valueName on type(-scheme):identifier] ".
 *
 * @param missingLiveData the specifications that could not be resolved
 * @return the concatenated description of every entry
 */
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
  StringBuilder buffer = new StringBuilder();
  for (ValueSpecification specification : missingLiveData) {
    ValueRequirement requirement = specification.getRequirementSpecification();
    buffer.append("[");
    buffer.append(requirement.getValueName());
    buffer.append(" on ");
    buffer.append(requirement.getTargetSpecification().getType());
    // Primitive targets additionally carry their identifier scheme.
    boolean isPrimitive = requirement.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE;
    if (isPrimitive) {
      buffer.append("-");
      buffer.append(requirement.getTargetSpecification().getIdentifier().getScheme().getName());
    }
    buffer.append(":");
    buffer.append(requirement.getTargetSpecification().getIdentifier().getValue());
    buffer.append("] ");
  }
  return buffer.toString();
}
/**
 * Creates a computation cache for every calculation configuration and stores
 * it in the per-configuration cache map, keyed by configuration name.
 */
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
 * Puts the given value into the shared section of every configuration's cache.
 *
 * @param dataAsValue the live data value to share across all configurations
 */
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
 * Determine which live data inputs have changed between iterations, and:
 * <ul>
 * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
 * <li>Only recompute the values that could have changed based on live data inputs
 * </ul>
 *
 * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
 * @throws IllegalStateException if this cycle is not in the INPUTS_PREPARED state
 * @throws IllegalArgumentException if the previous cycle is not FINISHED
 */
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph,
cache,
previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.",
new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
// Unchanged nodes are marked executed so executePlans() skips them, and
// their previous outputs are copied forward into this cycle's cache.
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Transitions INPUTS_PREPARED -> EXECUTING -> FINISHED. Submits every
 * calculation configuration's executable dependency graph (already-executed
 * nodes stripped out) to the graph executor, then polls the returned futures
 * until all of them have completed.
 *
 * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
 * @throws OpenGammaRuntimeException if any graph execution fails
 */
public void executePlans() {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
_state = State.EXECUTING;
LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
Future<?> future = getDependencyGraphExecutor().execute(depGraph);
futures.add(future);
}
// Poll with a timeout so progress can be logged; incomplete futures are
// re-queued until every graph has finished.
while (!futures.isEmpty()) {
Future<?> future = futures.poll();
try {
future.get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
s_logger.info("Waiting for " + future);
futures.add(future);
} catch (InterruptedException e) {
// NOTE(review): Thread.interrupted() CLEARS the interrupt flag so the
// loop can keep waiting; this swallows the interrupt instead of
// restoring it via Thread.currentThread().interrupt() — confirm intended.
Thread.interrupted();
s_logger.info("Interrupted while waiting for completion of " + future);
futures.add(future);
} catch (ExecutionException e) {
s_logger.error("Unable to execute dependency graph", e);
// Should we be swallowing this or not?
throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
}
}
_state = State.FINISHED;
}
/**
 * Looks up the full dependency graph for the named calculation configuration.
 *
 * @param calcConfName the calculation configuration name
 * @return the dependency graph for that configuration
 */
private DependencyGraph getDependencyGraph(String calcConfName) {
  return getViewEvaluationModel().getDependencyGraph(calcConfName);
}
/**
 * @return A dependency graph with nodes already executed stripped out.
 * See {@link #computeDelta} and how it calls {@link #markExecuted}.
 */
private DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
// Side effect: such nodes are also marked executed so later queries
// via isExecuted() treat them as done.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
/**
 * Stamps the result model with the current wall-clock time, copies every
 * output value from the computation caches into it, and records the cycle
 * end time.
 */
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
/**
 * Copies the cached output values of one dependency graph into the result
 * model, honouring the view definition's result-model filtering.
 *
 * @param calcConfigurationName the calculation configuration being copied
 * @param depGraph the dependency graph whose outputs are copied
 */
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
// Missing values (e.g. from failed nodes) are simply omitted from results.
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
/**
 * Transitions FINISHED -> CLEANED: optionally dumps the computation caches to
 * disk (per the view definition), then releases the live data snapshot and
 * the computation caches. After this call the cycle may no longer serve as
 * the previous cycle for {@link #computeDelta}.
 *
 * @throws IllegalStateException if the cycle is not in the FINISHED state
 */
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Writes the contents of every calculation configuration's computation cache
 * to a temporary text file, one file per configuration. Intended as a
 * debugging aid (enabled via the view definition's dump flag).
 *
 * @throws RuntimeException if a cache dump cannot be written
 */
public void dumpComputationCachesToDisk() {
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    // TreeMap gives a stable, sorted ordering of the dumped entries.
    TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      Object value = computationCache.getValue(outputSpec);
      key2Value.put(outputSpec.toString(), value);
    }
    try {
      File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
      s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
      FileWriter writer = new FileWriter(file);
      try {
        writer.write(key2Value.toString());
      } finally {
        // Close in a finally block so the file handle is not leaked if the
        // write itself throws (the original code leaked the writer on error).
        writer.close();
      }
    } catch (IOException e) {
      throw new RuntimeException("Writing cache to file failed", e);
    }
  }
}
// --------------------------------------------------------------------------
/**
 * Checks whether the given node has already been executed (or skipped) in
 * this cycle. A null node is treated as executed.
 *
 * @param node the dependency node, may be null
 * @return true if the node is null or has been marked executed
 */
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Records the given node as executed. A null node is ignored.
 *
 * @param node the dependency node, may be null
 */
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
/**
 * Checks whether the given node has been marked as failed in this cycle.
 * A null node is treated as failed.
 *
 * @param node the dependency node, may be null
 * @return true if the node is null or has been marked failed
 */
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Records the given node as failed. A null node is ignored.
 *
 * @param node the dependency node, may be null
 */
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.View;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final View _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
// State:
/** Current state of the cycle */
private enum State {
CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration =
new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Creates a computation cycle for the given view at the given valuation time.
 *
 * @param view the view this cycle computes, not null
 * @param valuationTime the valuation instant, milliseconds since the epoch
 */
public SingleComputationCycle(View view, long valuationTime) {
ArgumentChecker.notNull(view, "view");
_view = view;
_valuationTime = Instant.ofEpochMillis(valuationTime);
_resultModel = new ViewComputationResultModelImpl();
_resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
_dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
// A cycle starts life in CREATED; prepareInputs() performs the first transition.
_state = State.CREATED;
}
public View getView() {
return _view;
}
public Instant getValuationTime() {
return _valuationTime;
}
/**
* @return the viewName
*/
public String getViewName() {
return getView().getDefinition().getName();
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
* @return the start time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getStartTime() {
return _startTime;
}
/**
* @return the end time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getEndTime() {
return _endTime;
}
/**
* @return How many nanoseconds the cycle took
*/
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
* @return the resultModel
*/
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
* @return the viewDefinition
*/
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
/**
 * Renders the given missing live data specifications as a human-readable
 * string, one bracketed entry per specification of the form
 * "[valueName on type(-scheme):identifier] ".
 *
 * @param missingLiveData the specifications that could not be resolved
 * @return the concatenated description of every entry
 */
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
  StringBuilder buffer = new StringBuilder();
  for (ValueSpecification specification : missingLiveData) {
    ValueRequirement requirement = specification.getRequirementSpecification();
    buffer.append("[");
    buffer.append(requirement.getValueName());
    buffer.append(" on ");
    buffer.append(requirement.getTargetSpecification().getType());
    // Primitive targets additionally carry their identifier scheme.
    boolean isPrimitive = requirement.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE;
    if (isPrimitive) {
      buffer.append("-");
      buffer.append(requirement.getTargetSpecification().getIdentifier().getScheme().getName());
    }
    buffer.append(":");
    buffer.append(requirement.getTargetSpecification().getIdentifier().getValue());
    buffer.append("] ");
  }
  return buffer.toString();
}
/**
*
*/
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
* @param dataAsValue
*/
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph,
cache,
previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.",
new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Transitions INPUTS_PREPARED -> EXECUTING -> FINISHED. Submits every
 * calculation configuration's executable dependency graph (already-executed
 * nodes stripped out) to the graph executor, then polls the returned futures
 * until all of them have completed.
 *
 * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
 * @throws OpenGammaRuntimeException if any graph execution fails
 */
public void executePlans() {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
_state = State.EXECUTING;
LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
Future<?> future = getDependencyGraphExecutor().execute(depGraph);
futures.add(future);
}
// Poll with a timeout so progress can be logged; incomplete futures are
// re-queued until every graph has finished.
while (!futures.isEmpty()) {
Future<?> future = futures.poll();
try {
future.get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
s_logger.info("Waiting for " + future);
futures.add(future);
} catch (InterruptedException e) {
// NOTE(review): Thread.interrupted() CLEARS the interrupt flag so the
// loop can keep waiting; this swallows the interrupt instead of
// restoring it via Thread.currentThread().interrupt() — confirm intended.
Thread.interrupted();
s_logger.info("Interrupted while waiting for completion of " + future);
futures.add(future);
} catch (ExecutionException e) {
s_logger.error("Unable to execute dependency graph", e);
// Should we be swallowing this or not?
throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
}
}
_state = State.FINISHED;
}
/**
 * Looks up the full dependency graph for the named calculation configuration.
 *
 * @param calcConfName the calculation configuration name
 * @return the dependency graph for that configuration
 */
private DependencyGraph getDependencyGraph(String calcConfName) {
  return getViewEvaluationModel().getDependencyGraph(calcConfName);
}
/**
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
private DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Writes the contents of every calculation configuration's computation cache
 * to a temporary text file, one file per configuration. Intended as a
 * debugging aid (enabled via the view definition's dump flag).
 *
 * @throws RuntimeException if a cache dump cannot be written
 */
public void dumpComputationCachesToDisk() {
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    // TreeMap gives a stable, sorted ordering of the dumped entries.
    TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      Object value = computationCache.getValue(outputSpec);
      key2Value.put(outputSpec.toString(), value);
    }
    try {
      File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
      s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
      FileWriter writer = new FileWriter(file);
      try {
        writer.write(key2Value.toString());
      } finally {
        // Close in a finally block so the file handle is not leaked if the
        // write itself throws (the original code leaked the writer on error).
        writer.close();
      }
    } catch (IOException e) {
      throw new RuntimeException("Writing cache to file failed", e);
    }
  }
}
// --------------------------------------------------------------------------
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
Left
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.View;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  /** How long to wait on a single graph-execution future before re-polling it. */
  private static final long FUTURE_POLL_TIMEOUT_SECONDS = 5;

  // Injected Inputs:
  /** The view this cycle computes; never null (validated in the constructor). */
  private final View _view;
  /** Valuation instant for this cycle, derived from the epoch-millis constructor argument. */
  private final Instant _valuationTime;
  /** Executor used to run the per-configuration dependency graphs. */
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  /** Sink for execution statistics reported by the graph executor. */
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  // State:
  /** Current state of the cycle; transitions CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED. */
  private enum State {
    CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
  }

  private State _state;

  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _startTime;
  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _endTime;

  /** Guards {@link #_executedNodes} and {@link #_failedNodes}. */
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  /** Nodes whose outputs are already available, either executed here or carried over by {@link #computeDelta}. */
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  /** Nodes whose execution failed. */
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  /** One computation cache per calculation configuration name, created by {@link #createAllCaches}. */
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  /** Result model populated at the end of the cycle by {@link #populateResultModel()}. */
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Creates a computation cycle for the given view.
   *
   * @param view the view to compute, not null
   * @param valuationTime the valuation instant, milliseconds since the epoch
   */
  public SingleComputationCycle(View view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  /**
   * @return the view being computed, not null
   */
  public View getView() {
    return _view;
  }

  /**
   * @return the valuation instant for this cycle
   */
  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  /**
   * @param calcConfigName calculation configuration name
   * @return the computation cache for that configuration, or null if none has been created
   */
  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  /**
   * @return the executor used to run the dependency graphs
   */
  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  /**
   * @return the statistics gatherer receiving execution statistics for this view
   */
  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  /**
   * @return an unmodifiable view of the caches keyed by calculation configuration name
   */
  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  /**
   * @return the evaluation model (compiled form) of the view
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    // REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
    // handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
    // compilation.
    return getView().getViewEvaluationModel();
  }

  /**
   * @return the names of every calculation configuration in the view
   */
  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------
  /**
   * Prepares the cycle's inputs: records the start time, creates the per-configuration
   * caches and loads every live data requirement from the snapshot provider into them.
   * Missing live data items are logged but do not abort the cycle.
   *
   * @throws IllegalStateException if the cycle is not in the CREATED state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        // s_logger.warn("Live Data Requirement: {}", dataAsValue);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  /**
   * Formats the given missing live data requirements into a single human-readable line for logging.
   *
   * @param missingLiveData the specifications that could not be loaded from the snapshot
   * @return a string of the form {@code [ValueName on TYPE:identifier] ...}
   */
  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        // Primitives are ambiguous without their identifier scheme, so include it.
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Creates (or obtains) one computation cache per calculation configuration for this
   * valuation time and stores it in {@link #_cachesByCalculationConfiguration}.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Puts the given value into the shared section of every calculation configuration's cache.
   *
   * @param dataAsValue the live data value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   * @throws IllegalStateException if this cycle is not in the INPUTS_PREPARED state
   * @throws IllegalArgumentException if the previous cycle has not FINISHED
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Mark the node executed so getExecutableDependencyGraph strips it out, then copy
        // its previous outputs forward so downstream consumers still find them.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Submits the executable dependency graph of every calculation configuration to the
   * executor and blocks until all of them complete, polling each future with a short timeout.
   * If the waiting thread is interrupted, the interrupt is remembered and the thread's
   * interrupt status is restored once all graphs have finished, rather than being silently
   * discarded (the original code called {@code Thread.interrupted()}, which is a no-op
   * after the exception has already cleared the flag).
   *
   * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
   * @throws OpenGammaRuntimeException if any graph execution fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
      futures.add(future);
    }
    boolean interrupted = false;
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(FUTURE_POLL_TIMEOUT_SECONDS, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        // Not done yet; re-queue and keep waiting.
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Remember the interrupt but keep waiting for the graphs; restoring the flag
        // immediately would make the next get() throw again and busy-spin this loop.
        interrupted = true;
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        // Should we be swallowing this or not?
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
    if (interrupted) {
      // Re-assert the interrupt status that was cleared when InterruptedException was thrown.
      Thread.currentThread().interrupt();
    }
    _state = State.FINISHED;
  }

  /**
   * @param calcConfName configuration name
   * @return the full (unfiltered) dependency graph for that configuration
   */
  private DependencyGraph getDependencyGraph(String calcConfName) {
    DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
    return depGraph;
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------
  /**
   * Copies the computed outputs of every calculation configuration into the result model,
   * stamps the result with the current time and records the cycle's end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  /**
   * Copies the cached outputs of one dependency graph into the result model, honouring
   * the view definition's result model filter and skipping values absent from the cache.
   *
   * @param calcConfigurationName the configuration whose cache is read
   * @param depGraph the configuration's dependency graph
   */
  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Releases the live data snapshot and computation caches used by this cycle, optionally
   * dumping the caches to disk first. After this the cycle is CLEANED and unusable.
   *
   * @throws IllegalStateException if the cycle is not in the FINISHED state
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Writes the contents of every calculation configuration's cache to a temporary text
   * file, one file per configuration, for debugging.
   *
   * @throws OpenGammaRuntimeException if a file cannot be created or written
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        try {
          writer.write(key2Value.toString());
        } finally {
          // Close in a finally block so the file handle is not leaked if the write fails.
          writer.close();
        }
      } catch (IOException e) {
        // Use the project's runtime exception for consistency with executePlans();
        // OpenGammaRuntimeException extends RuntimeException so existing catches still work.
        throw new OpenGammaRuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------
  /**
   * @param node the node to check, null returns true
   * @return whether the node's outputs are already available
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the node as executed so it is excluded from subsequent executable graphs.
   *
   * @param node the node to mark, null is ignored
   */
  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * @param node the node to check, null returns true
   * @return whether the node's execution failed
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the node as failed.
   *
   * @param node the node to mark, null is ignored
   */
  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.View;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  /** How long to wait on a single graph-execution future before re-polling it. */
  private static final long FUTURE_POLL_TIMEOUT_SECONDS = 5;

  // Injected Inputs:
  /** The view this cycle computes; never null (validated in the constructor). */
  private final View _view;
  /** Valuation instant for this cycle, derived from the epoch-millis constructor argument. */
  private final Instant _valuationTime;
  /** Executor used to run the per-configuration dependency graphs. */
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  /** Sink for execution statistics reported by the graph executor. */
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  // State:
  /** Current state of the cycle; transitions CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED. */
  private enum State {
    CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
  }

  private State _state;

  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _startTime;
  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _endTime;

  /** Guards {@link #_executedNodes} and {@link #_failedNodes}. */
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  /** Nodes whose outputs are already available, either executed here or carried over by {@link #computeDelta}. */
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  /** Nodes whose execution failed. */
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  /** One computation cache per calculation configuration name, created by {@link #createAllCaches}. */
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  /** Result model populated at the end of the cycle by {@link #populateResultModel()}. */
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Creates a computation cycle for the given view.
   *
   * @param view the view to compute, not null
   * @param valuationTime the valuation instant, milliseconds since the epoch
   */
  public SingleComputationCycle(View view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  /**
   * @return the view being computed, not null
   */
  public View getView() {
    return _view;
  }

  /**
   * @return the valuation instant for this cycle
   */
  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  /**
   * @param calcConfigName calculation configuration name
   * @return the computation cache for that configuration, or null if none has been created
   */
  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  /**
   * @return the executor used to run the dependency graphs
   */
  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  /**
   * @return the statistics gatherer receiving execution statistics for this view
   */
  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  /**
   * @return an unmodifiable view of the caches keyed by calculation configuration name
   */
  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  /**
   * @return the evaluation model (compiled form) of the view
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    // REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
    // handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
    // compilation.
    return getView().getViewEvaluationModel();
  }

  /**
   * @return the names of every calculation configuration in the view
   */
  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------
  /**
   * Prepares the cycle's inputs: records the start time, creates the per-configuration
   * caches and loads every live data requirement from the snapshot provider into them.
   * Missing live data items are logged but do not abort the cycle.
   *
   * @throws IllegalStateException if the cycle is not in the CREATED state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        // s_logger.warn("Live Data Requirement: {}", dataAsValue);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  /**
   * Formats the given missing live data requirements into a single human-readable line for logging.
   *
   * @param missingLiveData the specifications that could not be loaded from the snapshot
   * @return a string of the form {@code [ValueName on TYPE:identifier] ...}
   */
  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        // Primitives are ambiguous without their identifier scheme, so include it.
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Creates (or obtains) one computation cache per calculation configuration for this
   * valuation time and stores it in {@link #_cachesByCalculationConfiguration}.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Puts the given value into the shared section of every calculation configuration's cache.
   *
   * @param dataAsValue the live data value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   * @throws IllegalStateException if this cycle is not in the INPUTS_PREPARED state
   * @throws IllegalArgumentException if the previous cycle has not FINISHED
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Mark the node executed so getExecutableDependencyGraph strips it out, then copy
        // its previous outputs forward so downstream consumers still find them.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Submits the executable dependency graph of every calculation configuration to the
   * executor and blocks until all of them complete, polling each future with a short timeout.
   * If the waiting thread is interrupted, the interrupt is remembered and the thread's
   * interrupt status is restored once all graphs have finished, rather than being silently
   * discarded (the original code called {@code Thread.interrupted()}, which is a no-op
   * after the exception has already cleared the flag).
   *
   * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
   * @throws OpenGammaRuntimeException if any graph execution fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
      futures.add(future);
    }
    boolean interrupted = false;
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(FUTURE_POLL_TIMEOUT_SECONDS, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        // Not done yet; re-queue and keep waiting.
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Remember the interrupt but keep waiting for the graphs; restoring the flag
        // immediately would make the next get() throw again and busy-spin this loop.
        interrupted = true;
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        // Should we be swallowing this or not?
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
    if (interrupted) {
      // Re-assert the interrupt status that was cleared when InterruptedException was thrown.
      Thread.currentThread().interrupt();
    }
    _state = State.FINISHED;
  }

  /**
   * @param calcConfName configuration name
   * @return the full (unfiltered) dependency graph for that configuration
   */
  private DependencyGraph getDependencyGraph(String calcConfName) {
    DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
    return depGraph;
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------
  /**
   * Copies the computed outputs of every calculation configuration into the result model,
   * stamps the result with the current time and records the cycle's end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  /**
   * Copies the cached outputs of one dependency graph into the result model, honouring
   * the view definition's result model filter and skipping values absent from the cache.
   *
   * @param calcConfigurationName the configuration whose cache is read
   * @param depGraph the configuration's dependency graph
   */
  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Releases the live data snapshot and computation caches used by this cycle, optionally
   * dumping the caches to disk first. After this the cycle is CLEANED and unusable.
   *
   * @throws IllegalStateException if the cycle is not in the FINISHED state
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Writes the contents of every calculation configuration's cache to a temporary text
   * file, one file per configuration, for debugging.
   *
   * @throws OpenGammaRuntimeException if a file cannot be created or written
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        try {
          writer.write(key2Value.toString());
        } finally {
          // Close in a finally block so the file handle is not leaked if the write fails.
          writer.close();
        }
      } catch (IOException e) {
        // Use the project's runtime exception for consistency with executePlans();
        // OpenGammaRuntimeException extends RuntimeException so existing catches still work.
        throw new OpenGammaRuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------
  /**
   * @param node the node to check, null returns true
   * @return whether the node's outputs are already available
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the node as executed so it is excluded from subsequent executable graphs.
   *
   * @param node the node to mark, null is ignored
   */
  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * @param node the node to check, null returns true
   * @return whether the node's execution failed
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the node as failed.
   *
   * @param node the node to mark, null is ignored
   */
  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
Right
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
// The view this cycle computes; never null (validated in the constructor).
private final ViewInternal _view;
// Valuation instant for this cycle, derived from the epoch-millis constructor argument.
private final Instant _valuationTime;
// Executor used to run the dependency graphs; obtained from the view's processing context.
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
// State:
/** Current state of the cycle */
private enum State {
CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
}
// Current lifecycle state; set to CREATED by the constructor.
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
// Guards _executedNodes and _failedNodes.
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
// Nodes whose outputs are already available for this cycle.
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
// Nodes whose execution failed.
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
// One computation cache per calculation configuration name.
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration =
new HashMap<String, ViewComputationCache>();
// Outputs:
// Result model built up over the cycle; created in the constructor.
private final ViewComputationResultModelImpl _resultModel;
public SingleComputationCycle(ViewInternal view, long valuationTime) {
ArgumentChecker.notNull(view, "view");
_view = view;
_valuationTime = Instant.ofEpochMillis(valuationTime);
_resultModel = new ViewComputationResultModelImpl();
_resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
if (getViewEvaluationModel().getPortfolio() != null) {
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
}
_dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
_state = State.CREATED;
}
public ViewInternal getView() {
return _view;
}
public Instant getValuationTime() {
return _valuationTime;
}
/**
* @return the viewName
*/
public String getViewName() {
return getView().getDefinition().getName();
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
* @return the start time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getStartTime() {
return _startTime;
}
/**
* @return the end time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getEndTime() {
return _endTime;
}
/**
* @return How many nanoseconds the cycle took
*/
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
* @return the resultModel
*/
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
* @return the viewDefinition
*/
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
StringBuilder sb = new StringBuilder();
for (ValueSpecification spec : missingLiveData) {
ValueRequirement req = spec.getRequirementSpecification();
sb.append("[").append(req.getValueName()).append(" on ");
sb.append(req.getTargetSpecification().getType());
if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
}
sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
}
return sb.toString();
}
/**
*
*/
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
* @param dataAsValue
*/
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph,
cache,
previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.",
new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
public void executePlans() {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
_state = State.EXECUTING;
LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
Future<?> future = getDependencyGraphExecutor().execute(depGraph);
futures.add(future);
}
while (!futures.isEmpty()) {
Future<?> future = futures.poll();
try {
future.get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
s_logger.info("Waiting for " + future);
futures.add(future);
} catch (InterruptedException e) {
Thread.interrupted();
s_logger.info("Interrupted while waiting for completion of " + future);
futures.add(future);
} catch (ExecutionException e) {
s_logger.error("Unable to execute dependency graph", e);
// Should we be swallowing this or not?
throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
}
}
_state = State.FINISHED;
}
private DependencyGraph getDependencyGraph(String calcConfName) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
return depGraph;
}
/**
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
private DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
public void dumpComputationCachesToDisk() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
Object value = computationCache.getValue(outputSpec);
key2Value.put(outputSpec.toString(), value);
}
try {
File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
FileWriter writer = new FileWriter(file);
writer.write(key2Value.toString());
writer.close();
} catch (IOException e) {
throw new RuntimeException("Writing cache to file failed", e);
}
}
}
// --------------------------------------------------------------------------
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final ViewInternal _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
// State:
/**
 * Current state of the cycle. Legal transitions, enforced by the explicit checks in the
 * methods below: CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED.
 */
private enum State {
CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
// Guards _executedNodes and _failedNodes; nodes may be marked while graph execution is in flight.
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration =
new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Creates a cycle in the {@link State#CREATED} state, pre-populating the result model with
 * the calculation configuration names and (if present) the portfolio from the compiled view.
 *
 * @param view the owning view, not null
 * @param valuationTime the valuation instant, as epoch milliseconds
 */
public SingleComputationCycle(ViewInternal view, long valuationTime) {
ArgumentChecker.notNull(view, "view");
_view = view;
_valuationTime = Instant.ofEpochMillis(valuationTime);
_resultModel = new ViewComputationResultModelImpl();
_resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
if (getViewEvaluationModel().getPortfolio() != null) {
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
}
_dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
_state = State.CREATED;
}
/**
 * @return the owning view
 */
public ViewInternal getView() {
return _view;
}
/**
 * @return the valuation instant for this cycle
 */
public Instant getValuationTime() {
return _valuationTime;
}
/**
* @return the viewName
*/
public String getViewName() {
return getView().getDefinition().getName();
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
* @return the start time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getStartTime() {
return _startTime;
}
/**
* @return the end time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getEndTime() {
return _endTime;
}
/**
* @return How many nanoseconds the cycle took
*/
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
* @return the resultModel
*/
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
/**
 * @param calcConfigName the calculation configuration name
 * @return the computation cache for that configuration, or null before {@link #prepareInputs()} has run
 */
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
* @return the viewDefinition
*/
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
/**
 * @return the executor that runs the dependency graphs for this cycle
 */
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
/**
 * @return an unmodifiable view of the per-configuration computation caches
 */
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
/**
 * @return the names of all calculation configurations in the compiled view
 */
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
/**
 * Creates the computation caches and populates their shared areas with the live data
 * snapshot, moving the cycle from {@link State#CREATED} to {@link State#INPUTS_PREPARED}.
 * Missing live data values are logged as a warning but do not abort the cycle.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#CREATED} state
 */
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
/**
 * Renders the missing live data requirements as a human-readable string for the warning log,
 * e.g. {@code [ValueName on PRIMITIVE-Scheme:Id] }.
 *
 * @param missingLiveData the requirements for which no snapshot value was found
 * @return the formatted list
 */
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
StringBuilder sb = new StringBuilder();
for (ValueSpecification spec : missingLiveData) {
ValueRequirement req = spec.getRequirementSpecification();
sb.append("[").append(req.getValueName()).append(" on ");
sb.append(req.getTargetSpecification().getType());
// Only primitive targets include the identifier scheme, to disambiguate them.
if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
}
sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
}
return sb.toString();
}
/**
 * Obtains one computation cache per calculation configuration, keyed by view name,
 * configuration name and valuation time, and records each in
 * {@code _cachesByCalculationConfiguration}.
 */
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
 * Stores a live data value into the shared area of every configuration's cache.
 *
 * @param dataAsValue the computed value to share
 */
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
* @throws IllegalStateException if this cycle is not in the {@link State#INPUTS_PREPARED} state
* @throws IllegalArgumentException if the previous cycle is not in the {@link State#FINISHED} state
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph,
cache,
previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.",
new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
// Unchanged nodes need no recomputation: mark them executed (so the executable graph
// strips them) and copy their outputs forward from the previous cycle's cache.
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Submits each configuration's executable dependency graph to the executor and blocks
 * until every submitted graph completes, moving the cycle from
 * {@link State#INPUTS_PREPARED} through {@link State#EXECUTING} to {@link State#FINISHED}.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#INPUTS_PREPARED} state
 * @throws OpenGammaRuntimeException if a graph execution fails
 */
public void executePlans() {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
_state = State.EXECUTING;
LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
Future<?> future = getDependencyGraphExecutor().execute(depGraph);
futures.add(future);
}
// Poll each future with a bounded wait; incomplete futures are re-queued so we keep
// logging progress rather than blocking silently on one graph.
while (!futures.isEmpty()) {
Future<?> future = futures.poll();
try {
future.get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
s_logger.info("Waiting for " + future);
futures.add(future);
} catch (InterruptedException e) {
// NOTE(review): Thread.interrupted() CLEARS the interrupt flag and the wait continues;
// the conventional choice is Thread.currentThread().interrupt() to preserve the
// interrupt status -- confirm this deliberately ignores interruption.
Thread.interrupted();
s_logger.info("Interrupted while waiting for completion of " + future);
futures.add(future);
} catch (ExecutionException e) {
s_logger.error("Unable to execute dependency graph", e);
// Should we be swallowing this or not?
throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
}
}
_state = State.FINISHED;
}
/**
 * @param calcConfName the calculation configuration name
 * @return the full (unfiltered) dependency graph for that configuration
 */
private DependencyGraph getDependencyGraph(String calcConfName) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
return depGraph;
}
/**
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
private DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
/**
 * Copies computed output values from the caches into the result model, stamps the result
 * with the current time, and records the cycle end time.
 */
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
/**
 * Copies the outputs of one dependency graph into the result model, skipping values that
 * the result model definition excludes and values absent from the cache.
 *
 * @param calcConfigurationName the configuration being populated
 * @param depGraph its dependency graph
 */
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
/**
 * Releases the live data snapshot and the computation caches, moving the cycle from
 * {@link State#FINISHED} to {@link State#CLEANED}. Optionally dumps the caches to disk
 * first, when enabled on the view definition.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#FINISHED} state
 */
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Writes each configuration's cached output values (sorted by specification string) to a
 * temp file, for debugging.
 */
public void dumpComputationCachesToDisk() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
Object value = computationCache.getValue(outputSpec);
key2Value.put(outputSpec.toString(), value);
}
try {
File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
// NOTE(review): the writer is not closed if write() throws, and it uses the platform
// default charset -- consider close() in a finally block and an explicit charset.
FileWriter writer = new FileWriter(file);
writer.write(key2Value.toString());
writer.close();
} catch (IOException e) {
throw new RuntimeException("Writing cache to file failed", e);
}
}
}
// --------------------------------------------------------------------------
/**
 * @param node the node to test; null is treated as already executed
 * @return true if the node has been marked executed
 */
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Marks a node as executed, so {@link #getExecutableDependencyGraph} will strip it.
 * A null node is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
/**
 * @param node the node to test; null is treated as failed
 * @return true if the node has been marked failed
 */
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Marks a node as failed. A null node is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  // Injected Inputs:
  private final ViewInternal _view;
  private final Instant _valuationTime;
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  /**
   * Current state of the cycle. The action methods below enforce the transition
   * order CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED.
   */
  private enum State {
    CREATED(), INPUTS_PREPARED(), EXECUTING(), FINISHED(), CLEANED()
  }

  private State _state;

  /**
   * Cycle start time. Nanoseconds, see System.nanoTime()
   */
  private long _startTime;

  /**
   * Cycle end time. Nanoseconds, see System.nanoTime()
   */
  private long _endTime;

  // Guards _executedNodes and _failedNodes: the mark/query methods below may be
  // called while the dependency graph executor is running on other threads.
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Creates a new cycle in the {@code CREATED} state.
   *
   * @param view the owning view, not null
   * @param valuationTime the valuation time, milliseconds since the epoch
   */
  public SingleComputationCycle(ViewInternal view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  public ViewInternal getView() {
    return _view;
  }

  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  /**
   * @param calcConfigName the calculation configuration name
   * @return the computation cache for the configuration, or null before
   *         {@link #prepareInputs()} has created the caches
   */
  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  /**
   * @return an unmodifiable view of the per-configuration caches
   */
  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  /**
   * @return the evaluation model produced by view compilation
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    // compilation.
    return getView().getViewEvaluationModel();
  }

  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------
  /**
   * Creates the per-configuration computation caches and populates their shared
   * areas with the live data snapshot for the valuation time. Any requirement
   * without a snapshot value is collected and logged as a single warning.
   * Transitions {@code CREATED} -> {@code INPUTS_PREPARED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code CREATED} state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  /**
   * Formats the missing live data requirements for the warning log message.
   *
   * @param missingLiveData the specifications that had no snapshot value
   * @return a human-readable, space-separated summary of the requirements
   */
  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        // Primitive targets additionally show the identifier scheme.
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Obtains a computation cache for every calculation configuration in the view.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Puts the given live data value into the shared area of every
   * configuration's computation cache.
   *
   * @param dataAsValue the value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   * @throws IllegalStateException if this cycle is not in the {@code INPUTS_PREPARED} state
   * @throws IllegalArgumentException if the previous cycle is not in the {@code FINISHED} state
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] { calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size() });
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Marking the node executed keeps it out of getExecutableDependencyGraph.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Submits the executable dependency graph of every calculation configuration
   * to the graph executor and blocks until all of them complete, polling each
   * future with a 5-second timeout so progress can be logged.
   * Transitions {@code INPUTS_PREPARED} -> {@code EXECUTING} -> {@code FINISHED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code INPUTS_PREPARED} state
   * @throws OpenGammaRuntimeException if executing any graph fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
      futures.add(future);
    }
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(5, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        // Not finished yet; log and re-queue so the other futures also get polled.
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Thread.interrupted() deliberately clears the interrupt flag so the
        // re-queued get() does not immediately throw again.
        // NOTE(review): this swallows the interrupt instead of aborting the
        // cycle - confirm that is the intended behaviour.
        Thread.interrupted();
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
    _state = State.FINISHED;
  }

  /**
   * @param calcConfName configuration name
   * @return the full (unfiltered) dependency graph for the configuration
   */
  private DependencyGraph getDependencyGraph(String calcConfName) {
    DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
    return depGraph;
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed; mark them executed so
        // they are excluded here and from any later sub-graphing.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------
  /**
   * Copies the computed outputs of every configuration from the caches into the
   * result model, stamps the result timestamp, and records the cycle end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  /**
   * Copies one configuration's outputs into the result model, skipping values
   * that the result model definition excludes and values absent from the cache.
   *
   * @param calcConfigurationName the configuration to populate
   * @param depGraph the configuration's dependency graph
   */
  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Releases the live data snapshot and computation caches for this cycle,
   * optionally dumping the caches to disk first.
   * Transitions {@code FINISHED} -> {@code CLEANED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code FINISHED} state
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Writes each configuration's cache contents to a temp file for debugging.
   * Entries are sorted by specification (TreeMap) for stable, diffable output.
   *
   * @throws OpenGammaRuntimeException if writing a dump file fails
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        try {
          writer.write(key2Value.toString());
        } finally {
          // FIX: close in finally so the writer is not leaked if write() throws.
          writer.close();
        }
      } catch (IOException e) {
        // Consistent with executePlans(): wrap in the project runtime exception.
        throw new OpenGammaRuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------
  /**
   * @param node the node to query; null is treated as executed
   * @return true if the node has been marked executed
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks a node as executed so it is excluded from future executable graphs.
   *
   * @param node the node to mark; null is ignored
   */
  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * @param node the node to query; null is treated as failed
   * @return true if the node has been marked failed
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks a node as failed.
   *
   * @param node the node to mark; null is ignored
   */
  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  // Injected Inputs:
  private final ViewInternal _view;
  private final Instant _valuationTime;
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  /**
   * Current state of the cycle. The action methods below enforce the transition
   * order CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED.
   */
  private enum State {
    CREATED(), INPUTS_PREPARED(), EXECUTING(), FINISHED(), CLEANED()
  }

  private State _state;

  /**
   * Cycle start time. Nanoseconds, see System.nanoTime()
   */
  private long _startTime;

  /**
   * Cycle end time. Nanoseconds, see System.nanoTime()
   */
  private long _endTime;

  // Guards _executedNodes and _failedNodes: the mark/query methods below may be
  // called while the dependency graph executor is running on other threads.
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Creates a new cycle in the {@code CREATED} state.
   *
   * @param view the owning view, not null
   * @param valuationTime the valuation time, milliseconds since the epoch
   */
  public SingleComputationCycle(ViewInternal view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  public ViewInternal getView() {
    return _view;
  }

  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  /**
   * @param calcConfigName the calculation configuration name
   * @return the computation cache for the configuration, or null before
   *         {@link #prepareInputs()} has created the caches
   */
  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  /**
   * @return an unmodifiable view of the per-configuration caches
   */
  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  /**
   * @return the evaluation model produced by view compilation
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    // compilation.
    return getView().getViewEvaluationModel();
  }

  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------
  /**
   * Creates the per-configuration computation caches and populates their shared
   * areas with the live data snapshot for the valuation time. Any requirement
   * without a snapshot value is collected and logged as a single warning.
   * Transitions {@code CREATED} -> {@code INPUTS_PREPARED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code CREATED} state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  /**
   * Formats the missing live data requirements for the warning log message.
   *
   * @param missingLiveData the specifications that had no snapshot value
   * @return a human-readable, space-separated summary of the requirements
   */
  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        // Primitive targets additionally show the identifier scheme.
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Obtains a computation cache for every calculation configuration in the view.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Puts the given live data value into the shared area of every
   * configuration's computation cache.
   *
   * @param dataAsValue the value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   * @throws IllegalStateException if this cycle is not in the {@code INPUTS_PREPARED} state
   * @throws IllegalArgumentException if the previous cycle is not in the {@code FINISHED} state
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] { calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size() });
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Marking the node executed keeps it out of getExecutableDependencyGraph.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Submits the executable dependency graph of every calculation configuration
   * to the graph executor and blocks until all of them complete, polling each
   * future with a 5-second timeout so progress can be logged.
   * Transitions {@code INPUTS_PREPARED} -> {@code EXECUTING} -> {@code FINISHED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code INPUTS_PREPARED} state
   * @throws OpenGammaRuntimeException if executing any graph fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
      futures.add(future);
    }
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(5, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        // Not finished yet; log and re-queue so the other futures also get polled.
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Thread.interrupted() deliberately clears the interrupt flag so the
        // re-queued get() does not immediately throw again.
        // NOTE(review): this swallows the interrupt instead of aborting the
        // cycle - confirm that is the intended behaviour.
        Thread.interrupted();
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
    _state = State.FINISHED;
  }

  /**
   * @param calcConfName configuration name
   * @return the full (unfiltered) dependency graph for the configuration
   */
  private DependencyGraph getDependencyGraph(String calcConfName) {
    DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
    return depGraph;
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed; mark them executed so
        // they are excluded here and from any later sub-graphing.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------
  /**
   * Copies the computed outputs of every configuration from the caches into the
   * result model, stamps the result timestamp, and records the cycle end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  /**
   * Copies one configuration's outputs into the result model, skipping values
   * that the result model definition excludes and values absent from the cache.
   *
   * @param calcConfigurationName the configuration to populate
   * @param depGraph the configuration's dependency graph
   */
  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Releases the live data snapshot and computation caches for this cycle,
   * optionally dumping the caches to disk first.
   * Transitions {@code FINISHED} -> {@code CLEANED}.
   *
   * @throws IllegalStateException if the cycle is not in the {@code FINISHED} state
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Writes each configuration's cache contents to a temp file for debugging.
   * Entries are sorted by specification (TreeMap) for stable, diffable output.
   *
   * @throws OpenGammaRuntimeException if writing a dump file fails
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        try {
          writer.write(key2Value.toString());
        } finally {
          // FIX: close in finally so the writer is not leaked if write() throws.
          writer.close();
        }
      } catch (IOException e) {
        // Consistent with executePlans(): wrap in the project runtime exception.
        throw new OpenGammaRuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------
  /**
   * @param node the node to query; null is treated as executed
   * @return true if the node has been marked executed
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks a node as executed so it is excluded from future executable graphs.
   *
   * @param node the node to mark; null is ignored
   */
  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * @param node the node to query; null is treated as failed
   * @return true if the node has been marked failed
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks a node as failed.
   *
   * @param node the node to mark; null is ignored
   */
  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
// KeepBothMethods -- merge-tool marker left between file copies; the bare token is not valid Java, so it is preserved here as a comment.
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final ViewInternal _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
private final GraphExecutorStatisticsGatherer _statisticsGatherer;
/** Current state of the cycle */
private enum State {
CREATED(), INPUTS_PREPARED(), EXECUTING(), FINISHED(), CLEANED()
}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Creates a new cycle in the {@code CREATED} state.
 * <p>
 * NOTE(review): a merge left two constructor variants here. The {@code View}
 * variant referenced a type that is not imported in this copy, and the
 * {@code ViewInternal} variant never assigned the final
 * {@code _statisticsGatherer} field, so neither compiled. They are merged into
 * the single form used by the other copies of this class in the file.
 *
 * @param view the owning view, not null
 * @param valuationTime the valuation time, milliseconds since the epoch
 */
public SingleComputationCycle(ViewInternal view, long valuationTime) {
  ArgumentChecker.notNull(view, "view");
  _view = view;
  _valuationTime = Instant.ofEpochMillis(valuationTime);
  _resultModel = new ViewComputationResultModelImpl();
  _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
  if (getViewEvaluationModel().getPortfolio() != null) {
    _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
  }
  _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
  _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
  _state = State.CREATED;
}
/**
 * @return the view this cycle computes results for
 */
public ViewInternal getView() {
return _view;
}
/**
 * @return the valuation instant this cycle's snapshot was taken at
 */
public Instant getValuationTime() {
return _valuationTime;
}
/**
 * @return the viewName
 */
public String getViewName() {
return getView().getName();
}
/**
 * @return the processingContext
 */
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
 * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getStartTime() {
return _startTime;
}
/**
 * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getEndTime() {
return _endTime;
}
/**
 * @return How many nanoseconds the cycle took
 */
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
 * @return the resultModel
 */
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
/**
 * @param calcConfigName the calculation configuration name
 * @return the configuration's computation cache, or null before prepareInputs() has run
 */
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
 * @return the viewDefinition
 */
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
/**
 * @return the executor that runs each configuration's dependency graph
 */
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
/**
 * @return the receiver of graph execution statistics
 */
public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
return _statisticsGatherer;
}
/**
 * @return an unmodifiable view of the per-configuration computation caches
 */
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
/**
 * @return the compiled evaluation model obtained from the view
 */
public ViewEvaluationModel getViewEvaluationModel() {
// compilation.
return getView().getViewEvaluationModel();
}
/**
 * @return the names of all calculation configurations in the evaluation model
 */
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
/**
 * Transitions the cycle from CREATED to INPUTS_PREPARED: records the start time,
 * creates one computation cache per calculation configuration, and populates the
 * caches from the live data snapshot at the valuation time. Requirements with no
 * snapshot value are collected and logged as missing.
 *
 * @throws IllegalStateException if the cycle is not in the CREATED state
 */
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
// One cache per calculation configuration; each live data value is shared into all of them below.
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
// Missing values are only warned about in aggregate after the loop.
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
/**
 * Renders the set of missing live data requirements as a compact human-readable
 * string of the form {@code [ValueName on TYPE:identifier] } per entry, for logging.
 *
 * @param missingLiveData the specifications that had no snapshot value
 * @return the formatted summary; empty when the set is empty
 */
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
  StringBuilder buf = new StringBuilder();
  for (ValueSpecification missingSpec : missingLiveData) {
    ValueRequirement requirement = missingSpec.getRequirementSpecification();
    buf.append("[");
    buf.append(requirement.getValueName());
    buf.append(" on ");
    buf.append(requirement.getTargetSpecification().getType());
    if (requirement.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
      // Primitive targets additionally carry the identifier scheme.
      buf.append("-");
      buf.append(requirement.getTargetSpecification().getIdentifier().getScheme().getName());
    }
    buf.append(":");
    buf.append(requirement.getTargetSpecification().getIdentifier().getValue());
    buf.append("] ");
  }
  return buf.toString();
}
/**
 * Creates one computation cache per calculation configuration, keyed by the
 * configuration name, for this view at this valuation time.
 */
private void createAllCaches() {
  for (String configName : getAllCalculationConfigurationNames()) {
    _cachesByCalculationConfiguration.put(configName,
        getProcessingContext().getComputationCacheSource().getCache(getViewName(), configName, getValuationTime().toEpochMillisLong()));
  }
}
/**
 * Shares a single snapshotted live data value into every calculation
 * configuration's computation cache.
 *
 * @param dataAsValue the value to share
 */
private void addToAllCaches(ComputedValue dataAsValue) {
  for (String configName : getAllCalculationConfigurationNames()) {
    ViewComputationCache cache = getComputationCache(configName);
    cache.putSharedValue(dataAsValue);
  }
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
// The previous cycle's caches are still needed; it must not have been cleaned yet.
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
// Partitions the graph's nodes into changed/unchanged based on live data inputs.
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] { calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size() });
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
// Marking executed causes getExecutableDependencyGraph() to strip this node from the plan.
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
// Null previous values cannot be carried forward; the node stays marked executed regardless.
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Transitions the cycle from INPUTS_PREPARED through EXECUTING to FINISHED.
 * Submits each calculation configuration's executable dependency graph (with
 * already-executed nodes stripped, see {@link #getExecutableDependencyGraph}) to the
 * executor, then polls the resulting futures until every graph has completed.
 *
 * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
 * @throws OpenGammaRuntimeException if any graph execution fails
 */
public void executePlans() {
  if (_state != State.INPUTS_PREPARED) {
    throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
  }
  _state = State.EXECUTING;
  LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
    DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
    s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
    futures.add(getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer));
  }
  // Poll with a timeout so progress can be logged; an incomplete future is re-queued.
  boolean interrupted = false;
  try {
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(5, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Fix: the previous revision called Thread.interrupted() here, which is a no-op
        // after catching InterruptedException (the status is already cleared) and
        // silently swallowed the interrupt. We still wait for all graphs to complete,
        // but remember the interruption and restore the thread's interrupt status
        // below so callers can observe it.
        interrupted = true;
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
  _state = State.FINISHED;
}
/**
 * Looks up the full dependency graph for one calculation configuration.
 *
 * @param calcConfName configuration name
 * @return the configuration's dependency graph from the evaluation model
 */
private DependencyGraph getDependencyGraph(String calcConfName) {
  return getViewEvaluationModel().getDependencyGraph(calcConfName);
}
/**
* @param calcConfName configuration name
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
// NOTE: the filter below has a deliberate side effect -- live data sourcing nodes are
// marked executed as they are visited, so the isExecuted() test then excludes them
// (together with nodes already marked executed by computeDelta()).
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
/**
 * Copies each calculation configuration's computed outputs into the result model,
 * stamps the result timestamp, and records the cycle end time.
 */
public void populateResultModel() {
  getResultModel().setResultTimestamp(Instant.nowSystemClock());
  for (String configName : getAllCalculationConfigurationNames()) {
    populateResultModel(configName, getViewEvaluationModel().getDependencyGraph(configName));
  }
  _endTime = System.nanoTime();
}
/**
 * Copies into the result model every output of the given graph that the view
 * definition's result model definition wants and that has a non-null value in the
 * configuration's computation cache.
 *
 * @param calcConfigurationName configuration whose cache to read
 * @param depGraph that configuration's dependency graph
 */
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
  ViewComputationCache cache = getComputationCache(calcConfigurationName);
  for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
    if (getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
      Object value = cache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }
}
/**
 * Transitions the cycle from FINISHED to CLEANED: optionally dumps the computation
 * caches to disk, then releases the live data snapshot and the computation caches for
 * this view and valuation time. After this call the cycle's caches must no longer be
 * read (computeDelta() requires the previous cycle not to have been cleaned).
 *
 * @throws IllegalStateException if the cycle is not in the FINISHED state
 */
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
// Dump before releasing -- the caches are unusable afterwards.
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Debugging aid: writes each calculation configuration's cached output values, sorted
 * by value specification, to a fresh temporary text file (one file per configuration).
 *
 * @throws OpenGammaRuntimeException if a file cannot be created or written
 */
public void dumpComputationCachesToDisk() {
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    // TreeMap sorts by the specification's string form, making the dump stable.
    TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      Object value = computationCache.getValue(outputSpec);
      key2Value.put(outputSpec.toString(), value);
    }
    try {
      File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
      s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
      FileWriter writer = new FileWriter(file);
      // Fix: close the writer in a finally block so the file handle is not leaked
      // when write() throws.
      try {
        writer.write(key2Value.toString());
      } finally {
        writer.close();
      }
    } catch (IOException e) {
      // Consistent with executePlans(): use the project's runtime exception type
      // (a RuntimeException subtype, so existing catch clauses still match).
      throw new OpenGammaRuntimeException("Writing cache to file failed", e);
    }
  }
}
// --------------------------------------------------------------------------
/**
 * Checks whether the given node has been marked executed (by the executor, by
 * computeDelta(), or by live-data stripping in getExecutableDependencyGraph()).
 *
 * @param node the node to query; null is treated as trivially executed
 * @return true if the node is null or has been marked executed
 */
public boolean isExecuted(DependencyNode node) {
  if (node == null) {
    return true;
  }
  final java.util.concurrent.locks.Lock readLock = _nodeExecutionLock.readLock();
  readLock.lock();
  try {
    return _executedNodes.contains(node);
  } finally {
    readLock.unlock();
  }
}
/**
 * Marks the given node as executed so it is excluded from subsequently built
 * executable sub-graphs. A null node is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markExecuted(DependencyNode node) {
  if (node == null) {
    return;
  }
  final java.util.concurrent.locks.Lock writeLock = _nodeExecutionLock.writeLock();
  writeLock.lock();
  try {
    _executedNodes.add(node);
  } finally {
    writeLock.unlock();
  }
}
/**
 * Checks whether the given node has been marked as failed.
 *
 * @param node the node to query; null is treated as failed
 * @return true if the node is null or has been marked failed
 */
public boolean isFailed(DependencyNode node) {
  if (node == null) {
    return true;
  }
  final java.util.concurrent.locks.Lock readLock = _nodeExecutionLock.readLock();
  readLock.lock();
  try {
    return _failedNodes.contains(node);
  } finally {
    readLock.unlock();
  }
}
/**
 * Marks the given node as failed. A null node is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markFailed(DependencyNode node) {
  if (node == null) {
    return;
  }
  final java.util.concurrent.locks.Lock writeLock = _nodeExecutionLock.writeLock();
  writeLock.lock();
  try {
    _failedNodes.add(node);
  } finally {
    writeLock.unlock();
  }
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
 * Holds all data and actions for a single pass through a computation cycle.
 * In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
 * will create an instance of this class.
 * <p/>
 * At the moment, the concurrency metaphor is:
 * <ul>
 * <li>Each distinct security has its own execution plan</li>
 * <li>The cycle will schedule each node in the execution plan sequentially</li>
 * <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
 * </ul>
 * This is, of course, not optimal, and later on we can fix that.
 *
 * @author kirk
 */
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  // Injected Inputs:
  private final ViewInternal _view;
  private final Instant _valuationTime;
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  /**
   * Current state of the cycle. Legal transitions are strictly
   * CREATED -&gt; INPUTS_PREPARED -&gt; EXECUTING -&gt; FINISHED -&gt; CLEANED
   * (see {@link #prepareInputs}, {@link #executePlans} and {@link #releaseResources}).
   */
  private enum State {
    CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
  }

  private State _state;

  /** Cycle start. Nanoseconds, see {@link System#nanoTime()}. */
  private long _startTime;
  /** Cycle end. Nanoseconds, see {@link System#nanoTime()}. */
  private long _endTime;

  /** Guards _executedNodes and _failedNodes; nodes may be marked concurrently with graph execution. */
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Constructs a cycle for the given view at the given valuation time.
   * <p>
   * Fixes for the previous revision: the duplicate overload taking the {@code View}
   * type has been removed (the type is not imported by this file, so that constructor
   * could not compile), and the final {@code _statisticsGatherer} field, which the
   * removed overload initialized but this one did not, is now assigned here.
   *
   * @param view the view to compute, not null
   * @param valuationTime valuation instant, milliseconds since the epoch
   */
  public SingleComputationCycle(ViewInternal view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  /**
   * @return the view this cycle computes results for
   */
  public ViewInternal getView() {
    return _view;
  }

  /**
   * @return the valuation instant this cycle's snapshot was taken at
   */
  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  /**
   * @param calcConfigName the calculation configuration name
   * @return that configuration's computation cache, or null before {@link #prepareInputs} has run
   */
  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  /**
   * @return the executor that runs each configuration's dependency graph
   */
  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  /**
   * @return the receiver of graph execution statistics
   */
  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  /**
   * @return an unmodifiable view of the per-configuration computation caches
   */
  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  /**
   * @return the compiled evaluation model obtained from the view
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    // compilation.
    return getView().getViewEvaluationModel();
  }

  /**
   * @return the names of all calculation configurations in the evaluation model
   */
  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------
  /**
   * Transitions the cycle from CREATED to INPUTS_PREPARED: records the start time,
   * creates one computation cache per calculation configuration, and populates the
   * caches from the live data snapshot at the valuation time. Requirements with no
   * snapshot value are collected and logged as missing.
   *
   * @throws IllegalStateException if the cycle is not in the CREATED state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        //s_logger.warn("Live Data Requirement: {}", dataAsValue);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  /**
   * Renders the set of missing live data requirements as a compact human-readable
   * string of the form {@code [ValueName on TYPE:identifier] } per entry, for logging.
   *
   * @param missingLiveData the specifications that had no snapshot value
   * @return the formatted summary; empty when the set is empty
   */
  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        // Primitive targets additionally carry the identifier scheme.
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Creates one computation cache per calculation configuration, keyed by the
   * configuration name, for this view at this valuation time.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Shares a single snapshotted live data value into every calculation
   * configuration's computation cache.
   *
   * @param dataAsValue the value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   * @throws IllegalStateException if this cycle is not in the INPUTS_PREPARED state
   * @throws IllegalArgumentException if the previous cycle is not FINISHED
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      // Partitions the graph's nodes into changed/unchanged based on live data inputs.
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] { calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size() });
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Marking executed causes getExecutableDependencyGraph() to strip the node from the plan.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Transitions the cycle from INPUTS_PREPARED through EXECUTING to FINISHED.
   * Submits each calculation configuration's executable dependency graph to the
   * executor, then polls the futures until every graph has completed.
   *
   * @throws IllegalStateException if the cycle is not in the INPUTS_PREPARED state
   * @throws OpenGammaRuntimeException if any graph execution fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      futures.add(getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer));
    }
    // Poll with a timeout so progress can be logged; an incomplete future is re-queued.
    boolean interrupted = false;
    try {
      while (!futures.isEmpty()) {
        Future<?> future = futures.poll();
        try {
          future.get(5, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
          s_logger.info("Waiting for " + future);
          futures.add(future);
        } catch (InterruptedException e) {
          // Fix: Thread.interrupted() after catching InterruptedException was a no-op
          // (the status is already cleared) and swallowed the interrupt. We still wait
          // for all graphs to complete, but restore the interrupt status afterwards.
          interrupted = true;
          s_logger.info("Interrupted while waiting for completion of " + future);
          futures.add(future);
        } catch (ExecutionException e) {
          s_logger.error("Unable to execute dependency graph", e);
          throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
    _state = State.FINISHED;
  }

  /**
   * Looks up the full dependency graph for one calculation configuration.
   *
   * @param calcConfName configuration name
   * @return that configuration's dependency graph from the evaluation model
   */
  private DependencyGraph getDependencyGraph(String calcConfName) {
    return getViewEvaluationModel().getDependencyGraph(calcConfName);
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    // NOTE: the filter has a deliberate side effect -- live data sourcing nodes are
    // marked executed as they are visited, so the isExecuted() test excludes them.
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------
  /**
   * Copies each calculation configuration's computed outputs into the result model,
   * stamps the result timestamp, and records the cycle end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  /**
   * Copies into the result model every output of the given graph that the view
   * definition's result model definition wants and that has a non-null value in the
   * configuration's computation cache.
   *
   * @param calcConfigurationName configuration whose cache to read
   * @param depGraph that configuration's dependency graph
   */
  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Transitions the cycle from FINISHED to CLEANED: optionally dumps the computation
   * caches to disk, then releases the live data snapshot and the computation caches.
   * After this call the cycle's caches must no longer be read.
   *
   * @throws IllegalStateException if the cycle is not in the FINISHED state
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    // Dump before releasing -- the caches are unusable afterwards.
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Debugging aid: writes each calculation configuration's cached output values,
   * sorted by value specification, to a fresh temporary text file.
   *
   * @throws OpenGammaRuntimeException if a file cannot be created or written
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      // TreeMap sorts by the specification's string form, making the dump stable.
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        // Fix: close the writer in a finally block so the handle is not leaked when write() throws.
        try {
          writer.write(key2Value.toString());
        } finally {
          writer.close();
        }
      } catch (IOException e) {
        // Consistent with executePlans(); OpenGammaRuntimeException is a RuntimeException subtype.
        throw new OpenGammaRuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------
  /**
   * Checks whether the given node has been marked executed.
   *
   * @param node the node to query; null is treated as trivially executed
   * @return true if the node is null or has been marked executed
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the given node as executed so it is excluded from subsequently built
   * executable sub-graphs. A null node is ignored.
   *
   * @param node the node to mark, may be null
   */
  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * Checks whether the given node has been marked as failed.
   *
   * @param node the node to query; null is treated as failed
   * @return true if the node is null or has been marked failed
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  /**
   * Marks the given node as failed. A null node is ignored.
   *
   * @param node the node to mark, may be null
   */
  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
Safe
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final ViewInternal _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
private final GraphExecutorStatisticsGatherer _statisticsGatherer;
// State:
/** Current state of the cycle */
private enum State {
CREATED , INPUTS_PREPARED , EXECUTING , FINISHED , CLEANED}
// Transitions are strictly CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED;
// see prepareInputs(), executePlans() and releaseResources().
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Constructs a cycle for the given view at the given valuation time.
 * <p>
 * Resolves the merge conflict left in this region: the {@code ViewInternal}
 * signature from one branch is kept (the {@code View} type is not imported by this
 * file), together with the {@code _statisticsGatherer} initialization from the other
 * branch -- that field is final and must be assigned by every constructor.
 *
 * @param view the view to compute, not null
 * @param valuationTime valuation instant, milliseconds since the epoch
 */
public SingleComputationCycle(ViewInternal view, long valuationTime) {
  ArgumentChecker.notNull(view, "view");
  _view = view;
  _valuationTime = Instant.ofEpochMillis(valuationTime);
  _resultModel = new ViewComputationResultModelImpl();
  _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
  if (getViewEvaluationModel().getPortfolio() != null) {
    _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
  }
  _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
  _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
  _state = State.CREATED;
}
/**
 * @return the view this cycle computes for, not null
 */
public ViewInternal getView() {
return _view;
}
/**
 * @return the valuation instant of this cycle, not null
 */
public Instant getValuationTime() {
return _valuationTime;
}
/**
 * @return the viewName
 */
public String getViewName() {
return getView().getName();
}
/**
 * @return the processingContext
 */
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
 * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getStartTime() {
return _startTime;
}
/**
 * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
 */
public long getEndTime() {
return _endTime;
}
/**
 * @return How many nanoseconds the cycle took
 */
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
 * @return the resultModel
 */
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
/**
 * @param calcConfigName the calculation configuration name
 * @return that configuration's computation cache, or null if {@link #prepareInputs()} has not created the caches yet
 */
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
 * @return the viewDefinition
 */
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
/**
 * @return the executor used to run each calculation configuration's dependency graph
 */
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
/**
 * @return the statistics gatherer passed to the dependency graph executor
 */
public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
return _statisticsGatherer;
}
/**
 * @return an unmodifiable view of the per-configuration computation caches
 */
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
/**
 * @return the names of every calculation configuration in the compiled view
 */
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
/**
 * Prepares this cycle's inputs: creates the per-configuration computation caches and
 * populates them with a snapshot of the live (market) data the compiled view requires.
 * Records the cycle start time and transitions {@link State#CREATED} to
 * {@link State#INPUTS_PREPARED}. Missing live data values are logged but do not fail the cycle.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#CREATED} state
 */
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
// Caches must exist before live data values can be written into them below.
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
// Each configuration has its own cache, so the value is shared into every one of them.
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
/**
 * Renders a human-readable summary of live data requirements that could not be
 * satisfied, as used in the warning logged by {@link #prepareInputs()}.
 *
 * @param missingLiveData the value specifications that had no snapshot value
 * @return one bracketed "[valueName on targetType:identifier] " entry per specification
 */
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
  StringBuilder buffer = new StringBuilder();
  for (ValueSpecification specification : missingLiveData) {
    ValueRequirement requirement = specification.getRequirementSpecification();
    buffer.append("[");
    buffer.append(requirement.getValueName());
    buffer.append(" on ");
    buffer.append(requirement.getTargetSpecification().getType());
    if (requirement.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
      buffer.append("-");
      buffer.append(requirement.getTargetSpecification().getIdentifier().getScheme().getName());
    }
    buffer.append(":");
    buffer.append(requirement.getTargetSpecification().getIdentifier().getValue());
    buffer.append("] ");
  }
  return buffer.toString();
}
/**
 * Creates a computation cache for every calculation configuration, keyed by
 * configuration name, via the processing context's cache source. Called once from
 * {@link #prepareInputs()} before any values are written.
 */
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
 * Writes a single computed (live data) value into the shared area of every
 * calculation configuration's cache.
 *
 * @param dataAsValue the value to share across all configurations
 */
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
 * Determine which live data inputs have changed between iterations, and:
 * <ul>
 * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
 * <li>Only recompute the values that could have changed based on live data inputs
 * </ul>
 *
 * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
 * @throws IllegalStateException if this cycle is not in the {@link State#INPUTS_PREPARED} state
 * @throws IllegalArgumentException if the previous cycle has not reached {@link State#FINISHED}
 */
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
// Marking the node executed makes getExecutableDependencyGraph() strip it from the execution plan.
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
// Carry the previous cycle's outputs forward so downstream nodes and the result model can still read them.
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Submits every calculation configuration's executable dependency graph to the
 * dependency graph executor and blocks until all of them complete. Transitions
 * {@link State#INPUTS_PREPARED} to {@link State#EXECUTING} and finally {@link State#FINISHED}.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#INPUTS_PREPARED} state
 * @throws OpenGammaRuntimeException if any graph execution fails
 */
public void executePlans() {
  if (_state != State.INPUTS_PREPARED) {
    throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
  }
  _state = State.EXECUTING;
  LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
    DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
    s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
    Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
    futures.add(future);
  }
  // Poll each outstanding future with a timeout; incomplete ones are re-queued until all finish.
  while (!futures.isEmpty()) {
    Future<?> future = futures.poll();
    try {
      future.get(5, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      s_logger.info("Waiting for {}", future);
      futures.add(future);
    } catch (InterruptedException e) {
      // Restore the thread's interrupt status. The original called Thread.interrupted(),
      // which CLEARS the flag and silently discards the interrupt.
      Thread.currentThread().interrupt();
      s_logger.info("Interrupted while waiting for completion of {}", future);
      futures.add(future);
    } catch (ExecutionException e) {
      s_logger.error("Unable to execute dependency graph", e);
      // Should we be swallowing this or not?
      throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
    }
  }
  _state = State.FINISHED;
}
/**
 * Looks up the full dependency graph for the given calculation configuration
 * from the compiled view evaluation model.
 *
 * @param calcConfName configuration name
 * @return the configuration's dependency graph
 */
private DependencyGraph getDependencyGraph(String calcConfName) {
  return getViewEvaluationModel().getDependencyGraph(calcConfName);
}
/**
 * @param calcConfName configuration name
 * @return A dependency graph with nodes already executed stripped out.
 * See {@link #computeDelta} and how it calls {@link #markExecuted}.
 */
protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
// Exclude any node already marked executed (live data nodes above, or nodes satisfied by computeDelta).
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
/**
 * Copies the computed outputs of every calculation configuration's dependency
 * graph into the result model, stamps the result with the current wall-clock
 * instant, and records the cycle's end time.
 */
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
/**
 * Copies one configuration's graph outputs from its computation cache into the
 * result model, skipping values the result model definition excludes and values
 * absent from the cache.
 *
 * @param calcConfigurationName the calculation configuration being populated
 * @param depGraph the configuration's dependency graph
 */
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
/**
 * Releases the live data snapshot and computation caches held by this cycle,
 * optionally dumping the caches to disk first (per the view definition).
 * Transitions {@link State#FINISHED} to {@link State#CLEANED}; after this call
 * the cycle's caches must no longer be read (see {@link #computeDelta}).
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#FINISHED} state
 */
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Writes the contents of every configuration's computation cache to a temp file
 * (one file per configuration), keyed and sorted by value specification, as a
 * debugging aid (see {@link #releaseResources()}).
 *
 * @throws RuntimeException if a cache file cannot be written
 */
public void dumpComputationCachesToDisk() {
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      Object value = computationCache.getValue(outputSpec);
      key2Value.put(outputSpec.toString(), value);
    }
    try {
      File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
      s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
      FileWriter writer = new FileWriter(file);
      try {
        writer.write(key2Value.toString());
      } finally {
        // Previously the writer leaked if write() threw; always close it.
        writer.close();
      }
    } catch (IOException e) {
      throw new RuntimeException("Writing cache to file failed", e);
    }
  }
}
// --------------------------------------------------------------------------
/**
 * Tests whether a node has already been executed (or been deemed unnecessary to
 * execute, see {@link #computeDelta}). Guarded by the node-execution read lock.
 *
 * @param node the node to test; null returns true by convention
 * @return true if the node is null or has been marked executed
 */
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Marks a node as executed so that {@link #getExecutableDependencyGraph} excludes
 * it from future execution plans. Guarded by the node-execution write lock; null is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
/**
 * Tests whether a node's execution failed. Guarded by the node-execution read lock.
 *
 * @param node the node to test; null returns true by convention
 * @return true if the node is null or has been marked failed
 */
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
/**
 * Marks a node as failed. Guarded by the node-execution write lock; null is ignored.
 *
 * @param node the node to mark, may be null
 */
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final ViewInternal _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
private final GraphExecutorStatisticsGatherer _statisticsGatherer;
// State:
/** Current state of the cycle */
private enum State {
CREATED , INPUTS_PREPARED , EXECUTING , FINISHED , CLEANED}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
/**
 * Constructs a cycle for a single computation pass over the given view.
 *
 * @param view the view to compute for; typed {@code ViewInternal} to match the {@code _view} field, not null
 * @param valuationTime valuation time as milliseconds since the epoch
 */
public SingleComputationCycle(ViewInternal view, long valuationTime) {
  ArgumentChecker.notNull(view, "view");
  _view = view;
  _valuationTime = Instant.ofEpochMillis(valuationTime);
  _resultModel = new ViewComputationResultModelImpl();
  _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
  if (getViewEvaluationModel().getPortfolio() != null) {
    _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
  }
  _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
  // _statisticsGatherer is a blank final field; every constructor must assign it or the class will not compile.
  _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
  _state = State.CREATED;
}
public ViewInternal getView() {
return _view;
}
public Instant getValuationTime() {
return _valuationTime;
}
/**
* @return the viewName
*/
public String getViewName() {
return getView().getName();
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
* @return the start time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getStartTime() {
return _startTime;
}
/**
* @return the end time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getEndTime() {
return _endTime;
}
/**
* @return How many nanoseconds the cycle took
*/
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
* @return the resultModel
*/
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
* @return the viewDefinition
*/
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
return _statisticsGatherer;
}
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
//s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
StringBuilder sb = new StringBuilder();
for (ValueSpecification spec : missingLiveData) {
ValueRequirement req = spec.getRequirementSpecification();
sb.append("[").append(req.getValueName()).append(" on ");
sb.append(req.getTargetSpecification().getType());
if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
}
sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
}
return sb.toString();
}
/**
*
*/
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
* @param dataAsValue
*/
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
/**
 * Submits every calculation configuration's executable dependency graph to the
 * dependency graph executor and blocks until all of them complete. Transitions
 * {@link State#INPUTS_PREPARED} to {@link State#EXECUTING} and finally {@link State#FINISHED}.
 *
 * @throws IllegalStateException if the cycle is not in the {@link State#INPUTS_PREPARED} state
 * @throws OpenGammaRuntimeException if any graph execution fails
 */
public void executePlans() {
  if (_state != State.INPUTS_PREPARED) {
    throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
  }
  _state = State.EXECUTING;
  LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
    DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
    s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
    Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
    futures.add(future);
  }
  // Poll each outstanding future with a timeout; incomplete ones are re-queued until all finish.
  while (!futures.isEmpty()) {
    Future<?> future = futures.poll();
    try {
      future.get(5, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      s_logger.info("Waiting for {}", future);
      futures.add(future);
    } catch (InterruptedException e) {
      // Restore the thread's interrupt status. The original called Thread.interrupted(),
      // which CLEARS the flag and silently discards the interrupt.
      Thread.currentThread().interrupt();
      s_logger.info("Interrupted while waiting for completion of {}", future);
      futures.add(future);
    } catch (ExecutionException e) {
      s_logger.error("Unable to execute dependency graph", e);
      // Should we be swallowing this or not?
      throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
    }
  }
  _state = State.FINISHED;
}
private DependencyGraph getDependencyGraph(String calcConfName) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
return depGraph;
}
/**
* @param calcConfName configuration name
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
/**
 * Writes the contents of every configuration's computation cache to a temp file
 * (one file per configuration), keyed and sorted by value specification, as a
 * debugging aid (see {@link #releaseResources()}).
 *
 * @throws RuntimeException if a cache file cannot be written
 */
public void dumpComputationCachesToDisk() {
  for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
    DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      Object value = computationCache.getValue(outputSpec);
      key2Value.put(outputSpec.toString(), value);
    }
    try {
      File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
      s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
      FileWriter writer = new FileWriter(file);
      try {
        writer.write(key2Value.toString());
      } finally {
        // Previously the writer leaked if write() threw; always close it.
        writer.close();
      }
    } catch (IOException e) {
      throw new RuntimeException("Writing cache to file failed", e);
    }
  }
}
// --------------------------------------------------------------------------
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
Unstructured
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
  private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);

  // Injected Inputs:
  private final ViewInternal _view;
  private final Instant _valuationTime;
  private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
  private final GraphExecutorStatisticsGatherer _statisticsGatherer;

  // State:

  /** Current state of the cycle. Transitions: CREATED -> INPUTS_PREPARED -> EXECUTING -> FINISHED -> CLEANED. */
  private enum State {
    CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
  }

  private State _state;

  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _startTime;

  /**
   * Nanoseconds, see System.nanoTime()
   */
  private long _endTime;

  // Guards _executedNodes and _failedNodes, which may be touched concurrently during graph execution.
  private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
  private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
  private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
  private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();

  // Outputs:
  private final ViewComputationResultModelImpl _resultModel;

  /**
   * Creates a computation cycle for the given view at the given valuation time.
   * <p>
   * NOTE(review): unresolved merge conflict fixed here — the parameter type must be
   * {@code ViewInternal}, matching the {@code _view} field; the plain {@code View}
   * type is not imported in this file.
   *
   * @param view  the view to compute for, not null
   * @param valuationTime  the valuation instant, milliseconds since the epoch
   */
  public SingleComputationCycle(ViewInternal view, long valuationTime) {
    ArgumentChecker.notNull(view, "view");
    _view = view;
    _valuationTime = Instant.ofEpochMillis(valuationTime);
    _resultModel = new ViewComputationResultModelImpl();
    _resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
    // Merge conflict resolved: this block appeared twice (once inside the conflict region,
    // once after it); setting the portfolio a single time is sufficient.
    if (getViewEvaluationModel().getPortfolio() != null) {
      _resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
    }
    _dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
    _statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
    _state = State.CREATED;
  }

  /**
   * @return the view this cycle computes for. Merge conflict resolved: the return type
   * must be {@code ViewInternal} to match the {@code _view} field.
   */
  public ViewInternal getView() {
    return _view;
  }

  public Instant getValuationTime() {
    return _valuationTime;
  }

  /**
   * @return the viewName
   */
  public String getViewName() {
    return getView().getName();
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return getView().getProcessingContext();
  }

  /**
   * @return the start time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getStartTime() {
    return _startTime;
  }

  /**
   * @return the end time. Nanoseconds, see {@link System#nanoTime()}.
   */
  public long getEndTime() {
    return _endTime;
  }

  /**
   * @return How many nanoseconds the cycle took
   */
  public long getDurationNanos() {
    return getEndTime() - getStartTime();
  }

  /**
   * @return the resultModel
   */
  public ViewComputationResultModelImpl getResultModel() {
    return _resultModel;
  }

  public ViewComputationCache getComputationCache(String calcConfigName) {
    return _cachesByCalculationConfiguration.get(calcConfigName);
  }

  /**
   * @return the viewDefinition
   */
  public ViewDefinition getViewDefinition() {
    return getView().getDefinition();
  }

  public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
    return _dependencyGraphExecutor;
  }

  public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
    return _statisticsGatherer;
  }

  public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
    return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
  }

  public ViewEvaluationModel getViewEvaluationModel() {
    // REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
    // handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
    // compilation.
    return getView().getViewEvaluationModel();
  }

  public Set<String> getAllCalculationConfigurationNames() {
    return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
  }

  // --------------------------------------------------------------------------

  /**
   * Creates the computation caches and populates them with the live data snapshot.
   * Moves the cycle from CREATED to INPUTS_PREPARED.
   *
   * @throws IllegalStateException if the cycle is not in the CREATED state
   */
  public void prepareInputs() {
    if (_state != State.CREATED) {
      throw new IllegalStateException("State must be " + State.CREATED);
    }
    _startTime = System.nanoTime();
    getResultModel().setValuationTime(getValuationTime());
    createAllCaches();
    Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
    Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
    for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
      Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
      if (data == null) {
        s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
        missingLiveData.add(liveDataRequirement);
      } else {
        ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
        addToAllCaches(dataAsValue);
      }
    }
    if (!missingLiveData.isEmpty()) {
      s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
    }
    _state = State.INPUTS_PREPARED;
  }

  protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
    StringBuilder sb = new StringBuilder();
    for (ValueSpecification spec : missingLiveData) {
      ValueRequirement req = spec.getRequirementSpecification();
      sb.append("[").append(req.getValueName()).append(" on ");
      sb.append(req.getTargetSpecification().getType());
      if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
        sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
      }
      sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
    }
    return sb.toString();
  }

  /**
   * Obtains one computation cache per calculation configuration from the cache source.
   */
  private void createAllCaches() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
      _cachesByCalculationConfiguration.put(calcConfigurationName, cache);
    }
  }

  /**
   * Puts a shared value into every calculation configuration's cache.
   *
   * @param dataAsValue  the computed value to share
   */
  private void addToAllCaches(ComputedValue dataAsValue) {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
    }
  }

  // --------------------------------------------------------------------------

  /**
   * Determine which live data inputs have changed between iterations, and:
   * <ul>
   * <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
   * <li>Only recompute the values that could have changed based on live data inputs
   * </ul>
   *
   * @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
   */
  public void computeDelta(SingleComputationCycle previousCycle) {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    if (previousCycle._state != State.FINISHED) {
      throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
    }
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      ViewComputationCache cache = getComputationCache(calcConfigurationName);
      ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
      LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
      deltaCalculator.computeDelta();
      s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
      for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
        // Unchanged nodes are marked executed so getExecutableDependencyGraph() strips them out,
        // and their previous outputs are carried forward into this cycle's cache.
        markExecuted(unchangedNode);
        for (ValueSpecification spec : unchangedNode.getOutputValues()) {
          Object previousValue = previousCache.getValue(spec);
          if (previousValue != null) {
            cache.putSharedValue(new ComputedValue(spec, previousValue));
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
  /**
   * Submits every calculation configuration's executable dependency graph for execution
   * and blocks until all submissions complete. Moves the cycle to FINISHED.
   *
   * @throws IllegalStateException if inputs have not been prepared
   * @throws OpenGammaRuntimeException if graph execution fails
   */
  public void executePlans() {
    if (_state != State.INPUTS_PREPARED) {
      throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
    }
    _state = State.EXECUTING;
    LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
      DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
      s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
      Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
      futures.add(future);
    }
    while (!futures.isEmpty()) {
      Future<?> future = futures.poll();
      try {
        future.get(5, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        s_logger.info("Waiting for " + future);
        futures.add(future);
      } catch (InterruptedException e) {
        // Deliberately clears the interrupt flag and retries; restoring it would make the
        // next future.get() throw immediately and spin this loop.
        Thread.interrupted();
        s_logger.info("Interrupted while waiting for completion of " + future);
        futures.add(future);
      } catch (ExecutionException e) {
        s_logger.error("Unable to execute dependency graph", e);
        // Should we be swallowing this or not?
        throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
      }
    }
    _state = State.FINISHED;
  }

  private DependencyGraph getDependencyGraph(String calcConfName) {
    DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
    return depGraph;
  }

  /**
   * @param calcConfName configuration name
   * @return A dependency graph with nodes already executed stripped out.
   * See {@link #computeDelta} and how it calls {@link #markExecuted}.
   */
  protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
    DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
    DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
      public boolean accept(DependencyNode node) {
        // LiveData functions do not need to be computed.
        if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
          markExecuted(node);
        }
        return !isExecuted(node);
      }
    });
    return dependencyGraph;
  }

  // --------------------------------------------------------------------------

  /**
   * Copies the computed output values from the caches into the result model and
   * records the cycle end time.
   */
  public void populateResultModel() {
    Instant resultTimestamp = Instant.nowSystemClock();
    getResultModel().setResultTimestamp(resultTimestamp);
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
      populateResultModel(calcConfigurationName, depGraph);
    }
    _endTime = System.nanoTime();
  }

  protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
    ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
    for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
      // Only outputs the view definition's result model asks for are copied across.
      if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
        continue;
      }
      Object value = computationCache.getValue(outputSpec);
      if (value != null) {
        getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
      }
    }
  }

  /**
   * Releases the live data snapshot and computation caches held by this cycle.
   * Moves the cycle to CLEANED; the cycle must not be used afterwards.
   *
   * @throws IllegalStateException if the cycle has not FINISHED
   */
  public void releaseResources() {
    if (_state != State.FINISHED) {
      throw new IllegalStateException("State must be " + State.FINISHED);
    }
    if (getViewDefinition().isDumpComputationCacheToDisk()) {
      dumpComputationCachesToDisk();
    }
    getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
    getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
    _state = State.CLEANED;
  }

  /**
   * Writes each calculation configuration's cache contents to a temporary file,
   * for debugging purposes.
   */
  public void dumpComputationCachesToDisk() {
    for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
      DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
      ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
      TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
      for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
        Object value = computationCache.getValue(outputSpec);
        key2Value.put(outputSpec.toString(), value);
      }
      try {
        File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
        s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
        FileWriter writer = new FileWriter(file);
        try {
          writer.write(key2Value.toString());
        } finally {
          writer.close(); // previously leaked if write() threw
        }
      } catch (IOException e) {
        throw new RuntimeException("Writing cache to file failed", e);
      }
    }
  }

  // --------------------------------------------------------------------------

  /**
   * @param node  the node to test; null is treated as executed
   * @return true if the node has already been executed in this cycle
   */
  public boolean isExecuted(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _executedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  public void markExecuted(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _executedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }

  /**
   * @param node  the node to test; null is treated as failed
   * @return true if the node failed during execution in this cycle
   */
  public boolean isFailed(DependencyNode node) {
    if (node == null) {
      return true;
    }
    _nodeExecutionLock.readLock().lock();
    try {
      return _failedNodes.contains(node);
    } finally {
      _nodeExecutionLock.readLock().unlock();
    }
  }

  public void markFailed(DependencyNode node) {
    if (node == null) {
      return;
    }
    _nodeExecutionLock.writeLock().lock();
    try {
      _failedNodes.add(node);
    } finally {
      _nodeExecutionLock.writeLock().unlock();
    }
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.calc;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.function.LiveDataSourcingFunction;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewComputationResultModelImpl;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewInternal;
import com.opengamma.engine.view.ViewProcessingContext;
import com.opengamma.engine.view.cache.ViewComputationCache;
import com.opengamma.engine.view.calc.stats.GraphExecutorStatisticsGatherer;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.util.ArgumentChecker;
/**
* Holds all data and actions for a single pass through a computation cycle.
* In general, each invocation of {@link ViewRecalculationJob#runOneCycle()}
* will create an instance of this class.
* <p/>
* At the moment, the concurrency metaphor is:
* <ul>
* <li>Each distinct security has its own execution plan</li>
* <li>The cycle will schedule each node in the execution plan sequentially</li>
* <li>If there are shared sub-graphs that aren't security specific, they'll be captured at execution time.</li>
* </ul>
* This is, of course, not optimal, and later on we can fix that.
*
* @author kirk
*/
public class SingleComputationCycle {
private static final Logger s_logger = LoggerFactory.getLogger(SingleComputationCycle.class);
// Injected Inputs:
private final ViewInternal _view;
private final Instant _valuationTime;
private final DependencyGraphExecutor<?> _dependencyGraphExecutor;
private final GraphExecutorStatisticsGatherer _statisticsGatherer;
// State:
/** Current state of the cycle */
private enum State {
CREATED, INPUTS_PREPARED, EXECUTING, FINISHED, CLEANED
}
private State _state;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _startTime;
/**
* Nanoseconds, see System.nanoTime()
*/
private long _endTime;
private final ReentrantReadWriteLock _nodeExecutionLock = new ReentrantReadWriteLock();
private final Set<DependencyNode> _executedNodes = new HashSet<DependencyNode>();
private final Set<DependencyNode> _failedNodes = new HashSet<DependencyNode>();
private final Map<String, ViewComputationCache> _cachesByCalculationConfiguration = new HashMap<String, ViewComputationCache>();
// Outputs:
private final ViewComputationResultModelImpl _resultModel;
<<<<<<< MINE
public SingleComputationCycle(View view, long valuationTime) {
=======
public SingleComputationCycle(ViewInternal view, long valuationTime) {
>>>>>>> YOURS
ArgumentChecker.notNull(view, "view");
_view = view;
_valuationTime = Instant.ofEpochMillis(valuationTime);
_resultModel = new ViewComputationResultModelImpl();
_resultModel.setCalculationConfigurationNames(getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet());
<<<<<<< MINE
=======
if (getViewEvaluationModel().getPortfolio() != null) {
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
}
>>>>>>> YOURS
if (getViewEvaluationModel().getPortfolio() != null) {
_resultModel.setPortfolio(getViewEvaluationModel().getPortfolio());
}
_dependencyGraphExecutor = getProcessingContext().getDependencyGraphExecutorFactory().createExecutor(this);
_statisticsGatherer = getProcessingContext().getGraphExecutorStatisticsGathererProvider().getStatisticsGatherer(view);
_state = State.CREATED;
}
<<<<<<< MINE
public View getView() {
=======
public ViewInternal getView() {
>>>>>>> YOURS
return _view;
}
public Instant getValuationTime() {
return _valuationTime;
}
/**
* @return the viewName
*/
public String getViewName() {
return getView().getName();
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return getView().getProcessingContext();
}
/**
* @return the start time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getStartTime() {
return _startTime;
}
/**
* @return the end time. Nanoseconds, see {@link System#nanoTime()}.
*/
public long getEndTime() {
return _endTime;
}
/**
* @return How many nanoseconds the cycle took
*/
public long getDurationNanos() {
return getEndTime() - getStartTime();
}
/**
* @return the resultModel
*/
public ViewComputationResultModelImpl getResultModel() {
return _resultModel;
}
public ViewComputationCache getComputationCache(String calcConfigName) {
return _cachesByCalculationConfiguration.get(calcConfigName);
}
/**
* @return the viewDefinition
*/
public ViewDefinition getViewDefinition() {
return getView().getDefinition();
}
public DependencyGraphExecutor<?> getDependencyGraphExecutor() {
return _dependencyGraphExecutor;
}
public GraphExecutorStatisticsGatherer getStatisticsGatherer() {
return _statisticsGatherer;
}
public Map<String, ViewComputationCache> getCachesByCalculationConfiguration() {
return Collections.unmodifiableMap(_cachesByCalculationConfiguration);
}
public ViewEvaluationModel getViewEvaluationModel() {
// REVIEW jonathan 2010-08-17 -- when we support re-compilation of views, we need to be more careful about how we
// handle the view evaluation model to ensure that a computation cycle works entirely with the output from a single
// compilation.
return getView().getViewEvaluationModel();
}
public Set<String> getAllCalculationConfigurationNames() {
return getViewEvaluationModel().getDependencyGraphsByConfiguration().keySet();
}
// --------------------------------------------------------------------------
public void prepareInputs() {
if (_state != State.CREATED) {
throw new IllegalStateException("State must be " + State.CREATED);
}
_startTime = System.nanoTime();
getResultModel().setValuationTime(getValuationTime());
createAllCaches();
Set<ValueSpecification> allLiveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
s_logger.debug("Populating {} market data items for snapshot {}", allLiveDataRequirements.size(), getValuationTime());
Set<ValueSpecification> missingLiveData = new HashSet<ValueSpecification>();
for (ValueSpecification liveDataRequirement : allLiveDataRequirements) {
Object data = getProcessingContext().getLiveDataSnapshotProvider().querySnapshot(getValuationTime().toEpochMillisLong(), liveDataRequirement.getRequirementSpecification());
if (data == null) {
s_logger.debug("Unable to load live data value for {} at snapshot {}.", liveDataRequirement, getValuationTime());
missingLiveData.add(liveDataRequirement);
} else {
ComputedValue dataAsValue = new ComputedValue(liveDataRequirement, data);
// s_logger.warn("Live Data Requirement: {}", dataAsValue);
addToAllCaches(dataAsValue);
}
}
if (!missingLiveData.isEmpty()) {
s_logger.warn("Missing {} live data elements: {}", missingLiveData.size(), formatMissingLiveData(missingLiveData));
}
_state = State.INPUTS_PREPARED;
}
protected static String formatMissingLiveData(Set<ValueSpecification> missingLiveData) {
StringBuilder sb = new StringBuilder();
for (ValueSpecification spec : missingLiveData) {
ValueRequirement req = spec.getRequirementSpecification();
sb.append("[").append(req.getValueName()).append(" on ");
sb.append(req.getTargetSpecification().getType());
if (req.getTargetSpecification().getType() == ComputationTargetType.PRIMITIVE) {
sb.append("-").append(req.getTargetSpecification().getIdentifier().getScheme().getName());
}
sb.append(":").append(req.getTargetSpecification().getIdentifier().getValue()).append("] ");
}
return sb.toString();
}
/**
*
*/
private void createAllCaches() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
ViewComputationCache cache = getProcessingContext().getComputationCacheSource().getCache(getViewName(), calcConfigurationName, getValuationTime().toEpochMillisLong());
_cachesByCalculationConfiguration.put(calcConfigurationName, cache);
}
}
/**
* @param dataAsValue
*/
private void addToAllCaches(ComputedValue dataAsValue) {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
getComputationCache(calcConfigurationName).putSharedValue(dataAsValue);
}
}
// --------------------------------------------------------------------------
/**
* Determine which live data inputs have changed between iterations, and:
* <ul>
* <li>Copy over all values that can be demonstrated to be the same from the previous iteration (because no input has changed)
* <li>Only recompute the values that could have changed based on live data inputs
* </ul>
*
* @param previousCycle Previous iteration. It must not have been cleaned yet ({@link #releaseResources()}).
*/
public void computeDelta(SingleComputationCycle previousCycle) {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
if (previousCycle._state != State.FINISHED) {
throw new IllegalArgumentException("State of previous cycle must be " + State.FINISHED);
}
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
ViewComputationCache cache = getComputationCache(calcConfigurationName);
ViewComputationCache previousCache = previousCycle.getComputationCache(calcConfigurationName);
LiveDataDeltaCalculator deltaCalculator = new LiveDataDeltaCalculator(depGraph, cache, previousCache);
deltaCalculator.computeDelta();
s_logger.info("Computed delta for calc conf {}. Of {} nodes, {} require recomputation.", new Object[] {calcConfigurationName, depGraph.getSize(), deltaCalculator.getChangedNodes().size()});
for (DependencyNode unchangedNode : deltaCalculator.getUnchangedNodes()) {
markExecuted(unchangedNode);
for (ValueSpecification spec : unchangedNode.getOutputValues()) {
Object previousValue = previousCache.getValue(spec);
if (previousValue != null) {
cache.putSharedValue(new ComputedValue(spec, previousValue));
}
}
}
}
}
// REVIEW kirk 2009-11-03 -- This is a database kernel. Act accordingly.
public void executePlans() {
if (_state != State.INPUTS_PREPARED) {
throw new IllegalStateException("State must be " + State.INPUTS_PREPARED);
}
_state = State.EXECUTING;
LinkedList<Future<?>> futures = new LinkedList<Future<?>>();
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
s_logger.info("Executing plans for calculation configuration {}", calcConfigurationName);
DependencyGraph depGraph = getExecutableDependencyGraph(calcConfigurationName);
s_logger.info("Submitting {} for execution by {}", depGraph, getDependencyGraphExecutor());
Future<?> future = getDependencyGraphExecutor().execute(depGraph, _statisticsGatherer);
futures.add(future);
}
while (!futures.isEmpty()) {
Future<?> future = futures.poll();
try {
future.get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
s_logger.info("Waiting for " + future);
futures.add(future);
} catch (InterruptedException e) {
Thread.interrupted();
s_logger.info("Interrupted while waiting for completion of " + future);
futures.add(future);
} catch (ExecutionException e) {
s_logger.error("Unable to execute dependency graph", e);
// Should we be swallowing this or not?
throw new OpenGammaRuntimeException("Unable to execute dependency graph", e);
}
}
_state = State.FINISHED;
}
private DependencyGraph getDependencyGraph(String calcConfName) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfName);
return depGraph;
}
/**
* @param calcConfName configuration name
* @return A dependency graph with nodes already executed stripped out.
* See {@link #computeDelta} and how it calls {@link #markExecuted}.
*/
protected DependencyGraph getExecutableDependencyGraph(String calcConfName) {
DependencyGraph originalDepGraph = getDependencyGraph(calcConfName);
DependencyGraph dependencyGraph = originalDepGraph.subGraph(new DependencyNodeFilter() {
public boolean accept(DependencyNode node) {
// LiveData functions do not need to be computed.
if (node.getFunction().getFunction() instanceof LiveDataSourcingFunction) {
markExecuted(node);
}
return !isExecuted(node);
}
});
return dependencyGraph;
}
// --------------------------------------------------------------------------
public void populateResultModel() {
Instant resultTimestamp = Instant.nowSystemClock();
getResultModel().setResultTimestamp(resultTimestamp);
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getViewEvaluationModel().getDependencyGraph(calcConfigurationName);
populateResultModel(calcConfigurationName, depGraph);
}
_endTime = System.nanoTime();
}
protected void populateResultModel(String calcConfigurationName, DependencyGraph depGraph) {
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
if (!getViewDefinition().getResultModelDefinition().shouldOutputResult(outputSpec, depGraph)) {
continue;
}
Object value = computationCache.getValue(outputSpec);
if (value != null) {
getResultModel().addValue(calcConfigurationName, new ComputedValue(outputSpec, value));
}
}
}
public void releaseResources() {
if (_state != State.FINISHED) {
throw new IllegalStateException("State must be " + State.FINISHED);
}
if (getViewDefinition().isDumpComputationCacheToDisk()) {
dumpComputationCachesToDisk();
}
getProcessingContext().getLiveDataSnapshotProvider().releaseSnapshot(getValuationTime().toEpochMillisLong());
getProcessingContext().getComputationCacheSource().releaseCaches(getViewName(), getValuationTime().toEpochMillisLong());
_state = State.CLEANED;
}
public void dumpComputationCachesToDisk() {
for (String calcConfigurationName : getAllCalculationConfigurationNames()) {
DependencyGraph depGraph = getDependencyGraph(calcConfigurationName);
ViewComputationCache computationCache = getComputationCache(calcConfigurationName);
TreeMap<String, Object> key2Value = new TreeMap<String, Object>();
for (ValueSpecification outputSpec : depGraph.getOutputValues()) {
Object value = computationCache.getValue(outputSpec);
key2Value.put(outputSpec.toString(), value);
}
try {
File file = File.createTempFile("computation-cache-" + calcConfigurationName + "-", ".txt");
s_logger.info("Dumping cache for calc conf " + calcConfigurationName + " to " + file.getAbsolutePath());
FileWriter writer = new FileWriter(file);
writer.write(key2Value.toString());
writer.close();
} catch (IOException e) {
throw new RuntimeException("Writing cache to file failed", e);
}
}
}
// --------------------------------------------------------------------------
public boolean isExecuted(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _executedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markExecuted(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_executedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
public boolean isFailed(DependencyNode node) {
if (node == null) {
return true;
}
_nodeExecutionLock.readLock().lock();
try {
return _failedNodes.contains(node);
} finally {
_nodeExecutionLock.readLock().unlock();
}
}
public void markFailed(DependencyNode node) {
if (node == null) {
return;
}
_nodeExecutionLock.writeLock().lock();
try {
_failedNodes.add(node);
} finally {
_nodeExecutionLock.writeLock().unlock();
}
}
}
Diff Result
No diff
Case 44 - java_ogplatform.rev_046c4_db953..SwapLegFudgeBuilder.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import org.joda.beans.BeanBuilder;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/**
 * Serializes the fields common to all {@code SwapLeg} implementations into the given message.
 * Subclass builders call this before adding their own fields; field names must stay in sync
 * with {@link #fromFudgeMsg}.
 */
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
// Region id is serialized as a sub-message via its dedicated builder.
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
// Notional is polymorphic, so the serializer records the concrete type against the Notional base class.
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
}
/**
 * Populates a {@code SwapLeg} bean builder with the common fields read from the given message.
 * Mirrors {@link #toFudgeMsg}; field names must stay in sync between the two.
 */
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
// Region id arrives as a sub-message; decode it with its dedicated builder.
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
// Notional is polymorphic; the deserializer resolves the concrete subtype from the sub-message.
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
BeanBuilder<? extends FixedInterestRateLeg> builder = FixedInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FixedInterestRateLeg.meta().rate().name(), msg.getDouble(RATE_FIELD_NAME));
return builder.build();
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String IS_IBOR_FIELD_NAME = "isIBOR";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, IS_IBOR_FIELD_NAME, object.isIbor());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().ibor().name(), msg.getBoolean(IS_IBOR_FIELD_NAME));
return builder.build();
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import org.joda.beans.BeanBuilder;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/**
 * Serializes the fields common to all {@code SwapLeg} implementations into {@code msg}.
 *
 * @param serializer the Fudge serializer, not null
 * @param object the swap leg to encode, not null
 * @param msg the message that receives the fields, not null
 */
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
// Region identifier is encoded as a sub-message via its dedicated builder.
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
// Notional is written with explicit type information (Notional.class overload).
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
}
/**
 * Deserializes the fields common to all {@code SwapLeg} implementations from
 * {@code msg} into the supplied Joda-Beans builder.
 *
 * @param deserializer the Fudge deserializer, not null
 * @param msg the message to decode, not null
 * @param object the bean builder to populate, not null
 */
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
//-------------------------------------------------------------------------
/**
 * A Fudge builder for {@code FixedInterestRateLeg}.
 */
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
// Common leg fields first, then the fixed-rate specific field.
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
// Build via the Joda-Beans meta-bean builder rather than setters.
BeanBuilder<? extends FixedInterestRateLeg> builder = FixedInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FixedInterestRateLeg.meta().rate().name(), msg.getDouble(RATE_FIELD_NAME));
return builder.build();
}
}
//-------------------------------------------------------------------------
/**
 * A Fudge builder for {@code FloatingInterestRateLeg}.
 */
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String IS_IBOR_FIELD_NAME = "isIBOR";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
// Common leg fields first, then the floating-rate specific fields.
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, IS_IBOR_FIELD_NAME, object.isIbor());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
// Build via the Joda-Beans meta-bean builder rather than setters.
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().ibor().name(), msg.getBoolean(IS_IBOR_FIELD_NAME));
return builder.build();
}
}
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {

  /** Field name. */
  public static final String DAY_COUNT_FIELD_NAME = "dayCount";
  /** Field name. */
  public static final String FREQUENCY_FIELD_NAME = "frequency";
  /** Field name. */
  public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
  /** Field name. */
  public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
  /** Field name. */
  public static final String NOTIONAL_FIELD_NAME = "notional";

  /**
   * Serializes the fields common to all {@code SwapLeg} implementations into {@code msg}.
   *
   * @param serializer the Fudge serializer, not null
   * @param object the swap leg to encode, not null
   * @param msg the message that receives the fields, not null
   */
  public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
    addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
    addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
    addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
    addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
    // Notional is written with explicit type information (Notional.class overload).
    addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
  }

  /**
   * Deserializes the fields common to all {@code SwapLeg} implementations from
   * {@code msg} into the supplied leg instance via its setters.
   *
   * @param deserializer the Fudge deserializer, not null
   * @param msg the message to decode, not null
   * @param object the swap leg instance to populate, not null
   */
  public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
    object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
    object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
    object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
    object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
    object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FixedInterestRateLeg}.
   */
  @FudgeBuilderFor(FixedInterestRateLeg.class)
  public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {

    /** Field name. */
    public static final String RATE_FIELD_NAME = "rate";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
      final MutableFudgeMsg msg = serializer.newMessage();
      // Common leg fields first, then the fixed-rate specific field.
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
      addToMessage(msg, RATE_FIELD_NAME, object.getRate());
      return msg;
    }

    @Override
    public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      // Fix: local was misleadingly named "floatingInterestRateLeg" although it is a FixedInterestRateLeg.
      FixedInterestRateLeg fixedInterestRateLeg = new FixedInterestRateLeg();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, fixedInterestRateLeg);
      fixedInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
      return fixedInterestRateLeg;
    }
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FloatingInterestRateLeg}.
   */
  @FudgeBuilderFor(FloatingInterestRateLeg.class)
  public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {

    /** Field name. */
    public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
    /** Field name. */
    public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
    /** Field name. */
    public static final String SPREAD_FIELD_NAME = "spread";
    /** Field name. */
    public static final String IS_IBOR_FIELD_NAME = "isIBOR";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
      final MutableFudgeMsg msg = serializer.newMessage();
      // Common leg fields first, then the floating-rate specific fields.
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
      addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
      addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
      addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
      addToMessage(msg, IS_IBOR_FIELD_NAME, object.isIbor());
      return msg;
    }

    @Override
    public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
      floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
      floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
      floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
      floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
      return floatingInterestRateLeg;
    }
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {

  /** Field name. */
  public static final String DAY_COUNT_FIELD_NAME = "dayCount";
  /** Field name. */
  public static final String FREQUENCY_FIELD_NAME = "frequency";
  /** Field name. */
  public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
  /** Field name. */
  public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
  /** Field name. */
  public static final String NOTIONAL_FIELD_NAME = "notional";

  /**
   * Serializes the fields common to all {@code SwapLeg} implementations into {@code msg}.
   *
   * @param serializer the Fudge serializer, not null
   * @param object the swap leg to encode, not null
   * @param msg the message that receives the fields, not null
   */
  public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
    addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
    addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
    addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
    addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
    // Notional is written with explicit type information (Notional.class overload).
    addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
  }

  /**
   * Deserializes the fields common to all {@code SwapLeg} implementations from
   * {@code msg} into the supplied leg instance via its setters.
   *
   * @param deserializer the Fudge deserializer, not null
   * @param msg the message to decode, not null
   * @param object the swap leg instance to populate, not null
   */
  public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
    object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
    object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
    object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
    object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
    object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FixedInterestRateLeg}.
   */
  @FudgeBuilderFor(FixedInterestRateLeg.class)
  public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {

    /** Field name. */
    public static final String RATE_FIELD_NAME = "rate";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
      final MutableFudgeMsg msg = serializer.newMessage();
      // Common leg fields first, then the fixed-rate specific field.
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
      addToMessage(msg, RATE_FIELD_NAME, object.getRate());
      return msg;
    }

    @Override
    public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      // Fix: local was misleadingly named "floatingInterestRateLeg" although it is a FixedInterestRateLeg.
      FixedInterestRateLeg fixedInterestRateLeg = new FixedInterestRateLeg();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, fixedInterestRateLeg);
      fixedInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
      return fixedInterestRateLeg;
    }
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FloatingInterestRateLeg}.
   */
  @FudgeBuilderFor(FloatingInterestRateLeg.class)
  public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {

    /** Field name. */
    public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
    /** Field name. */
    public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
    /** Field name. */
    public static final String SPREAD_FIELD_NAME = "spread";
    /** Field name. */
    public static final String IS_IBOR_FIELD_NAME = "isIBOR";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
      final MutableFudgeMsg msg = serializer.newMessage();
      // Common leg fields first, then the floating-rate specific fields.
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
      addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
      addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
      addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
      addToMessage(msg, IS_IBOR_FIELD_NAME, object.isIbor());
      return msg;
    }

    @Override
    public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
      floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
      floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
      floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
      floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
      return floatingInterestRateLeg;
    }
  }
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import org.joda.beans.BeanBuilder;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
/**
 * Serializes the fields common to all {@code SwapLeg} implementations into {@code msg},
 * including the end-of-month flag.
 *
 * @param serializer the Fudge serializer, not null
 * @param object the swap leg to encode, not null
 * @param msg the message that receives the fields, not null
 */
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
// Notional is written with explicit type information (Notional.class overload).
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
/**
 * Deserializes the fields common to all {@code SwapLeg} implementations from
 * {@code msg} into the supplied Joda-Beans builder.
 *
 * @param deserializer the Fudge deserializer, not null
 * @param msg the message to decode, not null
 * @param object the bean builder to populate, not null
 */
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
//-------------------------------------------------------------------------
/**
 * A Fudge builder for {@code FixedInterestRateLeg}.
 */
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
// Common leg fields first, then the fixed-rate specific field.
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
BeanBuilder<? extends FixedInterestRateLeg> builder = FixedInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FixedInterestRateLeg.meta().rate().name(), msg.getDouble(RATE_FIELD_NAME));
return builder.build();
}
}
//-------------------------------------------------------------------------
/**
 * A Fudge builder for {@code FloatingInterestRateLeg}.
 */
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
// Common leg fields first, then the floating-rate specific fields.
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
// The enum is stored by name, matching the valueOf lookup in buildObject.
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
// NOTE(review): valueOf throws NullPointerException if the floatingRateType field is
// absent from the message (e.g. data written by the older isIBOR format) — confirm
// whether legacy messages can reach this builder.
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import org.joda.beans.BeanBuilder;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {

  /** Field name. */
  public static final String DAY_COUNT_FIELD_NAME = "dayCount";
  /** Field name. */
  public static final String FREQUENCY_FIELD_NAME = "frequency";
  /** Field name. */
  public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
  /** Field name. */
  public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
  /** Field name. */
  public static final String NOTIONAL_FIELD_NAME = "notional";
  /** Field name. */
  public static final String IS_EOM_FIELD_NAME = "isEOM";

  /**
   * Adds the fields shared by every {@code SwapLeg} implementation, including the
   * end-of-month flag, to the given message.
   *
   * @param serializer  the Fudge serializer, not null
   * @param object  the swap leg being encoded, not null
   * @param msg  the message that receives the encoded fields, not null
   */
  public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
    addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
    addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
    addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
    addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
    // The notional is serialized with explicit type information (Notional.class overload).
    addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
    addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
  }

  /**
   * Reads the fields shared by every {@code SwapLeg} implementation from the given
   * message and stores them in the supplied Joda-Beans builder.
   *
   * @param deserializer  the Fudge deserializer, not null
   * @param msg  the message being decoded, not null
   * @param object  the bean builder to populate, not null
   */
  public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
    object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
    object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
    object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
    object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
    object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
    object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FixedInterestRateLeg}.
   */
  @FudgeBuilderFor(FixedInterestRateLeg.class)
  public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {

    /** Field name. */
    public static final String RATE_FIELD_NAME = "rate";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
      // Common leg fields first, then the fixed-rate specific field.
      final MutableFudgeMsg message = serializer.newMessage();
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, message);
      addToMessage(message, RATE_FIELD_NAME, object.getRate());
      return message;
    }

    @Override
    public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      final BeanBuilder<? extends FixedInterestRateLeg> legBuilder = FixedInterestRateLeg.meta().builder();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, legBuilder);
      legBuilder.set(FixedInterestRateLeg.meta().rate().name(), msg.getDouble(RATE_FIELD_NAME));
      return legBuilder.build();
    }
  }

  //-------------------------------------------------------------------------
  /**
   * A Fudge builder for {@code FloatingInterestRateLeg}.
   */
  @FudgeBuilderFor(FloatingInterestRateLeg.class)
  public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {

    /** Field name. */
    public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
    /** Field name. */
    public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
    /** Field name. */
    public static final String SPREAD_FIELD_NAME = "spread";
    /** Field name. */
    public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";

    @Override
    public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
      // Common leg fields first, then the floating-rate specific fields.
      final MutableFudgeMsg message = serializer.newMessage();
      SwapLegFudgeBuilder.toFudgeMsg(serializer, object, message);
      addToMessage(message, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
      addToMessage(message, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
      addToMessage(message, SPREAD_FIELD_NAME, object.getSpread());
      // The enum is stored by name, matching the valueOf lookup in buildObject.
      addToMessage(message, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
      return message;
    }

    @Override
    public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
      final BeanBuilder<? extends FloatingInterestRateLeg> legBuilder = FloatingInterestRateLeg.meta().builder();
      SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, legBuilder);
      legBuilder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
      legBuilder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
      legBuilder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
      legBuilder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
      return legBuilder.build();
    }
  }
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
// Writes the fields common to every SwapLeg into msg; the leg-specific builders
// below call this before appending their own fields.
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
// Region id is serialized as a sub-message via the ExternalId builder.
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
// Notional is serialized with an explicit base-class hint via the serializer overload.
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
// Restores the common SwapLeg fields from msg; mirror of toFudgeMsg above.
// NOTE(review): although the parameter is a SwapLeg instance, fields are assigned via
// set(propertyName, value) rather than typed setters — confirm SwapLeg's bean API supports this.
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
// Serializes the common SwapLeg fields followed by the fixed rate.
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
// Rebuilds a FixedInterestRateLeg: common fields first, then the fixed rate.
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
final FixedInterestRateLeg leg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, leg);
leg.setRate(msg.getDouble(RATE_FIELD_NAME));
return leg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
<<<<<<< MINE
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
=======
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
<<<<<<< MINE
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
}
=======
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
<<<<<<< MINE
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
=======
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
>>>>>>> YOURS
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.security.swap;
import org.fudgemsg.FudgeMsg;
import org.fudgemsg.MutableFudgeMsg;
import org.fudgemsg.mapping.FudgeBuilder;
import org.fudgemsg.mapping.FudgeBuilderFor;
import org.fudgemsg.mapping.FudgeDeserializer;
import org.fudgemsg.mapping.FudgeSerializer;
import com.opengamma.financial.convention.businessday.BusinessDayConvention;
import com.opengamma.financial.convention.daycount.DayCount;
import com.opengamma.financial.convention.frequency.Frequency;
import com.opengamma.id.ExternalIdFudgeBuilder;
import com.opengamma.util.fudgemsg.AbstractFudgeBuilder;
/**
* A Fudge builder for {@code SwapLeg} implementations.
*/
public class SwapLegFudgeBuilder extends AbstractFudgeBuilder {
/** Field name. */
public static final String DAY_COUNT_FIELD_NAME = "dayCount";
/** Field name. */
public static final String FREQUENCY_FIELD_NAME = "frequency";
/** Field name. */
public static final String REGION_IDENTIFIER_FIELD_NAME = "regionIdentifier";
/** Field name. */
public static final String BUSINESS_DAY_CONVENTION_FIELD_NAME = "businessDayConvention";
/** Field name. */
public static final String NOTIONAL_FIELD_NAME = "notional";
/** Field name. */
public static final String IS_EOM_FIELD_NAME = "isEOM";
public static void toFudgeMsg(FudgeSerializer serializer, SwapLeg object, final MutableFudgeMsg msg) {
addToMessage(msg, DAY_COUNT_FIELD_NAME, object.getDayCount());
addToMessage(msg, FREQUENCY_FIELD_NAME, object.getFrequency());
addToMessage(msg, REGION_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getRegionId()));
addToMessage(msg, BUSINESS_DAY_CONVENTION_FIELD_NAME, object.getBusinessDayConvention());
addToMessage(serializer, msg, NOTIONAL_FIELD_NAME, object.getNotional(), Notional.class);
addToMessage(msg, IS_EOM_FIELD_NAME, object.isEom());
}
<<<<<<< MINE
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, SwapLeg object) {
object.setDayCount(msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.setFrequency(msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.setRegionId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.setBusinessDayConvention(msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.setNotional(deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
=======
public static void fromFudgeMsg(FudgeDeserializer deserializer, FudgeMsg msg, BeanBuilder<? extends SwapLeg> object) {
object.set(SwapLeg.meta().dayCount().name(), msg.getValue(DayCount.class, DAY_COUNT_FIELD_NAME));
object.set(SwapLeg.meta().frequency().name(), msg.getValue(Frequency.class, FREQUENCY_FIELD_NAME));
object.set(SwapLeg.meta().regionId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(REGION_IDENTIFIER_FIELD_NAME)));
object.set(SwapLeg.meta().businessDayConvention().name(), msg.getValue(BusinessDayConvention.class, BUSINESS_DAY_CONVENTION_FIELD_NAME));
object.set(SwapLeg.meta().notional().name(), deserializer.fudgeMsgToObject(Notional.class, msg.getMessage(NOTIONAL_FIELD_NAME)));
object.set(SwapLeg.meta().eom().name(), msg.getBoolean(IS_EOM_FIELD_NAME));
>>>>>>> YOURS
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FixedInterestRateLeg}.
*/
@FudgeBuilderFor(FixedInterestRateLeg.class)
public static class FixedInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FixedInterestRateLeg> {
/** Field name. */
public static final String RATE_FIELD_NAME = "rate";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FixedInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, RATE_FIELD_NAME, object.getRate());
return msg;
}
@Override
public FixedInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
FixedInterestRateLeg floatingInterestRateLeg = new FixedInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setRate(msg.getDouble(RATE_FIELD_NAME));
return floatingInterestRateLeg;
}
}
//-------------------------------------------------------------------------
/**
* A Fudge builder for {@code FloatingInterestRateLeg}.
*/
@FudgeBuilderFor(FloatingInterestRateLeg.class)
public static class FloatingInterestRateLegBuilder extends SwapLegFudgeBuilder implements FudgeBuilder<FloatingInterestRateLeg> {
/** Field name. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME = "floatingReferenceRateIdentifier";
/** Field name. */
public static final String INITIAL_FLOATING_RATE_FIELD_NAME = "initialFloatingRate";
/** Field name. */
public static final String SPREAD_FIELD_NAME = "spread";
/** Field name. */
public static final String FLOATING_RATE_TYPE_FIELD_NAME = "floatingRateType";
@Override
public MutableFudgeMsg buildMessage(FudgeSerializer serializer, FloatingInterestRateLeg object) {
final MutableFudgeMsg msg = serializer.newMessage();
SwapLegFudgeBuilder.toFudgeMsg(serializer, object, msg);
addToMessage(msg, FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME, ExternalIdFudgeBuilder.toFudgeMsg(serializer, object.getFloatingReferenceRateId()));
addToMessage(msg, INITIAL_FLOATING_RATE_FIELD_NAME, object.getInitialFloatingRate());
addToMessage(msg, SPREAD_FIELD_NAME, object.getSpread());
addToMessage(msg, FLOATING_RATE_TYPE_FIELD_NAME, object.getFloatingRateType().name());
return msg;
}
@Override
public FloatingInterestRateLeg buildObject(FudgeDeserializer deserializer, FudgeMsg msg) {
<<<<<<< MINE
FloatingInterestRateLeg floatingInterestRateLeg = new FloatingInterestRateLeg();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, floatingInterestRateLeg);
floatingInterestRateLeg.setFloatingReferenceRateId(ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
floatingInterestRateLeg.setInitialFloatingRate(msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
floatingInterestRateLeg.setSpread(msg.getDouble(SPREAD_FIELD_NAME));
floatingInterestRateLeg.setIbor(msg.getBoolean(IS_IBOR_FIELD_NAME));
return floatingInterestRateLeg;
=======
BeanBuilder<? extends FloatingInterestRateLeg> builder = FloatingInterestRateLeg.meta().builder();
SwapLegFudgeBuilder.fromFudgeMsg(deserializer, msg, builder);
builder.set(FloatingInterestRateLeg.meta().floatingReferenceRateId().name(), ExternalIdFudgeBuilder.fromFudgeMsg(deserializer, msg.getMessage(FLOATING_REFERENCE_RATE_IDENTIFIER_FIELD_NAME)));
builder.set(FloatingInterestRateLeg.meta().initialFloatingRate().name(), msg.getDouble(INITIAL_FLOATING_RATE_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().spread().name(), msg.getDouble(SPREAD_FIELD_NAME));
builder.set(FloatingInterestRateLeg.meta().floatingRateType().name(), FloatingRateType.valueOf(msg.getString(FLOATING_RATE_TYPE_FIELD_NAME)));
return builder.build();
>>>>>>> YOURS
}
}
}
Diff Result
No diff
Case 45 - java_ogplatform.rev_093b1_84fff..EquityVarianceSwapFunction.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.UniqueId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
private final String _valueRequirementName;
private final String _curveDefinitionName;
private final String _surfaceDefinitionName;
@SuppressWarnings("unused")
private final String _forwardCalculationMethod;
private EquityVarianceSwapConverter _converter; // set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
public EquityVarianceSwapFunction(final String valueRequirementName, final String curveDefinitionName, final String surfaceDefinitionName, final String forwardCalculationMethod) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(curveDefinitionName, "curve definition name");
Validate.notNull(surfaceDefinitionName, "surface definition name");
Validate.notNull(forwardCalculationMethod, "forward calculation method");
_valueRequirementName = valueRequirementName;
_curveDefinitionName = curveDefinitionName;
_surfaceDefinitionName = surfaceDefinitionName;
_forwardCalculationMethod = forwardCalculationMethod;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
Validate.isTrue(discountFactor != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
// 3. Compute and return the value (ComputedValue)
return computeValues(target, inputs, deriv, market);
}
protected abstract Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
}
protected String getCurveDefinitionName() {
return _curveDefinitionName;
}
protected String getSurfaceName() {
return _surfaceDefinitionName;
}
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(id.getScheme().getName(), id.getValue()));
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
/**
 * Requirement for the discounting yield curve in the security's currency, constrained by the
 * configured curve definition name.
 *
 * @param security the variance swap security
 * @return the yield curve requirement
 */
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, _curveDefinitionName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Requirement for the interpolated volatility surface for the underlier.
 * <p>
 * Strong Bloomberg tickers are rewritten to weak ones — presumably so the surface does not
 * subscribe to live ticking data (NOTE(review): confirm against the surface source).
 *
 * @param security the variance swap security
 * @return the interpolated volatility surface requirement
 */
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
  // Use the shared constant instead of a bare "EQUITY_OPTION" literal, consistent with the
  // other revisions of this function that use InstrumentTypeProperties.EQUITY_OPTION.
  final ValueProperties properties = ValueProperties.builder()
      .with(ValuePropertyNames.SURFACE, _surfaceDefinitionName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
      .get();
  final ExternalId id = security.getSpotUnderlyingId();
  final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER)
      ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue())
      : UniqueId.of(id.getScheme().getName(), id.getValue());
  return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Requirement for the underlier's historical market-value time series (used to accrue realized
 * variance). Returns null when the resolver cannot locate a series, which makes the whole
 * function inapplicable via getRequirements.
 *
 * @param context the compilation context supplying the time-series resolver
 * @param security the variance swap security
 * @return the time-series requirement, or null if the series cannot be resolved
 */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
// Full history up to and including the valuation time.
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Initializes the security-to-analytics converter from the compilation context's holiday source.
 * Must run before execute(), which relies on _converter.
 */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
/**
 * This function targets individual securities.
 */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.SECURITY;
}
/**
 * Applies only to equity variance swap securities.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getSecurity() instanceof EquityVarianceSwapSecurity;
}
/**
 * Gathers the discount curve, spot, volatility surface and historical time-series requirements.
 * Returns null (function inapplicable) when the underlier's time series cannot be resolved.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
  // Resolve the only requirement that can fail first; bail out before building the rest.
  final ValueRequirement timeSeriesRequirement = getTimeSeriesRequirement(context, security);
  if (timeSeriesRequirement == null) {
    return null;
  }
  final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
  requirements.add(getDiscountCurveRequirement(security));
  requirements.add(getSpotRequirement(security));
  requirements.add(getVolatilitySurfaceRequirement(security));
  requirements.add(timeSeriesRequirement);
  return requirements;
}
/**
 * A single result: the configured value requirement with this function's properties.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.UniqueId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
/**
 * Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication.
 * <p>
 * The curve and surface definition names are fixed at construction time. Subclasses implement
 * {@link #computeValues} to turn the priced derivative and the assembled market data bundle
 * into engine results.
 */
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
  /** The name of the value this function produces. */
  private final String _valueRequirementName;
  /** The name of the yield curve definition used for discounting. */
  private final String _curveDefinitionName;
  /** The name of the volatility surface definition. */
  private final String _surfaceDefinitionName;
  /** Accepted for constructor compatibility; not currently used in pricing. */
  @SuppressWarnings("unused")
  private final String _forwardCalculationMethod;
  private EquityVarianceSwapConverter _converter; // set in init()
  /** CalculationMethod constraint used in configuration to choose this model */
  public static final String CALCULATION_METHOD = "StaticReplication";
  /** Method may be Strike or Moneyness TODO Confirm */
  public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";

  /**
   * @param valueRequirementName the name of the value produced, not null
   * @param curveDefinitionName the discounting curve definition name, not null
   * @param surfaceDefinitionName the volatility surface definition name, not null
   * @param forwardCalculationMethod the forward calculation method, not null
   */
  public EquityVarianceSwapFunction(final String valueRequirementName, final String curveDefinitionName, final String surfaceDefinitionName, final String forwardCalculationMethod) {
    Validate.notNull(valueRequirementName, "value requirement name");
    Validate.notNull(curveDefinitionName, "curve definition name");
    Validate.notNull(surfaceDefinitionName, "surface definition name");
    Validate.notNull(forwardCalculationMethod, "forward calculation method");
    _valueRequirementName = valueRequirementName;
    _curveDefinitionName = curveDefinitionName;
    _surfaceDefinitionName = surfaceDefinitionName;
    _forwardCalculationMethod = forwardCalculationMethod;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // 1. Build the analytic derivative to be priced
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final Clock snapshotClock = executionContext.getValuationClock();
    final ZonedDateTime now = snapshotClock.zonedDateTime();
    final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
    final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
    final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
    // 2. Build up the market data bundle
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security));
    if (volSurfaceObject == null) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
    //TODO no choice of other surfaces
    final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
    final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security));
    if (discountObject == null) {
      throw new OpenGammaRuntimeException("Could not get Discount Curve");
    }
    if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve discountCurve = (YieldCurve) discountObject;
    final Object spotObject = inputs.getValue(getSpotRequirement(security));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
    final double discountFactor = discountCurve.getDiscountFactor(expiry);
    // A zero discount factor indicates a broken curve, not a legitimate rate.
    Validate.isTrue(discountFactor != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
    final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
    // 3. Compute and return the value (ComputedValue)
    return computeValues(target, inputs, deriv, market);
  }

  /**
   * Computes the desired result(s) from the priced derivative and market data.
   *
   * @param target the computation target
   * @param inputs the resolved function inputs
   * @param derivative the variance swap analytic derivative
   * @param market the static-replication market data bundle
   * @return the computed values
   */
  protected abstract Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);

  /**
   * Builds the specification of the value this function produces for the given target.
   */
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final ValueProperties properties = getValueProperties(target);
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
  }

  /**
   * Result properties: the security's currency plus the static-replication calculation method.
   */
  protected ValueProperties getValueProperties(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
  }

  /**
   * @return the name of the yield curve definition used for discounting
   */
  protected String getCurveDefinitionName() {
    return _curveDefinitionName;
  }

  /**
   * @return the name of the volatility surface definition
   */
  protected String getSurfaceName() {
    return _surfaceDefinitionName;
  }

  /**
   * Market-value requirement for the swap's spot underlier.
   */
  private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
    final ExternalId id = security.getSpotUnderlyingId();
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(id.getScheme().getName(), id.getValue()));
  }

  // Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
  /**
   * Requirement for the discounting yield curve in the security's currency.
   */
  private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
  }

  /**
   * Requirement for the interpolated volatility surface for the underlier. Strong Bloomberg
   * tickers are rewritten to weak ones — presumably to avoid live ticking data (NOTE(review):
   * confirm against the surface source).
   */
  private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
    // Shared constant used instead of a bare "EQUITY_OPTION" literal, consistent with the
    // later revision of this function.
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, _surfaceDefinitionName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    final ExternalId id = security.getSpotUnderlyingId();
    final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) :
        UniqueId.of(id.getScheme().getName(), id.getValue());
    return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }

  /**
   * Requirement for the underlier's historical market-value series (realized variance accrual).
   * Returns null when the resolver cannot locate a series.
   */
  private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Initializes the security-to-analytics converter; must run before execute().
   */
  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    _converter = new EquityVarianceSwapConverter(holidaySource);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.SECURITY;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getSecurity() instanceof EquityVarianceSwapSecurity;
  }

  /**
   * Gathers the discount curve, spot, volatility surface and time-series requirements;
   * returns null (inapplicable) when the time series cannot be resolved.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    // Resolve the only requirement that can fail first; bail out before building the rest.
    final ValueRequirement timeSeriesRequirement = getTimeSeriesRequirement(context, security);
    if (timeSeriesRequirement == null) {
      return null;
    }
    final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
    requirements.add(getDiscountCurveRequirement(security));
    requirements.add(getSpotRequirement(security));
    requirements.add(getVolatilitySurfaceRequirement(security));
    requirements.add(timeSeriesRequirement);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(getValueSpecification(target));
  }
}
// ===== Left (merge/revision marker) =====
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.UniqueId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
 * Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication.
 * <p>
 * Unlike the earlier revision, curve name, curve calculation configuration and surface name are
 * taken from the desired value's constraints rather than being fixed at construction time.
 * Subclasses implement {@link #computeValues} to turn the priced derivative and market data
 * bundle into engine results.
 */
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
  /** The name of the value this function produces. */
  private final String _valueRequirementName;
  private EquityVarianceSwapConverter _converter; // set in init()
  /** CalculationMethod constraint used in configuration to choose this model */
  public static final String CALCULATION_METHOD = "StaticReplication";
  /** Method may be Strike or Moneyness TODO Confirm */
  public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";

  /**
   * @param valueRequirementName the name of the value produced, not null
   */
  public EquityVarianceSwapFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
    final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    // 1. Build the analytic derivative to be priced
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final Clock snapshotClock = executionContext.getValuationClock();
    // Use the real valuation instant; the previous ".minusYears(2)" shift was explicitly
    // marked "TODO remove me - just for testing" and has been removed.
    final ZonedDateTime now = snapshotClock.zonedDateTime();
    final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
    final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
    final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
    // 2. Build up the market data bundle
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
    if (volSurfaceObject == null) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
    //TODO no choice of other surfaces
    final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
    final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
    if (discountObject == null) {
      throw new OpenGammaRuntimeException("Could not get Discount Curve");
    }
    if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve discountCurve = (YieldCurve) discountObject;
    final Object spotObject = inputs.getValue(getSpotRequirement(security));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
    final double discountFactor = discountCurve.getDiscountFactor(expiry);
    // Bit-pattern comparison: rejects +0.0 exactly (NOTE(review): -0.0 would pass — confirm intent).
    ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
    final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
    final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
    // 3. Compute and return the value (ComputedValue)
    return computeValues(resultSpec, inputs, deriv, market);
  }

  /**
   * Computes the desired result(s) from the priced derivative and market data.
   *
   * @param resultSpec the fully-constrained specification of the result
   * @param inputs the resolved function inputs
   * @param derivative the variance swap analytic derivative
   * @param market the static-replication market data bundle
   * @return the computed values
   */
  protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);

  /**
   * Value specification with wildcard curve/config/surface properties (used for getResults()).
   */
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final ValueProperties properties = getValueProperties(target);
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
  }

  /**
   * Value specification with fully-resolved curve/config/surface properties.
   */
  protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
    final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
  }

  /**
   * Result properties with CURVE, CURVE_CALCULATION_CONFIG and SURFACE left open.
   */
  protected ValueProperties getValueProperties(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .withAny(ValuePropertyNames.SURFACE).get();
  }

  /**
   * Result properties with CURVE, CURVE_CALCULATION_CONFIG and SURFACE pinned to the given values.
   */
  protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
        .with(ValuePropertyNames.SURFACE, surfaceName).get();
  }

  /**
   * Market-value requirement for the swap's spot underlier.
   */
  private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
    final ExternalId id = security.getSpotUnderlyingId();
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(id.getScheme().getName(), id.getValue()));
  }

  // Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
  /**
   * Requirement for the discounting yield curve in the security's currency.
   */
  private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
  }

  /**
   * Requirement for the interpolated volatility surface. Strong Bloomberg tickers are rewritten
   * to weak ones — presumably to avoid live ticking data (NOTE(review): confirm).
   */
  private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    final ExternalId id = security.getSpotUnderlyingId();
    final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) :
        UniqueId.of(id.getScheme().getName(), id.getValue());
    return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }

  /**
   * Requirement for the underlier's historical market-value series; null if unresolvable.
   */
  private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Initializes the security-to-analytics converter; must run before execute().
   */
  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    _converter = new EquityVarianceSwapConverter(holidaySource);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.SECURITY;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getSecurity() instanceof EquityVarianceSwapSecurity;
  }

  /**
   * Requires exactly one curve name, one curve calculation configuration and one surface name
   * in the constraints; returns null (inapplicable) otherwise, or when the underlier's time
   * series cannot be resolved.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
    if (curveNames == null || curveNames.size() != 1) {
      return null;
    }
    final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
      return null;
    }
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      return null;
    }
    final String curveName = Iterables.getOnlyElement(curveNames);
    final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
    final String surfaceName = Iterables.getOnlyElement(surfaceNames);
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
    requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
    requirements.add(getSpotRequirement(security));
    requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
    final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
    if (requirement == null) {
      return null;
    }
    requirements.add(requirement);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(getValueSpecification(target));
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.UniqueId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
private final String _valueRequirementName;
private EquityVarianceSwapConverter _converter; // set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Prices the variance swap by static replication: converts the security to its analytic
 * derivative at the valuation instant, assembles the market data bundle (vol surface,
 * discount curve, forward curve) from the resolved inputs, and delegates to computeValues.
 *
 * @throws OpenGammaRuntimeException if the vol surface, discount curve or spot is missing
 * @throws IllegalArgumentException if the discount input is not a YieldCurve
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
  final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  // 1. Build the analytic derivative to be priced
  final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
  final Clock snapshotClock = executionContext.getValuationClock();
  // Use the real valuation instant; the previous ".minusYears(2)" shift was explicitly
  // marked "TODO remove me - just for testing" and has been removed.
  final ZonedDateTime now = snapshotClock.zonedDateTime();
  final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
  final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
  final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
  // 2. Build up the market data bundle
  final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
  if (volSurfaceObject == null) {
    throw new OpenGammaRuntimeException("Could not get Volatility Surface");
  }
  final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
  //TODO no choice of other surfaces
  final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
  final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
  if (discountObject == null) {
    throw new OpenGammaRuntimeException("Could not get Discount Curve");
  }
  if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
    throw new IllegalArgumentException("Can only handle YieldCurve");
  }
  final YieldCurve discountCurve = (YieldCurve) discountObject;
  final Object spotObject = inputs.getValue(getSpotRequirement(security));
  if (spotObject == null) {
    throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
  }
  final double spot = (Double) spotObject;
  final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
  final double discountFactor = discountCurve.getDiscountFactor(expiry);
  // Bit-pattern comparison: rejects +0.0 exactly (NOTE(review): -0.0 would pass — confirm intent).
  ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
  final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
  final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
  final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
  // 3. Compute and return the value (ComputedValue)
  return computeValues(resultSpec, inputs, deriv, market);
}
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE).get();
}
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName).get();
}
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(id.getScheme().getName(), id.getValue()));
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
final ExternalId id = security.getSpotUnderlyingId();
final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) :
UniqueId.of(id.getScheme().getName(), id.getValue());
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.SECURITY;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getSecurity() instanceof EquityVarianceSwapSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {

  private final String _valueRequirementName;
  private final String _curveDefinitionName;
  private final String _surfaceDefinitionName;
  @SuppressWarnings("unused")
  private final String _forwardCalculationMethod;
  private EquityVarianceSwapConverter _converter; // assigned by init(), not the constructor

  /** CalculationMethod constraint used in configuration to choose this model */
  public static final String CALCULATION_METHOD = "StaticReplication";
  /** Method may be Strike or Moneyness TODO Confirm */
  public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";

  /**
   * Creates the base function.
   *
   * @param valueRequirementName the value this function produces, not null
   * @param curveDefinitionName the named discount curve, not null
   * @param surfaceDefinitionName the named volatility surface, not null
   * @param forwardCalculationMethod the forward calculation method (stored but currently unread), not null
   */
  public EquityVarianceSwapFunction(final String valueRequirementName, final String curveDefinitionName, final String surfaceDefinitionName, final String forwardCalculationMethod) {
    Validate.notNull(valueRequirementName, "value requirement name");
    Validate.notNull(curveDefinitionName, "curve definition name");
    Validate.notNull(surfaceDefinitionName, "surface definition name");
    Validate.notNull(forwardCalculationMethod, "forward calculation method");
    _valueRequirementName = valueRequirementName;
    _curveDefinitionName = curveDefinitionName;
    _surfaceDefinitionName = surfaceDefinitionName;
    _forwardCalculationMethod = forwardCalculationMethod;
  }

  /**
   * Prices the swap by static replication: builds the derivative from the security and the
   * underlying's price history, gathers the market data (volatility surface, discount curve,
   * spot) and hands off to {@link #computeValues}.
   */
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // Step 1: the analytic derivative to be priced
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
    final VarianceSwapDefinition definition = _converter.visitEquityVarianceSwapTrade(security);
    final HistoricalTimeSeries priceHistory = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
    final VarianceSwap derivative = definition.toDerivative(valuationTime, priceHistory.getTimeSeries());
    // Step 2: the market data bundle
    final Object rawSurface = inputs.getValue(getVolatilitySurfaceRequirement(security));
    if (rawSurface == null) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    //TODO no choice of other surfaces
    final BlackVolatilitySurface<?> blackSurface = new BlackVolatilitySurfaceStrike(((VolatilitySurface) rawSurface).getSurface());
    final Object rawCurve = inputs.getValue(getDiscountCurveRequirement(security));
    if (rawCurve == null) {
      throw new OpenGammaRuntimeException("Could not get Discount Curve");
    }
    if (!(rawCurve instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve fundingCurve = (YieldCurve) rawCurve;
    final Object rawSpot = inputs.getValue(getSpotRequirement(security));
    if (rawSpot == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) rawSpot;
    final double timeToLastObservation = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
    final double discountFactor = fundingCurve.getDiscountFactor(timeToLastObservation);
    Validate.isTrue(discountFactor != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
    final ForwardCurve forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); //TODO change this
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackSurface, fundingCurve, forwardCurve);
    // Step 3: delegate the calculation
    return computeValues(target, inputs, derivative, market);
  }

  /** Model-specific calculation performed by the concrete subclass. */
  protected abstract Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);

  /** Specification of the single result this function advertises. */
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), getValueProperties(target));
  }

  /** Currency and calculation-method properties attached to the result. */
  protected ValueProperties getValueProperties(final ComputationTarget target) {
    final String currencyCode = ((EquityVarianceSwapSecurity) target.getSecurity()).getCurrency().getCode();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currencyCode)
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
  }

  /** @return the configured discount curve name */
  protected String getCurveDefinitionName() {
    return _curveDefinitionName;
  }

  /** @return the configured volatility surface name */
  protected String getSurfaceName() {
    return _surfaceDefinitionName;
  }

  /** Market value of the underlying spot. */
  private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getSpotUnderlyingId());
  }

  // Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
  private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
    final ValueProperties curveProperties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), curveProperties);
  }

  /** Interpolated volatility surface; Bloomberg tickers are weakened before lookup. */
  private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
    final ValueProperties surfaceProperties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, _surfaceDefinitionName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION")
        .get();
    final ExternalId underlying = security.getSpotUnderlyingId();
    final ExternalId surfaceId = ExternalSchemes.BLOOMBERG_TICKER.equals(underlying.getScheme())
        ? ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, underlying.getValue())
        : underlying;
    return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, surfaceId, surfaceProperties);
  }

  /** Price-history requirement for the underlying, or null when it cannot be resolved. */
  private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult resolution = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    return resolution == null ? null
        : HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolution, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
  }

  @Override
  public void init(final FunctionCompilationContext context) {
    _converter = new EquityVarianceSwapConverter(OpenGammaCompilationContext.getHolidaySource(context));
  }

  @Override
  public ComputationTargetType getTargetType() {
    return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final ValueRequirement htsRequirement = getTimeSeriesRequirement(context, security);
    if (htsRequirement == null) {
      // No resolvable price history for the underlying: the function cannot apply.
      return null;
    }
    final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
    requirements.add(getDiscountCurveRequirement(security));
    requirements.add(getSpotRequirement(security));
    requirements.add(getVolatilitySurfaceRequirement(security));
    requirements.add(htsRequirement);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(getValueSpecification(target));
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
// Name of the value requirement this function produces.
private final String _valueRequirementName;
// Named discount curve used for funding.
private final String _curveDefinitionName;
// Named volatility surface used for static replication.
private final String _surfaceDefinitionName;
// Stored but never read in this class — presumably reserved for choosing how the forward is built; TODO confirm.
@SuppressWarnings("unused")
private final String _forwardCalculationMethod;
private EquityVarianceSwapConverter _converter; // set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * Creates the base function.
 *
 * @param valueRequirementName the value this function produces, not null
 * @param curveDefinitionName the discount curve name, not null
 * @param surfaceDefinitionName the volatility surface name, not null
 * @param forwardCalculationMethod the forward calculation method (currently unread), not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName, final String curveDefinitionName, final String surfaceDefinitionName, final String forwardCalculationMethod) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(curveDefinitionName, "curve definition name");
Validate.notNull(surfaceDefinitionName, "surface definition name");
Validate.notNull(forwardCalculationMethod, "forward calculation method");
_valueRequirementName = valueRequirementName;
_curveDefinitionName = curveDefinitionName;
_surfaceDefinitionName = surfaceDefinitionName;
_forwardCalculationMethod = forwardCalculationMethod;
}
/**
 * Prices the variance swap by static replication: builds the analytic derivative, assembles
 * the market data bundle and delegates the calculation to {@link #computeValues}.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
// NOTE(review): unlike the other inputs below, the time series is not null-checked — a
// missing series surfaces as an NPE on the next line rather than a diagnostic exception.
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
Validate.isTrue(discountFactor != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
// 3. Compute and return the value (ComputedValue)
return computeValues(target, inputs, deriv, market);
}
/** Model-specific calculation performed by the concrete subclass. */
protected abstract Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Specification of the single result this function advertises. */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Currency and calculation-method properties attached to the result. */
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
}
/** @return the configured discount curve name */
protected String getCurveDefinitionName() {
return _curveDefinitionName;
}
/** @return the configured volatility surface name */
protected String getSurfaceName() {
return _surfaceDefinitionName;
}
/** Market value of the underlying spot. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
/** Interpolated volatility surface; Bloomberg tickers are mapped to weak tickers before lookup. */
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, _surfaceDefinitionName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION")
.get();
ExternalId id = security.getSpotUnderlyingId();
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
/** Price-history requirement for the underlying, or null when it cannot be resolved. */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
ValueRequirement requirement;
requirements.add(getDiscountCurveRequirement(security));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security));
// If the underlying's price history cannot be resolved the function is inapplicable.
requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
/**
 * Base class for functions that price an {@link EquityVarianceSwapSecurity} using static replication.
 * Concrete subclasses implement {@link #computeValues} to produce the requested value from the
 * derivative and the assembled market data bundle.
 */
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
/** Name of the value requirement this function produces (set at construction, never null). */
private final String _valueRequirementName;
/** Converts the security into an analytics definition; set in {@link #init}. */
private EquityVarianceSwapConverter _converter;
// set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the variance-swap derivative and market data bundle from the resolved inputs,
 * then delegates pricing to {@link #computeValues}.
 *
 * @throws OpenGammaRuntimeException if the volatility surface, discount curve or spot value is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
//TODO remove me - just for testing
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2);
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) {
//TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
// A zero discount factor indicates a broken curve, not a legitimate price.
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
//TODO change this
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve());
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
/** Prices the derivative; implemented by concrete subclasses. */
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Result specification with wildcard curve/config/surface properties, used by getResults. */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Result specification with the concrete curve/config/surface names resolved from constraints. */
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Wildcard properties advertised before constraints are known. */
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties().with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).get();
}
/** Fully-specified properties once curve, calculation config and surface are known. */
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties().with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).get();
}
/** Market value of the swap's spot underlier. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
// FIX: previously referenced an undefined field (_curveDefinitionName) and ignored both
// parameters; use the supplied curve name and calculation config so the requirement
// matches the constraints extracted in getRequirements/execute.
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
// FIX: previously referenced an undefined field (_surfaceDefinitionName) and ignored the
// surfaceName parameter; use the supplied surface name.
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION").get();
ExternalId id = security.getSpotUnderlyingId();
// Weaken Bloomberg tickers so live and historical data resolve to the same surface.
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
/** Resolves the underlier's market-value time series; null if it cannot be resolved. */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
/**
 * Requires exactly one curve, one curve calculation config and one surface constraint;
 * returns null (meaning "cannot satisfy") otherwise, or if the time series cannot be resolved.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
/**
 * Base class for functions that price an {@link EquityVarianceSwapSecurity} using static replication.
 * Concrete subclasses implement {@link #computeValues} to produce the requested value from the
 * derivative and the assembled market data bundle.
 */
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
/** Name of the value requirement this function produces (set at construction, never null). */
private final String _valueRequirementName;
/** Converts the security into an analytics definition; set in {@link #init}. */
private EquityVarianceSwapConverter _converter;
// set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the variance-swap derivative and market data bundle from the resolved inputs,
 * then delegates pricing to {@link #computeValues}.
 *
 * @throws OpenGammaRuntimeException if the volatility surface, discount curve or spot value is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
//TODO remove me - just for testing
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2);
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) {
//TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
// A zero discount factor indicates a broken curve, not a legitimate price.
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
//TODO change this
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve());
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
/** Prices the derivative; implemented by concrete subclasses. */
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Result specification with wildcard curve/config/surface properties, used by getResults. */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Result specification with the concrete curve/config/surface names resolved from constraints. */
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Wildcard properties advertised before constraints are known. */
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties().with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).get();
}
/** Fully-specified properties once curve, calculation config and surface are known. */
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties().with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).get();
}
/** Market value of the swap's spot underlier. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
// FIX: previously referenced an undefined field (_curveDefinitionName) and ignored both
// parameters; use the supplied curve name and calculation config so the requirement
// matches the constraints extracted in getRequirements/execute.
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
// FIX: previously referenced an undefined field (_surfaceDefinitionName) and ignored the
// surfaceName parameter; use the supplied surface name.
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION").get();
ExternalId id = security.getSpotUnderlyingId();
// Weaken Bloomberg tickers so live and historical data resolve to the same surface.
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
/** Resolves the underlier's market-value time series; null if it cannot be resolved. */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
/**
 * Requires exactly one curve, one curve calculation config and one surface constraint;
 * returns null (meaning "cannot satisfy") otherwise, or if the time series cannot be resolved.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
private final String _valueRequirementName;
private EquityVarianceSwapConverter _converter;
// set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the variance-swap derivative and the market data bundle from the resolved inputs,
 * then delegates pricing to computeValues.
 *
 * @throws OpenGammaRuntimeException if the volatility surface, discount curve or spot value is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
//TODO remove me - just for testing (valuation time shifted back two years)
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2);
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces - always interpreted as a strike-parameterized surface
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) {
//TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
// A zero discount factor indicates a broken curve, not a legitimate price.
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
//TODO change this - forward curve built from spot and the discount curve only
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve());
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Result specification with wildcard curve/config/surface properties. */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
return new ValueSpecification(_valueRequirementName, target.toSpecification(), getValueProperties(target));
}
/** Result specification with the concrete curve, calculation config and surface names. */
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
return new ValueSpecification(_valueRequirementName, target.toSpecification(),
    getValueProperties(target, curveName, curveCalculationConfig, surfaceName));
}
/** Wildcard properties advertised before curve/config/surface constraints are known. */
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity swapSecurity = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
    .with(ValuePropertyNames.CURRENCY, swapSecurity.getCurrency().getCode())
    .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
    .withAny(ValuePropertyNames.CURVE)
    .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
    .withAny(ValuePropertyNames.SURFACE)
    .get();
}
/** Properties for the final result: all constraint values pinned to the resolved names. */
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
  final String currencyCode = ((EquityVarianceSwapSecurity) target.getSecurity()).getCurrency().getCode();
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .get();
}
/** Market-value requirement for the swap's spot underlying. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getSpotUnderlyingId());
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
/** Yield-curve requirement for the security's currency, pinned to the given curve and calculation config. */
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Requirement for the interpolated equity-option volatility surface named {@code surfaceName},
 * targeted at the spot underlying's identifier.
 */
// NOTE(review): UniqueId is not in the visible import list for this file - confirm
// com.opengamma.id.UniqueId is imported, otherwise this method does not compile.
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION).get();
final ExternalId id = security.getSpotUnderlyingId();
// Bloomberg tickers are mapped onto the "weak" ticker scheme for surface lookups.
final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) : UniqueId.of(id.getScheme().getName(), id.getValue());
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
/**
 * Yield-curve requirement using a configured default curve name.
 */
// NOTE(review): _curveDefinitionName is not declared anywhere in this class as shown - this
// overload cannot compile and is never called from the visible code (getRequirements uses the
// three-argument overload). Candidate for removal; confirm no external subclass relies on it.
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
/**
 * Volatility-surface requirement using a configured default surface name.
 */
// NOTE(review): _surfaceDefinitionName is not declared anywhere in this class as shown - this
// overload cannot compile and is never called from the visible code. Candidate for removal.
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, _surfaceDefinitionName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION").get();
ExternalId id = security.getSpotUnderlyingId();
// Bloomberg tickers are weakened so the surface is shared across data providers.
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
/**
 * Historical market-value time-series requirement for the spot underlying (needed to accrue
 * realized variance in toDerivative). Returns null when the resolver cannot locate a series,
 * which causes getRequirements to reject the target.
 */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
// Full history up to (and including) the valuation time.
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
  // Build the security-to-analytics converter once per compilation.
  _converter = new EquityVarianceSwapConverter(OpenGammaCompilationContext.getHolidaySource(context));
}
/** This function applies only to equity variance swap securities. */
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
/**
 * Builds the four input requirements (discount curve, spot, volatility surface, price history).
 * Returns null - meaning this function cannot satisfy the desired value - when any of the
 * curve/config/surface constraints is absent or ambiguous, or when no time series resolves.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  final Set<String> curves = constraints.getValues(ValuePropertyNames.CURVE);
  final Set<String> configs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  final Set<String> surfaces = constraints.getValues(ValuePropertyNames.SURFACE);
  // Every constraint must be pinned to exactly one value.
  if (curves == null || curves.size() != 1
      || configs == null || configs.size() != 1
      || surfaces == null || surfaces.size() != 1) {
    return null;
  }
  final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
  final ValueRequirement timeSeriesRequirement = getTimeSeriesRequirement(context, security);
  if (timeSeriesRequirement == null) {
    return null;
  }
  final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
  requirements.add(getDiscountCurveRequirement(security, Iterables.getOnlyElement(curves), Iterables.getOnlyElement(configs)));
  requirements.add(getSpotRequirement(security));
  requirements.add(getVolatilitySurfaceRequirement(security, Iterables.getOnlyElement(surfaces)));
  requirements.add(timeSeriesRequirement);
  return requirements;
}
/** Advertises the single result with open (wildcard) curve/config/surface properties. */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
 * Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
 */
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {

  /** CalculationMethod constraint used in configuration to choose this model */
  public static final String CALCULATION_METHOD = "StaticReplication";
  /** Method may be Strike or Moneyness TODO Confirm */
  public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";

  /** Name of the single value requirement this function produces. */
  private final String _valueRequirementName;
  /** Converts the security into an analytics definition; assigned in {@link #init}, not the constructor. */
  private EquityVarianceSwapConverter _converter;

  /**
   * @param valueRequirementName the name of the value this function computes, not null
   */
  public EquityVarianceSwapFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
    final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    // 1. Build the analytic derivative to be priced
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final Clock snapshotClock = executionContext.getValuationClock();
    //TODO remove me - just for testing
    final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2);
    final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
    final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
    final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
    // 2. Build up the market data bundle
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
    if (volSurfaceObject == null) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
    //TODO no choice of other surfaces
    final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
    final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
    if (discountObject == null) {
      throw new OpenGammaRuntimeException("Could not get Discount Curve");
    }
    if (!(discountObject instanceof YieldCurve)) {
      //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve discountCurve = (YieldCurve) discountObject;
    final Object spotObject = inputs.getValue(getSpotRequirement(security));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    // Sanity-check the discount factor at expiry; an exact zero indicates bad curve data.
    final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
    final double discountFactor = discountCurve.getDiscountFactor(expiry);
    ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
    //TODO change this
    final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve());
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
    final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
    // 3. Compute and return the value (ComputedValue)
    return computeValues(resultSpec, inputs, deriv, market);
  }

  /**
   * Prices the derivative against the market data bundle and wraps the result(s) as
   * ComputedValues carrying {@code resultSpec}. Implemented by concrete subclasses.
   */
  protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);

  /** Result specification with wildcard curve/config/surface properties; used by {@link #getResults}. */
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final ValueProperties properties = getValueProperties(target);
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
  }

  /** Result specification with fully-resolved curve, curve-configuration and surface properties. */
  protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
    final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
    return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
  }

  protected ValueProperties getValueProperties(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .withAny(ValuePropertyNames.SURFACE)
        .get();
  }

  protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .get();
  }

  /** Market-value requirement for the swap's spot underlying. */
  private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
    final ExternalId id = security.getSpotUnderlyingId();
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
  }

  // Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
  private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
  }

  // NOTE(review): UniqueId is not in the visible import list - confirm com.opengamma.id.UniqueId is imported.
  private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION).get();
    final ExternalId id = security.getSpotUnderlyingId();
    // Bloomberg tickers are mapped onto the "weak" ticker scheme for surface lookups.
    final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) : UniqueId.of(id.getScheme().getName(), id.getValue());
    return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }

  // NOTE(review): removed two unused private overloads, getDiscountCurveRequirement(security) and
  // getVolatilitySurfaceRequirement(security); both referenced fields that are never declared in
  // this class (_curveDefinitionName / _surfaceDefinitionName) and therefore could not compile.

  /**
   * Historical market-value time-series requirement for the spot underlying; null when the
   * resolver cannot locate a series (getRequirements then rejects the target).
   */
  private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
  }

  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    _converter = new EquityVarianceSwapConverter(holidaySource);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    // Each constraint must be pinned to exactly one value before requirements can be built.
    final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
    if (curveNames == null || curveNames.size() != 1) {
      return null;
    }
    final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
      return null;
    }
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      return null;
    }
    final String curveName = Iterables.getOnlyElement(curveNames);
    final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
    final String surfaceName = Iterables.getOnlyElement(surfaceNames);
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
    requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
    requirements.add(getSpotRequirement(security));
    requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
    final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
    if (requirement == null) {
      return null;
    }
    requirements.add(requirement);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(getValueSpecification(target));
  }
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;

import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;

import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;

import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.UniqueId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
// Name of the single value requirement this function produces.
private final String _valueRequirementName;
// Converts the security into an analytics definition; assigned in init(), not the constructor.
private EquityVarianceSwapConverter _converter;
// set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function computes, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the variance-swap derivative and its market data bundle from the resolved inputs,
 * then delegates pricing to {@link #computeValues}.
 * Throws OpenGammaRuntimeException when a required input (surface, curve, spot) is missing.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
// NOTE(review): valuation time is shifted back two years here - the TODO below says this is a
// temporary testing hack; it must be removed before production use.
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2); //TODO remove me - just for testing
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// Sanity-check the discount factor at expiry; an exact zero indicates bad curve data.
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
/**
 * Prices the derivative against the market data bundle and wraps the result(s) as
 * ComputedValues carrying {@code resultSpec}. Implemented by concrete subclasses.
 */
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Result specification with wildcard curve/config/surface properties; used before constraints resolve. */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
  return new ValueSpecification(_valueRequirementName, target.toSpecification(), getValueProperties(target));
}
/** Result specification with fully-resolved curve, curve-configuration and surface property values. */
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
  return new ValueSpecification(_valueRequirementName, target.toSpecification(), getValueProperties(target, curveName, curveCalculationConfig, surfaceName));
}
/** Properties advertised by getResults: currency and calculation method fixed, the rest left open. */
protected ValueProperties getValueProperties(final ComputationTarget target) {
  final String currencyCode = ((EquityVarianceSwapSecurity) target.getSecurity()).getCurrency().getCode();
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
      .withAny(ValuePropertyNames.CURVE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
      .withAny(ValuePropertyNames.SURFACE)
      .get();
}
/** Properties for the final result: all constraint values pinned to the resolved names. */
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
  final String currencyCode = ((EquityVarianceSwapSecurity) target.getSecurity()).getCurrency().getCode();
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .get();
}
/** Market-value requirement for the swap's spot underlying. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getSpotUnderlyingId());
}
/**
 * Requirement for the interpolated equity-option volatility surface named {@code surfaceName}.
 * Bloomberg tickers are mapped onto the "weak" ticker scheme for surface lookups.
 */
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
  final ValueProperties surfaceProperties = ValueProperties.builder()
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
      .get();
  final ExternalId underlyingId = security.getSpotUnderlyingId();
  final UniqueId surfaceTargetId;
  if (underlyingId.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER)) {
    surfaceTargetId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), underlyingId.getValue());
  } else {
    surfaceTargetId = UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue());
  }
  return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, surfaceTargetId, surfaceProperties);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
<<<<<<< MINE
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
=======
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
>>>>>>> YOURS
// NOTE(review): removed the unused getVolatilitySurfaceRequirement(security) overload. It
// referenced a field that is never declared in this class (_surfaceDefinitionName) and so could
// not compile; all callers use the getVolatilitySurfaceRequirement(security, surfaceName)
// overload defined above.
/**
 * Historical market-value time-series requirement for the spot underlying (needed to accrue
 * realized variance in toDerivative). Returns null when no series resolves, which causes
 * getRequirements to reject the target.
 */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
// Full history up to (and including) the valuation time.
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
// Build the security -> analytics-definition converter once per compilation; it needs a holiday
// source for observation-date handling.
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
// This function applies only to equity variance swap securities.
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
/**
 * Requires a discount curve, the underlying's spot value, a volatility surface and the
 * underlying's price history. Returns null (function does not apply) unless the desired value
 * constrains exactly one curve name, one curve calculation configuration and one surface name,
 * or if no time series can be resolved for the underlying.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
  final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
  // Each of the three constraints must be present and unambiguous.
  if (curveNames == null || curveNames.size() != 1
      || curveCalculationConfigs == null || curveCalculationConfigs.size() != 1
      || surfaceNames == null || surfaceNames.size() != 1) {
    return null;
  }
  final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
  final ValueRequirement timeSeriesRequirement = getTimeSeriesRequirement(context, security);
  if (timeSeriesRequirement == null) {
    return null;
  }
  final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
  requirements.add(getDiscountCurveRequirement(security, Iterables.getOnlyElement(curveNames), Iterables.getOnlyElement(curveCalculationConfigs)));
  requirements.add(getSpotRequirement(security));
  requirements.add(getVolatilitySurfaceRequirement(security, Iterables.getOnlyElement(surfaceNames)));
  requirements.add(timeSeriesRequirement);
  return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
// Advertise a single result with wildcard curve/config/surface properties; the concrete values
// are filled in during execute().
return Collections.singleton(getValueSpecification(target));
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
private final String _valueRequirementName;
private EquityVarianceSwapConverter _converter;
// set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2); //TODO remove me - just for testing
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE).get();
}
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName).get();
}
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
}
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
final ExternalId id = security.getSpotUnderlyingId();
final UniqueId newId = id.getScheme().equals(ExternalSchemes.BLOOMBERG_TICKER) ? UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), id.getValue()) :
UniqueId.of(id.getScheme().getName(), id.getValue());
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
<<<<<<< MINE
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
}
=======
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
>>>>>>> YOURS
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, _surfaceDefinitionName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, "EQUITY_OPTION")
.get();
ExternalId id = security.getSpotUnderlyingId();
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
/** Name of the value this function produces, fixed at construction. */
private final String _valueRequirementName;
private EquityVarianceSwapConverter _converter; // set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Prices the variance swap by static replication: (1) converts the security to its analytic
 * derivative using the underlying's observed price history, (2) assembles the market data
 * bundle (volatility surface, discount curve, spot-derived forward curve), then (3) delegates
 * the final calculation to {@link #computeValues}.
 *
 * @throws OpenGammaRuntimeException if the surface, curve or spot input is missing
 * @throws IllegalArgumentException if the resolved curve is not a YieldCurve
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
// NOTE(review): the valuation time is shifted back two years — the TODO below marks this as a
// testing artefact that should not ship.
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2); //TODO remove me - just for testing
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
// Guard against a degenerate curve: a zero discount bond price indicates bad rate data.
final double discountFactor = discountCurve.getDiscountFactor(expiry);
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
/**
 * Computes the result value(s) for the priced derivative; implemented by subclasses for the
 * specific value being produced.
 */
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
/** Result specification with wildcard curve/config/surface properties (used from getResults()). */
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/** Result specification with the curve, curve calculation configuration and surface resolved. */
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
/**
 * Result properties advertised before the inputs are known: currency and calculation method
 * are fixed, curve/config/surface are wildcards.
 */
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE).get();
}
/** Fully-resolved result properties with concrete curve, configuration and surface names. */
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName).get();
}
/** Builds the market-value (spot) requirement for the swap's underlying. */
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getSpotUnderlyingId());
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
<<<<<<< MINE
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
=======
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
>>>>>>> YOURS
}
/**
 * Builds the interpolated-volatility-surface requirement for the underlying. Strong Bloomberg
 * tickers are re-mapped to the weak ticker scheme before the lookup.
 */
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
ExternalId id = security.getSpotUnderlyingId();
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
/**
 * Builds the requirement for the underlying's market-value time series, used to roll the
 * already-observed leg of the swap into the derivative. Returns null if no series resolves.
 */
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
// Build the security -> analytics-definition converter once per compilation.
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
// This function applies only to equity variance swap securities.
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
/**
 * Requires a discount curve, the underlying's spot value, a volatility surface and the
 * underlying's price history. Returns null (function does not apply) unless each of the
 * curve/config/surface constraints is present and unambiguous, or if no time series can be
 * resolved for the underlying.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
// Single wildcard result; properties are narrowed when the value is actually computed.
return Collections.singleton(getValueSpecification(target));
}
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.definition.VarianceSwapDefinition;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceStrike;
import com.opengamma.analytics.financial.model.volatility.surface.VolatilitySurface;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.equity.EquityVarianceSwapConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.ArgumentChecker;
/**
* Base class for Functions for EquityVarianceSwapSecurity. These functions price using Static Replication
*/
public abstract class EquityVarianceSwapFunction extends AbstractFunction.NonCompiledInvoker {
private final String _valueRequirementName;
private EquityVarianceSwapConverter _converter; // set in init()
/** CalculationMethod constraint used in configuration to choose this model */
public static final String CALCULATION_METHOD = "StaticReplication";
/** Method may be Strike or Moneyness TODO Confirm */
public static final String STRIKE_PARAMETERIZATION_METHOD = "StrikeParameterizationMethod";
public EquityVarianceSwapFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final String curveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
// 1. Build the analytic derivative to be priced
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime().minusYears(2); //TODO remove me - just for testing
final VarianceSwapDefinition defn = _converter.visitEquityVarianceSwapTrade(security);
final HistoricalTimeSeries timeSeries = (HistoricalTimeSeries) inputs.getValue(ValueRequirementNames.HISTORICAL_TIME_SERIES);
final VarianceSwap deriv = defn.toDerivative(now, timeSeries.getTimeSeries());
// 2. Build up the market data bundle
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(security, surfaceName));
if (volSurfaceObject == null) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final VolatilitySurface volSurface = (VolatilitySurface) volSurfaceObject;
//TODO no choice of other surfaces
final BlackVolatilitySurface<?> blackVolSurf = new BlackVolatilitySurfaceStrike(volSurface.getSurface());
final Object discountObject = inputs.getValue(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
if (discountObject == null) {
throw new OpenGammaRuntimeException("Could not get Discount Curve");
}
if (!(discountObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve discountCurve = (YieldCurve) discountObject;
final Object spotObject = inputs.getValue(getSpotRequirement(security));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
final double expiry = TimeCalculator.getTimeBetween(executionContext.getValuationClock().zonedDateTime(), security.getLastObservationDate());
final double discountFactor = discountCurve.getDiscountFactor(expiry);
ArgumentChecker.isTrue(Double.doubleToLongBits(discountFactor) != 0, "The discount curve has returned a zero value for a discount bond. Check rates.");
final ForwardCurve forwardCurve = new ForwardCurve(spot, discountCurve.getCurve()); //TODO change this
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, discountCurve, forwardCurve);
final ValueSpecification resultSpec = getValueSpecification(target, curveName, curveCalculationConfig, surfaceName);
// 3. Compute and return the value (ComputedValue)
return computeValues(resultSpec, inputs, deriv, market);
}
protected abstract Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market);
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final ValueProperties properties = getValueProperties(target);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final ValueProperties properties = getValueProperties(target, curveName, curveCalculationConfig, surfaceName);
return new ValueSpecification(_valueRequirementName, target.toSpecification(), properties);
}
protected ValueProperties getValueProperties(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE).get();
}
protected ValueProperties getValueProperties(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
return createValueProperties()
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName).get();
}
private ValueRequirement getSpotRequirement(final EquityVarianceSwapSecurity security) {
final ExternalId id = security.getSpotUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, id);
}
// Note that createValueProperties is _not_ used - use will mean the engine can't find the requirement
<<<<<<< MINE
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security, final String curveName, final String curveCalculationConfig) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
=======
private ValueRequirement getDiscountCurveRequirement(final EquityVarianceSwapSecurity security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, _curveDefinitionName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(security.getCurrency()), properties);
>>>>>>> YOURS
}
private ValueRequirement getVolatilitySurfaceRequirement(final EquityVarianceSwapSecurity security, final String surfaceName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
ExternalId id = security.getSpotUnderlyingId();
if (ExternalSchemes.BLOOMBERG_TICKER.equals(id.getScheme())) {
id = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, id.getValue());
}
return new ValueRequirement(ValueRequirementNames.INTERPOLATED_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, id, properties);
}
private ValueRequirement getTimeSeriesRequirement(final FunctionCompilationContext context, final EquityVarianceSwapSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getSpotUnderlyingId().toBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE, DateConstraint.EARLIEST_START, true, DateConstraint.VALUATION_TIME, true);
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
_converter = new EquityVarianceSwapConverter(holidaySource);
}
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_VARIANCE_SWAP_SECURITY;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
return null;
}
final Set<String> curveCalculationConfigs = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigs == null || curveCalculationConfigs.size() != 1) {
return null;
}
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String curveName = Iterables.getOnlyElement(curveNames);
final String curveCalculationConfig = Iterables.getOnlyElement(curveCalculationConfigs);
final String surfaceName = Iterables.getOnlyElement(surfaceNames);
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
requirements.add(getDiscountCurveRequirement(security, curveName, curveCalculationConfig));
requirements.add(getSpotRequirement(security));
requirements.add(getVolatilitySurfaceRequirement(security, surfaceName));
final ValueRequirement requirement = getTimeSeriesRequirement(context, security);
if (requirement == null) {
return null;
}
requirements.add(requirement);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(getValueSpecification(target));
}
}
Diff Result
No diff
Case 46 - java_ogplatform.rev_093b1_84fff..EquityVarianceSwapYieldCurveNodeSensitivityFunction.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {

  /** Calculator producing the bucketed deltas (sensitivity to each funding-curve node). */
  private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();

  public EquityVarianceSwapYieldCurveNodeSensitivityFunction(String curveDefinitionName, String surfaceDefinitionName, String forwardCalculationMethod) {
    super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, curveDefinitionName, surfaceDefinitionName, forwardCalculationMethod);
  }

  /**
   * Computes bucketed curve-node sensitivities and labels them against the resolved
   * interpolated curve specification.
   *
   * @throws OpenGammaRuntimeException if the curve specification input is missing
   */
  @Override
  protected Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
    final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
    final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency()));
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Curve specification was null");
    }
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final ValueSpecification resultSpec = getValueSpecification(target);
    final YieldCurveBundle curveMap = new YieldCurveBundle();
    curveMap.setCurve(getCurveDefinitionName(), market.getDiscountCurve());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(getCurveDefinitionName(), curveMap, sensitivities, curveSpec, resultSpec);
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
    // FIX: guard against the superclass declining the requirement resolution - the engine
    // contract allows getRequirements to return null, and the previous unconditional
    // result.add(...) would have thrown an NPE instead of cleanly declining.
    if (result == null) {
      return null;
    }
    result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity())));
    return result;
  }

  /** Result specification carrying curve, currency and calculation-method properties. */
  @Override
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final ValueProperties properties = createValueProperties()
        .with(ValuePropertyNames.CURVE, getCurveDefinitionName())
        .with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .get();
    return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
  }

  /** Requirement for the interpolated curve specification of the configured curve. */
  private ValueRequirement getCurveSpecRequirement(final Currency currency) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {

  /** Calculator producing the bucketed deltas (sensitivity to each funding-curve node). */
  private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();

  public EquityVarianceSwapYieldCurveNodeSensitivityFunction(String curveDefinitionName, String surfaceDefinitionName, String forwardCalculationMethod) {
    super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, curveDefinitionName, surfaceDefinitionName, forwardCalculationMethod);
  }

  /**
   * Computes bucketed curve-node sensitivities and labels them against the resolved
   * interpolated curve specification.
   *
   * @throws OpenGammaRuntimeException if the curve specification input is missing
   */
  @Override
  protected Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
    final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
    final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency()));
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Curve specification was null");
    }
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final ValueSpecification resultSpec = getValueSpecification(target);
    final YieldCurveBundle curveMap = new YieldCurveBundle();
    curveMap.setCurve(getCurveDefinitionName(), market.getDiscountCurve());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(getCurveDefinitionName(), curveMap, sensitivities, curveSpec, resultSpec);
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
    // FIX: guard against the superclass declining the requirement resolution - the engine
    // contract allows getRequirements to return null, and the previous unconditional
    // result.add(...) would have thrown an NPE instead of cleanly declining.
    if (result == null) {
      return null;
    }
    result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity())));
    return result;
  }

  /** Result specification carrying curve, currency and calculation-method properties. */
  @Override
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final ValueProperties properties = createValueProperties()
        .with(ValuePropertyNames.CURVE, getCurveDefinitionName())
        .with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .get();
    return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
  }

  /** Requirement for the interpolated curve specification of the configured curve. */
  private ValueRequirement getCurveSpecRequirement(final Currency currency) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
  }
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
// Calculator producing bucketed deltas (sensitivity to each funding-curve node).
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
/**
 * Computes bucketed curve-node sensitivities and labels them against the interpolated
 * curve specification. The curve name is read back from the result specification's
 * CURVE property rather than from any configured field.
 *
 * @throws OpenGammaRuntimeException if the curve specification input is missing
 */
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
// Register the discount curve under the requested name so the helper can label nodes.
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
/**
 * Adds the curve-specification requirement to the superclass requirements; declines
 * (returns null) if the superclass declines or a single curve name is not constrained.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
// Compile-time result specification: curve / config / surface are left as wildcards.
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
// Execution-time result specification: curve / config / surface fully resolved.
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
// Requirement for the named curve's interpolated specification in the given currency.
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
  // Requirement for the interpolated specification of the named curve,
  // targeted at the currency's primitive unique id.
  final ValueProperties curveProperty = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, curveName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), curveProperty);
}
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
  /** Calculator producing bucketed yield-curve sensitivities for a variance swap. */
  private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();

  /**
   * @param curveDefinitionName the name of the yield curve definition to use
   * @param surfaceDefinitionName the name of the volatility surface definition to use
   * @param forwardCalculationMethod the method used to calculate the forward
   */
  public EquityVarianceSwapYieldCurveNodeSensitivityFunction(String curveDefinitionName, String surfaceDefinitionName, String forwardCalculationMethod) {
    super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, curveDefinitionName, surfaceDefinitionName, forwardCalculationMethod);
  }

  /**
   * Computes the bucketed sensitivities of the swap PV to the nodes of the configured curve.
   *
   * @throws OpenGammaRuntimeException if the curve specification input is missing
   */
  @Override
  protected Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
    final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
    final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency()));
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Curve specification was null");
    }
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final ValueSpecification resultSpec = getValueSpecification(target);
    // Only the discount curve enters the bundle; it is keyed by the configured definition name.
    final YieldCurveBundle curveMap = new YieldCurveBundle();
    curveMap.setCurve(getCurveDefinitionName(), market.getDiscountCurve());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(getCurveDefinitionName(), curveMap, sensitivities, curveSpec, resultSpec);
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
    if (result == null) {
      // FIX: the superclass signals "cannot satisfy" with null (see the other variants of
      // this function in this file); propagate that instead of NPE-ing on add() below.
      return null;
    }
    result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity())));
    return result;
  }

  /** Builds the fully-resolved result specification for the configured curve. */
  @Override
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final String currencyCode = security.getCurrency().getCode();
    final ValueProperties properties = createValueProperties()
        .with(ValuePropertyNames.CURVE, getCurveDefinitionName())
        .with(ValuePropertyNames.CURVE_CURRENCY, currencyCode)
        .with(ValuePropertyNames.CURRENCY, currencyCode)
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .get();
    return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
  }

  /** Requirement for the interpolated specification of the configured curve in the given currency. */
  private ValueRequirement getCurveSpecRequirement(final Currency currency) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
  /** Calculator producing bucketed yield-curve sensitivities for a variance swap. */
  private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();

  /**
   * @param curveDefinitionName the name of the yield curve definition to use
   * @param surfaceDefinitionName the name of the volatility surface definition to use
   * @param forwardCalculationMethod the method used to calculate the forward
   */
  public EquityVarianceSwapYieldCurveNodeSensitivityFunction(String curveDefinitionName, String surfaceDefinitionName, String forwardCalculationMethod) {
    super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, curveDefinitionName, surfaceDefinitionName, forwardCalculationMethod);
  }

  /**
   * Computes the bucketed sensitivities of the swap PV to the nodes of the configured curve.
   *
   * @throws OpenGammaRuntimeException if the curve specification input is missing
   */
  @Override
  protected Set<ComputedValue> computeValues(final ComputationTarget target, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
    final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
    final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency()));
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Curve specification was null");
    }
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final ValueSpecification resultSpec = getValueSpecification(target);
    // Only the discount curve enters the bundle; it is keyed by the configured definition name.
    final YieldCurveBundle curveMap = new YieldCurveBundle();
    curveMap.setCurve(getCurveDefinitionName(), market.getDiscountCurve());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(getCurveDefinitionName(), curveMap, sensitivities, curveSpec, resultSpec);
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
    if (result == null) {
      // FIX: the superclass signals "cannot satisfy" with null (see the other variants of
      // this function in this file); propagate that instead of NPE-ing on add() below.
      return null;
    }
    result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity())));
    return result;
  }

  /** Builds the fully-resolved result specification for the configured curve. */
  @Override
  protected ValueSpecification getValueSpecification(final ComputationTarget target) {
    final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
    final String currencyCode = security.getCurrency().getCode();
    final ValueProperties properties = createValueProperties()
        .with(ValuePropertyNames.CURVE, getCurveDefinitionName())
        .with(ValuePropertyNames.CURVE_CURRENCY, currencyCode)
        .with(ValuePropertyNames.CURRENCY, currencyCode)
        .with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
        .get();
    return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
  }

  /** Requirement for the interpolated specification of the configured curve in the given currency. */
  private ValueRequirement getCurveSpecRequirement(final Currency currency) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
  }
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
/**
 * Creates an instance that produces yield-curve node sensitivities. The curve name is not
 * fixed at construction: it is taken from the CURVE constraint of each requested value
 * (see getRequirements below), unlike the older constructor-configured variants.
 */
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
  // Bucketed deltas of the swap PV with respect to the curve nodes.
  final DoubleMatrix1D bucketedDeltas = CALCULATOR.calcDeltaBucketed(derivative, market);
  // The curve to report against is named on the result specification.
  final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
  final Object rawCurveSpec = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
  if (rawCurveSpec == null) {
    throw new OpenGammaRuntimeException("Curve specification was null");
  }
  final InterpolatedYieldCurveSpecificationWithSecurities interpolatedSpec = (InterpolatedYieldCurveSpecificationWithSecurities) rawCurveSpec;
  final YieldCurveBundle bundle = new YieldCurveBundle();
  bundle.setCurve(curveName, market.getDiscountCurve());
  return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, bucketedDeltas, interpolatedSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  // Delegate to the superclass first; a null result means it cannot handle the request.
  final Set<ValueRequirement> requirements = super.getRequirements(context, target, desiredValue);
  if (requirements == null) {
    return null;
  }
  // Exactly one curve name must be supplied via the CURVE constraint.
  final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  final boolean hasSingleCurve = curveNames != null && curveNames.size() == 1;
  if (!hasSingleCurve) {
    s_logger.error("Must specify a curve name");
    return null;
  }
  final String curveName = Iterables.getOnlyElement(curveNames);
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getSecurity());
  requirements.add(getCurveSpecRequirement(currency, curveName));
  return requirements;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().withAny(ValuePropertyNames.CURVE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).withAny(ValuePropertyNames.SURFACE).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties().with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode()).with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD).get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
<<<<<<< MINE
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
=======
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
>>>>>>> YOURS
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
<<<<<<< MINE
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
=======
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
>>>>>>> YOURS
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
<<<<<<< MINE
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
=======
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
>>>>>>> YOURS
}
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.variance;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.variance.VarianceSwapSensitivityCalculator;
import com.opengamma.analytics.financial.equity.variance.derivative.VarianceSwap;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.equity.EquityVarianceSwapSecurity;
import com.opengamma.util.money.Currency;
/**
* Calculates the sensitivity of the present value (PV) to a change in the funding rate from valuation to settlement.
* In this formulation, Rates enter the pricing of a VarianceSwap in two places: in the discounting and forward projection.<p>
* i.e. We are using the rates to infer the forward: spot / Z(t,T).
*/
public class EquityVarianceSwapYieldCurveNodeSensitivityFunction extends EquityVarianceSwapFunction {
private static final Logger s_logger = LoggerFactory.getLogger(EquityVarianceSwapYieldCurveNodeSensitivityFunction.class);
private static final VarianceSwapSensitivityCalculator CALCULATOR = VarianceSwapSensitivityCalculator.getInstance();
public EquityVarianceSwapYieldCurveNodeSensitivityFunction() {
super(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES);
}
@Override
protected Set<ComputedValue> computeValues(final ValueSpecification resultSpec, final FunctionInputs inputs, final VarianceSwap derivative, final StaticReplicationDataBundle market) {
final DoubleMatrix1D sensitivities = CALCULATOR.calcDeltaBucketed(derivative, market);
final String curveName = resultSpec.getProperty(ValuePropertyNames.CURVE);
final Object curveSpecObject = inputs.getValue(getCurveSpecRequirement(derivative.getCurrency(), curveName));
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Curve specification was null");
}
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final YieldCurveBundle curveMap = new YieldCurveBundle();
curveMap.setCurve(curveName, market.getDiscountCurve());
return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, curveMap, sensitivities, curveSpec, resultSpec);
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> result = super.getRequirements(context, target, desiredValue);
if (result == null) {
return null;
}
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curves == null || curves.size() != 1) {
s_logger.error("Must specify a curve name");
return null;
}
final String curveName = Iterables.getOnlyElement(curves);
result.add(getCurveSpecRequirement(FinancialSecurityUtils.getCurrency(target.getSecurity()), curveName));
return result;
}
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.SURFACE)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
<<<<<<< MINE
@Override
protected ValueSpecification getValueSpecification(final ComputationTarget target, final String curveName, final String curveCalculationConfig, final String surfaceName) {
final EquityVarianceSwapSecurity security = (EquityVarianceSwapSecurity) target.getSecurity();
final ValueProperties properties = createValueProperties()
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.CURVE_CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CURRENCY, security.getCurrency().getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, CALCULATION_METHOD)
.get();
return new ValueSpecification(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES, target.toSpecification(), properties);
}
private ValueRequirement getCurveSpecRequirement(final Currency currency, final String curveName) {
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, curveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
=======
private ValueRequirement getCurveSpecRequirement(final Currency currency) {
ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, getCurveDefinitionName()).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
>>>>>>> YOURS
}
}
Diff Result
No diff
Case 47 - java_ogplatform.rev_1550f_57348..TimeSeriesSearchRequest.java
Base
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The timeseries identifier for loading specific data points range
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* Identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
*/
@PropertyDefinition
private String _identifierValue;
/**
* List of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
*/
@PropertyDefinition
private final List<Identifier> _identifiers = new ArrayList<Identifier>();
/**
* The dataSource, null to search all dataSource.
*/
@PropertyDefinition
private String _dataSource;
/**
* The dataProvider, null to search all dataProvider.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The dataField, null to search all dataField.
*/
@PropertyDefinition
private String _dataField;
/**
* The observationTime, null to search all observationTime
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in datastore.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search till end date in datastore.
*/
@PropertyDefinition
private T _end;
/**
* Set to true if to load datapoints, otherwise return just meta data
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true if to load the start and end date for timeseries
*/
@PropertyDefinition
private boolean _loadDates;
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
/**
 * Sets the value of a property by name.
 * Dispatches on {@code propertyName.hashCode()} like {@code propertyGet}.
 * The casts throw {@code ClassCastException} if {@code newValue} has the
 * wrong type; a null {@code newValue} for the two boolean properties throws
 * {@code NullPointerException} when unboxed. Unknown names fall through to
 * the superclass, which handles the error.
 * @param propertyName  the property name, not null
 * @param newValue  the new value of the property
 */
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((List<Identifier>) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((boolean) (Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((boolean) (Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
 * Gets the request for paging.
 * By default all matching items will be returned.
 * @return the value of the property
 */
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
 * Sets the request for paging.
 * By default all matching items will be returned.
 * @param pagingRequest the new value of the property
 */
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
 * Gets the {@code pagingRequest} property.
 * By default all matching items will be returned.
 * @return the property, not null
 */
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the timeseries identifier for loading a specific data points range.
 * @return the value of the property
 */
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
 * Sets the timeseries identifier for loading a specific data points range.
 * @param timeSeriesId the new value of the property
 */
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
 * Gets the {@code timeSeriesId} property.
 * @return the property, not null
 */
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the identifier value, matched against the <b>value</b> of the identifiers
 * (see Identifier.getValue());
 * wildcards allowed;
 * will not match on the <b>key</b> of any of the identifiers;
 * null to search all identifiers.
 * @return the value of the property
 */
public String getIdentifierValue() {
return _identifierValue;
}
/**
 * Sets the identifier value, matched against the <b>value</b> of the identifiers
 * (see Identifier.getValue());
 * wildcards allowed;
 * will not match on the <b>key</b> of any of the identifiers;
 * null to search all identifiers.
 * @param identifierValue the new value of the property
 */
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
 * Gets the {@code identifierValue} property.
 * See {@link #getIdentifierValue()} for the matching rules.
 * @return the property, not null
 */
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the list of identifiers to search. Unlike identifierValue, requires an
 * exact match - no wildcards are allowed.
 * NOTE(review): this returns the live modifiable list, not a defensive copy.
 * @return the value of the property
 */
public List<Identifier> getIdentifiers() {
return _identifiers;
}
/**
 * Sets the list of identifiers to search, replacing the current contents.
 * Unlike identifierValue, requires an exact match - no wildcards are allowed.
 * @param identifiers the new value of the property
 * @throws NullPointerException if identifiers is null
 */
public void setIdentifiers(Collection<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
 * Gets the {@code identifiers} property.
 * @return the property, not null
 */
public final Property<List<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the dataSource, null to search all dataSources.
 * @return the value of the property
 */
public String getDataSource() {
return _dataSource;
}
/**
 * Sets the dataSource, null to search all dataSources.
 * @param dataSource the new value of the property
 */
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
 * Gets the {@code dataSource} property.
 * @return the property, not null
 */
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the dataProvider, null to search all dataProviders.
 * @return the value of the property
 */
public String getDataProvider() {
return _dataProvider;
}
/**
 * Sets the dataProvider, null to search all dataProviders.
 * @param dataProvider the new value of the property
 */
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
 * Gets the {@code dataProvider} property.
 * @return the property, not null
 */
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the dataField, null to search all dataFields.
 * @return the value of the property
 */
public String getDataField() {
return _dataField;
}
/**
 * Sets the dataField, null to search all dataFields.
 * @param dataField the new value of the property
 */
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
 * Gets the {@code dataField} property.
 * @return the property, not null
 */
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the observationTime, null to search all observationTimes.
 * @return the value of the property
 */
public String getObservationTime() {
return _observationTime;
}
/**
 * Sets the observationTime, null to search all observationTimes.
 * @param observationTime the new value of the property
 */
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
 * Gets the {@code observationTime} property.
 * @return the property, not null
 */
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the start date, null to search from the earliest date in the datastore.
 * @return the value of the property
 */
public T getStart() {
return _start;
}
/**
 * Sets the start date, null to search from the earliest date in the datastore.
 * @param start the new value of the property
 */
public void setStart(T start) {
this._start = start;
}
/**
 * Gets the {@code start} property.
 * @return the property, not null
 */
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the end date, null to search up to the latest date in the datastore.
 * @return the value of the property
 */
public T getEnd() {
return _end;
}
/**
 * Sets the end date, null to search up to the latest date in the datastore.
 * @param end the new value of the property
 */
public void setEnd(T end) {
this._end = end;
}
/**
 * Gets the {@code end} property.
 * @return the property, not null
 */
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets whether to load the data points; otherwise only meta data is returned.
 * @return the value of the property
 */
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
 * Sets whether to load the data points; otherwise only meta data is returned.
 * @param loadTimeSeries the new value of the property
 */
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
 * Gets the {@code loadTimeSeries} property.
 * @return the property, not null
 */
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets whether to load the start and end dates of the time-series.
 * @return the value of the property
 */
public boolean isLoadDates() {
return _loadDates;
}
/**
 * Sets whether to load the start and end dates of the time-series.
 * @param loadDates the new value of the property
 */
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
 * Gets the {@code loadDates} property.
 * @return the property, not null
 */
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * The meta-bean for {@code TimeSeriesSearchRequest}.
 * @param <T> the bean's generic type (LocalDate/java.sql.Date)
 */
public static class Meta<T> extends BasicMetaBean {
/**
 * The singleton instance of the meta-bean.
 * NOTE(review): deliberately raw — the one instance is shared across all
 * generic instantiations via unchecked conversion in {@code meta()}.
 */
@SuppressWarnings("unchecked")
static final Meta INSTANCE = new Meta();
/**
 * The meta-property for the {@code pagingRequest} property.
 */
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
 * The meta-property for the {@code timeSeriesId} property.
 */
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
 * The meta-property for the {@code identifierValue} property.
 */
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
 * The meta-property for the {@code identifiers} property.
 * The element type is erased: only the raw {@code List} class is recorded.
 */
@SuppressWarnings("unchecked")
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
/**
 * The meta-property for the {@code dataSource} property.
 */
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
 * The meta-property for the {@code dataProvider} property.
 */
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
 * The meta-property for the {@code dataField} property.
 */
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
 * The meta-property for the {@code observationTime} property.
 */
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
 * The meta-property for the {@code start} property.
 * The generic type T is erased to {@code Object.class}.
 */
@SuppressWarnings("unchecked")
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
 * The meta-property for the {@code end} property.
 * The generic type T is erased to {@code Object.class}.
 */
@SuppressWarnings("unchecked")
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
 * The meta-property for the {@code loadTimeSeries} property.
 */
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
 * The meta-property for the {@code loadDates} property.
 */
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
 * The meta-properties, keyed by property name in declaration order.
 */
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings("unchecked")
protected Meta() {
// LinkedHashMap preserves declaration order; the map is frozen once built
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings("unchecked")
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
 * The meta-property for the {@code pagingRequest} property.
 * @return the meta-property, not null
 */
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
 * The meta-property for the {@code timeSeriesId} property.
 * @return the meta-property, not null
 */
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
 * The meta-property for the {@code identifierValue} property.
 * @return the meta-property, not null
 */
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
 * The meta-property for the {@code identifiers} property.
 * @return the meta-property, not null
 */
public final MetaProperty<List<Identifier>> identifiers() {
return _identifiers;
}
/**
 * The meta-property for the {@code dataSource} property.
 * @return the meta-property, not null
 */
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
 * The meta-property for the {@code dataProvider} property.
 * @return the meta-property, not null
 */
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
 * The meta-property for the {@code dataField} property.
 * @return the meta-property, not null
 */
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
 * The meta-property for the {@code observationTime} property.
 * @return the meta-property, not null
 */
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
 * The meta-property for the {@code start} property.
 * @return the meta-property, not null
 */
public final MetaProperty<T> start() {
return _start;
}
/**
 * The meta-property for the {@code end} property.
 * @return the meta-property, not null
 */
public final MetaProperty<T> end() {
return _end;
}
/**
 * The meta-property for the {@code loadTimeSeries} property.
 * @return the meta-property, not null
 */
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
 * The meta-property for the {@code loadDates} property.
 * @return the meta-property, not null
 */
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The timeseries identifier for loading specific data points range
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* Identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
*/
@PropertyDefinition
private String _identifierValue;
/**
* List of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
*/
@PropertyDefinition
private final List<Identifier> _identifiers = new ArrayList<Identifier>();
/**
* The dataSource, null to search all dataSource.
*/
@PropertyDefinition
private String _dataSource;
/**
* The dataProvider, null to search all dataProvider.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The dataField, null to search all dataField.
*/
@PropertyDefinition
private String _dataField;
/**
* The observationTime, null to search all observationTime
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in datastore.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search till end date in datastore.
*/
@PropertyDefinition
private T _end;
/**
* Set to true if to load datapoints, otherwise return just meta data
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true if to load the start and end date for timeseries
*/
@PropertyDefinition
private boolean _loadDates;
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((List<Identifier>) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((boolean) (Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((boolean) (Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the timeseries identifier for loading specific data points range
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the timeseries identifier for loading specific data points range
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @return the value of the property
*/
public List<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @param identifiers the new value of the property
*/
public void setIdentifiers(Collection<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the the {@code identifiers} property.
* - no wildcards are allowed
* @return the property, not null
*/
public final Property<List<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataSource, null to search all dataSource.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the dataSource, null to search all dataSource.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataProvider, null to search all dataProvider.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the dataProvider, null to search all dataProvider.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataField, null to search all dataField.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the dataField, null to search all dataField.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observationTime, null to search all observationTime
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observationTime, null to search all observationTime
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in datastore.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in datastore.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search till end date in datastore.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search till end date in datastore.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load datapoints, otherwise return just meta data
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true if to load datapoints, otherwise return just meta data
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load the start and end date for timeseries
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true if to load the start and end date for timeseries
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("unchecked")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings("unchecked")
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
// Creates a new, empty bean instance; used by the Joda-Beans framework.
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings("unchecked")
@Override
// The raw-class cast is required because class literals cannot carry the
// generic parameter <T>; suppressed as in all generated Joda-Beans code.
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
// Returns the unmodifiable name-to-meta-property map built in the constructor.
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<List<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
Left
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final List<Identifier> _identifiers = new ArrayList<Identifier>();
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
// Generated dispatcher: selects the getter by the property name's hash code.
// The numeric case labels are precomputed String.hashCode() values for the
// property names shown in the trailing comments. Names that match no case
// fall through to the superclass lookup.
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
// Generated dispatcher: selects the setter by the property name's hash code
// (precomputed String.hashCode() values, see trailing comments) and casts the
// incoming value to the property's declared type. The cast to T for start/end
// is unchecked, hence the suppression. Unknown names fall through to super.
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((List<Identifier>) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public List<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property, null treated as an empty list
*/
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
// The documented contract allows null ("null to not match on identifiers"),
// but addAll(null) would throw NullPointerException; treat null as clearing.
if (identifiers != null) {
this._identifiers.addAll(identifiers);
}
}
/**
* Gets the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<List<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<List<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final List<Identifier> _identifiers = new ArrayList<Identifier>();
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((List<Identifier>) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public List<Identifier> getIdentifiers() {
return _identifiers;
}
/**
 * Sets the identifiers to match, null to not match on identifiers.
 * This will return time series where at least one complete identifier in the series matches
 * at least one complete identifier in this bundle. Note that an empty bundle will not match
 * anything, whereas a null bundle places no restrictions on the result.
 * This method is suitable for exact machine searching, whereas the {@code identifierValue}
 * search is useful for human searching.
 * @param identifiers the new value of the property, null clears the stored identifiers
 */
public void setIdentifiers(List<Identifier> identifiers) {
// The Javadoc explicitly permits a null bundle, but the previous
// clear()-then-addAll() sequence threw NullPointerException on null
// *after* the backing list had already been cleared. Guard the copy.
// NOTE(review): the backing field is a final non-null list, so the "null
// bundle" state cannot actually be represented; null is treated as empty
// here - confirm against the code that consumes this request.
this._identifiers.clear();
if (identifiers != null) {
this._identifiers.addAll(identifiers);
}
}
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<List<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<List<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
Right
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The timeseries identifier for loading specific data points range
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* Identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
*/
@PropertyDefinition
private String _identifierValue;
/**
* List of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if applicable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The dataSource, null to search all dataSource.
*/
@PropertyDefinition
private String _dataSource;
/**
* The dataProvider, null to search all dataProvider.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The dataField, null to search all dataField.
*/
@PropertyDefinition
private String _dataField;
/**
* The observationTime, null to search all observationTime
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in datastore.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search till end date in datastore.
*/
@PropertyDefinition
private T _end;
/**
* Set to true if to load datapoints, otherwise return just meta data
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true if to load the start and end date for timeseries
*/
@PropertyDefinition
private boolean _loadDates;
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
// Generated reflective getter: dispatches on the property name's hashCode.
// The numeric case labels are precomputed String.hashCode() values for the
// property names shown in the trailing comments.
// NOTE(review): there is no String.equals() confirmation after the hash
// match, so a colliding name would mis-dispatch; this relies on the fixed,
// generation-time property set - do not add cases by hand.
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669:  // pagingRequest
return getPagingRequest();
case 1709694943:  // timeSeriesId
return getTimeSeriesId();
case 2085582408:  // identifierValue
return getIdentifierValue();
case 1368189162:  // identifiers
return getIdentifiers();
case 600751303:  // currentDate
return getCurrentDate();
case 1272470629:  // dataSource
return getDataSource();
case 339742651:  // dataProvider
return getDataProvider();
case -386794640:  // dataField
return getDataField();
case 951232793:  // observationTime
return getObservationTime();
case 109757538:  // start
return getStart();
case 100571:  // end
return getEnd();
case 1833789738:  // loadTimeSeries
return isLoadTimeSeries();
case 1364095295:  // loadDates
return isLoadDates();
}
// Unknown property name: delegate to the superclass lookup.
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((boolean) (Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((boolean) (Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the timeseries identifier for loading specific data points range
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the timeseries identifier for loading specific data points range
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
 * Sets the list of Identifiers to search. Unlike {@code identifierValue}, this requires an
 * exact match - no wildcards are allowed.
 * @param identifiers the new value of the property, null clears the stored identifiers
 */
public void setIdentifiers(Set<Identifier> identifiers) {
// Previously clear() ran before addAll(identifiers), so a null argument
// threw NullPointerException after the backing set was already emptied.
// NOTE(review): the other search fields document "null to search all";
// assuming the same convention here (null == empty) - confirm with callers.
this._identifiers.clear();
if (identifiers != null) {
this._identifiers.addAll(identifiers);
}
}
/**
* Gets the the {@code identifiers} property.
* - no wildcards are allowed
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the current date (if applicable for identifiers).
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets the current date (if applicable for identifiers).
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataSource, null to search all dataSource.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the dataSource, null to search all dataSource.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataProvider, null to search all dataProvider.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the dataProvider, null to search all dataProvider.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataField, null to search all dataField.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the dataField, null to search all dataField.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observationTime, null to search all observationTime
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observationTime, null to search all observationTime
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in datastore.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in datastore.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search till end date in datastore.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search till end date in datastore.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load datapoints, otherwise return just meta data
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true if to load datapoints, otherwise return just meta data
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load the start and end date for timeseries
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true if to load the start and end date for timeseries
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
 * The singleton instance of the meta-bean.
 */
// "rawtypes" (not "unchecked") is the lint category produced by the raw Meta
// declaration; this also matches the annotation used on the earlier revision.
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
 * The meta-property for the {@code identifiers} property.
 */
// The raw (Class) cast emits both "unchecked" and "rawtypes" warnings;
// suppress both, as the earlier revision of this generated code does.
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
 * The meta-property for the {@code start} property.
 */
// The raw (DirectMetaProperty) cast emits both "unchecked" and "rawtypes"
// warnings; suppress both, matching the earlier revision.
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
 * The meta-property for the {@code end} property.
 */
// The raw (DirectMetaProperty) cast emits both "unchecked" and "rawtypes"
// warnings; suppress both, matching the earlier revision.
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
// Raw LinkedHashMap usage needs "rawtypes" in addition to "unchecked"
// (the earlier revision of this generated constructor suppresses both).
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
// LinkedHashMap so metaPropertyMap() preserves property declaration order.
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
// Expose an immutable view; the map is never mutated after construction.
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings("unchecked")
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The timeseries identifier for loading specific data points range
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* Identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
*/
@PropertyDefinition
private String _identifierValue;
/**
* List of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The dataSource, null to search all dataSource.
*/
@PropertyDefinition
private String _dataSource;
/**
* The dataProvider, null to search all dataProvider.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The dataField, null to search all dataField.
*/
@PropertyDefinition
private String _dataField;
/**
* The observationTime, null to search all observationTime
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in datastore.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search till end date in datastore.
*/
@PropertyDefinition
private T _end;
/**
* Set to true if to load datapoints, otherwise return just meta data
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true if to load the start and end date for timeseries
*/
@PropertyDefinition
private boolean _loadDates;
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
// Dispatches a dynamic property read on the hash code of the property name;
// the trailing comment on each case records which property that hash belongs
// to. Presumably the Joda-Beans code generator guarantees these hash codes
// are collision-free for this property set — TODO confirm against the
// generator version in use.
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
// Unrecognized names fall through to the superclass (which reports the error).
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((boolean) (Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((boolean) (Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the timeseries identifier for loading specific data points range
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the timeseries identifier for loading specific data points range
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets identifier value, will match against the <b>value</b> of the identifiers
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* (see Identifier.getValue());
* wildcards allowed;
* will not match on the <b>key</b> of any of the identifiers;
* null to search all identifiers
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @param identifiers the new value of the property
*/
public void setIdentifiers(Set<Identifier> identifiers) {
// Replaces the CONTENTS of the final backing set rather than the reference,
// so the Set returned by getIdentifiers() earlier stays valid.
// NOTE(review): throws NullPointerException if identifiers is null — confirm
// callers never pass null before relying on "null means unrestricted".
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the the {@code identifiers} property.
* - no wildcards are allowed
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if appicalable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if appicalable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataSource, null to search all dataSource.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the dataSource, null to search all dataSource.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataProvider, null to search all dataProvider.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the dataProvider, null to search all dataProvider.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the dataField, null to search all dataField.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the dataField, null to search all dataField.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observationTime, null to search all observationTime
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observationTime, null to search all observationTime
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in datastore.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in datastore.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search till end date in datastore.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search till end date in datastore.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load datapoints, otherwise return just meta data
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true if to load datapoints, otherwise return just meta data
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true if to load the start and end date for timeseries
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true if to load the start and end date for timeseries
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("unchecked")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings("unchecked")
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings("unchecked")
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings("unchecked")
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
=======
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
>>>>>>> YOURS
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the current date (if applicable for identifiers).
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets the current date (if applicable for identifiers).
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* The current date (if applicable for identifiers).
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
=======
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
>>>>>>> YOURS
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the current date (if applicable for identifiers).
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets the current date (if applicable for identifiers).
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
// KeepBothMethods: merge-resolution marker — both setIdentifiers(List) and setIdentifiers(Set) overloads were retained.
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
 * Gets the identifiers to match, null to not match on identifiers.
 * This will return time series where at least one complete identifier in the series matches
 * at least one complete identifier in this bundle. Note that an empty bundle will not match
 * anything, whereas a null bundle places no restrictions on the result.
 * This method is suitable for exact machine searching, whereas the {@code identifierValue}
 * search is useful for human searching.
 * <p>
 * NOTE(review): this returns the live internal set (the final {@code HashSet} field),
 * not a copy — mutations by the caller change this request directly.
 * @return the value of the property
 */
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
 * Sets the identifiers to match, replacing the current contents of the internal set.
 * This method is suitable for exact machine searching, whereas the {@code identifierValue}
 * search is useful for human searching. Duplicates in the list are collapsed by the set.
 * <p>
 * NOTE(review): despite the "null bundle places no restrictions" wording on the getter,
 * passing null here throws {@code NullPointerException} ({@code addAll(null)}) and leaves
 * the set cleared — confirm the intended null contract.
 * @param identifiers the new value of the property
 */
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
 * Sets the identifiers to search. Unlike {@code identifierValue}, this requires an exact
 * match — no wildcards are allowed. Replaces the current contents of the internal set.
 * <p>
 * NOTE(review): passing null throws {@code NullPointerException} after clearing the set,
 * same as the {@code List} overload above.
 * @param identifiers the new value of the property
 */
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if appicalable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if appicalable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Sets list of Identifiers to search. Unlike _identifierValue, requires exact match
* - no wildcards are allowed
* @param identifiers the new value of the property
*/
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if applicable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if applicable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
Safe
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if applicable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
=======
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
>>>>>>> YOURS
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if applicable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if applicable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest <T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
/**
 * Joda-Beans generated dispatch: reads a property by name.
 * The case labels are the precomputed {@code String.hashCode()} values of the
 * property names (shown in the trailing comments); unknown names fall through
 * to the superclass, which raises the standard "unknown property" error.
 */
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
/**
 * Joda-Beans generated dispatch: writes a property by name.
 * Mirrors {@code propertyGet}: case labels are the property-name hash codes,
 * each delegating to the typed setter with an unchecked cast of the supplied
 * value. Unknown names fall through to the superclass.
 */
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
=======
public void setIdentifiers(Set<Identifier> identifiers) {
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
>>>>>>> YOURS
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if appicalable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if appicalable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta <T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
}
Unstructured
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
// Generated Joda-Beans dispatch: routes a property name to its getter by the
// name's precomputed String.hashCode() (the trailing comment on each case is
// the property name the constant was derived from). Unknown names fall
// through to the superclass, which raises the framework's standard error.
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
// Generated Joda-Beans dispatch: routes a property name to its setter by the
// name's precomputed String.hashCode(), casting newValue to the property's
// declared type. Note the identifiers case casts to Set<Identifier>, which is
// what the backing field and setter expect. Unknown names fall through to the
// superclass. Casts on Object are unavoidable, hence the suppression.
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
=======
public void setIdentifiers(Set<Identifier> identifiers) {
>>>>>>> YOURS
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets current date (if appicalable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets current date (if appicalable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
// Builds the name -> meta-property lookup map. A LinkedHashMap is used so
// iteration order matches the property declaration order; the map is wrapped
// unmodifiable before being stored. Raw types are inherent to the generated
// code, hence the suppression.
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.timeseries;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.LocalDate;
import org.joda.beans.BeanDefinition;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.BasicMetaBean;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import com.opengamma.id.Identifier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.db.PagingRequest;
/**
* Request for searching for TimeSeries.
*
* @param <T> LocalDate/java.sql.Date
*/
@BeanDefinition
public class TimeSeriesSearchRequest<T> extends DirectBean {
/**
* The request for paging.
* By default all matching items will be returned.
*/
@PropertyDefinition
private PagingRequest _pagingRequest = PagingRequest.ALL;
/**
* The time series identifier for loading specific data points range.
*/
@PropertyDefinition
private UniqueIdentifier _timeSeriesId;
/**
* The identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
*/
@PropertyDefinition
private String _identifierValue;
/**
* The identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
*/
@PropertyDefinition
private final Set<Identifier> _identifiers = new HashSet<Identifier>();
/**
* Current date (if appicalable for identifiers)
*/
@PropertyDefinition
private LocalDate _currentDate;
/**
* The data source, null to search all data sources.
*/
@PropertyDefinition
private String _dataSource;
/**
* The data provider, null to search all data providers.
*/
@PropertyDefinition
private String _dataProvider;
/**
* The data field to search, null to search all data fields.
*/
@PropertyDefinition
private String _dataField;
/**
* The observation time, null to search all observation times.
*/
@PropertyDefinition
private String _observationTime;
/**
* The start date, null to search from start date in data store.
*/
@PropertyDefinition
private T _start;
/**
* The end date, null to search until the end date in data store.
*/
@PropertyDefinition
private T _end;
/**
* Set to true to load data points, otherwise return just meta data.
*/
@PropertyDefinition
private boolean _loadTimeSeries;
/**
* Set to true to load the start and end date for time series.
*/
@PropertyDefinition
private boolean _loadDates;
/**
* Creates an instance.
*/
public TimeSeriesSearchRequest() {
}
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code TimeSeriesSearchRequest<T>}.
* @param <R> the bean's generic type
* @return the meta-bean, not null
*/
@SuppressWarnings("unchecked")
public static <R> TimeSeriesSearchRequest.Meta<R> meta() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public TimeSeriesSearchRequest.Meta<T> metaBean() {
return TimeSeriesSearchRequest.Meta.INSTANCE;
}
@Override
protected Object propertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
return getPagingRequest();
case 1709694943: // timeSeriesId
return getTimeSeriesId();
case 2085582408: // identifierValue
return getIdentifierValue();
case 1368189162: // identifiers
return getIdentifiers();
case 600751303: // currentDate
return getCurrentDate();
case 1272470629: // dataSource
return getDataSource();
case 339742651: // dataProvider
return getDataProvider();
case -386794640: // dataField
return getDataField();
case 951232793: // observationTime
return getObservationTime();
case 109757538: // start
return getStart();
case 100571: // end
return getEnd();
case 1833789738: // loadTimeSeries
return isLoadTimeSeries();
case 1364095295: // loadDates
return isLoadDates();
}
return super.propertyGet(propertyName);
}
@SuppressWarnings("unchecked")
@Override
protected void propertySet(String propertyName, Object newValue) {
switch (propertyName.hashCode()) {
case -2092032669: // pagingRequest
setPagingRequest((PagingRequest) newValue);
return;
case 1709694943: // timeSeriesId
setTimeSeriesId((UniqueIdentifier) newValue);
return;
case 2085582408: // identifierValue
setIdentifierValue((String) newValue);
return;
case 1368189162: // identifiers
setIdentifiers((Set<Identifier>) newValue);
return;
case 600751303: // currentDate
setCurrentDate((LocalDate) newValue);
return;
case 1272470629: // dataSource
setDataSource((String) newValue);
return;
case 339742651: // dataProvider
setDataProvider((String) newValue);
return;
case -386794640: // dataField
setDataField((String) newValue);
return;
case 951232793: // observationTime
setObservationTime((String) newValue);
return;
case 109757538: // start
setStart((T) newValue);
return;
case 100571: // end
setEnd((T) newValue);
return;
case 1833789738: // loadTimeSeries
setLoadTimeSeries((Boolean) newValue);
return;
case 1364095295: // loadDates
setLoadDates((Boolean) newValue);
return;
}
super.propertySet(propertyName, newValue);
}
//-----------------------------------------------------------------------
/**
* Gets the request for paging.
* By default all matching items will be returned.
* @return the value of the property
*/
public PagingRequest getPagingRequest() {
return _pagingRequest;
}
/**
* Sets the request for paging.
* By default all matching items will be returned.
* @param pagingRequest the new value of the property
*/
public void setPagingRequest(PagingRequest pagingRequest) {
this._pagingRequest = pagingRequest;
}
/**
* Gets the the {@code pagingRequest} property.
* By default all matching items will be returned.
* @return the property, not null
*/
public final Property<PagingRequest> pagingRequest() {
return metaBean().pagingRequest().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time series identifier for loading specific data points range.
* @return the value of the property
*/
public UniqueIdentifier getTimeSeriesId() {
return _timeSeriesId;
}
/**
* Sets the time series identifier for loading specific data points range.
* @param timeSeriesId the new value of the property
*/
public void setTimeSeriesId(UniqueIdentifier timeSeriesId) {
this._timeSeriesId = timeSeriesId;
}
/**
* Gets the the {@code timeSeriesId} property.
* @return the property, not null
*/
public final Property<UniqueIdentifier> timeSeriesId() {
return metaBean().timeSeriesId().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the value of the property
*/
public String getIdentifierValue() {
return _identifierValue;
}
/**
* Sets the identifier value, matching against the <b>value</b> of the identifiers,
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @param identifierValue the new value of the property
*/
public void setIdentifierValue(String identifierValue) {
this._identifierValue = identifierValue;
}
/**
* Gets the {@code identifierValue} property.
* null to not match by identifier value.
* This matches against the {@link Identifier#getValue() value} of the identifier
* and does not match against the key. Wildcards are allowed.
* This method is suitable for human searching, whereas the {@code identifiers}
* search is useful for exact machine searching.
* @return the property, not null
*/
public final Property<String> identifierValue() {
return metaBean().identifierValue().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the value of the property
*/
public Set<Identifier> getIdentifiers() {
return _identifiers;
}
/**
* Sets the identifiers to match, null to not match on identifiers.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @param identifiers the new value of the property
*/
<<<<<<< MINE
public void setIdentifiers(List<Identifier> identifiers) {
=======
public void setIdentifiers(Set<Identifier> identifiers) {
>>>>>>> YOURS
this._identifiers.clear();
this._identifiers.addAll(identifiers);
}
/**
* Gets the {@code identifiers} property.
* This will return time series where at least one complete identifier in the series matches
* at least one complete identifier in this bundle. Note that an empty bundle will not match
* anything, whereas a null bundle places no restrictions on the result.
* This method is suitable for exact machine searching, whereas the {@code identifierValue}
* search is useful for human searching.
* @return the property, not null
*/
public final Property<Set<Identifier>> identifiers() {
return metaBean().identifiers().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the current date (if applicable for identifiers)
* @return the value of the property
*/
public LocalDate getCurrentDate() {
return _currentDate;
}
/**
* Sets the current date (if applicable for identifiers)
* @param currentDate the new value of the property
*/
public void setCurrentDate(LocalDate currentDate) {
this._currentDate = currentDate;
}
/**
* Gets the {@code currentDate} property.
* @return the property, not null
*/
public final Property<LocalDate> currentDate() {
return metaBean().currentDate().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data source, null to search all data sources.
* @return the value of the property
*/
public String getDataSource() {
return _dataSource;
}
/**
* Sets the data source, null to search all data sources.
* @param dataSource the new value of the property
*/
public void setDataSource(String dataSource) {
this._dataSource = dataSource;
}
/**
* Gets the {@code dataSource} property.
* @return the property, not null
*/
public final Property<String> dataSource() {
return metaBean().dataSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data provider, null to search all data providers.
* @return the value of the property
*/
public String getDataProvider() {
return _dataProvider;
}
/**
* Sets the data provider, null to search all data providers.
* @param dataProvider the new value of the property
*/
public void setDataProvider(String dataProvider) {
this._dataProvider = dataProvider;
}
/**
* Gets the {@code dataProvider} property.
* @return the property, not null
*/
public final Property<String> dataProvider() {
return metaBean().dataProvider().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the data field to search, null to search all data fields.
* @return the value of the property
*/
public String getDataField() {
return _dataField;
}
/**
* Sets the data field to search, null to search all data fields.
* @param dataField the new value of the property
*/
public void setDataField(String dataField) {
this._dataField = dataField;
}
/**
* Gets the {@code dataField} property.
* @return the property, not null
*/
public final Property<String> dataField() {
return metaBean().dataField().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the observation time, null to search all observation times.
* @return the value of the property
*/
public String getObservationTime() {
return _observationTime;
}
/**
* Sets the observation time, null to search all observation times.
* @param observationTime the new value of the property
*/
public void setObservationTime(String observationTime) {
this._observationTime = observationTime;
}
/**
* Gets the {@code observationTime} property.
* @return the property, not null
*/
public final Property<String> observationTime() {
return metaBean().observationTime().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the start date, null to search from start date in data store.
* @return the value of the property
*/
public T getStart() {
return _start;
}
/**
* Sets the start date, null to search from start date in data store.
* @param start the new value of the property
*/
public void setStart(T start) {
this._start = start;
}
/**
* Gets the {@code start} property.
* @return the property, not null
*/
public final Property<T> start() {
return metaBean().start().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the end date, null to search until the end date in data store.
* @return the value of the property
*/
public T getEnd() {
return _end;
}
/**
* Sets the end date, null to search until the end date in data store.
* @param end the new value of the property
*/
public void setEnd(T end) {
this._end = end;
}
/**
* Gets the {@code end} property.
* @return the property, not null
*/
public final Property<T> end() {
return metaBean().end().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load data points, otherwise return just meta data.
* @return the value of the property
*/
public boolean isLoadTimeSeries() {
return _loadTimeSeries;
}
/**
* Sets set to true to load data points, otherwise return just meta data.
* @param loadTimeSeries the new value of the property
*/
public void setLoadTimeSeries(boolean loadTimeSeries) {
this._loadTimeSeries = loadTimeSeries;
}
/**
* Gets the {@code loadTimeSeries} property.
* @return the property, not null
*/
public final Property<Boolean> loadTimeSeries() {
return metaBean().loadTimeSeries().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets set to true to load the start and end date for time series.
* @return the value of the property
*/
public boolean isLoadDates() {
return _loadDates;
}
/**
* Sets set to true to load the start and end date for time series.
* @param loadDates the new value of the property
*/
public void setLoadDates(boolean loadDates) {
this._loadDates = loadDates;
}
/**
* Gets the {@code loadDates} property.
* @return the property, not null
*/
public final Property<Boolean> loadDates() {
return metaBean().loadDates().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code TimeSeriesSearchRequest}.
*/
public static class Meta<T> extends BasicMetaBean {
/**
* The singleton instance of the meta-bean.
*/
@SuppressWarnings("rawtypes")
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code pagingRequest} property.
*/
private final MetaProperty<PagingRequest> _pagingRequest = DirectMetaProperty.ofReadWrite(this, "pagingRequest", PagingRequest.class);
/**
* The meta-property for the {@code timeSeriesId} property.
*/
private final MetaProperty<UniqueIdentifier> _timeSeriesId = DirectMetaProperty.ofReadWrite(this, "timeSeriesId", UniqueIdentifier.class);
/**
* The meta-property for the {@code identifierValue} property.
*/
private final MetaProperty<String> _identifierValue = DirectMetaProperty.ofReadWrite(this, "identifierValue", String.class);
/**
* The meta-property for the {@code identifiers} property.
*/
<<<<<<< MINE
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<List<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) List.class);
=======
@SuppressWarnings("unchecked")
private final MetaProperty<Set<Identifier>> _identifiers = DirectMetaProperty.ofReadWrite(this, "identifiers", (Class) Set.class);
/**
* The meta-property for the {@code currentDate} property.
*/
private final MetaProperty<LocalDate> _currentDate = DirectMetaProperty.ofReadWrite(this, "currentDate", LocalDate.class);
>>>>>>> YOURS
/**
* The meta-property for the {@code dataSource} property.
*/
private final MetaProperty<String> _dataSource = DirectMetaProperty.ofReadWrite(this, "dataSource", String.class);
/**
* The meta-property for the {@code dataProvider} property.
*/
private final MetaProperty<String> _dataProvider = DirectMetaProperty.ofReadWrite(this, "dataProvider", String.class);
/**
* The meta-property for the {@code dataField} property.
*/
private final MetaProperty<String> _dataField = DirectMetaProperty.ofReadWrite(this, "dataField", String.class);
/**
* The meta-property for the {@code observationTime} property.
*/
private final MetaProperty<String> _observationTime = DirectMetaProperty.ofReadWrite(this, "observationTime", String.class);
/**
* The meta-property for the {@code start} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _start = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "start", Object.class);
/**
* The meta-property for the {@code end} property.
*/
@SuppressWarnings({"unchecked", "rawtypes" })
private final MetaProperty<T> _end = (DirectMetaProperty) DirectMetaProperty.ofReadWrite(this, "end", Object.class);
/**
* The meta-property for the {@code loadTimeSeries} property.
*/
private final MetaProperty<Boolean> _loadTimeSeries = DirectMetaProperty.ofReadWrite(this, "loadTimeSeries", Boolean.TYPE);
/**
* The meta-property for the {@code loadDates} property.
*/
private final MetaProperty<Boolean> _loadDates = DirectMetaProperty.ofReadWrite(this, "loadDates", Boolean.TYPE);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<Object>> _map;
@SuppressWarnings({"unchecked", "rawtypes" })
protected Meta() {
LinkedHashMap temp = new LinkedHashMap();
temp.put("pagingRequest", _pagingRequest);
temp.put("timeSeriesId", _timeSeriesId);
temp.put("identifierValue", _identifierValue);
temp.put("identifiers", _identifiers);
temp.put("currentDate", _currentDate);
temp.put("dataSource", _dataSource);
temp.put("dataProvider", _dataProvider);
temp.put("dataField", _dataField);
temp.put("observationTime", _observationTime);
temp.put("start", _start);
temp.put("end", _end);
temp.put("loadTimeSeries", _loadTimeSeries);
temp.put("loadDates", _loadDates);
_map = Collections.unmodifiableMap(temp);
}
@Override
public TimeSeriesSearchRequest<T> createBean() {
return new TimeSeriesSearchRequest<T>();
}
@SuppressWarnings({"unchecked", "rawtypes" })
@Override
public Class<? extends TimeSeriesSearchRequest<T>> beanType() {
return (Class) TimeSeriesSearchRequest.class;
}
@Override
public Map<String, MetaProperty<Object>> metaPropertyMap() {
return _map;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code pagingRequest} property.
* @return the meta-property, not null
*/
public final MetaProperty<PagingRequest> pagingRequest() {
return _pagingRequest;
}
/**
* The meta-property for the {@code timeSeriesId} property.
* @return the meta-property, not null
*/
public final MetaProperty<UniqueIdentifier> timeSeriesId() {
return _timeSeriesId;
}
/**
* The meta-property for the {@code identifierValue} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> identifierValue() {
return _identifierValue;
}
/**
* The meta-property for the {@code identifiers} property.
* @return the meta-property, not null
*/
public final MetaProperty<Set<Identifier>> identifiers() {
return _identifiers;
}
/**
* The meta-property for the {@code currentDate} property.
* @return the meta-property, not null
*/
public final MetaProperty<LocalDate> currentDate() {
return _currentDate;
}
/**
* The meta-property for the {@code dataSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataSource() {
return _dataSource;
}
/**
* The meta-property for the {@code dataProvider} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataProvider() {
return _dataProvider;
}
/**
* The meta-property for the {@code dataField} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> dataField() {
return _dataField;
}
/**
* The meta-property for the {@code observationTime} property.
* @return the meta-property, not null
*/
public final MetaProperty<String> observationTime() {
return _observationTime;
}
/**
* The meta-property for the {@code start} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> start() {
return _start;
}
/**
* The meta-property for the {@code end} property.
* @return the meta-property, not null
*/
public final MetaProperty<T> end() {
return _end;
}
/**
* The meta-property for the {@code loadTimeSeries} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadTimeSeries() {
return _loadTimeSeries;
}
/**
* The meta-property for the {@code loadDates} property.
* @return the meta-property, not null
*/
public final MetaProperty<Boolean> loadDates() {
return _loadDates;
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
Diff Result
No diff
Case 48 - java_ogplatform.rev_18d76_f7d0f..TimeSeriesMasterTest.java
Case 49 - java_ogplatform.rev_20353_c0ca1..TradeImpl.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.CompareUtils;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premuim;
/**
* Currency of payment at time of purchase
*/
private Currency _premuimCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
public void addSecurityKey(final Identifier securityKeyIdentifier) {
ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premuim;
}
public void setPremuim(final Double premium) {
_premuim = premium;
}
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premuimCurrency;
}
public void setPremiumCurrency(Currency premuimCurrency) {
_premuimCurrency = premuimCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return CompareUtils.compareWithNull(getQuantity(), other.getQuantity()) == 0 &&
ObjectUtils.equals(getCounterparty(), other.getCounterparty()) &&
ObjectUtils.equals(getTradeDate(), other.getTradeDate()) &&
ObjectUtils.equals(getTradeTime(), other.getTradeTime()) &&
ObjectUtils.equals(getSecurityKey(), other.getSecurityKey()) &&
ObjectUtils.equals(getSecurity(), other.getSecurity()) &&
ObjectUtils.equals(getPremium(), other.getPremium()) &&
ObjectUtils.equals(getPremiumCurrency(), other.getPremiumCurrency()) &&
ObjectUtils.equals(getPremiumDate(), other.getPremiumDate()) &&
ObjectUtils.equals(getPremiumTime(), other.getPremiumTime());
}
return false;
}
@Override
public int hashCode() {
int hashCode = 65;
hashCode += getQuantity().hashCode();
hashCode += hashCode ^ ObjectUtils.hashCode(getCounterparty()) ^ ObjectUtils.hashCode(getTradeDate()) ^ ObjectUtils.hashCode(getTradeTime());
if (getSecurity() != null) {
hashCode *= 31;
hashCode += getSecurity().hashCode();
}
hashCode *= 31;
hashCode += ObjectUtils.hashCode(getSecurityKey());
return hashCode;
}
@Override
public String toString() {
return new StrBuilder(256)
.append("Trade[")
.append(getUniqueId())
.append(", ")
.append(getQuantity())
.append(' ')
.append(getSecurity() != null ? getSecurity() : getSecurityKey())
.append(" PositionID:")
.append(getParentPositionId())
.append(" ")
.append(getCounterparty())
.append(" ")
.append(getTradeDate())
.append(" ")
.append(getTradeTime())
.append(']')
.toString();
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.CompareUtils;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premuim;
/**
* Currency of payment at time of purchase
*/
private Currency _premuimCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
// tradeTime is deliberately not null-checked: it is documented as optional
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
// wrap the single identifier in a bundle; the resolved Security is not yet known
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and trade instant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
// tradeTime may be null (see javadoc)
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
// security remains unresolved; callers use the key to look it up later
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and trade instant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
// derive the key from the resolved security so both views stay consistent
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
// shallow copy: referenced objects (security, counterparty, ...) are shared, not cloned
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key, may be null if never set
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
// null is accepted here, so readers of _securityKey must handle null
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key, creating the bundle if none exists yet.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
public void addSecurityKey(final Identifier securityKeyIdentifier) {
ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
if (getSecurityKey() != null) {
setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
} else {
// previously threw NullPointerException when no security key had been set
// (e.g. after the no-arg constructor or setSecurityKey(null))
setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
}
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security, may be null
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
* Note: the security key is NOT updated to match; callers keep the two consistent.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
/**
* Gets the amount paid for the trade at time of purchase.
*
* @return the premium, may be null
*/
@Override
public Double getPremium() {
return _premuim;
}
/**
* Sets the amount paid for the trade at time of purchase.
*
* @param premium the premium, may be null
*/
public void setPremium(final Double premium) {
_premuim = premium;
}
/**
* Sets the amount paid for the trade at time of purchase.
*
* @param premium the premium, may be null
* @deprecated method name is misspelled; use {@link #setPremium(Double)}
*/
@Deprecated
public void setPremuim(final Double premium) {
// retained for backward compatibility with existing callers
setPremium(premium);
}
//-------------------------------------------------------------------------
/**
* Gets the currency of the premium payment.
*
* @return the premium currency, may be null
*/
@Override
public Currency getPremiumCurrency() {
return _premuimCurrency;
}
/**
* Sets the currency of the premium payment.
*
* @param premuimCurrency the premium currency, may be null
*/
public void setPremiumCurrency(Currency premuimCurrency) {
_premuimCurrency = premuimCurrency;
}
//-------------------------------------------------------------------------
/**
* Gets the date of the premium payment.
*
* @return the premium date, may be null
*/
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
/**
* Sets the date of the premium payment.
*
* @param premiumDate the premium date, may be null
*/
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
/**
* Gets the time of the premium payment.
*
* @return the premium time, may be null
*/
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
/**
* Sets the time of the premium payment.
*
* @param premiumTime the premium time, may be null
*/
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
//-------------------------------------------------------------------------
/**
* Tests this trade for equality with another.
* <p>
* Quantity is compared numerically (compareTo == 0) so scale differences
* such as 2.0 vs 2.00 do not break equality. The unique identifier and the
* parent position identifier are not part of the comparison.
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
// NOTE(review): premium fields participate here but not in hashCode();
// that is still contract-valid (equal objects hash equally) but asymmetric.
return CompareUtils.compareWithNull(getQuantity(), other.getQuantity()) == 0 &&
ObjectUtils.equals(getCounterparty(), other.getCounterparty()) &&
ObjectUtils.equals(getTradeDate(), other.getTradeDate()) &&
ObjectUtils.equals(getTradeTime(), other.getTradeTime()) &&
ObjectUtils.equals(getSecurityKey(), other.getSecurityKey()) &&
ObjectUtils.equals(getSecurity(), other.getSecurity()) &&
ObjectUtils.equals(getPremium(), other.getPremium()) &&
ObjectUtils.equals(getPremiumCurrency(), other.getPremiumCurrency()) &&
ObjectUtils.equals(getPremiumDate(), other.getPremiumDate()) &&
ObjectUtils.equals(getPremiumTime(), other.getPremiumTime());
}
return false;
}
/**
* Builds a hash over a null-safe subset of the equals() fields
* (premium fields are omitted; a subset keeps the hashCode contract).
*/
@Override
public int hashCode() {
int hashCode = 65;
// was getQuantity().hashCode(): threw NullPointerException when quantity was
// never set (the no-arg constructor leaves it null); non-null values hash identically
hashCode += ObjectUtils.hashCode(getQuantity());
hashCode += hashCode ^ ObjectUtils.hashCode(getCounterparty()) ^ ObjectUtils.hashCode(getTradeDate()) ^ ObjectUtils.hashCode(getTradeTime());
if (getSecurity() != null) {
hashCode *= 31;
hashCode += getSecurity().hashCode();
}
hashCode *= 31;
hashCode += ObjectUtils.hashCode(getSecurityKey());
return hashCode;
}
/**
* Renders the trade as, e.g.,
* {@code Trade[<id>, <qty> <security|key> PositionID:<pos> <cpty> <date> <time>]}.
*/
@Override
public String toString() {
final StrBuilder buf = new StrBuilder(256);
buf.append("Trade[").append(getUniqueId()).append(", ");
buf.append(getQuantity()).append(' ');
// prefer the resolved security when available, otherwise fall back to the key
buf.append(getSecurity() != null ? getSecurity() : getSecurityKey());
buf.append(" PositionID:").append(getParentPositionId());
buf.append(" ").append(getCounterparty());
buf.append(" ").append(getTradeDate());
buf.append(" ").append(getTradeTime());
buf.append(']');
return buf.toString();
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {

  /** Serialization version. */
  private static final long serialVersionUID = 1L;

  /**
   * The unique identifier of the trade.
   */
  private UniqueIdentifier _uniqueId;
  /**
   * The unique identifier of the parent position.
   */
  private UniqueIdentifier _parentPositionId;
  /**
   * The identifier bundle specifying the security.
   */
  private IdentifierBundle _securityKey;
  /**
   * The security.
   */
  private Security _security;
  /**
   * The amount of the position.
   */
  private BigDecimal _quantity;
  /**
   * The counterparty.
   */
  private Counterparty _counterparty;
  /**
   * The trade date.
   */
  private LocalDate _tradeDate;
  /**
   * The trade time with offset.
   */
  private OffsetTime _tradeTime;
  /**
   * The amount paid for the trade at time of purchase.
   */
  private Double _premium;
  /**
   * The currency of the premium payment.
   */
  private Currency _premiumCurrency;  // renamed from misspelled _premuimCurrency (private, no external impact)
  /**
   * The date of the premium payment.
   */
  private LocalDate _premiumDate;
  /**
   * The time of the premium payment.
   */
  private OffsetTime _premiumTime;
  /**
   * The trade attributes, never null.
   */
  private Map<String, String> _attributes = new HashMap<String, String>();

  /**
   * Creates a trade which must be initialized by calling methods.
   */
  public TradeImpl() {
  }

  /**
   * Creates a trade from a position, counterparty, trade instant, and an amount.
   *
   * @param parentPositionId the parent position id, not null
   * @param securityKey the security identifier, not null
   * @param quantity the amount of the trade, not null
   * @param counterparty the counterparty, not null
   * @param tradeDate the trade date, not null
   * @param tradeTime the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // wrap the single identifier in a bundle; the resolved Security is not yet known
    _securityKey = IdentifierBundle.of(securityKey);
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security identified by key, counterparty and trade instant.
   *
   * @param parentPositionId the parent position id, not null
   * @param securityKey the security identifier, not null
   * @param quantity the amount of the trade, not null
   * @param counterparty the counterparty, not null
   * @param tradeDate the trade date, not null
   * @param tradeTime the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    _securityKey = securityKey;
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security, counterparty and trade instant.
   *
   * @param parentPositionId the parent position id, not null
   * @param security the security, not null
   * @param quantity the amount of the trade, not null
   * @param counterparty the counterparty, not null
   * @param tradeDate the trade date, not null
   * @param tradeTime the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(security, "security");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    _security = security;
    // derive the key from the resolved security so both views stay consistent
    _securityKey = security.getIdentifiers();
  }

  /**
   * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
   *
   * @param copyFrom instance to copy fields from, not null
   */
  public TradeImpl(final Trade copyFrom) {
    ArgumentChecker.notNull(copyFrom, "copyFrom");
    // shallow copy except attributes, which are copied into this instance's own map
    _uniqueId = copyFrom.getUniqueId();
    _quantity = copyFrom.getQuantity();
    _counterparty = copyFrom.getCounterparty();
    _tradeDate = copyFrom.getTradeDate();
    _tradeTime = copyFrom.getTradeTime();
    _parentPositionId = copyFrom.getParentPositionId();
    _securityKey = copyFrom.getSecurityKey();
    _security = copyFrom.getSecurity();
    _attributes.putAll(copyFrom.getAttributes());
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the unique identifier of the trade.
   *
   * @return the identifier, not null
   */
  @Override
  public UniqueIdentifier getUniqueId() {
    return _uniqueId;
  }

  /**
   * Sets the unique identifier of the trade.
   *
   * @param identifier the new identifier, not null
   */
  public void setUniqueId(UniqueIdentifier identifier) {
    ArgumentChecker.notNull(identifier, "identifier");
    _uniqueId = identifier;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the parent position unique identifier.
   *
   * @return the parent position unique identifier, not null
   */
  @Override
  public UniqueIdentifier getParentPositionId() {
    return _parentPositionId;
  }

  /**
   * Sets the parent position unique identifier.
   *
   * @param parentPositionId the parent position unique identifier, not null
   */
  public void setParentPositionId(UniqueIdentifier parentPositionId) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    _parentPositionId = parentPositionId;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount of the position held in terms of the security.
   *
   * @return the amount of the position, not null
   */
  @Override
  public BigDecimal getQuantity() {
    return _quantity;
  }

  /**
   * Sets the amount of the position held in terms of the security.
   *
   * @param quantity the amount of the position, not null
   */
  public void setQuantity(BigDecimal quantity) {
    ArgumentChecker.notNull(quantity, "quantity");
    _quantity = quantity;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a key to the security being held.
   * <p>
   * This allows the security to be referenced without actually loading the security itself.
   *
   * @return the security key, may be null if never set
   */
  @Override
  public IdentifierBundle getSecurityKey() {
    return _securityKey;
  }

  /**
   * Sets the key to the security being held.
   *
   * @param securityKey the security key, may be null
   */
  public void setSecurityKey(IdentifierBundle securityKey) {
    _securityKey = securityKey;
  }

  /**
   * Adds an identifier to the security key, creating the bundle if none exists yet.
   *
   * @param securityKeyIdentifier the identifier to add, not null
   */
  public void addSecurityKey(final Identifier securityKeyIdentifier) {
    ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
    if (getSecurityKey() != null) {
      setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
    } else {
      setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
    }
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the security being held, returning {@code null} if it has not been loaded.
   * <p>
   * This method is guaranteed to return a security within an analytic function.
   *
   * @return the security, may be null
   */
  @Override
  public Security getSecurity() {
    return _security;
  }

  /**
   * Sets the security being held.
   * Note: the security key is NOT updated to match; callers keep the two consistent.
   *
   * @param security the security, may be null
   */
  public void setSecurity(Security security) {
    _security = security;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the counterparty.
   *
   * @return the counterparty, may be null
   */
  @Override
  public Counterparty getCounterparty() {
    return _counterparty;
  }

  /**
   * Sets the counterparty.
   *
   * @param counterparty the counterparty, may be null
   */
  public void setCounterparty(Counterparty counterparty) {
    _counterparty = counterparty;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade date.
   *
   * @return the trade date, may be null
   */
  @Override
  public LocalDate getTradeDate() {
    return _tradeDate;
  }

  /**
   * Sets the trade date.
   *
   * @param tradeDate the trade date, may be null
   */
  public void setTradeDate(LocalDate tradeDate) {
    _tradeDate = tradeDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade time.
   *
   * @return the trade time, may be null
   */
  @Override
  public OffsetTime getTradeTime() {
    return _tradeTime;
  }

  /**
   * Sets the trade time.
   *
   * @param tradeTime the trade time, may be null
   */
  public void setTradeTime(OffsetTime tradeTime) {
    _tradeTime = tradeTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount paid for the trade at time of purchase.
   *
   * @return the premium, may be null
   */
  @Override
  public Double getPremium() {
    return _premium;
  }

  /**
   * Sets the amount paid for the trade at time of purchase.
   *
   * @param premium the premium, may be null
   */
  public void setPremium(final Double premium) {
    _premium = premium;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the currency of the premium payment.
   *
   * @return the premium currency, may be null
   */
  @Override
  public Currency getPremiumCurrency() {
    return _premiumCurrency;
  }

  /**
   * Sets the currency of the premium payment.
   *
   * @param premiumCurrency the premium currency, may be null
   */
  public void setPremiumCurrency(Currency premiumCurrency) {
    _premiumCurrency = premiumCurrency;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the date of the premium payment.
   *
   * @return the premium date, may be null
   */
  @Override
  public LocalDate getPremiumDate() {
    return _premiumDate;
  }

  /**
   * Sets the date of the premium payment.
   *
   * @param premiumDate the premium date, may be null
   */
  public void setPremiumDate(LocalDate premiumDate) {
    _premiumDate = premiumDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the time of the premium payment.
   *
   * @return the premium time, may be null
   */
  @Override
  public OffsetTime getPremiumTime() {
    return _premiumTime;
  }

  /**
   * Sets the time of the premium payment.
   *
   * @param premiumTime the premium time, may be null
   */
  public void setPremiumTime(OffsetTime premiumTime) {
    _premiumTime = premiumTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade attributes as an unmodifiable view.
   *
   * @return the attributes, not null
   */
  @Override
  public Map<String, String> getAttributes() {
    return Collections.unmodifiableMap(_attributes);
  }

  /**
   * Adds a single attribute.
   *
   * @param key the attribute key, not null
   * @param value the attribute value, not null
   */
  public void addAttribute(String key, String value) {
    ArgumentChecker.notNull(key, "key");
    ArgumentChecker.notNull(value, "value");
    _attributes.put(key, value);
  }

  /**
   * Adds each entry of the given map to the attributes.
   * <p>
   * NOTE(review): existing entries are NOT cleared first, so this merges rather
   * than replaces - confirm this is the intended "set" semantics.
   *
   * @param attributes the attributes to add, not null
   */
  public void setAttributes(Map<String, String> attributes) {
    ArgumentChecker.notNull(attributes, "attributes");
    for (Entry<String, String> entry : attributes.entrySet()) {
      addAttribute(entry.getKey(), entry.getValue());
    }
  }

  //-------------------------------------------------------------------------
  /**
   * Tests this trade for equality with another.
   * <p>
   * NOTE(review): tradeTime, uniqueId and parentPositionId are not compared -
   * an earlier revision did compare tradeTime, so confirm its omission is intentional.
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj instanceof TradeImpl) {
      TradeImpl other = (TradeImpl) obj;
      return new EqualsBuilder()
          .append(getQuantity(), other.getQuantity())
          .append(getCounterparty(), other.getCounterparty())
          .append(getTradeDate(), other.getTradeDate())
          .append(getSecurityKey(), other.getSecurityKey())
          .append(getSecurity(), other.getSecurity())
          .append(getPremium(), other.getPremium())
          .append(getPremiumCurrency(), other.getPremiumCurrency())
          .append(getPremiumDate(), other.getPremiumDate())
          .append(getPremiumTime(), other.getPremiumTime())
          .append(getAttributes(), other.getAttributes())
          .isEquals();
    }
    return false;
  }

  /**
   * Builds a hash over the same fields as {@link #equals(Object)}.
   */
  @Override
  public int hashCode() {
    return new HashCodeBuilder()
        .append(getQuantity())
        .append(getCounterparty())
        .append(getTradeDate())
        .append(getSecurityKey())
        .append(getSecurity())
        .append(getPremium())
        .append(getPremiumCurrency())  // was appended twice - copy/paste duplicate removed
        .append(getPremiumDate())
        .append(getPremiumTime())
        .append(getAttributes())
        .toHashCode();
  }

  /**
   * Renders the trade as, e.g.,
   * {@code Trade[<id>, <qty> <security|key> PositionID:<pos> <cpty> <date> <time>]}.
   */
  @Override
  public String toString() {
    return new StrBuilder(256)
        .append("Trade[")
        .append(getUniqueId())
        .append(", ")
        .append(getQuantity())
        .append(' ')
        .append(getSecurity() != null ? getSecurity() : getSecurityKey())
        .append(" PositionID:")
        .append(getParentPositionId())
        .append(" ")
        .append(getCounterparty())
        .append(" ")
        .append(getTradeDate())
        .append(" ")
        .append(getTradeTime())
        .append(']')
        .toString();
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier bundle specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* The amount paid for the trade at time of purchase.
*/
private Double _premium;
/**
* The currency of the premium payment.
* NOTE(review): field name is misspelled ("premuim"); private only, so a rename would be internal.
*/
private Currency _premuimCurrency;
/**
* The date of the premium payment.
*/
private LocalDate _premiumDate;
/**
* The time of the premium payment.
*/
private OffsetTime _premiumTime;
/**
* The trade attributes, never null.
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
// tradeTime is deliberately not null-checked: it is documented as optional
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
// wrap the single identifier in a bundle; the resolved Security is not yet known
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and trade instant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and trade instant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
// derive the key from the resolved security so both views stay consistent
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
// shallow copy except attributes, which are copied into this instance's own map
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key, may be null if never set
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
// null is accepted; addSecurityKey below handles the null case explicitly
_securityKey = securityKey;
}
/**
 * Adds an identifier to the security key, creating the bundle if none exists yet.
 *
 * @param securityKeyIdentifier the identifier to add, not null
 */
public void addSecurityKey(final Identifier securityKeyIdentifier) {
  ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
  final IdentifierBundle current = getSecurityKey();
  if (current == null) {
    // no bundle yet - start one from the single identifier
    setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
  } else {
    setSecurityKey(current.withIdentifier(securityKeyIdentifier));
  }
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security, may be null
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
* Note: the security key is NOT updated to match; callers keep the two consistent.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
/**
* Gets the amount paid for the trade at time of purchase.
*
* @return the premium, may be null
*/
@Override
public Double getPremium() {
return _premium;
}
/**
* Sets the amount paid for the trade at time of purchase.
*
* @param premium the premium, may be null
*/
public void setPremium(final Double premium) {
_premium = premium;
}
//-------------------------------------------------------------------------
/**
* Gets the currency of the premium payment.
*
* @return the premium currency, may be null
*/
@Override
public Currency getPremiumCurrency() {
return _premuimCurrency;
}
/**
* Sets the currency of the premium payment.
*
* @param premuimCurrency the premium currency, may be null
*/
public void setPremiumCurrency(Currency premuimCurrency) {
_premuimCurrency = premuimCurrency;
}
//-------------------------------------------------------------------------
/**
* Gets the date of the premium payment.
*
* @return the premium date, may be null
*/
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
/**
* Sets the date of the premium payment.
*
* @param premiumDate the premium date, may be null
*/
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
/**
* Gets the time of the premium payment.
*
* @return the premium time, may be null
*/
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
/**
* Sets the time of the premium payment.
*
* @param premiumTime the premium time, may be null
*/
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
/**
* Gets the trade attributes as an unmodifiable view.
*
* @return the attributes, not null
*/
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
/**
* Adds a single attribute.
*
* @param key the attribute key, not null
* @param value the attribute value, not null
*/
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
/**
* Adds each entry of the given map to the attributes.
* NOTE(review): existing entries are NOT cleared first, so this merges rather
* than replaces - confirm this is the intended "set" semantics.
*
* @param attributes the attributes to add, not null
*/
public void setAttributes(Map<String, String> attributes) {
ArgumentChecker.notNull(attributes, "attributes");
for (Entry<String, String> entry : attributes.entrySet()) {
addAttribute(entry.getKey(), entry.getValue());
}
}
//-------------------------------------------------------------------------
/**
 * Tests equality on the trade fields (quantity, counterparty, dates, security
 * data, premium data and attributes). The unique identifier and parent
 * position identifier are not compared.
 *
 * @param obj the object to compare to, may be null
 * @return true if equal
 */
@Override
public boolean equals(Object obj) {
  // identity check short-circuits the field-by-field comparison
  if (this == obj) {
    return true;
  }
  // guard clause: anything that is not a TradeImpl (including null) is unequal
  if (!(obj instanceof TradeImpl)) {
    return false;
  }
  final TradeImpl other = (TradeImpl) obj;
  final EqualsBuilder eq = new EqualsBuilder();
  eq.append(getQuantity(), other.getQuantity());
  eq.append(getCounterparty(), other.getCounterparty());
  eq.append(getTradeDate(), other.getTradeDate());
  eq.append(getSecurityKey(), other.getSecurityKey());
  eq.append(getSecurity(), other.getSecurity());
  eq.append(getPremium(), other.getPremium());
  eq.append(getPremiumCurrency(), other.getPremiumCurrency());
  eq.append(getPremiumDate(), other.getPremiumDate());
  eq.append(getPremiumTime(), other.getPremiumTime());
  eq.append(getAttributes(), other.getAttributes());
  return eq.isEquals();
}
/**
 * Hash code built from the same fields that {@link #equals(Object)} compares,
 * satisfying the equals/hashCode contract.
 * <p>
 * Fix: the premium currency was previously appended twice - a copy-paste
 * defect that cost an extra builder step on every call; the duplicate is
 * removed. NOTE(review): tradeTime is excluded here and in equals - confirm
 * that omission is intended.
 *
 * @return the hash code
 */
@Override
public int hashCode() {
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
/**
 * Returns a human-readable summary of the trade for debugging.
 *
 * @return the summary string, not null
 */
@Override
public String toString() {
  final StrBuilder buf = new StrBuilder(256);
  buf.append("Trade[").append(getUniqueId()).append(", ");
  buf.append(getQuantity()).append(' ');
  // show the resolved security when available, otherwise fall back to the key
  buf.append(getSecurity() != null ? getSecurity() : getSecurityKey());
  buf.append(" PositionID:").append(getParentPositionId());
  buf.append(" ").append(getCounterparty());
  buf.append(" ").append(getTradeDate());
  buf.append(" ").append(getTradeTime());
  buf.append(']');
  return buf.toString();
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.CompareUtils;
import com.opengamma.util.money.Currency;
/**
 * A simple mutable implementation of {@code Trade}.
 */
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {

  /** Serialization version. */
  private static final long serialVersionUID = 1L;

  /**
   * The unique identifier of the trade.
   */
  private UniqueIdentifier _uniqueId;
  /**
   * The unique identifier of the parent position.
   */
  private UniqueIdentifier _parentPositionId;
  /**
   * The identifier specifying the security.
   */
  private IdentifierBundle _securityKey;
  /**
   * The security.
   */
  private Security _security;
  /**
   * The amount of the position.
   */
  private BigDecimal _quantity;
  /**
   * The counterparty.
   */
  private Counterparty _counterparty;
  /**
   * The trade date.
   */
  private LocalDate _tradeDate;
  /**
   * The trade time with offset.
   */
  private OffsetTime _tradeTime;
  /**
   * Amount paid for trade at time of purchase.
   */
  private Double _premium;
  /**
   * Currency of payment at time of purchase.
   */
  private Currency _premiumCurrency;
  /**
   * Date of premium payment.
   */
  private LocalDate _premiumDate;
  /**
   * Time of premium payment.
   */
  private OffsetTime _premiumTime;

  /**
   * Creates a trade which must be initialized by calling methods.
   */
  public TradeImpl() {
  }

  /**
   * Creates a trade from a position, counterparty, trade instant, and an amount.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // the single identifier is wrapped in a bundle; the security is not resolved here
    _securityKey = IdentifierBundle.of(securityKey);
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    _securityKey = securityKey;
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param security  the security, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(security, "security");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // the resolved security is held directly and its key derived from it
    _security = security;
    _securityKey = security.getIdentifiers();
  }

  /**
   * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
   *
   * @param copyFrom  instance to copy fields from, not null
   */
  public TradeImpl(final Trade copyFrom) {
    ArgumentChecker.notNull(copyFrom, "copyFrom");
    _uniqueId = copyFrom.getUniqueId();
    _quantity = copyFrom.getQuantity();
    _counterparty = copyFrom.getCounterparty();
    _tradeDate = copyFrom.getTradeDate();
    _tradeTime = copyFrom.getTradeTime();
    _parentPositionId = copyFrom.getParentPositionId();
    _securityKey = copyFrom.getSecurityKey();
    _security = copyFrom.getSecurity();
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the unique identifier of the trade.
   *
   * @return the identifier, not null
   */
  @Override
  public UniqueIdentifier getUniqueId() {
    return _uniqueId;
  }

  /**
   * Sets the unique identifier of the trade.
   *
   * @param identifier  the new identifier, not null
   */
  public void setUniqueId(UniqueIdentifier identifier) {
    ArgumentChecker.notNull(identifier, "identifier");
    _uniqueId = identifier;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the parent position unique identifier.
   *
   * @return the parent position unique identifier, not null
   */
  @Override
  public UniqueIdentifier getParentPositionId() {
    return _parentPositionId;
  }

  /**
   * Sets the parent position unique identifier.
   *
   * @param parentPositionId  the parent position unique identifier, not null
   */
  public void setParentPositionId(UniqueIdentifier parentPositionId) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    _parentPositionId = parentPositionId;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount of the position held in terms of the security.
   *
   * @return the amount of the position, not null
   */
  @Override
  public BigDecimal getQuantity() {
    return _quantity;
  }

  /**
   * Sets the amount of the position held in terms of the security.
   *
   * @param quantity  the amount of the position, not null
   */
  public void setQuantity(BigDecimal quantity) {
    ArgumentChecker.notNull(quantity, "quantity");
    _quantity = quantity;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a key to the security being held.
   * <p>
   * This allows the security to be referenced without actually loading the security itself.
   *
   * @return the security key
   */
  @Override
  public IdentifierBundle getSecurityKey() {
    return _securityKey;
  }

  /**
   * Sets the key to the security being held.
   *
   * @param securityKey  the security key, may be null
   */
  public void setSecurityKey(IdentifierBundle securityKey) {
    _securityKey = securityKey;
  }

  /**
   * Adds an identifier to the security key.
   *
   * @param securityKeyIdentifier  the identifier to add, not null
   */
  public void addSecurityKey(final Identifier securityKeyIdentifier) {
    ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
    setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the security being held, returning {@code null} if it has not been loaded.
   * <p>
   * This method is guaranteed to return a security within an analytic function.
   *
   * @return the security
   */
  @Override
  public Security getSecurity() {
    return _security;
  }

  /**
   * Sets the security being held.
   *
   * @param security  the security, may be null
   */
  public void setSecurity(Security security) {
    _security = security;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the counterparty.
   *
   * @return the counterparty, may be null
   */
  @Override
  public Counterparty getCounterparty() {
    return _counterparty;
  }

  /**
   * Sets the counterparty.
   *
   * @param counterparty  the counterparty, may be null
   */
  public void setCounterparty(Counterparty counterparty) {
    _counterparty = counterparty;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade date.
   *
   * @return the trade date, may be null
   */
  @Override
  public LocalDate getTradeDate() {
    return _tradeDate;
  }

  /**
   * Sets the trade date.
   *
   * @param tradeDate  the trade date, may be null
   */
  public void setTradeDate(LocalDate tradeDate) {
    _tradeDate = tradeDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade time.
   *
   * @return the trade time, may be null
   */
  @Override
  public OffsetTime getTradeTime() {
    return _tradeTime;
  }

  /**
   * Sets the trade time.
   *
   * @param tradeTime  the trade time, may be null
   */
  public void setTradeTime(OffsetTime tradeTime) {
    _tradeTime = tradeTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the premium paid for the trade at the time of purchase.
   *
   * @return the premium amount, may be null
   */
  @Override
  public Double getPremium() {
    return _premium;
  }

  /**
   * Sets the premium paid for the trade.
   *
   * @param premium  the premium amount, may be null
   */
  public void setPremium(final Double premium) {
    _premium = premium;
  }

  /**
   * Sets the premium paid for the trade.
   *
   * @param premium  the premium amount, may be null
   * @deprecated the method name is misspelt, use {@link #setPremium(Double)};
   *  this delegate is retained for backward compatibility with existing callers
   */
  @Deprecated
  public void setPremuim(final Double premium) {
    setPremium(premium);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the currency of the premium payment.
   *
   * @return the premium currency, may be null
   */
  @Override
  public Currency getPremiumCurrency() {
    return _premiumCurrency;
  }

  /**
   * Sets the currency of the premium payment.
   *
   * @param premiumCurrency  the premium currency, may be null
   */
  public void setPremiumCurrency(Currency premiumCurrency) {
    _premiumCurrency = premiumCurrency;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the date of the premium payment.
   *
   * @return the premium date, may be null
   */
  @Override
  public LocalDate getPremiumDate() {
    return _premiumDate;
  }

  /**
   * Sets the date of the premium payment.
   *
   * @param premiumDate  the premium date, may be null
   */
  public void setPremiumDate(LocalDate premiumDate) {
    _premiumDate = premiumDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the time of the premium payment.
   *
   * @return the premium time, may be null
   */
  @Override
  public OffsetTime getPremiumTime() {
    return _premiumTime;
  }

  /**
   * Sets the time of the premium payment.
   *
   * @param premiumTime  the premium time, may be null
   */
  public void setPremiumTime(OffsetTime premiumTime) {
    _premiumTime = premiumTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Tests equality on all trade fields except the unique identifier and the
   * parent position identifier. The quantity is compared null-safely via
   * CompareUtils.compareWithNull (presumably ordering-based, so BigDecimal
   * scale differences would be ignored - confirm against CompareUtils).
   *
   * @param obj  the object to compare to, may be null
   * @return true if equal
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj instanceof TradeImpl) {
      TradeImpl other = (TradeImpl) obj;
      return CompareUtils.compareWithNull(getQuantity(), other.getQuantity()) == 0 &&
        ObjectUtils.equals(getCounterparty(), other.getCounterparty()) &&
        ObjectUtils.equals(getTradeDate(), other.getTradeDate()) &&
        ObjectUtils.equals(getTradeTime(), other.getTradeTime()) &&
        ObjectUtils.equals(getSecurityKey(), other.getSecurityKey()) &&
        ObjectUtils.equals(getSecurity(), other.getSecurity()) &&
        ObjectUtils.equals(getPremium(), other.getPremium()) &&
        ObjectUtils.equals(getPremiumCurrency(), other.getPremiumCurrency()) &&
        ObjectUtils.equals(getPremiumDate(), other.getPremiumDate()) &&
        ObjectUtils.equals(getPremiumTime(), other.getPremiumTime());
    }
    return false;
  }

  /**
   * Hash code over the main trade fields. The premium fields are excluded:
   * that is valid under the equals/hashCode contract (equal objects still get
   * equal hashes) but weakens hashing for trades differing only in premium data.
   *
   * @return the hash code
   */
  @Override
  public int hashCode() {
    int hashCode = 65;
    // null-safe: a default-constructed trade has a null quantity, and the
    // previous direct getQuantity().hashCode() call threw NullPointerException;
    // for non-null quantities the resulting hash is unchanged
    hashCode += ObjectUtils.hashCode(getQuantity());
    hashCode += hashCode ^ ObjectUtils.hashCode(getCounterparty()) ^ ObjectUtils.hashCode(getTradeDate()) ^ ObjectUtils.hashCode(getTradeTime());
    if (getSecurity() != null) {
      hashCode *= 31;
      hashCode += getSecurity().hashCode();
    }
    hashCode *= 31;
    hashCode += ObjectUtils.hashCode(getSecurityKey());
    return hashCode;
  }

  /**
   * Returns a human-readable summary of the trade for debugging.
   *
   * @return the summary string, not null
   */
  @Override
  public String toString() {
    return new StrBuilder(256)
        .append("Trade[")
        .append(getUniqueId())
        .append(", ")
        .append(getQuantity())
        .append(' ')
        .append(getSecurity() != null ? getSecurity() : getSecurityKey())
        .append(" PositionID:")
        .append(getParentPositionId())
        .append(" ")
        .append(getCounterparty())
        .append(" ")
        .append(getTradeDate())
        .append(" ")
        .append(getTradeTime())
        .append(']')
        .toString();
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.CompareUtils;
import com.opengamma.util.money.Currency;
/**
 * A simple mutable implementation of {@code Trade}.
 */
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {

  /** Serialization version. */
  private static final long serialVersionUID = 1L;

  /**
   * The unique identifier of the trade.
   */
  private UniqueIdentifier _uniqueId;
  /**
   * The unique identifier of the parent position.
   */
  private UniqueIdentifier _parentPositionId;
  /**
   * The identifier specifying the security.
   */
  private IdentifierBundle _securityKey;
  /**
   * The security.
   */
  private Security _security;
  /**
   * The amount of the position.
   */
  private BigDecimal _quantity;
  /**
   * The counterparty.
   */
  private Counterparty _counterparty;
  /**
   * The trade date.
   */
  private LocalDate _tradeDate;
  /**
   * The trade time with offset.
   */
  private OffsetTime _tradeTime;
  /**
   * Amount paid for trade at time of purchase.
   */
  private Double _premium;
  /**
   * Currency of payment at time of purchase.
   */
  private Currency _premiumCurrency;
  /**
   * Date of premium payment.
   */
  private LocalDate _premiumDate;
  /**
   * Time of premium payment.
   */
  private OffsetTime _premiumTime;

  /**
   * Creates a trade which must be initialized by calling methods.
   */
  public TradeImpl() {
  }

  /**
   * Creates a trade from a position, counterparty, trade instant, and an amount.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // the single identifier is wrapped in a bundle; the security is not resolved here
    _securityKey = IdentifierBundle.of(securityKey);
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    _securityKey = securityKey;
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param security  the security, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(security, "security");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // the resolved security is held directly and its key derived from it
    _security = security;
    _securityKey = security.getIdentifiers();
  }

  /**
   * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
   *
   * @param copyFrom  instance to copy fields from, not null
   */
  public TradeImpl(final Trade copyFrom) {
    ArgumentChecker.notNull(copyFrom, "copyFrom");
    _uniqueId = copyFrom.getUniqueId();
    _quantity = copyFrom.getQuantity();
    _counterparty = copyFrom.getCounterparty();
    _tradeDate = copyFrom.getTradeDate();
    _tradeTime = copyFrom.getTradeTime();
    _parentPositionId = copyFrom.getParentPositionId();
    _securityKey = copyFrom.getSecurityKey();
    _security = copyFrom.getSecurity();
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the unique identifier of the trade.
   *
   * @return the identifier, not null
   */
  @Override
  public UniqueIdentifier getUniqueId() {
    return _uniqueId;
  }

  /**
   * Sets the unique identifier of the trade.
   *
   * @param identifier  the new identifier, not null
   */
  public void setUniqueId(UniqueIdentifier identifier) {
    ArgumentChecker.notNull(identifier, "identifier");
    _uniqueId = identifier;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the parent position unique identifier.
   *
   * @return the parent position unique identifier, not null
   */
  @Override
  public UniqueIdentifier getParentPositionId() {
    return _parentPositionId;
  }

  /**
   * Sets the parent position unique identifier.
   *
   * @param parentPositionId  the parent position unique identifier, not null
   */
  public void setParentPositionId(UniqueIdentifier parentPositionId) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    _parentPositionId = parentPositionId;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount of the position held in terms of the security.
   *
   * @return the amount of the position, not null
   */
  @Override
  public BigDecimal getQuantity() {
    return _quantity;
  }

  /**
   * Sets the amount of the position held in terms of the security.
   *
   * @param quantity  the amount of the position, not null
   */
  public void setQuantity(BigDecimal quantity) {
    ArgumentChecker.notNull(quantity, "quantity");
    _quantity = quantity;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a key to the security being held.
   * <p>
   * This allows the security to be referenced without actually loading the security itself.
   *
   * @return the security key
   */
  @Override
  public IdentifierBundle getSecurityKey() {
    return _securityKey;
  }

  /**
   * Sets the key to the security being held.
   *
   * @param securityKey  the security key, may be null
   */
  public void setSecurityKey(IdentifierBundle securityKey) {
    _securityKey = securityKey;
  }

  /**
   * Adds an identifier to the security key.
   *
   * @param securityKeyIdentifier  the identifier to add, not null
   */
  public void addSecurityKey(final Identifier securityKeyIdentifier) {
    ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
    setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the security being held, returning {@code null} if it has not been loaded.
   * <p>
   * This method is guaranteed to return a security within an analytic function.
   *
   * @return the security
   */
  @Override
  public Security getSecurity() {
    return _security;
  }

  /**
   * Sets the security being held.
   *
   * @param security  the security, may be null
   */
  public void setSecurity(Security security) {
    _security = security;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the counterparty.
   *
   * @return the counterparty, may be null
   */
  @Override
  public Counterparty getCounterparty() {
    return _counterparty;
  }

  /**
   * Sets the counterparty.
   *
   * @param counterparty  the counterparty, may be null
   */
  public void setCounterparty(Counterparty counterparty) {
    _counterparty = counterparty;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade date.
   *
   * @return the trade date, may be null
   */
  @Override
  public LocalDate getTradeDate() {
    return _tradeDate;
  }

  /**
   * Sets the trade date.
   *
   * @param tradeDate  the trade date, may be null
   */
  public void setTradeDate(LocalDate tradeDate) {
    _tradeDate = tradeDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade time.
   *
   * @return the trade time, may be null
   */
  @Override
  public OffsetTime getTradeTime() {
    return _tradeTime;
  }

  /**
   * Sets the trade time.
   *
   * @param tradeTime  the trade time, may be null
   */
  public void setTradeTime(OffsetTime tradeTime) {
    _tradeTime = tradeTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the premium paid for the trade at the time of purchase.
   *
   * @return the premium amount, may be null
   */
  @Override
  public Double getPremium() {
    return _premium;
  }

  /**
   * Sets the premium paid for the trade.
   *
   * @param premium  the premium amount, may be null
   */
  public void setPremium(final Double premium) {
    _premium = premium;
  }

  /**
   * Sets the premium paid for the trade.
   *
   * @param premium  the premium amount, may be null
   * @deprecated the method name is misspelt, use {@link #setPremium(Double)};
   *  this delegate is retained for backward compatibility with existing callers
   */
  @Deprecated
  public void setPremuim(final Double premium) {
    setPremium(premium);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the currency of the premium payment.
   *
   * @return the premium currency, may be null
   */
  @Override
  public Currency getPremiumCurrency() {
    return _premiumCurrency;
  }

  /**
   * Sets the currency of the premium payment.
   *
   * @param premiumCurrency  the premium currency, may be null
   */
  public void setPremiumCurrency(Currency premiumCurrency) {
    _premiumCurrency = premiumCurrency;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the date of the premium payment.
   *
   * @return the premium date, may be null
   */
  @Override
  public LocalDate getPremiumDate() {
    return _premiumDate;
  }

  /**
   * Sets the date of the premium payment.
   *
   * @param premiumDate  the premium date, may be null
   */
  public void setPremiumDate(LocalDate premiumDate) {
    _premiumDate = premiumDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the time of the premium payment.
   *
   * @return the premium time, may be null
   */
  @Override
  public OffsetTime getPremiumTime() {
    return _premiumTime;
  }

  /**
   * Sets the time of the premium payment.
   *
   * @param premiumTime  the premium time, may be null
   */
  public void setPremiumTime(OffsetTime premiumTime) {
    _premiumTime = premiumTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Tests equality on all trade fields except the unique identifier and the
   * parent position identifier. The quantity is compared null-safely via
   * CompareUtils.compareWithNull (presumably ordering-based, so BigDecimal
   * scale differences would be ignored - confirm against CompareUtils).
   *
   * @param obj  the object to compare to, may be null
   * @return true if equal
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj instanceof TradeImpl) {
      TradeImpl other = (TradeImpl) obj;
      return CompareUtils.compareWithNull(getQuantity(), other.getQuantity()) == 0 &&
        ObjectUtils.equals(getCounterparty(), other.getCounterparty()) &&
        ObjectUtils.equals(getTradeDate(), other.getTradeDate()) &&
        ObjectUtils.equals(getTradeTime(), other.getTradeTime()) &&
        ObjectUtils.equals(getSecurityKey(), other.getSecurityKey()) &&
        ObjectUtils.equals(getSecurity(), other.getSecurity()) &&
        ObjectUtils.equals(getPremium(), other.getPremium()) &&
        ObjectUtils.equals(getPremiumCurrency(), other.getPremiumCurrency()) &&
        ObjectUtils.equals(getPremiumDate(), other.getPremiumDate()) &&
        ObjectUtils.equals(getPremiumTime(), other.getPremiumTime());
    }
    return false;
  }

  /**
   * Hash code over the main trade fields. The premium fields are excluded:
   * that is valid under the equals/hashCode contract (equal objects still get
   * equal hashes) but weakens hashing for trades differing only in premium data.
   *
   * @return the hash code
   */
  @Override
  public int hashCode() {
    int hashCode = 65;
    // null-safe: a default-constructed trade has a null quantity, and the
    // previous direct getQuantity().hashCode() call threw NullPointerException;
    // for non-null quantities the resulting hash is unchanged
    hashCode += ObjectUtils.hashCode(getQuantity());
    hashCode += hashCode ^ ObjectUtils.hashCode(getCounterparty()) ^ ObjectUtils.hashCode(getTradeDate()) ^ ObjectUtils.hashCode(getTradeTime());
    if (getSecurity() != null) {
      hashCode *= 31;
      hashCode += getSecurity().hashCode();
    }
    hashCode *= 31;
    hashCode += ObjectUtils.hashCode(getSecurityKey());
    return hashCode;
  }

  /**
   * Returns a human-readable summary of the trade for debugging.
   *
   * @return the summary string, not null
   */
  @Override
  public String toString() {
    return new StrBuilder(256)
        .append("Trade[")
        .append(getUniqueId())
        .append(", ")
        .append(getQuantity())
        .append(' ')
        .append(getSecurity() != null ? getSecurity() : getSecurityKey())
        .append(" PositionID:")
        .append(getParentPositionId())
        .append(" ")
        .append(getCounterparty())
        .append(" ")
        .append(getTradeDate())
        .append(" ")
        .append(getTradeTime())
        .append(']')
        .toString();
  }

}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
* Currency of payment at time of purchase
*/
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
 * Creates a trade from a position identifier, an amount of a resolved
 * security, a counterparty and a trade instant.
 *
 * @param parentPositionId  the parent position id, not null
 * @param security  the security, not null
 * @param quantity  the amount of the trade, not null
 * @param counterparty  the counterparty, not null
 * @param tradeDate  the trade date, not null
 * @param tradeTime  the trade time with offset, may be null
 */
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
  // Null-checks kept in the original order so the first failing argument
  // produces the same exception message as before.
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  ArgumentChecker.notNull(security, "security");
  ArgumentChecker.notNull(quantity, "quantity");
  ArgumentChecker.notNull(counterparty, "counterparty");
  ArgumentChecker.notNull(tradeDate, "tradeDate");
  _parentPositionId = parentPositionId;
  // Keep both the resolved security and its identifier bundle.
  _security = security;
  _securityKey = security.getIdentifiers();
  // Trade economics.
  _quantity = quantity;
  _counterparty = counterparty;
  _tradeDate = tradeDate;
  _tradeTime = tradeTime;
}
/**
 * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
 *
 * @param copyFrom  instance to copy fields from, not null
 */
public TradeImpl(final Trade copyFrom) {
  ArgumentChecker.notNull(copyFrom, "copyFrom");
  _uniqueId = copyFrom.getUniqueId();
  _quantity = copyFrom.getQuantity();
  _counterparty = copyFrom.getCounterparty();
  _tradeDate = copyFrom.getTradeDate();
  _tradeTime = copyFrom.getTradeTime();
  _parentPositionId = copyFrom.getParentPositionId();
  _securityKey = copyFrom.getSecurityKey();
  _security = copyFrom.getSecurity();
  // FIX: copy the premium details as well; previously they were silently
  // dropped, so a copy made with this constructor compared unequal (via
  // equals(), which includes the premium fields) to its source whenever a
  // premium was set.
  _premium = copyFrom.getPremium();
  _premiumCurrency = copyFrom.getPremiumCurrency();
  _premiumDate = copyFrom.getPremiumDate();
  _premiumTime = copyFrom.getPremiumTime();
  _attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
 * Gets the unique identifier of the trade.
 *
 * @return the identifier, not null
 */
@Override
public UniqueIdentifier getUniqueId() {
  return _uniqueId;
}
/**
 * Sets the unique identifier of the trade.
 *
 * @param identifier  the new identifier, not null
 */
public void setUniqueId(UniqueIdentifier identifier) {
  ArgumentChecker.notNull(identifier, "identifier");
  _uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
 * Gets the parent position unique identifier.
 *
 * @return the parent position unique identifier, not null
 */
@Override
public UniqueIdentifier getParentPositionId() {
  return _parentPositionId;
}
/**
 * Sets the parent position unique identifier.
 *
 * @param parentPositionId  the parent position unique identifier, not null
 */
public void setParentPositionId(UniqueIdentifier parentPositionId) {
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  _parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
 * Gets the amount of the position held in terms of the security.
 *
 * @return the amount of the position, not null
 */
@Override
public BigDecimal getQuantity() {
  return _quantity;
}
/**
 * Sets the amount of the position held in terms of the security.
 *
 * @param quantity  the amount of the position, not null
 */
public void setQuantity(BigDecimal quantity) {
  ArgumentChecker.notNull(quantity, "quantity");
  _quantity = quantity;
}
//-------------------------------------------------------------------------
/**
 * Gets a key to the security being held.
 * <p>
 * This allows the security to be referenced without actually loading the security itself.
 *
 * @return the security key
 */
@Override
public IdentifierBundle getSecurityKey() {
  return _securityKey;
}
/**
 * Sets the key to the security being held.
 * <p>
 * Passing {@code null} clears the key entirely.
 *
 * @param securityKey  the security key, may be null
 */
public void setSecurityKey(IdentifierBundle securityKey) {
  _securityKey = securityKey;
}
/**
 * Adds an identifier to the security key.
 * <p>
 * If no key is currently set, a new single-identifier bundle is created;
 * otherwise the identifier is merged into the existing bundle.
 *
 * @param securityKeyIdentifier  the identifier to add, not null
 */
public void addSecurityKey(final Identifier securityKeyIdentifier) {
  ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
  if (getSecurityKey() != null) {
    setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
  } else {
    setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
  }
}
//-------------------------------------------------------------------------
/**
 * Gets the security being held, returning {@code null} if it has not been loaded.
 * <p>
 * This method is guaranteed to return a security within an analytic function.
 *
 * @return the security
 */
@Override
public Security getSecurity() {
  return _security;
}
/**
 * Sets the security being held.
 * <p>
 * Note: this does not update the security key; callers must keep the two in sync.
 *
 * @param security  the security, may be null
 */
public void setSecurity(Security security) {
  _security = security;
}
//-------------------------------------------------------------------------
/**
 * Gets the counterparty.
 * <p>
 * NOTE(review): documented as "may be null", although every argument-taking
 * constructor requires a non-null counterparty — null is only possible via
 * the no-arg constructor or this setter; confirm which contract is intended.
 *
 * @return the counterparty, may be null
 */
@Override
public Counterparty getCounterparty() {
  return _counterparty;
}
/**
 * Sets the counterparty.
 *
 * @param counterparty  the counterparty, may be null
 */
public void setCounterparty(Counterparty counterparty) {
  _counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
 * Gets the trade date.
 *
 * @return the trade date, may be null
 */
@Override
public LocalDate getTradeDate() {
  return _tradeDate;
}
/**
 * Sets the trade date.
 *
 * @param tradeDate  the trade date, may be null
 */
public void setTradeDate(LocalDate tradeDate) {
  _tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
 * Gets the trade time.
 *
 * @return the trade time, may be null
 */
@Override
public OffsetTime getTradeTime() {
  return _tradeTime;
}
/**
 * Sets the trade time.
 *
 * @param tradeTime  the trade time, may be null
 */
public void setTradeTime(OffsetTime tradeTime) {
  _tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
/**
 * Gets the amount paid for the trade at time of purchase.
 *
 * @return the premium, may be null
 */
@Override
public Double getPremium() {
  return _premium;
}
/**
 * Sets the amount paid for the trade at time of purchase.
 *
 * @param premium  the premium, may be null
 */
public void setPremium(final Double premium) {
  _premium = premium;
}
//-------------------------------------------------------------------------
/**
 * Gets the currency of the premium payment.
 *
 * @return the premium currency, may be null
 */
@Override
public Currency getPremiumCurrency() {
  return _premiumCurrency;
}
/**
 * Sets the currency of the premium payment.
 *
 * @param premiumCurrency  the premium currency, may be null
 */
public void setPremiumCurrency(Currency premiumCurrency) {
  _premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
/**
 * Gets the date of the premium payment.
 *
 * @return the premium date, may be null
 */
@Override
public LocalDate getPremiumDate() {
  return _premiumDate;
}
/**
 * Sets the date of the premium payment.
 *
 * @param premiumDate  the premium date, may be null
 */
public void setPremiumDate(LocalDate premiumDate) {
  _premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
/**
 * Gets the time of the premium payment.
 *
 * @return the premium time, may be null
 */
@Override
public OffsetTime getPremiumTime() {
  return _premiumTime;
}
/**
 * Sets the time of the premium payment.
 *
 * @param premiumTime  the premium time, may be null
 */
public void setPremiumTime(OffsetTime premiumTime) {
  _premiumTime = premiumTime;
}
/**
 * Gets an unmodifiable view of the trade attributes.
 *
 * @return the attributes, not null
 */
@Override
public Map<String, String> getAttributes() {
  return Collections.unmodifiableMap(_attributes);
}
/**
 * Adds a single attribute, replacing any existing value for the key.
 *
 * @param key  the attribute key, not null
 * @param value  the attribute value, not null
 */
public void addAttribute(String key, String value) {
  ArgumentChecker.notNull(key, "key");
  ArgumentChecker.notNull(value, "value");
  _attributes.put(key, value);
}
/**
 * Adds the given attributes to this trade.
 * <p>
 * NOTE(review): despite the "set" name, this merges into the existing map and
 * does not clear previously stored attributes — confirm callers expect merge
 * rather than replace semantics.
 *
 * @param attributes  the attributes to add, not null
 */
public void setAttributes(Map<String, String> attributes) {
  ArgumentChecker.notNull(attributes, "attributes");
  for (Entry<String, String> entry : attributes.entrySet()) {
    addAttribute(entry.getKey(), entry.getValue());
  }
}
//-------------------------------------------------------------------------
/**
 * Checks equality based on the trade state.
 * <p>
 * Compares quantity, counterparty, trade date, security key, security,
 * premium details and attributes. The unique identifier, parent position
 * identifier and trade time are excluded — NOTE(review): confirm the
 * trade-time exclusion is intentional, given that the trade date is compared.
 */
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  // instanceof (rather than getClass) means subclasses can compare equal to
  // a TradeImpl; any subclass overriding equals risks breaking symmetry.
  if (obj instanceof TradeImpl) {
    TradeImpl other = (TradeImpl) obj;
    return new EqualsBuilder().append(getQuantity(), other.getQuantity()).append(getCounterparty(), other.getCounterparty()).append(getTradeDate(), other.getTradeDate()).append(getSecurityKey(), other.getSecurityKey()).append(getSecurity(), other.getSecurity()).append(getPremium(), other.getPremium()).append(getPremiumCurrency(), other.getPremiumCurrency()).append(getPremiumDate(), other.getPremiumDate()).append(getPremiumTime(), other.getPremiumTime()).append(getAttributes(), other.getAttributes()).isEquals();
  }
  return false;
}
/**
 * Returns a hash code built from the same fields that {@link #equals(Object)} compares.
 *
 * @return the hash code
 */
@Override
public int hashCode() {
  // FIX: getPremiumCurrency() was previously appended twice, which was clearly
  // accidental — equals() hashes each field exactly once. The duplicate append
  // is removed so the builder's field list matches the equality field list.
  return new HashCodeBuilder().append(getQuantity()).append(getCounterparty()).append(getTradeDate()).append(getSecurityKey()).append(getSecurity()).append(getPremium()).append(getPremiumCurrency()).append(getPremiumDate()).append(getPremiumTime()).append(getAttributes()).toHashCode();
}
/**
 * Returns a human-readable description of the trade, intended for debugging.
 *
 * @return the description, not null
 */
@Override
public String toString() {
  // StrBuilder is retained deliberately: its append(Object) null-handling
  // differs from java.lang.StringBuilder, so swapping it could change output.
  StrBuilder buf = new StrBuilder(256);
  buf.append("Trade[").append(getUniqueId()).append(", ");
  buf.append(getQuantity()).append(' ');
  buf.append(getSecurity() != null ? getSecurity() : getSecurityKey());
  buf.append(" PositionID:").append(getParentPositionId());
  buf.append(" ").append(getCounterparty());
  buf.append(" ").append(getTradeDate());
  buf.append(" ").append(getTradeTime());
  buf.append(']');
  return buf.toString();
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
 * A simple mutable implementation of {@code Trade}.
 * <p>
 * This class is mutable and not synchronized.
 */
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {

  /** Serialization version. */
  private static final long serialVersionUID = 1L;

  /**
   * The unique identifier of the trade.
   */
  private UniqueIdentifier _uniqueId;
  /**
   * The unique identifier of the parent position.
   */
  private UniqueIdentifier _parentPositionId;
  /**
   * The identifier specifying the security.
   */
  private IdentifierBundle _securityKey;
  /**
   * The security.
   */
  private Security _security;
  /**
   * The amount of the position.
   */
  private BigDecimal _quantity;
  /**
   * The counterparty.
   */
  private Counterparty _counterparty;
  /**
   * The trade date.
   */
  private LocalDate _tradeDate;
  /**
   * The trade time with offset.
   */
  private OffsetTime _tradeTime;
  /**
   * Amount paid for trade at time of purchase.
   */
  private Double _premium;
  /**
   * Currency of payment at time of purchase.
   */
  private Currency _premiumCurrency;
  /**
   * Date of premium payment.
   */
  private LocalDate _premiumDate;
  /**
   * Time of premium payment.
   */
  private OffsetTime _premiumTime;
  /**
   * The trade attributes.
   */
  private Map<String, String> _attributes = new HashMap<String, String>();

  /**
   * Creates a trade which must be initialized by calling methods.
   */
  public TradeImpl() {
  }

  /**
   * Creates a trade from a position, counterparty, trade instant, and an amount.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // The single identifier is wrapped into a bundle; the security starts unresolved.
    _securityKey = IdentifierBundle.of(securityKey);
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param securityKey  the security identifier, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(securityKey, "securityKey");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    _securityKey = securityKey;
    _security = null;
  }

  /**
   * Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
   *
   * @param parentPositionId  the parent position id, not null
   * @param security  the security, not null
   * @param quantity  the amount of the trade, not null
   * @param counterparty  the counterparty, not null
   * @param tradeDate  the trade date, not null
   * @param tradeTime  the trade time with offset, may be null
   */
  public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    ArgumentChecker.notNull(security, "security");
    ArgumentChecker.notNull(quantity, "quantity");
    ArgumentChecker.notNull(counterparty, "counterparty");
    ArgumentChecker.notNull(tradeDate, "tradeDate");
    _quantity = quantity;
    _counterparty = counterparty;
    _tradeDate = tradeDate;
    _tradeTime = tradeTime;
    _parentPositionId = parentPositionId;
    // Keep both the resolved security and its identifier bundle.
    _security = security;
    _securityKey = security.getIdentifiers();
  }

  /**
   * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
   *
   * @param copyFrom  instance to copy fields from, not null
   */
  public TradeImpl(final Trade copyFrom) {
    ArgumentChecker.notNull(copyFrom, "copyFrom");
    _uniqueId = copyFrom.getUniqueId();
    _quantity = copyFrom.getQuantity();
    _counterparty = copyFrom.getCounterparty();
    _tradeDate = copyFrom.getTradeDate();
    _tradeTime = copyFrom.getTradeTime();
    _parentPositionId = copyFrom.getParentPositionId();
    _securityKey = copyFrom.getSecurityKey();
    _security = copyFrom.getSecurity();
    // FIX: copy the premium details as well; previously they were silently
    // dropped, so a copy compared unequal (via equals(), which includes the
    // premium fields) to its source whenever a premium was set.
    _premium = copyFrom.getPremium();
    _premiumCurrency = copyFrom.getPremiumCurrency();
    _premiumDate = copyFrom.getPremiumDate();
    _premiumTime = copyFrom.getPremiumTime();
    _attributes.putAll(copyFrom.getAttributes());
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the unique identifier of the trade.
   *
   * @return the identifier, not null
   */
  @Override
  public UniqueIdentifier getUniqueId() {
    return _uniqueId;
  }

  /**
   * Sets the unique identifier of the trade.
   *
   * @param identifier  the new identifier, not null
   */
  public void setUniqueId(UniqueIdentifier identifier) {
    ArgumentChecker.notNull(identifier, "identifier");
    _uniqueId = identifier;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the parent position unique identifier.
   *
   * @return the parent position unique identifier, not null
   */
  @Override
  public UniqueIdentifier getParentPositionId() {
    return _parentPositionId;
  }

  /**
   * Sets the parent position unique identifier.
   *
   * @param parentPositionId  the parent position unique identifier, not null
   */
  public void setParentPositionId(UniqueIdentifier parentPositionId) {
    ArgumentChecker.notNull(parentPositionId, "parentPositionId");
    _parentPositionId = parentPositionId;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount of the position held in terms of the security.
   *
   * @return the amount of the position, not null
   */
  @Override
  public BigDecimal getQuantity() {
    return _quantity;
  }

  /**
   * Sets the amount of the position held in terms of the security.
   *
   * @param quantity  the amount of the position, not null
   */
  public void setQuantity(BigDecimal quantity) {
    ArgumentChecker.notNull(quantity, "quantity");
    _quantity = quantity;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a key to the security being held.
   * <p>
   * This allows the security to be referenced without actually loading the security itself.
   *
   * @return the security key
   */
  @Override
  public IdentifierBundle getSecurityKey() {
    return _securityKey;
  }

  /**
   * Sets the key to the security being held.
   *
   * @param securityKey  the security key, may be null
   */
  public void setSecurityKey(IdentifierBundle securityKey) {
    _securityKey = securityKey;
  }

  /**
   * Adds an identifier to the security key, creating a new bundle if none is set.
   *
   * @param securityKeyIdentifier  the identifier to add, not null
   */
  public void addSecurityKey(final Identifier securityKeyIdentifier) {
    ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
    if (getSecurityKey() != null) {
      setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
    } else {
      setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
    }
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the security being held, returning {@code null} if it has not been loaded.
   * <p>
   * This method is guaranteed to return a security within an analytic function.
   *
   * @return the security
   */
  @Override
  public Security getSecurity() {
    return _security;
  }

  /**
   * Sets the security being held.
   *
   * @param security  the security, may be null
   */
  public void setSecurity(Security security) {
    _security = security;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the counterparty.
   *
   * @return the counterparty, may be null
   */
  @Override
  public Counterparty getCounterparty() {
    return _counterparty;
  }

  /**
   * Sets the counterparty.
   *
   * @param counterparty  the counterparty, may be null
   */
  public void setCounterparty(Counterparty counterparty) {
    _counterparty = counterparty;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade date.
   *
   * @return the trade date, may be null
   */
  @Override
  public LocalDate getTradeDate() {
    return _tradeDate;
  }

  /**
   * Sets the trade date.
   *
   * @param tradeDate  the trade date, may be null
   */
  public void setTradeDate(LocalDate tradeDate) {
    _tradeDate = tradeDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the trade time.
   *
   * @return the trade time, may be null
   */
  @Override
  public OffsetTime getTradeTime() {
    return _tradeTime;
  }

  /**
   * Sets the trade time.
   *
   * @param tradeTime  the trade time, may be null
   */
  public void setTradeTime(OffsetTime tradeTime) {
    _tradeTime = tradeTime;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the amount paid for the trade at time of purchase.
   *
   * @return the premium, may be null
   */
  @Override
  public Double getPremium() {
    return _premium;
  }

  /**
   * Sets the amount paid for the trade at time of purchase.
   *
   * @param premium  the premium, may be null
   */
  public void setPremium(final Double premium) {
    _premium = premium;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the currency of the premium payment.
   *
   * @return the premium currency, may be null
   */
  @Override
  public Currency getPremiumCurrency() {
    return _premiumCurrency;
  }

  /**
   * Sets the currency of the premium payment.
   *
   * @param premiumCurrency  the premium currency, may be null
   */
  public void setPremiumCurrency(Currency premiumCurrency) {
    _premiumCurrency = premiumCurrency;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the date of the premium payment.
   *
   * @return the premium date, may be null
   */
  @Override
  public LocalDate getPremiumDate() {
    return _premiumDate;
  }

  /**
   * Sets the date of the premium payment.
   *
   * @param premiumDate  the premium date, may be null
   */
  public void setPremiumDate(LocalDate premiumDate) {
    _premiumDate = premiumDate;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the time of the premium payment.
   *
   * @return the premium time, may be null
   */
  @Override
  public OffsetTime getPremiumTime() {
    return _premiumTime;
  }

  /**
   * Sets the time of the premium payment.
   *
   * @param premiumTime  the premium time, may be null
   */
  public void setPremiumTime(OffsetTime premiumTime) {
    _premiumTime = premiumTime;
  }

  /**
   * Gets an unmodifiable view of the trade attributes.
   *
   * @return the attributes, not null
   */
  @Override
  public Map<String, String> getAttributes() {
    return Collections.unmodifiableMap(_attributes);
  }

  /**
   * Adds a single attribute, replacing any existing value for the key.
   *
   * @param key  the attribute key, not null
   * @param value  the attribute value, not null
   */
  public void addAttribute(String key, String value) {
    ArgumentChecker.notNull(key, "key");
    ArgumentChecker.notNull(value, "value");
    _attributes.put(key, value);
  }

  /**
   * Adds the given attributes to this trade.
   * <p>
   * NOTE(review): despite the "set" name this merges into the existing map
   * without clearing previous entries — confirm callers expect merge semantics.
   *
   * @param attributes  the attributes to add, not null
   */
  public void setAttributes(Map<String, String> attributes) {
    ArgumentChecker.notNull(attributes, "attributes");
    for (Entry<String, String> entry : attributes.entrySet()) {
      addAttribute(entry.getKey(), entry.getValue());
    }
  }

  //-------------------------------------------------------------------------
  /**
   * Checks equality based on quantity, counterparty, trade date, security key,
   * security, premium details and attributes. The unique identifier, parent
   * position identifier and trade time are excluded.
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj instanceof TradeImpl) {
      TradeImpl other = (TradeImpl) obj;
      return new EqualsBuilder().append(getQuantity(), other.getQuantity()).append(getCounterparty(), other.getCounterparty()).append(getTradeDate(), other.getTradeDate()).append(getSecurityKey(), other.getSecurityKey()).append(getSecurity(), other.getSecurity()).append(getPremium(), other.getPremium()).append(getPremiumCurrency(), other.getPremiumCurrency()).append(getPremiumDate(), other.getPremiumDate()).append(getPremiumTime(), other.getPremiumTime()).append(getAttributes(), other.getAttributes()).isEquals();
    }
    return false;
  }

  /**
   * Returns a hash code built from the same fields that {@link #equals(Object)} compares.
   */
  @Override
  public int hashCode() {
    // FIX: getPremiumCurrency() was previously appended twice; the duplicate
    // append is removed so the hashed fields match the equality fields.
    return new HashCodeBuilder().append(getQuantity()).append(getCounterparty()).append(getTradeDate()).append(getSecurityKey()).append(getSecurity()).append(getPremium()).append(getPremiumCurrency()).append(getPremiumDate()).append(getPremiumTime()).append(getAttributes()).toHashCode();
  }

  /**
   * Returns a human-readable description of the trade, intended for debugging.
   */
  @Override
  public String toString() {
    return new StrBuilder(256).append("Trade[").append(getUniqueId()).append(", ").append(getQuantity()).append(' ').append(getSecurity() != null ? getSecurity() : getSecurityKey()).append(" PositionID:").append(getParentPositionId()).append(" ").append(getCounterparty()).append(" ").append(getTradeDate()).append(" ").append(getTradeTime()).append(']').toString();
  }
}
// MERGE MARKER (was the bare token "KeepBothMethods"): converted to a comment so
// it no longer breaks parsing. The duplicate TradeImpl copies above and below
// this line still need manual reconciliation into a single class.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
 * The unique identifier of the trade.
 */
private UniqueIdentifier _uniqueId;
/**
 * The unique identifier of the parent position.
 */
private UniqueIdentifier _parentPositionId;
/**
 * The identifier bundle specifying the security.
 */
private IdentifierBundle _securityKey;
/**
 * The resolved security, null until loaded.
 */
private Security _security;
/**
 * The amount of the position.
 */
private BigDecimal _quantity;
/**
 * The counterparty.
 */
private Counterparty _counterparty;
/**
 * The trade date.
 */
private LocalDate _tradeDate;
/**
 * The trade time with offset.
 */
private OffsetTime _tradeTime;
/**
 * Amount paid for trade at time of purchase.
 */
private Double _premium;
/**
 * Currency of payment at time of purchase.
 */
private Currency _premiumCurrency;
/**
 * Date of premium payment.
 */
private LocalDate _premiumDate;
/**
 * Time of premium payment.
 */
private OffsetTime _premiumTime;
/**
 * The trade attributes.
 */
private Map<String, String> _attributes = new HashMap<String, String>();
/**
 * Creates a trade which must be initialized by calling methods.
 */
public TradeImpl() {
}
/**
 * Creates a trade from a position identifier, a single security identifier,
 * a quantity, a counterparty and a trade instant.
 *
 * @param parentPositionId  the parent position id, not null
 * @param securityKey  the security identifier, not null
 * @param quantity  the amount of the trade, not null
 * @param counterparty  the counterparty, not null
 * @param tradeDate  the trade date, not null
 * @param tradeTime  the trade time with offset, may be null
 */
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
  // Null-checks kept in the original order so the first failing argument
  // produces the same exception message as before.
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  ArgumentChecker.notNull(securityKey, "securityKey");
  ArgumentChecker.notNull(quantity, "quantity");
  ArgumentChecker.notNull(counterparty, "counterparty");
  ArgumentChecker.notNull(tradeDate, "tradeDate");
  // Parent link and security reference: the single identifier is wrapped into
  // a bundle; the resolved security starts unset.
  _parentPositionId = parentPositionId;
  _securityKey = IdentifierBundle.of(securityKey);
  _security = null;
  // Trade economics.
  _quantity = quantity;
  _counterparty = counterparty;
  _tradeDate = tradeDate;
  _tradeTime = tradeTime;
}
/**
 * Creates a trade from a position identifier, an amount of a security
 * identified by a key bundle, a counterparty and a trade instant.
 *
 * @param parentPositionId  the parent position id, not null
 * @param securityKey  the security identifier bundle, not null
 * @param quantity  the amount of the trade, not null
 * @param counterparty  the counterparty, not null
 * @param tradeDate  the trade date, not null
 * @param tradeTime  the trade time with offset, may be null
 */
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
  // Null-checks kept in the original order so the first failing argument
  // produces the same exception message as before.
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  ArgumentChecker.notNull(securityKey, "securityKey");
  ArgumentChecker.notNull(quantity, "quantity");
  ArgumentChecker.notNull(counterparty, "counterparty");
  ArgumentChecker.notNull(tradeDate, "tradeDate");
  // Parent link and security reference; the resolved security starts unset.
  _parentPositionId = parentPositionId;
  _securityKey = securityKey;
  _security = null;
  // Trade economics.
  _quantity = quantity;
  _counterparty = counterparty;
  _tradeDate = tradeDate;
  _tradeTime = tradeTime;
}
/**
 * Creates a trade from a position identifier, an amount of a resolved
 * security, a counterparty and a trade instant.
 *
 * @param parentPositionId  the parent position id, not null
 * @param security  the security, not null
 * @param quantity  the amount of the trade, not null
 * @param counterparty  the counterparty, not null
 * @param tradeDate  the trade date, not null
 * @param tradeTime  the trade time with offset, may be null
 */
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
  // Null-checks kept in the original order so the first failing argument
  // produces the same exception message as before.
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  ArgumentChecker.notNull(security, "security");
  ArgumentChecker.notNull(quantity, "quantity");
  ArgumentChecker.notNull(counterparty, "counterparty");
  ArgumentChecker.notNull(tradeDate, "tradeDate");
  _parentPositionId = parentPositionId;
  // Keep both the resolved security and its identifier bundle.
  _security = security;
  _securityKey = security.getIdentifiers();
  // Trade economics.
  _quantity = quantity;
  _counterparty = counterparty;
  _tradeDate = tradeDate;
  _tradeTime = tradeTime;
}
/**
 * Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
 *
 * @param copyFrom  instance to copy fields from, not null
 */
public TradeImpl(final Trade copyFrom) {
  ArgumentChecker.notNull(copyFrom, "copyFrom");
  _uniqueId = copyFrom.getUniqueId();
  _quantity = copyFrom.getQuantity();
  _counterparty = copyFrom.getCounterparty();
  _tradeDate = copyFrom.getTradeDate();
  _tradeTime = copyFrom.getTradeTime();
  _parentPositionId = copyFrom.getParentPositionId();
  _securityKey = copyFrom.getSecurityKey();
  _security = copyFrom.getSecurity();
  // FIX: copy the premium details as well; previously they were silently
  // dropped, so a copy made with this constructor compared unequal (via
  // equals(), which includes the premium fields) to its source whenever a
  // premium was set.
  _premium = copyFrom.getPremium();
  _premiumCurrency = copyFrom.getPremiumCurrency();
  _premiumDate = copyFrom.getPremiumDate();
  _premiumTime = copyFrom.getPremiumTime();
  _attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
 * Gets the unique identifier of the trade.
 *
 * @return the identifier, not null
 */
@Override
public UniqueIdentifier getUniqueId() {
  return _uniqueId;
}
/**
 * Sets the unique identifier of the trade.
 *
 * @param identifier  the new identifier, not null
 */
public void setUniqueId(UniqueIdentifier identifier) {
  ArgumentChecker.notNull(identifier, "identifier");
  _uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
 * Gets the parent position unique identifier.
 *
 * @return the parent position unique identifier, not null
 */
@Override
public UniqueIdentifier getParentPositionId() {
  return _parentPositionId;
}
/**
 * Sets the parent position unique identifier.
 *
 * @param parentPositionId  the parent position unique identifier, not null
 */
public void setParentPositionId(UniqueIdentifier parentPositionId) {
  ArgumentChecker.notNull(parentPositionId, "parentPositionId");
  _parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
 * Gets the amount of the position held in terms of the security.
 *
 * @return the amount of the position, not null
 */
@Override
public BigDecimal getQuantity() {
  return _quantity;
}
/**
 * Sets the amount of the position held in terms of the security.
 *
 * @param quantity  the amount of the position, not null
 */
public void setQuantity(BigDecimal quantity) {
  ArgumentChecker.notNull(quantity, "quantity");
  _quantity = quantity;
}
//-------------------------------------------------------------------------
/**
 * Gets a key to the security being held.
 * <p>
 * This allows the security to be referenced without actually loading the security itself.
 *
 * @return the security key
 */
@Override
public IdentifierBundle getSecurityKey() {
  return _securityKey;
}
/**
 * Sets the key to the security being held.
 * <p>
 * Passing {@code null} clears the key entirely.
 *
 * @param securityKey  the security key, may be null
 */
public void setSecurityKey(IdentifierBundle securityKey) {
  _securityKey = securityKey;
}
/**
 * Adds an identifier to the security key.
 * <p>
 * If no key is currently set, a new single-identifier bundle is created;
 * otherwise the identifier is merged into the existing bundle.
 *
 * @param securityKeyIdentifier  the identifier to add, not null
 */
public void addSecurityKey(final Identifier securityKeyIdentifier) {
  ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
  if (getSecurityKey() != null) {
    setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
  } else {
    setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
  }
}
//-------------------------------------------------------------------------
/**
 * Gets the security being held, returning {@code null} if it has not been loaded.
 * <p>
 * This method is guaranteed to return a security within an analytic function.
 *
 * @return the security
 */
@Override
public Security getSecurity() {
  return _security;
}
/**
 * Sets the security being held.
 * <p>
 * Note: this does not update the security key; callers must keep the two in sync.
 *
 * @param security  the security, may be null
 */
public void setSecurity(Security security) {
  _security = security;
}
//-------------------------------------------------------------------------
/**
 * Gets the counterparty.
 * <p>
 * NOTE(review): documented as "may be null", although every argument-taking
 * constructor requires a non-null counterparty — null is only possible via
 * the no-arg constructor or this setter; confirm which contract is intended.
 *
 * @return the counterparty, may be null
 */
@Override
public Counterparty getCounterparty() {
  return _counterparty;
}
/**
 * Sets the counterparty.
 *
 * @param counterparty  the counterparty, may be null
 */
public void setCounterparty(Counterparty counterparty) {
  _counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
 * Gets the trade date.
 *
 * @return the trade date, may be null
 */
@Override
public LocalDate getTradeDate() {
  return _tradeDate;
}
/**
 * Sets the trade date.
 *
 * @param tradeDate  the trade date, may be null
 */
public void setTradeDate(LocalDate tradeDate) {
  _tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
 * Gets the trade time.
 *
 * @return the trade time, may be null
 */
@Override
public OffsetTime getTradeTime() {
  return _tradeTime;
}
/**
 * Sets the trade time.
 *
 * @param tradeTime  the trade time, may be null
 */
public void setTradeTime(OffsetTime tradeTime) {
  _tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
/**
 * Gets the amount paid for the trade at time of purchase.
 *
 * @return the premium, may be null
 */
@Override
public Double getPremium() {
  return _premium;
}
/**
 * Sets the amount paid for the trade at time of purchase.
 *
 * @param premium  the premium, may be null
 */
public void setPremium(final Double premium) {
  _premium = premium;
}
/**
 * Sets the amount paid for the trade at time of purchase.
 *
 * @param premium  the premium, may be null
 * @deprecated misspelled duplicate of {@link #setPremium(Double)}; retained
 *             only for binary/source compatibility and now delegates to it
 */
@Deprecated
public void setPremuim(final Double premium) {
  setPremium(premium);
}
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
/**
 * Adds every entry of the supplied map to the trade attributes,
 * validating each key and value via {@link #addAttribute(String, String)}.
 *
 * @param attributes the attributes to add, not null, no null keys or values
 */
public void setAttributes(Map<String, String> attributes) {
  ArgumentChecker.notNull(attributes, "attributes");
  for (String key : attributes.keySet()) {
    addAttribute(key, attributes.get(key));
  }
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
  // Identity short-circuit before the field-by-field comparison.
  if (this == obj) {
    return true;
  }
  if (obj instanceof TradeImpl == false) {
    return false;
  }
  final TradeImpl other = (TradeImpl) obj;
  // Compare the same property set that hashCode() draws from.
  return new EqualsBuilder()
      .append(getQuantity(), other.getQuantity())
      .append(getCounterparty(), other.getCounterparty())
      .append(getTradeDate(), other.getTradeDate())
      .append(getSecurityKey(), other.getSecurityKey())
      .append(getSecurity(), other.getSecurity())
      .append(getPremium(), other.getPremium())
      .append(getPremiumCurrency(), other.getPremiumCurrency())
      .append(getPremiumDate(), other.getPremiumDate())
      .append(getPremiumTime(), other.getPremiumTime())
      .append(getAttributes(), other.getAttributes())
      .isEquals();
}
@Override
public int hashCode() {
  // Fix: the premium currency was previously appended twice; each property is
  // now hashed exactly once, keeping the field set consistent with equals().
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
@Override
public String toString() {
return new StrBuilder(256).append("Trade[").append(getUniqueId()).append(", ").append(getQuantity()).append(' ').append(getSecurity() != null ? getSecurity() : getSecurityKey()).append(" PositionID:").append(getParentPositionId()).append(" ").append(getCounterparty()).append(" ").append(getTradeDate()).append(" ").append(getTradeTime()).append(']').toString();
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
* Currency of payment at time of purchase
*/
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
/**
 * Adds an identifier to the security key, creating the bundle if none exists yet.
 *
 * @param securityKeyIdentifier the identifier to add, not null
 */
public void addSecurityKey(final Identifier securityKeyIdentifier) {
  ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
  final IdentifierBundle current = getSecurityKey();
  setSecurityKey(current == null
      ? IdentifierBundle.of(securityKeyIdentifier)
      : current.withIdentifier(securityKeyIdentifier));
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premium;
}
/**
 * Sets the premium paid for the trade at time of purchase.
 *
 * @param premium the premium, may be null
 */
public void setPremium(final Double premium) {
  _premium = premium;
}

/**
 * Sets the premium paid for the trade at time of purchase.
 *
 * @param premium the premium, may be null
 * @deprecated misspelled method name retained for backward compatibility;
 *             use {@link #setPremium(Double)} instead
 */
@Deprecated
public void setPremuim(final Double premium) {
  // Delegate so the two setters can never diverge in behavior.
  setPremium(premium);
}
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
public void setAttributes(Map<String, String> attributes) {
ArgumentChecker.notNull(attributes, "attributes");
for (Entry<String, String> entry : attributes.entrySet()) {
addAttribute(entry.getKey(), entry.getValue());
}
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return new EqualsBuilder().append(getQuantity(), other.getQuantity()).append(getCounterparty(), other.getCounterparty()).append(getTradeDate(), other.getTradeDate()).append(getSecurityKey(), other.getSecurityKey()).append(getSecurity(), other.getSecurity()).append(getPremium(), other.getPremium()).append(getPremiumCurrency(), other.getPremiumCurrency()).append(getPremiumDate(), other.getPremiumDate()).append(getPremiumTime(), other.getPremiumTime()).append(getAttributes(), other.getAttributes()).isEquals();
}
return false;
}
@Override
public int hashCode() {
  // Fix: the premium currency was previously appended twice; each property is
  // now hashed exactly once, keeping the field set consistent with equals().
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
@Override
public String toString() {
return new StrBuilder(256).append("Trade[").append(getUniqueId()).append(", ").append(getQuantity()).append(' ').append(getSecurity() != null ? getSecurity() : getSecurityKey()).append(" PositionID:").append(getParentPositionId()).append(" ").append(getCounterparty()).append(" ").append(getTradeDate()).append(" ").append(getTradeTime()).append(']').toString();
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
 * Currency of payment at time of purchase.
 */
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
public void addSecurityKey(final Identifier securityKeyIdentifier) {
ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
if (getSecurityKey() != null) {
setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
} else {
setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
}
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premium;
}
<<<<<<< MINE
public void setPremuim(final Double premium) {
_premium = premium;
}
=======
public void setPremium(final Double premium) {
_premium = premium;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
public void setAttributes(Map<String, String> attributes) {
ArgumentChecker.notNull(attributes, "attributes");
for (Entry<String, String> entry : attributes.entrySet()) {
addAttribute(entry.getKey(), entry.getValue());
}
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return new EqualsBuilder()
.append(getQuantity(), other.getQuantity())
.append(getCounterparty(), other.getCounterparty())
.append(getTradeDate(), other.getTradeDate())
.append(getSecurityKey(), other.getSecurityKey())
.append(getSecurity(), other.getSecurity())
.append(getPremium(), other.getPremium())
.append(getPremiumCurrency(), other.getPremiumCurrency())
.append(getPremiumDate(), other.getPremiumDate())
.append(getPremiumTime(), other.getPremiumTime())
.append(getAttributes(), other.getAttributes())
.isEquals();
}
return false;
}
@Override
public int hashCode() {
  // Fix: getPremiumCurrency() was appended twice; each property is now hashed
  // exactly once, keeping the field set consistent with equals().
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
@Override
public String toString() {
return new StrBuilder(256)
.append("Trade[")
.append(getUniqueId())
.append(", ")
.append(getQuantity())
.append(' ')
.append(getSecurity() != null ? getSecurity() : getSecurityKey())
.append(" PositionID:")
.append(getParentPositionId())
.append(" ")
.append(getCounterparty())
.append(" ")
.append(getTradeDate())
.append(" ")
.append(getTradeTime())
.append(']')
.toString();
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
 * Currency of payment at time of purchase.
 */
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
/**
 * Adds an identifier to the security key bundle, creating the bundle
 * if one does not yet exist.
 *
 * @param securityKeyIdentifier  the identifier to add, not null
 */
public void addSecurityKey(final Identifier securityKeyIdentifier) {
  ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
  final IdentifierBundle current = getSecurityKey();
  setSecurityKey(current == null
      ? IdentifierBundle.of(securityKeyIdentifier)
      : current.withIdentifier(securityKeyIdentifier));
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premium;
}
<<<<<<< MINE
public void setPremuim(final Double premium) {
_premium = premium;
}
=======
public void setPremium(final Double premium) {
_premium = premium;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
/**
 * Adds all of the given attributes to this trade.
 * <p>
 * Each key/value pair is validated as non-null via {@link #addAttribute};
 * existing attributes are retained unless overwritten by a matching key.
 *
 * @param attributes  the attributes to add, not null
 */
public void setAttributes(Map<String, String> attributes) {
  ArgumentChecker.notNull(attributes, "attributes");
  for (String key : attributes.keySet()) {
    addAttribute(key, attributes.get(key));
  }
}
//-------------------------------------------------------------------------
/**
 * Checks if this trade equals another based on its state.
 * <p>
 * Compares quantity, counterparty, trade date, security key, security,
 * the premium fields and the attributes map.
 * NOTE(review): the unique identifier, parent position id and trade time
 * are excluded from the comparison — confirm that is the intended
 * identity semantics before relying on it.
 *
 * @param obj  the object to compare to, null returns false
 * @return true if equal
 */
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return new EqualsBuilder()
.append(getQuantity(), other.getQuantity())
.append(getCounterparty(), other.getCounterparty())
.append(getTradeDate(), other.getTradeDate())
.append(getSecurityKey(), other.getSecurityKey())
.append(getSecurity(), other.getSecurity())
.append(getPremium(), other.getPremium())
.append(getPremiumCurrency(), other.getPremiumCurrency())
.append(getPremiumDate(), other.getPremiumDate())
.append(getPremiumTime(), other.getPremiumTime())
.append(getAttributes(), other.getAttributes())
.isEquals();
}
return false;
}
/**
 * Returns a hash code consistent with {@link #equals(Object)}.
 * <p>
 * Hashes the same fields that equals() compares. The original appended
 * {@code getPremiumCurrency()} twice; the redundant append is removed
 * (equals() includes it only once).
 *
 * @return the hash code
 */
@Override
public int hashCode() {
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
/**
 * Returns a string summary of the trade, of the form
 * {@code Trade[<uniqueId>, <quantity> <security-or-key> PositionID:<id> <counterparty> <date> <time>]}.
 * Falls back to the security key when the security itself is not loaded.
 *
 * @return a summary string, not null
 */
@Override
public String toString() {
return new StrBuilder(256)
.append("Trade[")
.append(getUniqueId())
.append(", ")
.append(getQuantity())
.append(' ')
.append(getSecurity() != null ? getSecurity() : getSecurityKey())
.append(" PositionID:")
.append(getParentPositionId())
.append(" ")
.append(getCounterparty())
.append(" ")
.append(getTradeDate())
.append(" ")
.append(getTradeTime())
.append(']')
.toString();
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
* Currency of payment at time of purchase
*/
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
public void addSecurityKey(final Identifier securityKeyIdentifier) {
ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
if (getSecurityKey() != null) {
setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
} else {
setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
}
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premium;
}
<<<<<<< MINE
public void setPremium(final Double premium) {
=======
public void setPremuim(final Double premium) {
>>>>>>> YOURS
_premium = premium;
}
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
public void setAttributes(Map<String, String> attributes) {
ArgumentChecker.notNull(attributes, "attributes");
for (Entry<String, String> entry : attributes.entrySet()) {
addAttribute(entry.getKey(), entry.getValue());
}
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return new EqualsBuilder()
.append(getQuantity(), other.getQuantity())
.append(getCounterparty(), other.getCounterparty())
.append(getTradeDate(), other.getTradeDate())
.append(getSecurityKey(), other.getSecurityKey())
.append(getSecurity(), other.getSecurity())
.append(getPremium(), other.getPremium())
.append(getPremiumCurrency(), other.getPremiumCurrency())
.append(getPremiumDate(), other.getPremiumDate())
.append(getPremiumTime(), other.getPremiumTime())
.append(getAttributes(), other.getAttributes())
.isEquals();
}
return false;
}
/**
 * Returns a hash code consistent with {@link #equals(Object)}.
 * <p>
 * Hashes the same fields that equals() compares. The original appended
 * {@code getPremiumCurrency()} twice; the redundant append is removed
 * (equals() includes it only once).
 *
 * @return the hash code
 */
@Override
public int hashCode() {
  return new HashCodeBuilder()
      .append(getQuantity())
      .append(getCounterparty())
      .append(getTradeDate())
      .append(getSecurityKey())
      .append(getSecurity())
      .append(getPremium())
      .append(getPremiumCurrency())
      .append(getPremiumDate())
      .append(getPremiumTime())
      .append(getAttributes())
      .toHashCode();
}
@Override
public String toString() {
return new StrBuilder(256)
.append("Trade[")
.append(getUniqueId())
.append(", ")
.append(getQuantity())
.append(' ')
.append(getSecurity() != null ? getSecurity() : getSecurityKey())
.append(" PositionID:")
.append(getParentPositionId())
.append(" ")
.append(getCounterparty())
.append(" ")
.append(getTradeDate())
.append(" ")
.append(getTradeTime())
.append(']')
.toString();
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.position.impl;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetTime;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.text.StrBuilder;
import com.opengamma.core.position.Counterparty;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.id.Identifier;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.MutableUniqueIdentifiable;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.Currency;
/**
* A simple mutable implementation of {@code Trade}.
*/
public class TradeImpl implements Trade, MutableUniqueIdentifiable, Serializable {
/** Serialization version. */
private static final long serialVersionUID = 1L;
/**
* The unique identifier of the trade.
*/
private UniqueIdentifier _uniqueId;
/**
* The unique identifier of the parent position.
*/
private UniqueIdentifier _parentPositionId;
/**
* The identifier specifying the security.
*/
private IdentifierBundle _securityKey;
/**
* The security.
*/
private Security _security;
/**
* The amount of the position.
*/
private BigDecimal _quantity;
/**
* The counterparty.
*/
private Counterparty _counterparty;
/**
* The trade date.
*/
private LocalDate _tradeDate;
/**
* The trade time with offset.
*/
private OffsetTime _tradeTime;
/**
* Amount paid for trade at time of purchase
*/
private Double _premium;
/**
* Currency of payment at time of purchase
*/
private Currency _premiumCurrency;
/**
* Date of premium payment
*/
private LocalDate _premiumDate;
/**
* Time of premium payment
*/
private OffsetTime _premiumTime;
/**
* The trade attributes
*/
private Map<String, String> _attributes = new HashMap<String, String>();
/**
* Creates a trade which must be initialized by calling methods.
*/
public TradeImpl() {
}
/**
* Creates a trade from a position, counterparty, trade instant, and an amount.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Identifier securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = IdentifierBundle.of(securityKey);
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security identified by key, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param securityKey the security identifier, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, IdentifierBundle securityKey, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(securityKey, "securityKey");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_securityKey = securityKey;
_security = null;
}
/**
* Creates a trade from a positionId, an amount of a security, counterparty and tradeinstant.
*
* @param parentPositionId the parent position id, not null
* @param security the security, not null
* @param quantity the amount of the trade, not null
* @param counterparty the counterparty, not null
* @param tradeDate the trade date, not null
* @param tradeTime the trade time with offset, may be null
*/
public TradeImpl(UniqueIdentifier parentPositionId, Security security, BigDecimal quantity, Counterparty counterparty, LocalDate tradeDate, OffsetTime tradeTime) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
ArgumentChecker.notNull(security, "security");
ArgumentChecker.notNull(quantity, "quantity");
ArgumentChecker.notNull(counterparty, "counterparty");
ArgumentChecker.notNull(tradeDate, "tradeDate");
_quantity = quantity;
_counterparty = counterparty;
_tradeDate = tradeDate;
_tradeTime = tradeTime;
_parentPositionId = parentPositionId;
_security = security;
_securityKey = security.getIdentifiers();
}
/**
* Construct a mutable trade copying data from another, possibly immutable, {@link Trade} implementation.
*
* @param copyFrom instance to copy fields from, not null
*/
public TradeImpl(final Trade copyFrom) {
ArgumentChecker.notNull(copyFrom, "copyFrom");
_uniqueId = copyFrom.getUniqueId();
_quantity = copyFrom.getQuantity();
_counterparty = copyFrom.getCounterparty();
_tradeDate = copyFrom.getTradeDate();
_tradeTime = copyFrom.getTradeTime();
_parentPositionId = copyFrom.getParentPositionId();
_securityKey = copyFrom.getSecurityKey();
_security = copyFrom.getSecurity();
_attributes.putAll(copyFrom.getAttributes());
}
//-------------------------------------------------------------------------
/**
* Gets the unique identifier of the trade.
*
* @return the identifier, not null
*/
@Override
public UniqueIdentifier getUniqueId() {
return _uniqueId;
}
/**
* Sets the unique identifier of the trade.
*
* @param identifier the new identifier, not null
*/
public void setUniqueId(UniqueIdentifier identifier) {
ArgumentChecker.notNull(identifier, "identifier");
_uniqueId = identifier;
}
//-------------------------------------------------------------------------
/**
* Gets the parent position unique identifier.
*
* @return the parent position unique identifier, not null
*/
@Override
public UniqueIdentifier getParentPositionId() {
return _parentPositionId;
}
/**
* Sets the parent position unique identifier.
*
* @param parentPositionId the parent position unique identifier, not null
*/
public void setParentPositionId(UniqueIdentifier parentPositionId) {
ArgumentChecker.notNull(parentPositionId, "parentPositionId");
_parentPositionId = parentPositionId;
}
//-------------------------------------------------------------------------
/**
* Gets the amount of the position held in terms of the security.
*
* @return the amount of the position, not null
*/
@Override
public BigDecimal getQuantity() {
return _quantity;
}
/**
* Sets the amount of the position held in terms of the security.
*
* @param quantity the amount of the position, not null
*/
public void setQuantity(BigDecimal quantity) {
ArgumentChecker.notNull(quantity, "quantity");
_quantity = quantity;
}
//-------------------------------------------------------------------------
/**
* Gets a key to the security being held.
* <p>
* This allows the security to be referenced without actually loading the security itself.
*
* @return the security key
*/
@Override
public IdentifierBundle getSecurityKey() {
return _securityKey;
}
/**
* Sets the key to the security being held.
*
* @param securityKey the security key, may be null
*/
public void setSecurityKey(IdentifierBundle securityKey) {
_securityKey = securityKey;
}
/**
* Adds an identifier to the security key.
*
* @param securityKeyIdentifier the identifier to add, not null
*/
public void addSecurityKey(final Identifier securityKeyIdentifier) {
ArgumentChecker.notNull(securityKeyIdentifier, "securityKeyIdentifier");
if (getSecurityKey() != null) {
setSecurityKey(getSecurityKey().withIdentifier(securityKeyIdentifier));
} else {
setSecurityKey(IdentifierBundle.of(securityKeyIdentifier));
}
}
//-------------------------------------------------------------------------
/**
* Gets the security being held, returning {@code null} if it has not been loaded.
* <p>
* This method is guaranteed to return a security within an analytic function.
*
* @return the security
*/
@Override
public Security getSecurity() {
return _security;
}
/**
* Sets the security being held.
*
* @param security the security, may be null
*/
public void setSecurity(Security security) {
_security = security;
}
//-------------------------------------------------------------------------
/**
* Gets the counterparty.
*
* @return the counterparty, may be null
*/
@Override
public Counterparty getCounterparty() {
return _counterparty;
}
/**
* Sets the counterparty.
*
* @param counterparty the counterparty, may be null
*/
public void setCounterparty(Counterparty counterparty) {
_counterparty = counterparty;
}
//-------------------------------------------------------------------------
/**
* Gets the trade date.
*
* @return the trade date, may be null
*/
@Override
public LocalDate getTradeDate() {
return _tradeDate;
}
/**
* Sets the trade date.
*
* @param tradeDate the trade date, may be null
*/
public void setTradeDate(LocalDate tradeDate) {
_tradeDate = tradeDate;
}
//-------------------------------------------------------------------------
/**
* Gets the trade time.
*
* @return the trade time, may be null
*/
@Override
public OffsetTime getTradeTime() {
return _tradeTime;
}
/**
* Sets the trade time.
*
* @param tradeTime the trade time, may be null
*/
public void setTradeTime(OffsetTime tradeTime) {
_tradeTime = tradeTime;
}
//-------------------------------------------------------------------------
@Override
public Double getPremium() {
return _premium;
}
<<<<<<< MINE
public void setPremium(final Double premium) {
=======
public void setPremuim(final Double premium) {
>>>>>>> YOURS
_premium = premium;
}
//-------------------------------------------------------------------------
@Override
public Currency getPremiumCurrency() {
return _premiumCurrency;
}
public void setPremiumCurrency(Currency premiumCurrency) {
_premiumCurrency = premiumCurrency;
}
//-------------------------------------------------------------------------
@Override
public LocalDate getPremiumDate() {
return _premiumDate;
}
public void setPremiumDate(LocalDate premiumDate) {
_premiumDate = premiumDate;
}
//-------------------------------------------------------------------------
@Override
public OffsetTime getPremiumTime() {
return _premiumTime;
}
public void setPremiumTime(OffsetTime premiumTime) {
_premiumTime = premiumTime;
}
@Override
public Map<String, String> getAttributes() {
return Collections.unmodifiableMap(_attributes);
}
public void addAttribute(String key, String value) {
ArgumentChecker.notNull(key, "key");
ArgumentChecker.notNull(value, "value");
_attributes.put(key, value);
}
public void setAttributes(Map<String, String> attributes) {
ArgumentChecker.notNull(attributes, "attributes");
for (Entry<String, String> entry : attributes.entrySet()) {
addAttribute(entry.getKey(), entry.getValue());
}
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof TradeImpl) {
TradeImpl other = (TradeImpl) obj;
return new EqualsBuilder()
.append(getQuantity(), other.getQuantity())
.append(getCounterparty(), other.getCounterparty())
.append(getTradeDate(), other.getTradeDate())
.append(getSecurityKey(), other.getSecurityKey())
.append(getSecurity(), other.getSecurity())
.append(getPremium(), other.getPremium())
.append(getPremiumCurrency(), other.getPremiumCurrency())
.append(getPremiumDate(), other.getPremiumDate())
.append(getPremiumTime(), other.getPremiumTime())
.append(getAttributes(), other.getAttributes())
.isEquals();
}
return false;
}
@Override
public int hashCode() {
return new HashCodeBuilder()
.append(getQuantity())
.append(getCounterparty())
.append(getTradeDate())
.append(getSecurityKey())
.append(getSecurity())
.append(getPremium())
.append(getPremiumCurrency())
.append(getPremiumCurrency())
.append(getPremiumDate())
.append(getPremiumTime())
.append(getAttributes())
.toHashCode();
}
@Override
public String toString() {
return new StrBuilder(256)
.append("Trade[")
.append(getUniqueId())
.append(", ")
.append(getQuantity())
.append(' ')
.append(getSecurity() != null ? getSecurity() : getSecurityKey())
.append(" PositionID:")
.append(getParentPositionId())
.append(" ")
.append(getCounterparty())
.append(" ")
.append(getTradeDate())
.append(" ")
.append(getTradeTime())
.append(']')
.toString();
}
}
Diff Result
No diff
Case 50 - java_ogplatform.rev_28db5_41a62..SimpleInterpolatedYieldAndDiscountCurveFunction.java
Base
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.Validate;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.time.DateUtil;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private InterpolatedYieldAndDiscountCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldAndDiscountCurveSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_definition));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldAndDiscountCurveDefinition definition) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final FixedIncomeStrip strip : definition.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
result.add(requirement);
}
return result;
}
/**
* @return the definition
*/
public InterpolatedYieldAndDiscountCurveDefinition getDefinition() {
return _definition;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
return ObjectUtils.equals(target.getUniqueIdentifier(), getDefinition().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final LocalDate today = snapshotClock.today(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final FixedIncomeStrip strip : getDefinition().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrument.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
timeInYearsToRates.put(numYears, price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
System.err.println(timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.Validate;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.time.DateUtil;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private InterpolatedYieldAndDiscountCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldAndDiscountCurveSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_definition));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldAndDiscountCurveDefinition definition) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final FixedIncomeStrip strip : definition.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
result.add(requirement);
}
return result;
}
/**
* @return the definition
*/
public InterpolatedYieldAndDiscountCurveDefinition getDefinition() {
return _definition;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
return ObjectUtils.equals(target.getUniqueIdentifier(), getDefinition().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final LocalDate today = snapshotClock.today(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final FixedIncomeStrip strip : getDefinition().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrument.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
timeInYearsToRates.put(numYears, price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
System.err.println(timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
Left
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.time.DateUtil;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private InterpolatedYieldAndDiscountCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldAndDiscountCurveSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_definition));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldAndDiscountCurveDefinition definition) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final FixedIncomeStrip strip : definition.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
result.add(requirement);
}
return result;
}
/**
* @return the definition
*/
public InterpolatedYieldAndDiscountCurveDefinition getDefinition() {
return _definition;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
return ObjectUtils.equals(target.getUniqueIdentifier(), getDefinition().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final FixedIncomeStrip strip : getDefinition().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrument.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.time.DateUtil;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private InterpolatedYieldAndDiscountCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldAndDiscountCurveSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_definition));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldAndDiscountCurveDefinition definition) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final FixedIncomeStrip strip : definition.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
result.add(requirement);
}
return result;
}
/**
* @return the definition
*/
public InterpolatedYieldAndDiscountCurveDefinition getDefinition() {
return _definition;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
return ObjectUtils.equals(target.getUniqueIdentifier(), getDefinition().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final FixedIncomeStrip strip : getDefinition().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getMarketDataSpecification());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrument.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
Right
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.Validate;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final LocalDate today = snapshotClock.today(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
timeInYearsToRates.put(strip.getYears(), price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = strip.getYears();
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
System.err.println(timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.Validate;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final LocalDate today = snapshotClock.today(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
timeInYearsToRates.put(strip.getYears(), price);
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
final double numYears = strip.getYears();
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
System.err.println(timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
Safe
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
<<<<<<< MINE
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
=======
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
>>>>>>> YOURS
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
<<<<<<< MINE
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
=======
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
_definition = null;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
>>>>>>> YOURS
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
/**
* @return the definition
*/
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
Unstructured
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
<<<<<<< MINE
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.time.DateUtil;
=======
>>>>>>> YOURS
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
<<<<<<< MINE
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
=======
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
>>>>>>> YOURS
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.ircurve;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import javax.time.calendar.Clock;
import javax.time.calendar.TimeZone;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.function.FunctionInvoker;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.Currency;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.model.interestrate.curve.InterpolatedDiscountCurve;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.livedata.normalization.MarketDataRequirementNames;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
<<<<<<< MINE
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.time.DateUtil;
=======
>>>>>>> YOURS
/**
*
*/
public class SimpleInterpolatedYieldAndDiscountCurveFunction extends AbstractFunction implements FunctionInvoker {
@SuppressWarnings("unchecked")
private Interpolator1D _interpolator;
private YieldCurveDefinition _definition;
private Set<ValueRequirement> _requirements;
private ValueSpecification _result;
private Set<ValueSpecification> _results;
private final Currency _curveCurrency;
private final String _curveName;
private final boolean _isYieldCurve;
private LocalDate _curveDate;
private InterpolatedYieldCurveSpecification _specification;
<<<<<<< MINE
public SimpleInterpolatedYieldAndDiscountCurveFunction(final Currency currency, final String name, final boolean isYieldCurve) {
ArgumentChecker.notNull(currency, "currency");
ArgumentChecker.notNull(name, "name");
=======
public SimpleInterpolatedYieldAndDiscountCurveFunction(final LocalDate curveDate, final Currency currency, final String name, final boolean isYieldCurve) {
Validate.notNull(curveDate, "Curve Date");
Validate.notNull(currency, "Currency");
Validate.notNull(name, "Name");
>>>>>>> YOURS
_definition = null;
_curveDate = curveDate;
_curveCurrency = currency;
_curveName = name;
_isYieldCurve = isYieldCurve;
_interpolator = null;
_requirements = null;
_result = null;
_results = null;
}
@Override
public void init(final FunctionCompilationContext context) {
final InterpolatedYieldCurveDefinitionSource curveSource = OpenGammaCompilationContext.getDiscountCurveSource(context);
final InterpolatedYieldCurveSpecificationBuilder specBuilder = OpenGammaCompilationContext.getYieldCurveSpecificationBuilder(context);
_definition = curveSource.getDefinition(_curveCurrency, _curveName);
_specification = specBuilder.buildCurve(_curveDate, _definition);
_interpolator = Interpolator1DFactory.getInterpolator(_definition.getInterpolatorName());
_requirements = Collections.unmodifiableSet(buildRequirements(_specification));
_result = new ValueSpecification(new ValueRequirement(_isYieldCurve ? ValueRequirementNames.YIELD_CURVE : ValueRequirementNames.DISCOUNT_CURVE, _definition.getCurrency()));
_results = Collections.singleton(_result);
}
public static Set<ValueRequirement> buildRequirements(final InterpolatedYieldCurveSpecification specification) {
final Set<ValueRequirement> result = new HashSet<ValueRequirement>();
for (final ResolvedFixedIncomeStrip strip : specification.getStrips()) {
final ValueRequirement requirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
result.add(requirement);
}
return result;
}
/**
* @return the specification
*/
public InterpolatedYieldCurveSpecification getSpecification() {
return _specification;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.PRIMITIVE) {
return false;
}
// REVIEW: jim 23-July-2010 is this enough? Probably not, but I'm not entirely sure what the deal with the Ids is...
return ObjectUtils.equals(target.getUniqueIdentifier(), getSpecification().getCurrency().getUniqueIdentifier());
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _requirements;
}
return null;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
if (canApplyTo(context, target)) {
return _results;
}
return null;
}
@Override
public boolean buildsOwnSubGraph() {
return false;
}
@Override
public String getShortName() {
return _curveCurrency + "-" + _curveName + (_isYieldCurve ? " Yield Curve" : " Discount Curve");
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.PRIMITIVE;
}
@SuppressWarnings("unchecked")
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Gather market data rates
// Note that this assumes that all strips are priced in decimal percent. We need to resolve
// that ultimately in OG-LiveData normalization and pull out the OGRate key rather than
// the crazy IndicativeValue name.
final Clock snapshotClock = executionContext.getSnapshotClock();
final ZonedDateTime today = snapshotClock.zonedDateTime(); // TODO: change to times
final Map<Double, Double> timeInYearsToRates = new TreeMap<Double, Double>();
boolean isFirst = true;
for (final ResolvedFixedIncomeStrip strip : getSpecification().getStrips()) {
final ValueRequirement stripRequirement = new ValueRequirement(MarketDataRequirementNames.INDICATIVE_VALUE, strip.getSecurity().getIdentifiers());
Double price = (Double) inputs.getValue(stripRequirement);
if (strip.getInstrumentType() == StripInstrumentType.FUTURE) {
price = (100d - price);
}
price /= 100d;
if (_isYieldCurve) {
if (isFirst) {
// TODO This is here to avoid problems with instruments with expiry < 1 day
// At the moment, interpolators don't extrapolate, and so asking for the rate
// if t < 1 throws an exception. It doesn't actually matter in the case of discount curves,
// because df at t = 0 is 1 by definition, but for yield curves this should change when
// extrapolation is allowed
timeInYearsToRates.put(0., 0.);
isFirst = false;
}
<<<<<<< MINE
// final double numYears = (strip.getEndDate().toEpochDays() - today.toEpochDays()) / DateUtil.DAYS_PER_YEAR;
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
timeInYearsToRates.put(numYears, price);
=======
timeInYearsToRates.put(strip.getYears(), price);
>>>>>>> YOURS
} else {
if (isFirst) {
timeInYearsToRates.put(0., 1.);
isFirst = false;
}
<<<<<<< MINE
final double numYears = DateUtil.getDifferenceInYears(today, strip.getEndDate().atStartOfDayInZone(TimeZone.UTC));
=======
final double numYears = strip.getYears();
>>>>>>> YOURS
timeInYearsToRates.put(numYears, Math.exp(-price * numYears));
}
}
// System.err.println("Time in years to rates: " + timeInYearsToRates);
// Bootstrap the yield curve
final YieldAndDiscountCurve curve = _isYieldCurve ? new InterpolatedYieldCurve(timeInYearsToRates, _interpolator) : new InterpolatedDiscountCurve(timeInYearsToRates, _interpolator);
final ComputedValue resultValue = new ComputedValue(_result, curve);
return Collections.singleton(resultValue);
}
}
Diff Result
No diff
Case 51 - java_ogplatform.rev_30885_84ece..MasterPortfolioReader.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityMaster;
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
Map<String, String> row;
// TODO to implement recursive portfolio traversal, outputting positions, trades and securities
// // Get the next position/trade from the portfolio
// while ((row = getSheet().loadNextRow()) != null) {
//
// // Attempt to write securities and obtain the correct security (either newly written or original)
// // Write array in reverse order as underlying is at position 0
// for (int i = security.length - 1; i >= 0; i--) {
// security[i] = portfolioWriter.writeSecurity(security[i]);
// }
//
// // Build the position and trade(s) using security[0] (underlying)
// ManageablePosition position = _rowParser.constructPosition(row, security[0]);
//
// ManageableTrade trade = _rowParser.constructTrade(row, security[0], position);
// if (trade != null) {
// position.addTrade(trade);
// }
//
// // Write positions/trade(s) to masters (trades are implicitly written with the position)
// portfolioWriter.writePosition(position);
// }
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityMaster;
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
Map<String, String> row;
// TODO to implement recursive portfolio traversal, outputting positions, trades and securities
// // Get the next position/trade from the portfolio
// while ((row = getSheet().loadNextRow()) != null) {
//
// // Attempt to write securities and obtain the correct security (either newly written or original)
// // Write array in reverse order as underlying is at position 0
// for (int i = security.length - 1; i >= 0; i--) {
// security[i] = portfolioWriter.writeSecurity(security[i]);
// }
//
// // Build the position and trade(s) using security[0] (underlying)
// ManageablePosition position = _rowParser.constructPosition(row, security[0]);
//
// ManageableTrade trade = _rowParser.constructTrade(row, security[0], position);
// if (trade != null) {
// position.addTrade(trade);
// }
//
// // Write positions/trade(s) to masters (trades are implicitly written with the position)
// portfolioWriter.writePosition(position);
// }
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
Map<String, String> row;
// TODO to implement recursive portfolio traversal, outputting positions, trades and securities
// // Get the next position/trade from the portfolio
// while ((row = getSheet().loadNextRow()) != null) {
//
// // Attempt to write securities and obtain the correct security (either newly written or original)
// // Write array in reverse order as underlying is at position 0
// for (int i = security.length - 1; i >= 0; i--) {
// security[i] = portfolioWriter.writeSecurity(security[i]);
// }
//
// // Build the position and trade(s) using security[0] (underlying)
// ManageablePosition position = _rowParser.constructPosition(row, security[0]);
//
// ManageableTrade trade = _rowParser.constructTrade(row, security[0], position);
// if (trade != null) {
// position.addTrade(trade);
// }
//
// // Write positions/trade(s) to masters (trades are implicitly written with the position)
// portfolioWriter.writePosition(position);
// }
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
Map<String, String> row;
// TODO to implement recursive portfolio traversal, outputting positions, trades and securities
// // Get the next position/trade from the portfolio
// while ((row = getSheet().loadNextRow()) != null) {
//
// // Attempt to write securities and obtain the correct security (either newly written or original)
// // Write array in reverse order as underlying is at position 0
// for (int i = security.length - 1; i >= 0; i--) {
// security[i] = portfolioWriter.writeSecurity(security[i]);
// }
//
// // Build the position and trade(s) using security[0] (underlying)
// ManageablePosition position = _rowParser.constructPosition(row, security[0]);
//
// ManageableTrade trade = _rowParser.constructTrade(row, security[0], position);
// if (trade != null) {
// position.addTrade(trade);
// }
//
// // Write positions/trade(s) to masters (trades are implicitly written with the position)
// portfolioWriter.writePosition(position);
// }
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityMaster;
/**
 * Reads an existing portfolio from a {@link PortfolioMaster} and writes its
 * node structure and positions out through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the loader context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param loaderContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
    _portfolioMaster = loaderContext.getPortfolioMaster();
    _positionMaster = loaderContext.getPositionMaster();
    _securityMaster = loaderContext.getSecurityMaster();
    _securitySource = loaderContext.getSecuritySource();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
      PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityMaster;
/**
 * Reads an existing portfolio from a {@link PortfolioMaster} and writes its
 * node structure and positions out through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the loader context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param loaderContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
    _portfolioMaster = loaderContext.getPortfolioMaster();
    _positionMaster = loaderContext.getPositionMaster();
    _securityMaster = loaderContext.getSecurityMaster();
    _securitySource = loaderContext.getSecuritySource();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
      PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
/**
 * Portfolio reader: reads an existing portfolio from a {@link PortfolioMaster}
 * and writes its node structure and positions through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the tool context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param toolContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
    // FIX: the merged code referenced an undefined 'loaderContext'; the masters
    // come from the supplied tool context. No security-source accessor is shown
    // for ToolContext here, so _securitySource is left unset — TODO confirm.
    _portfolioMaster = toolContext.getPortfolioMaster();
    _positionMaster = toolContext.getPositionMaster();
    _securityMaster = toolContext.getSecurityMaster();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
/**
 * Portfolio reader: reads an existing portfolio from a {@link PortfolioMaster}
 * and writes its node structure and positions through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the tool context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param toolContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
    // FIX: the merged code referenced an undefined 'loaderContext'; the masters
    // come from the supplied tool context. No security-source accessor is shown
    // for ToolContext here, so _securitySource is left unset — TODO confirm.
    _portfolioMaster = toolContext.getPortfolioMaster();
    _positionMaster = toolContext.getPositionMaster();
    _securityMaster = toolContext.getSecurityMaster();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
/**
 * Portfolio reader: reads an existing portfolio from a {@link PortfolioMaster}
 * and writes its node structure and positions through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the tool context.
   * Note: no security source is taken from the tool context, so _securitySource
   * remains unset on this path.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param toolContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
    _portfolioMaster = toolContext.getPortfolioMaster();
    _positionMaster = toolContext.getPositionMaster();
    _securityMaster = toolContext.getSecurityMaster();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio, taking the masters from the loader context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param loaderContext  the context supplying the masters and the security source
   */
  public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
    _portfolioMaster = loaderContext.getPortfolioMaster();
    _positionMaster = loaderContext.getPositionMaster();
    _securityMaster = loaderContext.getSecurityMaster();
    _securitySource = loaderContext.getSecuritySource();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
/**
 * Portfolio reader: reads an existing portfolio from a {@link PortfolioMaster}
 * and writes its node structure and positions through a {@link PortfolioWriter}.
 */
public class MasterPortfolioReader implements PortfolioReader {

  private PortfolioMaster _portfolioMaster;
  private PositionMaster _positionMaster;
  private SecurityMaster _securityMaster;
  private SecuritySource _securitySource;
  private PortfolioDocument _portfolioDocument;
  private ManageablePortfolioNode _currentNode;

  /**
   * Creates a reader for the named portfolio, taking the masters from the tool context.
   * Note: no security source is taken from the tool context, so _securitySource
   * remains unset on this path.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param toolContext  the context supplying the portfolio, position and security masters
   */
  public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
    _portfolioMaster = toolContext.getPortfolioMaster();
    _positionMaster = toolContext.getPositionMaster();
    _securityMaster = toolContext.getSecurityMaster();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio, taking the masters from the loader context.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param loaderContext  the context supplying the masters and the security source
   */
  public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
    _portfolioMaster = loaderContext.getPortfolioMaster();
    _positionMaster = loaderContext.getPositionMaster();
    _securityMaster = loaderContext.getSecurityMaster();
    _securitySource = loaderContext.getSecuritySource();
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Creates a reader for the named portfolio using explicitly supplied masters.
   *
   * @param portfolioName  the name of the portfolio to open
   * @param portfolioMaster  the portfolio master
   * @param positionMaster  the position master
   * @param securityMaster  the security master
   * @param securitySource  the security source
   */
  public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
    _portfolioMaster = portfolioMaster;
    _positionMaster = positionMaster;
    _securityMaster = securityMaster;
    _securitySource = securitySource;
    _portfolioDocument = openPortfolio(portfolioName);
  }

  /**
   * Walks the whole source portfolio from its root node, writing every
   * position and mirroring every sub-node into the supplied writer.
   *
   * @param portfolioWriter  the destination writer
   */
  @Override
  public void writeTo(PortfolioWriter portfolioWriter) {
    recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
  }

  /**
   * Depth-first traversal: writes the positions of {@code node}, then finds or
   * creates the matching child node in the destination portfolio and recurses.
   */
  private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
    // Extract and write rows for the current node's positions
    for (ObjectId positionId : node.getPositionIds()) {
      ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
      // get securities here?
      portfolioWriter.writePosition(position);
    }
    // Recursively traverse the child nodes
    for (ManageablePortfolioNode child : node.getChildNodes()) {
      // Find or create the corresponding sub-node in the destination portfolio and change to it
      ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
      ManageablePortfolioNode newNode = null;
      for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
        // FIX: compare names with equals() (== tests reference identity on Strings),
        // and match against the child being mirrored rather than its parent node.
        if (n.getName().equals(child.getName())) {
          newNode = n;
          break;
        }
      }
      if (newNode == null) {
        // FIX: the new destination node mirrors the child, not the parent
        newNode = new ManageablePortfolioNode(child.getName());
        writeNode.addChildNode(newNode);
      }
      portfolioWriter.setCurrentNode(newNode);
      // Recursive call
      recursiveTraversePortfolioNodes(child, portfolioWriter);
      // Change back up to the parent node in the destination portfolio
      portfolioWriter.setCurrentNode(writeNode);
    }
  }

  /**
   * Looks up the named portfolio in the portfolio master.
   *
   * @param portfolioName  the portfolio name to search for
   * @return the matching portfolio document, or {@code null} if not found
   */
  private PortfolioDocument openPortfolio(String portfolioName) {
    // Check to see whether the portfolio already exists
    PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
    portSearchRequest.setName(portfolioName);
    PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
    ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
    PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
    if (portfolio == null || portfolioDoc == null) {
      _currentNode = null;
      return null;
    }
    // Set current node to the root node
    _currentNode = portfolio.getRootNode();
    return portfolioDoc;
  }
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private SecuritySource _securitySource;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_securitySource = loaderContext.getSecuritySource();
_portfolioDocument = openPortfolio(portfolioName);
}
=======
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
>>>>>>> YOURS
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_securitySource = securitySource;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
}
private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
// Extract and write rows for the current node's positions
for (ObjectId positionId : node.getPositionIds()) {
ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
// get securities here?
portfolioWriter.writePosition(position);
}
// Recursively traverse the child nodes
for (ManageablePortfolioNode child : node.getChildNodes()) {
// Find or create corresponding sub-node in destination portfolio and change to it
ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
ManageablePortfolioNode newNode = null;
for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
if (n.getName() == node.getName()) {
newNode = n;
break;
}
}
if (newNode == null) {
newNode = new ManageablePortfolioNode(node.getName());
writeNode.addChildNode(newNode);
}
portfolioWriter.setCurrentNode(newNode);
// Recursive call
recursiveTraversePortfolioNodes(child, portfolioWriter);
// Change back up to parent node in destination portfolio
portfolioWriter.setCurrentNode(writeNode);
}
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
import com.opengamma.financial.tool.ToolContext;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
import com.opengamma.master.position.PositionDocument;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private SecuritySource _securitySource;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_securitySource = loaderContext.getSecuritySource();
_portfolioDocument = openPortfolio(portfolioName);
}
=======
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
_portfolioDocument = openPortfolio(portfolioName);
}
>>>>>>> YOURS
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_securitySource = securitySource;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
}
private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
// Extract and write rows for the current node's positions
for (ObjectId positionId : node.getPositionIds()) {
ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
// get securities here?
portfolioWriter.writePosition(position);
}
// Recursively traverse the child nodes
for (ManageablePortfolioNode child : node.getChildNodes()) {
// Find or create corresponding sub-node in destination portfolio and change to it
ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
ManageablePortfolioNode newNode = null;
for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
if (n.getName() == node.getName()) {
newNode = n;
break;
}
}
if (newNode == null) {
newNode = new ManageablePortfolioNode(node.getName());
writeNode.addChildNode(newNode);
}
portfolioWriter.setCurrentNode(newNode);
// Recursive call
recursiveTraversePortfolioNodes(child, portfolioWriter);
// Change back up to parent node in destination portfolio
portfolioWriter.setCurrentNode(writeNode);
}
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
<<<<<<< MINE
import com.opengamma.financial.tool.ToolContext;
=======
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
>>>>>>> YOURS
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
<<<<<<< MINE
=======
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionDocument;
>>>>>>> YOURS
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private SecuritySource _securitySource;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
=======
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_securitySource = loaderContext.getSecuritySource();
>>>>>>> YOURS
_portfolioDocument = openPortfolio(portfolioName);
}
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
=======
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
>>>>>>> YOURS
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_securitySource = securitySource;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
}
private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
// Extract and write rows for the current node's positions
for (ObjectId positionId : node.getPositionIds()) {
ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
// get securities here?
portfolioWriter.writePosition(position);
}
// Recursively traverse the child nodes
for (ManageablePortfolioNode child : node.getChildNodes()) {
// Find or create corresponding sub-node in destination portfolio and change to it
ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
ManageablePortfolioNode newNode = null;
for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
if (n.getName() == node.getName()) {
newNode = n;
break;
}
}
if (newNode == null) {
newNode = new ManageablePortfolioNode(node.getName());
writeNode.addChildNode(newNode);
}
portfolioWriter.setCurrentNode(newNode);
// Recursive call
recursiveTraversePortfolioNodes(child, portfolioWriter);
// Change back up to parent node in destination portfolio
portfolioWriter.setCurrentNode(writeNode);
}
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.loader.portfolio;
import java.util.Map;
<<<<<<< MINE
import com.opengamma.financial.tool.ToolContext;
=======
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.financial.loader.LoaderContext;
import com.opengamma.id.ObjectId;
import com.opengamma.id.VersionCorrection;
>>>>>>> YOURS
import com.opengamma.master.portfolio.ManageablePortfolio;
import com.opengamma.master.portfolio.ManageablePortfolioNode;
import com.opengamma.master.portfolio.PortfolioDocument;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.portfolio.PortfolioSearchRequest;
import com.opengamma.master.portfolio.PortfolioSearchResult;
<<<<<<< MINE
=======
import com.opengamma.master.position.ManageablePosition;
import com.opengamma.master.position.ManageableTrade;
import com.opengamma.master.position.PositionDocument;
>>>>>>> YOURS
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.security.SecurityMaster;
/**
* Portfolio reader.
*/
public class MasterPortfolioReader implements PortfolioReader {
private PortfolioMaster _portfolioMaster;
private PositionMaster _positionMaster;
private SecurityMaster _securityMaster;
private SecuritySource _securitySource;
private PortfolioDocument _portfolioDocument;
private ManageablePortfolioNode _currentNode;
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, ToolContext toolContext) {
_portfolioMaster = toolContext.getPortfolioMaster();
_positionMaster = toolContext.getPositionMaster();
_securityMaster = toolContext.getSecurityMaster();
=======
public MasterPortfolioReader(String portfolioName, LoaderContext loaderContext) {
_portfolioMaster = loaderContext.getPortfolioMaster();
_positionMaster = loaderContext.getPositionMaster();
_securityMaster = loaderContext.getSecurityMaster();
_securitySource = loaderContext.getSecuritySource();
>>>>>>> YOURS
_portfolioDocument = openPortfolio(portfolioName);
}
<<<<<<< MINE
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster, PositionMaster positionMaster, SecurityMaster securityMaster) {
=======
public MasterPortfolioReader(String portfolioName, PortfolioMaster portfolioMaster,
PositionMaster positionMaster, SecurityMaster securityMaster, SecuritySource securitySource) {
>>>>>>> YOURS
_portfolioMaster = portfolioMaster;
_positionMaster = positionMaster;
_securityMaster = securityMaster;
_securitySource = securitySource;
_portfolioDocument = openPortfolio(portfolioName);
}
@Override
public void writeTo(PortfolioWriter portfolioWriter) {
recursiveTraversePortfolioNodes(_portfolioDocument.getPortfolio().getRootNode(), portfolioWriter);
}
private void recursiveTraversePortfolioNodes(ManageablePortfolioNode node, PortfolioWriter portfolioWriter) {
// Extract and write rows for the current node's positions
for (ObjectId positionId : node.getPositionIds()) {
ManageablePosition position = _positionMaster.get(positionId, VersionCorrection.LATEST).getPosition();
// get securities here?
portfolioWriter.writePosition(position);
}
// Recursively traverse the child nodes
for (ManageablePortfolioNode child : node.getChildNodes()) {
// Find or create corresponding sub-node in destination portfolio and change to it
ManageablePortfolioNode writeNode = portfolioWriter.getCurrentNode();
ManageablePortfolioNode newNode = null;
for (ManageablePortfolioNode n : writeNode.getChildNodes()) {
if (n.getName() == node.getName()) {
newNode = n;
break;
}
}
if (newNode == null) {
newNode = new ManageablePortfolioNode(node.getName());
writeNode.addChildNode(newNode);
}
portfolioWriter.setCurrentNode(newNode);
// Recursive call
recursiveTraversePortfolioNodes(child, portfolioWriter);
// Change back up to parent node in destination portfolio
portfolioWriter.setCurrentNode(writeNode);
}
}
private PortfolioDocument openPortfolio(String portfolioName) {
// Check to see whether the portfolio already exists
PortfolioSearchRequest portSearchRequest = new PortfolioSearchRequest();
portSearchRequest.setName(portfolioName);
PortfolioSearchResult portSearchResult = _portfolioMaster.search(portSearchRequest);
ManageablePortfolio portfolio = portSearchResult.getFirstPortfolio();
PortfolioDocument portfolioDoc = portSearchResult.getFirstDocument();
if (portfolio == null || portfolioDoc == null) {
_currentNode = null;
return null;
}
// Set current node to the root node
_currentNode = portfolio.getRootNode();
return portfolioDoc;
}
}
Diff Result
No diff
Case 52 - java_ogplatform.rev_33740_8dd22..InterestRateFutureOptionHestonPresentValueFunction.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.AbstractInstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.SimpleTrade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* @deprecated Uses old properties
*/
@Deprecated
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private final String _forwardCurveName;
private final String _fundingCurveName;
private final String _surfaceName;
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
public InterestRateFutureOptionHestonPresentValueFunction(final String forwardCurveName, final String fundingCurveName, final String surfaceName) {
Validate.notNull(forwardCurveName, "forward curve name");
Validate.notNull(fundingCurveName, "funding curve name");
Validate.notNull(surfaceName, "surface name");
_forwardCurveName = forwardCurveName;
_fundingCurveName = fundingCurveName;
_surfaceName = surfaceName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final SimpleTrade trade = (SimpleTrade) target.getTrade();
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(trade);
final InstrumentDerivative irFutureOption = _dataConverter.convert(trade.getSecurity(), irFutureOptionDefinition, now, new String[] {_fundingCurveName, _forwardCurveName }, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs));
return Sets.newHashSet(new ComputedValue(getSpecification(target), price));
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
requirements.add(getSurfaceRequirement(target));
if (_forwardCurveName.equals(_fundingCurveName)) {
requirements.add(getCurveRequirement(target, _forwardCurveName, null, null));
return requirements;
}
requirements.add(getCurveRequirement(target, _forwardCurveName, _forwardCurveName, _fundingCurveName));
requirements.add(getCurveRequirement(target, _fundingCurveName, _forwardCurveName, _fundingCurveName));
final Trade trade = target.getTrade();
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Sets.newHashSet(getSpecification(target));
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency, properties);
}
private ValueSpecification getSpecification(final ComputationTarget target) {
return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(YieldCurveFunction.PROPERTY_FORWARD_CURVE, _forwardCurveName)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, _fundingCurveName).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
}
private ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String advisoryForward, final String advisoryFunding) {
return YieldCurveFunction.getCurveRequirement(FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()), curveName, advisoryForward, advisoryFunding);
}
private class MyDerivativeVisitor extends AbstractInstrumentDerivativeVisitor<YieldCurveBundle, Double> {
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs) {
_target = target;
_inputs = inputs;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
@SuppressWarnings("synthetic-access")
private YieldCurveBundle getYieldCurves(final ComputationTarget target, final FunctionInputs inputs) {
final ValueRequirement forwardCurveRequirement = getCurveRequirement(target, _forwardCurveName, null, null);
final Object forwardCurveObject = inputs.getValue(forwardCurveRequirement);
if (forwardCurveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + forwardCurveRequirement);
}
Object fundingCurveObject = null;
if (!_forwardCurveName.equals(_fundingCurveName)) {
final ValueRequirement fundingCurveRequirement = getCurveRequirement(target, _fundingCurveName, null, null);
fundingCurveObject = inputs.getValue(fundingCurveRequirement);
if (fundingCurveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + fundingCurveRequirement);
}
}
final YieldAndDiscountCurve forwardCurve = (YieldAndDiscountCurve) forwardCurveObject;
final YieldAndDiscountCurve fundingCurve = fundingCurveObject == null ? forwardCurve : (YieldAndDiscountCurve) fundingCurveObject;
return new YieldCurveBundle(new String[] {_fundingCurveName, _forwardCurveName }, new YieldAndDiscountCurve[] {fundingCurve, forwardCurve });
}
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final ValueRequirement surfacesRequirement = getSurfaceRequirement(target);
final Object surfacesObject = inputs.getValue(surfacesRequirement);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get " + surfacesRequirement);
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Don't know how this happened");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.AbstractInstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.SimpleTrade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* @deprecated Uses old properties
*/
@Deprecated
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private final String _forwardCurveName;
private final String _fundingCurveName;
private final String _surfaceName;
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
public InterestRateFutureOptionHestonPresentValueFunction(final String forwardCurveName, final String fundingCurveName, final String surfaceName) {
Validate.notNull(forwardCurveName, "forward curve name");
Validate.notNull(fundingCurveName, "funding curve name");
Validate.notNull(surfaceName, "surface name");
_forwardCurveName = forwardCurveName;
_fundingCurveName = fundingCurveName;
_surfaceName = surfaceName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final SimpleTrade trade = (SimpleTrade) target.getTrade();
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(trade);
final InstrumentDerivative irFutureOption = _dataConverter.convert(trade.getSecurity(), irFutureOptionDefinition, now, new String[] {_fundingCurveName, _forwardCurveName }, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs));
return Sets.newHashSet(new ComputedValue(getSpecification(target), price));
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
requirements.add(getSurfaceRequirement(target));
if (_forwardCurveName.equals(_fundingCurveName)) {
requirements.add(getCurveRequirement(target, _forwardCurveName, null, null));
return requirements;
}
requirements.add(getCurveRequirement(target, _forwardCurveName, _forwardCurveName, _fundingCurveName));
requirements.add(getCurveRequirement(target, _fundingCurveName, _forwardCurveName, _fundingCurveName));
final Trade trade = target.getTrade();
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Sets.newHashSet(getSpecification(target));
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency, properties);
}
private ValueSpecification getSpecification(final ComputationTarget target) {
  // Present-value spec tagged with currency, curve names, surface and method properties.
  final String currencyCode = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode();
  final ValueProperties properties = createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .with(YieldCurveFunction.PROPERTY_FORWARD_CURVE, _forwardCurveName)
      .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, _fundingCurveName)
      .with(ValuePropertyNames.SURFACE, _surfaceName)
      .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
      .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier")
      .get();
  return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), properties);
}
private ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String advisoryForward, final String advisoryFunding) {
  // Delegate to the shared helper, keyed on the trade's currency.
  final Currency tradeCurrency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return YieldCurveFunction.getCurveRequirement(tradeCurrency, curveName, advisoryForward, advisoryFunding);
}
/**
 * Visitor that prices IR future option derivatives via a Fourier pricer with a
 * Heston characteristic exponent. Model parameters are read from fitted surfaces
 * (looked up through the function inputs) at the option's (expiry, strike) point;
 * the underlying futures price is computed with the discounting method.
 */
private class MyDerivativeVisitor extends AbstractInstrumentDerivativeVisitor<YieldCurveBundle, Double> {
// Contour-shift parameter passed to the Fourier pricer.
private final double _alpha = -0.5;
// Integration tolerance passed to the Fourier pricer.
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs) {
_target = target;
_inputs = inputs;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
// Forward rate implied by the futures price (futures quote = 1 - rate).
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
// NOTE(review): numeraire 1 and volatility 0 here; the margin variant below uses 1e-6 — confirm intent.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
// Transactions are priced as their underlying option security.
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
// Small non-zero volatility seed here, unlike the premium variant above.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
// Transactions are priced as their underlying option security.
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
// Builds the curve bundle from the resolved function inputs. Throws if a
// required curve is missing; funding curve falls back to the forward curve
// when the two names are equal.
@SuppressWarnings("synthetic-access")
private YieldCurveBundle getYieldCurves(final ComputationTarget target, final FunctionInputs inputs) {
final ValueRequirement forwardCurveRequirement = getCurveRequirement(target, _forwardCurveName, null, null);
final Object forwardCurveObject = inputs.getValue(forwardCurveRequirement);
if (forwardCurveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + forwardCurveRequirement);
}
Object fundingCurveObject = null;
if (!_forwardCurveName.equals(_fundingCurveName)) {
final ValueRequirement fundingCurveRequirement = getCurveRequirement(target, _fundingCurveName, null, null);
fundingCurveObject = inputs.getValue(fundingCurveRequirement);
if (fundingCurveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + fundingCurveRequirement);
}
}
final YieldAndDiscountCurve forwardCurve = (YieldAndDiscountCurve) forwardCurveObject;
final YieldAndDiscountCurve fundingCurve = fundingCurveObject == null ? forwardCurve : (YieldAndDiscountCurve) fundingCurveObject;
return new YieldCurveBundle(new String[] {_fundingCurveName, _forwardCurveName }, new YieldAndDiscountCurve[] {fundingCurve, forwardCurve });
}
// Interpolates the five Heston parameters (kappa, theta, vol0, omega, rho)
// from the fitted surfaces at (t, k). Throws if the surfaces are missing or
// fitted for a different currency than the trade's.
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final ValueRequirement surfacesRequirement = getSurfaceRequirement(target);
final Object surfacesObject = inputs.getValue(surfacesRequirement);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get " + surfacesRequirement);
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Don't know how this happened");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
/**
 * Engine function computing the present value of interest-rate future option
 * trades using a Heston model priced by Fourier transform. Requirements are the
 * yield curves from a named curve-calculation configuration, fitted Heston
 * surfaces for the trade's currency, and any time series needed to convert the
 * trade definition into an analytics derivative.
 */
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
// Converts engine trades to analytics instrument definitions; set in init().
private InterestRateFutureOptionTradeConverter _converter;
// Converts definitions to derivatives, resolving fixing time series; set in init().
private FixedIncomeConverterDataProvider _dataConverter;
// Builds the converters from the shared compilation-context sources.
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
/**
 * Converts the trade to a derivative at the valuation instant, prices it with
 * the Fourier/Heston visitor, and returns a single PRESENT_VALUE computed value.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Exactly one desired value is assumed; its constraints carry the surface and config names.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// Unchecked cast: the trade converter is trusted to produce this definition type.
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
// Result properties mirror getResults() with the resolved surface/config names.
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
// This function is computed per trade.
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
// Applicable only to trades whose security is an IR future option.
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
/**
 * Resolves curve, surface and time-series requirements from the desired value's
 * constraints. Returns null (engine convention: "cannot apply") when constraints
 * are missing/ambiguous, the curve config is unknown, or the config is pinned to
 * a different currency than the trade's.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Full surface name is the constrained name suffixed with the exchange prefix for the option.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
// If the config targets a specific currency, it must match the trade's currency.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
// Advertises PRESENT_VALUE with surface/config left open for constraint resolution.
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
// Requirement for the fitted Heston surfaces, keyed on the trade currency's unique id.
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties
.with(ValuePropertyNames.CURRENCY, currency.getCode())
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency.getUniqueId(), properties);
}
/**
 * Visitor pricing IR future option derivatives with a Fourier pricer and a
 * Heston characteristic exponent interpolated from fitted surfaces at the
 * option's (expiry, strike) point. Uses the pre-resolved curve bundle.
 */
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
// Contour-shift parameter passed to the Fourier pricer.
private final double _alpha = -0.5;
// Integration tolerance passed to the Fourier pricer.
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
// Forward rate implied by the futures price (futures quote = 1 - rate).
final double f = 1 - _futurePricer.price(irFuture, _curves);
// NOTE(review): volatility seed 0 here vs. 1e-6 in the margin variant — confirm intent.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
// Transactions are priced as their underlying option security.
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
// Transactions are priced as their underlying option security.
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
// Interpolates the five Heston parameters (kappa, theta, vol0, omega, rho) at (t, k).
// NOTE(review): looks up the surfaces by value name only, not via getSurfaceRequirement — confirm
// this resolves the intended surface when multiple are present.
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
@Override
public void init(final FunctionCompilationContext context) {
  // Pull the shared sources needed to build the trade and security converters.
  final HolidaySource holidays = OpenGammaCompilationContext.getHolidaySource(context);
  final RegionSource regions = OpenGammaCompilationContext.getRegionSource(context);
  final ConventionBundleSource conventions = OpenGammaCompilationContext.getConventionBundleSource(context);
  final SecuritySource securities = OpenGammaCompilationContext.getSecuritySource(context);
  final HistoricalTimeSeriesResolver htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final InterestRateFutureOptionSecurityConverter securityConverter =
      new InterestRateFutureOptionSecurityConverter(holidays, conventions, regions, securities);
  _converter = new InterestRateFutureOptionTradeConverter(securityConverter);
  _dataConverter = new FixedIncomeConverterDataProvider(conventions, htsResolver);
}
/**
 * Converts the trade to a derivative at the valuation instant, prices it with
 * the Fourier/Heston visitor, and returns a single PRESENT_VALUE computed value.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Exactly one desired value is assumed; its constraints carry the surface and config names.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// Unchecked cast: the trade converter is trusted to produce this definition type.
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
// Result properties mirror getResults() with the resolved surface/config names.
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
@Override
public ComputationTargetType getTargetType() {
  // This function is computed per trade.
  final ComputationTargetType tradeType = ComputationTargetType.TRADE;
  return tradeType;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  // Applicable only to trade targets whose security is an IR future option.
  return target.getType() == ComputationTargetType.TRADE
      && target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
/**
 * Resolves curve, surface and time-series requirements from the desired value's
 * constraints. Returns null (engine convention: "cannot apply") when constraints
 * are missing/ambiguous, the curve config is unknown, or the config is pinned to
 * a different currency than the trade's.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Full surface name is the constrained name suffixed with the exchange prefix for the option.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
// If the config targets a specific currency, it must match the trade's currency.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
  // Advertise PRESENT_VALUE with surface and curve config left open for resolution.
  final String currencyCode = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode();
  final ValueProperties properties = createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .withAny(ValuePropertyNames.SURFACE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
      .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
      .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier")
      .get();
  final Set<ValueSpecification> results = new HashSet<ValueSpecification>();
  results.add(new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), properties));
  return results;
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
  // Heston surfaces are published against the currency of the trade's security.
  final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  final ValueProperties surfaceProperties = ValueProperties
      .with(ValuePropertyNames.CURRENCY, ccy.getCode())
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION)
      .get();
  return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ccy.getUniqueId(), surfaceProperties);
}
/**
 * Visitor that prices the supported IR future option derivative types by Fourier transform
 * under a Heston model. Curves are supplied at construction; Heston parameters are
 * interpolated per (expiry, strike) from fitted surfaces found in the function inputs.
 * Non-static inner class: getModelParameters relies on outer-class members via synthetic access.
 */
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
// Contour-shift parameter for the Fourier integral.
private final double _alpha = -0.5;
// Convergence tolerance passed to the Fourier pricer.
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
// Futures are quoted as (1 - rate), so the forward is recovered from the future price.
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
// A transaction is priced as its underlying option security.
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
// NOTE(review): margined path seeds Black vol with 1e-6 where the premium path uses 0 —
// presumably to keep the data well-behaved; confirm the asymmetry is intentional.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
/**
 * Interpolates the five Heston parameters at (expiry, strike) from the fitted surfaces.
 *
 * @throws OpenGammaRuntimeException if the surfaces are missing from the inputs or their
 *         currency does not match the trade's currency
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.AbstractInstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.SimpleTrade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
 * Computes the present value of an interest rate future option trade under a Heston
 * stochastic volatility model. The five Heston parameters are interpolated at the option's
 * (expiry, strike) from previously fitted surfaces and the option is priced by Fourier
 * transform; the underlying future is priced by discounting on the configured curves.
 *
 * @deprecated Uses old properties
 */
@Deprecated
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
  /** Name of the curve used to project the underlying future's rate. */
  private final String _forwardCurveName;
  /** Name of the curve used for discounting. */
  private final String _fundingCurveName;
  /** Name of the fitted Heston volatility surface bundle. */
  private final String _surfaceName;
  /** Converts a trade to an analytics definition; initialised in {@link #init}. */
  private InterestRateFutureOptionTradeConverter _converter;
  /** Converts definitions to derivatives using fixing time series; initialised in {@link #init}. */
  private FixedIncomeConverterDataProvider _dataConverter;

  /**
   * @param forwardCurveName the forward curve name, not null
   * @param fundingCurveName the funding curve name, not null
   * @param surfaceName the Heston surface name, not null
   */
  public InterestRateFutureOptionHestonPresentValueFunction(final String forwardCurveName, final String fundingCurveName, final String surfaceName) {
    Validate.notNull(forwardCurveName, "forward curve name");
    Validate.notNull(fundingCurveName, "funding curve name");
    Validate.notNull(surfaceName, "surface name");
    _forwardCurveName = forwardCurveName;
    _fundingCurveName = fundingCurveName;
    _surfaceName = surfaceName;
  }

  @Override
  public void init(final FunctionCompilationContext context) {
    // Build the security/trade converters from the sources available at compile time.
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
    final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    _converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
    _dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Clock snapshotClock = executionContext.getValuationClock();
    final ZonedDateTime now = snapshotClock.zonedDateTime();
    final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
    final SimpleTrade trade = (SimpleTrade) target.getTrade();
    // The trade converter is not generic in the security type, so the narrowing cast is
    // unavoidable; suppress the resulting unchecked warning at the smallest scope.
    @SuppressWarnings("unchecked")
    final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(trade);
    final InstrumentDerivative irFutureOption = _dataConverter.convert(trade.getSecurity(), irFutureOptionDefinition, now, new String[] {_fundingCurveName, _forwardCurveName }, timeSeries);
    final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs));
    return Sets.newHashSet(new ComputedValue(getSpecification(target), price));
  }

  @Override
  public ComputationTargetType getTargetType() {
    // This function operates on individual trades.
    return ComputationTargetType.TRADE;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Only interest rate future option trades are supported.
    return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    requirements.add(getSurfaceRequirement(target));
    if (_forwardCurveName.equals(_fundingCurveName)) {
      // Single-curve case: no advisory curve names and no time series are requested.
      requirements.add(getCurveRequirement(target, _forwardCurveName, null, null));
      return requirements;
    }
    requirements.add(getCurveRequirement(target, _forwardCurveName, _forwardCurveName, _fundingCurveName));
    requirements.add(getCurveRequirement(target, _fundingCurveName, _forwardCurveName, _fundingCurveName));
    final Trade trade = target.getTrade();
    final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
    if (timeSeriesRequirements == null) {
      // The conversion requirements cannot be satisfied; signal inapplicability.
      return null;
    }
    requirements.addAll(timeSeriesRequirements);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Sets.newHashSet(getSpecification(target));
  }

  /** Builds the requirement for the fitted Heston surfaces in the trade's currency. */
  private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
    return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
  }

  /** Builds the present-value result specification for the given trade target. */
  private ValueSpecification getSpecification(final ComputationTarget target) {
    return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
        .with(YieldCurveFunction.PROPERTY_FORWARD_CURVE, _forwardCurveName)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, _fundingCurveName).with(ValuePropertyNames.SURFACE, _surfaceName)
        .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
        .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
  }

  /** Builds a yield curve requirement for the trade's currency with optional advisory names. */
  private ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String advisoryForward, final String advisoryFunding) {
    return YieldCurveFunction.getCurveRequirement(FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()), curveName, advisoryForward, advisoryFunding);
  }

  /**
   * Visitor pricing the supported IR future option derivative types with the Fourier pricer,
   * pulling curves and Heston parameters from the function inputs.
   */
  private class MyDerivativeVisitor extends AbstractInstrumentDerivativeVisitor<YieldCurveBundle, Double> {
    /** Contour-shift parameter for the Fourier integral. */
    private final double _alpha = -0.5;
    /** Convergence tolerance passed to the Fourier pricer. */
    private final double _tolerance = 0.001;
    private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
    private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
    private final ComputationTarget _target;
    private final FunctionInputs _inputs;

    public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs) {
      _target = target;
      _inputs = inputs;
    }

    @Override
    public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      // Futures are quoted as (1 - rate), so the forward is recovered from the future price.
      final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    @Override
    public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
      // A transaction is priced as its underlying option security.
      return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
    }

    @Override
    public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
      // NOTE(review): margined path seeds Black vol with 1e-6 where the premium path uses 0 —
      // presumably to keep the data well-behaved; confirm the asymmetry is intentional.
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    @Override
    public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
      return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
    }

    /** Assembles the {funding, forward} curve bundle from the function inputs. */
    @SuppressWarnings("synthetic-access")
    private YieldCurveBundle getYieldCurves(final ComputationTarget target, final FunctionInputs inputs) {
      final ValueRequirement forwardCurveRequirement = getCurveRequirement(target, _forwardCurveName, null, null);
      final Object forwardCurveObject = inputs.getValue(forwardCurveRequirement);
      if (forwardCurveObject == null) {
        throw new OpenGammaRuntimeException("Could not get " + forwardCurveRequirement);
      }
      Object fundingCurveObject = null;
      if (!_forwardCurveName.equals(_fundingCurveName)) {
        final ValueRequirement fundingCurveRequirement = getCurveRequirement(target, _fundingCurveName, null, null);
        fundingCurveObject = inputs.getValue(fundingCurveRequirement);
        if (fundingCurveObject == null) {
          throw new OpenGammaRuntimeException("Could not get " + fundingCurveRequirement);
        }
      }
      final YieldAndDiscountCurve forwardCurve = (YieldAndDiscountCurve) forwardCurveObject;
      // One curve serves both roles when the names coincide.
      final YieldAndDiscountCurve fundingCurve = fundingCurveObject == null ? forwardCurve : (YieldAndDiscountCurve) fundingCurveObject;
      return new YieldCurveBundle(new String[] {_fundingCurveName, _forwardCurveName }, new YieldAndDiscountCurve[] {fundingCurve, forwardCurve });
    }

    /**
     * Interpolates the five Heston parameters at (expiry, strike) from the fitted surfaces.
     *
     * @throws OpenGammaRuntimeException if the surfaces are missing from the inputs or their
     *         currency does not match the trade's currency
     */
    private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
      final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
      @SuppressWarnings("synthetic-access")
      final ValueRequirement surfacesRequirement = getSurfaceRequirement(target);
      final Object surfacesObject = inputs.getValue(surfacesRequirement);
      if (surfacesObject == null) {
        throw new OpenGammaRuntimeException("Could not get " + surfacesRequirement);
      }
      final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
      if (!surfaces.getCurrency().equals(currency)) {
        // Message made actionable (was "Don't know how this happened") and aligned with the later revision.
        throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
      }
      final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
      final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
      final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
      final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
      final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
      return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
    }
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.AbstractInstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.SimpleTrade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* @deprecated Uses old properties
*/
@Deprecated
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
// Name of the curve used to project the underlying future's rate.
private final String _forwardCurveName;
// Name of the curve used for discounting.
private final String _fundingCurveName;
// Name of the fitted Heston volatility surface bundle.
private final String _surfaceName;
// Converts a trade to an analytics definition; assigned in init(), hence not final.
private InterestRateFutureOptionTradeConverter _converter;
// Converts definitions to derivatives using fixing time series; assigned in init().
private FixedIncomeConverterDataProvider _dataConverter;
/**
 * Creates the function for a fixed pair of curves and a Heston surface.
 *
 * @param forwardCurveName the forward curve name, not null
 * @param fundingCurveName the funding curve name, not null
 * @param surfaceName the Heston surface name, not null
 */
public InterestRateFutureOptionHestonPresentValueFunction(final String forwardCurveName, final String fundingCurveName, final String surfaceName) {
Validate.notNull(forwardCurveName, "forward curve name");
Validate.notNull(fundingCurveName, "funding curve name");
Validate.notNull(surfaceName, "surface name");
_forwardCurveName = forwardCurveName;
_fundingCurveName = fundingCurveName;
_surfaceName = surfaceName;
}
@Override
public void init(final FunctionCompilationContext context) {
// Build the security/trade converters from the sources available at compile time.
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  final Clock snapshotClock = executionContext.getValuationClock();
  final ZonedDateTime now = snapshotClock.zonedDateTime();
  final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
  final SimpleTrade trade = (SimpleTrade) target.getTrade();
  // The trade converter is not generic in the security type, so the narrowing cast is
  // unavoidable; suppress the resulting unchecked warning at the smallest scope.
  @SuppressWarnings("unchecked")
  final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(trade);
  final InstrumentDerivative irFutureOption = _dataConverter.convert(trade.getSecurity(), irFutureOptionDefinition, now, new String[] {_fundingCurveName, _forwardCurveName }, timeSeries);
  // Price via the Heston/Fourier visitor and attach the result specification.
  final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs));
  return Sets.newHashSet(new ComputedValue(getSpecification(target), price));
}
@Override
public ComputationTargetType getTargetType() {
// This function operates on individual trades.
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
// Only interest rate future option trades are supported.
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  // Always need the fitted Heston surfaces.
  final Set<ValueRequirement> reqs = new HashSet<ValueRequirement>();
  reqs.add(getSurfaceRequirement(target));
  if (_forwardCurveName.equals(_fundingCurveName)) {
    // Single-curve case: one curve requirement without advisory names, and no
    // conversion time series are requested.
    reqs.add(getCurveRequirement(target, _forwardCurveName, null, null));
    return reqs;
  }
  // Two-curve case: each curve requirement advertises both advisory names.
  reqs.add(getCurveRequirement(target, _forwardCurveName, _forwardCurveName, _fundingCurveName));
  reqs.add(getCurveRequirement(target, _fundingCurveName, _forwardCurveName, _fundingCurveName));
  final Trade trade = target.getTrade();
  final Set<ValueRequirement> timeSeriesReqs = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
  if (timeSeriesReqs == null) {
    // Conversion requirements cannot be satisfied; signal inapplicability.
    return null;
  }
  reqs.addAll(timeSeriesReqs);
  return reqs;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
// A single present-value result, fully specified by the configured curve/surface names.
return Sets.newHashSet(getSpecification(target));
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
  // Heston surfaces are published against the currency of the trade's security.
  final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  final ValueProperties surfaceProperties = ValueProperties
      .with(ValuePropertyNames.CURRENCY, ccy.getCode())
      .with(ValuePropertyNames.SURFACE, _surfaceName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION)
      .get();
  return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(ccy), surfaceProperties);
}
// Builds the present-value result specification carrying the configured curve and
// surface names plus the fixed Heston/Fourier method properties.
private ValueSpecification getSpecification(final ComputationTarget target) {
return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(YieldCurveFunction.PROPERTY_FORWARD_CURVE, _forwardCurveName)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, _fundingCurveName).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
}
// Delegates to YieldCurveFunction to build a curve requirement in the trade's currency,
// optionally advertising advisory forward/funding curve names.
private ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String advisoryForward, final String advisoryFunding) {
return YieldCurveFunction.getCurveRequirement(FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()), curveName, advisoryForward, advisoryFunding);
}
private class MyDerivativeVisitor extends AbstractInstrumentDerivativeVisitor<YieldCurveBundle, Double> {
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs) {
_target = target;
_inputs = inputs;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, getYieldCurves(_target, _inputs));
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
@SuppressWarnings("synthetic-access")
private YieldCurveBundle getYieldCurves(final ComputationTarget target, final FunctionInputs inputs) {
  // The forward curve is always required; fail fast if it was not resolved.
  final ValueRequirement fwdRequirement = getCurveRequirement(target, _forwardCurveName, null, null);
  final Object fwdValue = inputs.getValue(fwdRequirement);
  if (fwdValue == null) {
    throw new OpenGammaRuntimeException("Could not get " + fwdRequirement);
  }
  final YieldAndDiscountCurve forward = (YieldAndDiscountCurve) fwdValue;
  // The funding curve defaults to the forward curve when both names coincide;
  // otherwise it must be resolved separately.
  YieldAndDiscountCurve funding = forward;
  if (!_forwardCurveName.equals(_fundingCurveName)) {
    final ValueRequirement fndRequirement = getCurveRequirement(target, _fundingCurveName, null, null);
    final Object fndValue = inputs.getValue(fndRequirement);
    if (fndValue == null) {
      throw new OpenGammaRuntimeException("Could not get " + fndRequirement);
    }
    funding = (YieldAndDiscountCurve) fndValue;
  }
  return new YieldCurveBundle(new String[] {_fundingCurveName, _forwardCurveName }, new YieldAndDiscountCurve[] {funding, forward });
}
// Interpolates the fitted Heston parameter surfaces at (t, k) and builds the
// characteristic exponent consumed by the Fourier pricer.
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final ValueRequirement surfacesRequirement = getSurfaceRequirement(target);
final Object surfacesObject = inputs.getValue(surfacesRequirement);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get " + surfacesRequirement);
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
// Sanity guard: the fitted surfaces must be for the trade's currency.
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Don't know how this happened");
}
// One interpolated surface per Heston parameter.
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
/**
 * Computes the present value of interest rate future option trades under the Heston
 * stochastic volatility model. Heston parameters (kappa, theta, vol0, omega, rho) are
 * read from fitted surfaces supplied by the graph and the option is priced with a
 * Fourier-transform pricer against the configured yield curve bundle.
 */
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
  /** Converts IR future option trades to analytics definitions; set in {@link #init}. */
  private InterestRateFutureOptionTradeConverter _converter;
  /** Attaches fixing time series when converting definitions to derivatives; set in {@link #init}. */
  private FixedIncomeConverterDataProvider _dataConverter;

  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
    final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    _converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
    _dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
  }

  /**
   * Converts the trade to an instrument derivative, prices it with the Heston/Fourier
   * visitor and returns a single PRESENT_VALUE computed value.
   */
  @SuppressWarnings("unchecked")
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final Clock snapshotClock = executionContext.getValuationClock();
    final ZonedDateTime now = snapshotClock.zonedDateTime();
    final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
    final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
    final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
    final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
    final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
    if (curveCalculationConfig == null) {
      throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
    }
    final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
    final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
    final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
    final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
    final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(),
        createValueProperties()
            .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
            .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
            .with(ValuePropertyNames.SURFACE, surfaceName)
            .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
            .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
    return Sets.newHashSet(new ComputedValue(valueSpecification, price));
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.TRADE;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
  }

  /**
   * Requires the yield curves of the named curve calculation configuration, the fitted
   * Heston surfaces for the trade's currency and any fixing time series needed for
   * conversion. Returns null while the constraints are not yet fully specified.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    final Trade trade = target.getTrade();
    final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      return null;
    }
    final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
    final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
      return null;
    }
    final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
    final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
    final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
    final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
    if (curveCalculationConfig == null) {
      s_logger.error("Could not find curve calculation configuration named {}", curveCalculationConfigName);
      return null;
    }
    final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
    // A currency-keyed configuration must match the trade's currency.
    if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
      return null;
    }
    requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
    requirements.add(getSurfaceRequirement(target, surfaceName));
    final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
    if (timeSeriesRequirements == null) {
      return null;
    }
    requirements.addAll(timeSeriesRequirements);
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    final ValueProperties resultProperties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
        .withAny(ValuePropertyNames.SURFACE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
        .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
    final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
    return Sets.newHashSet(resultSpecification);
  }

  /**
   * Builds the requirement for the fitted Heston parameter surfaces of the trade's currency.
   * FIX: the original body referenced an undeclared field {@code _surfaceName}, which does
   * not compile; the {@code surfaceName} parameter passed by callers is used instead.
   */
  private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties properties = ValueProperties
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
    return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
  }

  /**
   * Prices the supported IR future option derivatives with the Fourier pricer under
   * Heston dynamics. Declared static because it does not touch the enclosing instance.
   */
  private static class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
    /** Damping factor passed to the Fourier pricer. */
    private final double _alpha = -0.5;
    /** Integration tolerance passed to the Fourier pricer. */
    private final double _tolerance = 0.001;
    private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
    private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
    private final ComputationTarget _target;
    private final FunctionInputs _inputs;
    private final YieldCurveBundle _curves;

    public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
      _target = target;
      _inputs = inputs;
      _curves = curves;
    }

    @Override
    public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      // Underlying expressed as a rate: 1 minus the priced futures quote.
      final double f = 1 - _futurePricer.price(irFuture, _curves);
      // NOTE(review): premium options use volatility 0 while margined ones use 1e-6 —
      // presumably the Fourier pricer only reads forward/numeraire here; confirm.
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    @Override
    public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
      // A transaction is priced as its underlying option security.
      return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
    }

    @Override
    public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      final double f = 1 - _futurePricer.price(irFuture, _curves);
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    @Override
    public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
      // A transaction is priced as its underlying option security.
      return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
    }

    /**
     * Reads the fitted Heston parameter surfaces from the resolved inputs and
     * interpolates each parameter at (t, k).
     */
    private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
      final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
      final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
      if (surfacesObject == null) {
        throw new OpenGammaRuntimeException("Could not get heston surface");
      }
      final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
      // Sanity guard: the fitted surfaces must be for the trade's currency.
      if (!surfaces.getCurrency().equals(currency)) {
        throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
      }
      final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
      final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
      final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
      final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
      final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
      return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
    }
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties().with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode()).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston").with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties().with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode()).withAny(ValuePropertyNames.SURFACE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston").with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
// Prices a margined option transaction by delegating to the pricing of its underlying option security.
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
/**
 * Builds the Heston characteristic exponent by evaluating each fitted parameter
 * surface at the given expiry/strike point.
 *
 * @param target the computation target; its trade's currency must match the surfaces
 * @param inputs the function inputs from which the Heston surfaces are read
 * @param t the option expiry time
 * @param k the option strike
 * @return the Heston characteristic exponent at (t, k)
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  @SuppressWarnings("synthetic-access")
  final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
  if (surfacesObject == null) {
    throw new OpenGammaRuntimeException("Could not get heston surface");
  }
  final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
  if (!surfaces.getCurrency().equals(currency)) {
    throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
  }
  // Interpolate each model parameter at the (expiry, strike) point.
  final double kappa = surfaces.getKappaSurface().getZValue(t, k);
  final double theta = surfaces.getThetaSurface().getZValue(t, k);
  final double vol0 = surfaces.getVol0Surface().getZValue(t, k);
  final double omega = surfaces.getOmegaSurface().getZValue(t, k);
  final double rho = surfaces.getRhoSurface().getZValue(t, k);
  return new HestonCharacteristicExponent(kappa, theta, vol0, omega, rho);
}
}
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
/**
 * Computes the present value of an interest rate future option trade using Heston model
 * parameters interpolated from fitted volatility surfaces and a Fourier-based pricer.
 */
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
  /** Converts an IR future option trade into an instrument definition; initialized in {@link #init}. */
  private InterestRateFutureOptionTradeConverter _converter;
  /** Converts instrument definitions into derivatives using market time series; initialized in {@link #init}. */
  private FixedIncomeConverterDataProvider _dataConverter;

  /**
   * Builds the trade and fixed-income converters from the sources in the compilation context.
   */
  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
    final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    _converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
    _dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
  }

  /**
   * Converts the trade to a derivative, prices it with the Heston/Fourier visitor and
   * returns a single PRESENT_VALUE computed value.
   *
   * @throws OpenGammaRuntimeException if the named curve calculation configuration cannot be found
   */
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final Clock snapshotClock = executionContext.getValuationClock();
    final ZonedDateTime now = snapshotClock.zonedDateTime();
    final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
    @SuppressWarnings("unchecked")
    final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
    final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
    final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
    final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
    if (curveCalculationConfig == null) {
      throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
    }
    final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
    final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
    final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
    final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
    final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(),
        createValueProperties()
            .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
            .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
            .with(ValuePropertyNames.SURFACE, surfaceName)
            .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
            .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
    return Sets.newHashSet(new ComputedValue(valueSpecification, price));
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.TRADE;
  }

  /** Applies only to trades whose security is an IR future option. */
  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
  }

  /**
   * Requires exactly one surface name and one curve calculation configuration in the
   * constraints, plus the corresponding curve, surface and time-series inputs.
   * Returns {@code null} (per engine convention) when the requirements cannot be satisfied.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    final Trade trade = target.getTrade();
    final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      return null;
    }
    final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
    final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
      return null;
    }
    final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
    final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
    final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
    final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
    if (curveCalculationConfig == null) {
      s_logger.error("Could not find curve calculation configuration named {}", curveCalculationConfigName);
      return null;
    }
    final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
    // If the config is keyed by a currency, it must match the trade's currency.
    if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
      return null;
    }
    requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
    requirements.add(getSurfaceRequirement(target, surfaceName));
    final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
    if (timeSeriesRequirements == null) {
      return null;
    }
    requirements.addAll(timeSeriesRequirements);
    return requirements;
  }

  /**
   * Advertises a PRESENT_VALUE result with open surface and curve-config properties.
   */
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    final ValueProperties resultProperties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
        .withAny(ValuePropertyNames.SURFACE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
        .with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
    final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
    return Sets.newHashSet(resultSpecification);
  }

  /**
   * Builds the requirement for the fitted Heston surfaces in the trade's currency.
   * (A second overload that referenced an undeclared {@code _surfaceName} field was
   * removed: it could not compile and was never called.)
   */
  private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties properties = ValueProperties
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
    return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency.getUniqueId(), properties);
  }

  /**
   * Visitor that prices IR future option derivatives by evaluating the fitted Heston
   * parameter surfaces at the option's (expiry, strike) and pricing via the Fourier method.
   */
  private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
    // Alpha parameter passed to FourierPricer.price.
    private final double _alpha = -0.5;
    // Tolerance passed to FourierPricer.price.
    private final double _tolerance = 0.001;
    private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
    private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
    private final ComputationTarget _target;
    private final FunctionInputs _inputs;
    private final YieldCurveBundle _curves;

    public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
      _target = target;
      _inputs = inputs;
      _curves = curves;
    }

    @Override
    public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      // Futures are quoted as 1 - rate; recover the forward rate from the priced future.
      final double f = 1 - _futurePricer.price(irFuture, _curves);
      // NOTE(review): Black vol of 0 here vs 1e-6 in the margined case — confirm intentional.
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    // Prices a premium-style transaction via its underlying option security.
    @Override
    public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
      return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
    }

    @Override
    public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
      final double t = option.getExpirationTime();
      final double k = option.getStrike();
      final boolean isCall = option.isCall();
      final InterestRateFuture irFuture = option.getUnderlyingFuture();
      // Futures are quoted as 1 - rate; recover the forward rate from the priced future.
      final double f = 1 - _futurePricer.price(irFuture, _curves);
      final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
      final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
      final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
      return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
    }

    // Prices a margined transaction via its underlying option security.
    @Override
    public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
      return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
    }

    /**
     * Builds the Heston characteristic exponent by interpolating each fitted parameter
     * surface at (t, k).
     *
     * @throws OpenGammaRuntimeException if the surfaces are missing or in the wrong currency
     */
    private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
      final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
      @SuppressWarnings("synthetic-access")
      final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
      if (surfacesObject == null) {
        throw new OpenGammaRuntimeException("Could not get heston surface");
      }
      final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
      if (!surfaces.getCurrency().equals(currency)) {
        throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
      }
      final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
      final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
      final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
      final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
      final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
      return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
    }
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties().with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode()).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName).with(ValuePropertyNames.SURFACE, surfaceName).with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston").with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties().with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode()).withAny(ValuePropertyNames.SURFACE).withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG).with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston").with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency.getUniqueId(), properties);
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName).with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
/**
 * Prices a margined option transaction by delegating to the underlying security.
 */
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
/**
 * Interpolates the five fitted Heston surfaces at (expiry, strike) and builds the
 * characteristic exponent used by the Fourier pricer.
 *
 * @param target the computation target whose trade supplies the currency
 * @param inputs the function inputs, expected to contain {@link ValueRequirementNames#HESTON_SURFACES}
 * @param t time to expiry
 * @param k strike
 * @return the Heston characteristic exponent at (t, k)
 * @throws OpenGammaRuntimeException if the surfaces are missing or fitted for a different currency
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access") final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
// Guard against pricing with surfaces fitted for another currency.
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
/**
 * Builds the trade converter and the time-series data converter from the sources
 * available in the compilation context. Called once before compilation.
 */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
/**
 * Computes the present value of the IR future option trade: converts the trade to an
 * analytic derivative, prices it with the Fourier/Heston visitor and wraps the result
 * in a {@link ComputedValue} carrying the resolved surface and curve-config properties.
 *
 * @param executionContext the execution context (valuation clock, config source)
 * @param inputs the resolved inputs (curves, Heston surfaces, time series)
 * @param target the trade target
 * @param desiredValues the requested values; only the first is used
 * @return a singleton set containing the present value
 * @throws OpenGammaRuntimeException if the named curve calculation configuration is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// The converter's static return type loses the generic parameter; the cast is unchecked
// by construction, so suppress the warning at the narrowest possible scope.
@SuppressWarnings("unchecked")
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
// The visitor prices per instrument type (premium vs. margined, security vs. transaction).
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
/**
 * This function applies to trades (see {@link #canApplyTo}).
 */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
/**
 * Applies only to trades whose security is an IR future option.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
/**
 * Assembles the requirements for pricing: the yield curves of the (single) curve
 * calculation configuration, the fitted Heston surfaces, and any conversion time series.
 * Returns {@code null} (per engine convention) when the constraints are not uniquely
 * specified or the configuration cannot be satisfied.
 *
 * @param context the compilation context
 * @param target the trade target
 * @param desiredValue the requested value whose constraints name the surface and curve config
 * @return the requirements, or null if they cannot be determined
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
// Exactly one surface name must be constrained.
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Full surface name includes the future-option prefix for this target.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
// Exactly one curve calculation configuration must be constrained.
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
// Parameterized logging: avoids string concatenation and follows SLF4J convention.
s_logger.error("Could not find curve calculation configuration named {}", curveCalculationConfigName);
return null;
}
// If the config is keyed by currency, it must match the trade's currency.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
/**
 * Advertises a present-value result with the trade's currency and fixed
 * Heston/Fourier method properties; surface and curve-config remain open
 * until resolved via {@link #getRequirements}.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
<<<<<<< MINE
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
=======
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties
.with(ValuePropertyNames.CURRENCY, currency.getCode())
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency.getUniqueId(), properties);
}
>>>>>>> YOURS
/**
 * Prices IR future option derivatives with a Fourier pricer under Heston dynamics
 * fitted to market surfaces. Transactions delegate to their underlying security.
 */
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
// Fourier integration contour shift and pricing tolerance.
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
// Forward rate: futures are quoted as price, so the rate is 1 - price.
final double f = 1 - _futurePricer.price(irFuture, _curves);
// Vol of 0 here vs. 1e-6 in the margined variant — presumably unused by the
// Fourier pricer; TODO confirm the asymmetry is intentional.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
/**
 * Interpolates the five fitted Heston surfaces at (expiry, strike) and builds the
 * characteristic exponent; fails if the surfaces are missing or currency-mismatched.
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
// Guard against pricing with surfaces fitted for another currency.
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
/**
 * Builds the trade converter and the time-series data converter from the sources
 * available in the compilation context. Called once before compilation.
 */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
/**
 * Computes the present value of the IR future option trade: converts the trade to an
 * analytic derivative, prices it with the Fourier/Heston visitor and wraps the result
 * in a {@link ComputedValue} carrying the resolved surface and curve-config properties.
 *
 * @param executionContext the execution context (valuation clock, config source)
 * @param inputs the resolved inputs (curves, Heston surfaces, time series)
 * @param target the trade target
 * @param desiredValues the requested values; only the first is used
 * @return a singleton set containing the present value
 * @throws OpenGammaRuntimeException if the named curve calculation configuration is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// The converter's static return type loses the generic parameter; the cast is unchecked
// by construction, so suppress the warning at the narrowest possible scope.
@SuppressWarnings("unchecked")
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
// The visitor prices per instrument type (premium vs. margined, security vs. transaction).
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
/**
 * This function applies to trades (see {@link #canApplyTo}).
 */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
/**
 * Applies only to trades whose security is an IR future option.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getTrade().getSecurity() instanceof IRFutureOptionSecurity;
}
/**
 * Assembles the requirements for pricing: the yield curves of the (single) curve
 * calculation configuration, the fitted Heston surfaces, and any conversion time series.
 * Returns {@code null} (per engine convention) when the constraints are not uniquely
 * specified or the configuration cannot be satisfied.
 *
 * @param context the compilation context
 * @param target the trade target
 * @param desiredValue the requested value whose constraints name the surface and curve config
 * @return the requirements, or null if they cannot be determined
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
// Exactly one surface name must be constrained.
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Full surface name includes the future-option prefix for this target.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
// Exactly one curve calculation configuration must be constrained.
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
// Parameterized logging: avoids string concatenation and follows SLF4J convention.
s_logger.error("Could not find curve calculation configuration named {}", curveCalculationConfigName);
return null;
}
// If the config is keyed by currency, it must match the trade's currency.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
/**
 * Advertises a present-value result with the trade's currency and fixed
 * Heston/Fourier method properties; surface and curve-config remain open
 * until resolved via {@link #getRequirements}.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
<<<<<<< MINE
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
=======
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties
.with(ValuePropertyNames.CURRENCY, currency.getCode())
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, currency.getUniqueId(), properties);
}
>>>>>>> YOURS
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
private final double _alpha = -0.5;
private final double _tolerance = 0.001;
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
/** A margin transaction is priced as its underlying margined security. */
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
  final InterestRateFutureOptionMarginSecurity underlying = option.getUnderlyingOption();
  return visitInterestRateFutureOptionMarginSecurity(underlying);
}
/**
 * Samples the fitted Heston parameter surfaces at expiry {@code t} and strike {@code k}
 * and assembles them into a characteristic exponent for the Fourier pricer.
 *
 * @param target the computation target; its trade's currency must match the surfaces'
 * @param inputs function inputs holding the HESTON_SURFACES value
 * @param t option expiry time
 * @param k option strike
 * @return the Heston characteristic exponent built from the five sampled parameters
 * @throws OpenGammaRuntimeException if the surfaces are absent or quoted in another currency
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
// Guard against wiring errors: the fitted surfaces must belong to the trade's currency.
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
// ---- Unstructured: second copy of the file follows (concatenation artifact) ----
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
/**
 * One-time initialisation: resolves the sources needed to convert trades into
 * analytics instruments and builds the trade / time-series converters.
 */
@Override
public void init(final FunctionCompilationContext context) {
  final HolidaySource holidays = OpenGammaCompilationContext.getHolidaySource(context);
  final RegionSource regions = OpenGammaCompilationContext.getRegionSource(context);
  final ConventionBundleSource conventions = OpenGammaCompilationContext.getConventionBundleSource(context);
  final SecuritySource securities = OpenGammaCompilationContext.getSecuritySource(context);
  final HistoricalTimeSeriesResolver htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final InterestRateFutureOptionSecurityConverter securityConverter =
      new InterestRateFutureOptionSecurityConverter(holidays, conventions, regions, securities);
  _converter = new InterestRateFutureOptionTradeConverter(securityConverter);
  _dataConverter = new FixedIncomeConverterDataProvider(conventions, htsResolver);
}
/**
 * Converts the target trade to an analytics derivative, prices it via the Fourier/Heston
 * visitor and returns a single PRESENT_VALUE computed value tagged with the resolved
 * surface and curve-configuration properties.
 *
 * @throws OpenGammaRuntimeException if the named curve calculation configuration is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Only the first desired value is consulted; its constraints name the surface and curve config.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// Unchecked cast: converter is trusted to return an IR future option definition for this trade.
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
// Dispatch on derivative type; the visitor prices via Fourier under the fitted Heston surfaces.
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
/** This function operates on trade-level computation targets. */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
/** Applicable only to trades whose security is an IR future option. */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  final Object security = target.getTrade().getSecurity();
  return security instanceof IRFutureOptionSecurity;
}
/**
 * Declares the inputs needed to price: the yield curves of the named curve calculation
 * configuration, the fitted Heston surfaces, and any fixing time series for conversion.
 * Returns {@code null} (per the engine's convention: "cannot satisfy") when the surface or
 * curve-config constraint is absent/ambiguous, the config is unknown, or it targets a
 * different currency.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
// Exactly one surface name must be constrained.
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Surfaces are stored per future-option prefix, so qualify the name with it.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
// NOTE(review): assumes getUniqueId() is non-null here — confirm, or this would NPE.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
// Reject configs keyed to a different currency than the trade's.
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
<<<<<<< MINE
final ValueProperties resultProperties = createValueProperties()
=======
return Sets.newHashSet(getSpecification(target));
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
private ValueSpecification getSpecification(final ComputationTarget target) {
return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
>>>>>>> YOURS
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
/**
 * Builds the requirement for the fitted Heston surfaces, keyed by the trade's currency
 * and the fully qualified surface name.
 */
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
  final Currency tradeCurrency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  final ValueProperties surfaceProperties = ValueProperties
      .with(ValuePropertyNames.CURRENCY, tradeCurrency.getCode())
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION)
      .get();
  return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, tradeCurrency.getUniqueId(), surfaceProperties);
}
/**
 * Derivative visitor that prices interest-rate future options (premium and margined,
 * securities and transactions) with the Fourier pricer under a Heston characteristic
 * exponent sampled from fitted parameter surfaces.
 */
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
// Fourier integration contour shift.
private final double _alpha = -0.5;
// Integration tolerance passed to the Fourier pricer.
private final double _tolerance = 0.001;
// Prices the underlying future off the discounting curves.
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
/** Prices a premium-style option security via Fourier under the sampled Heston exponent. */
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
// Forward rate implied by the underlying future quote (price = 1 - rate).
final double f = 1 - _futurePricer.price(irFuture, _curves);
// Numeraire 1 and zero Black vol: the dynamics come from the Heston exponent.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 0);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
/** A premium transaction is priced as its underlying security. */
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
return visitInterestRateFutureOptionPremiumSecurity(option.getUnderlyingOption());
}
/** Prices a margined option security via Fourier under the sampled Heston exponent. */
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
final double t = option.getExpirationTime();
final double k = option.getStrike();
final boolean isCall = option.isCall();
final InterestRateFuture irFuture = option.getUnderlyingFuture();
final double f = 1 - _futurePricer.price(irFuture, _curves);
// NOTE(review): sigma = 1e-6 here vs 0 in the premium variant — confirm this is intentional.
final BlackFunctionData blackData = new BlackFunctionData(f, 1, 1e-6);
final EuropeanVanillaOption vanillaOption = new EuropeanVanillaOption(k, t, isCall);
final HestonCharacteristicExponent ce = getModelParameters(_target, _inputs, t, k);
return _fourierPricer.price(blackData, vanillaOption, ce, _alpha, _tolerance, true);
}
/** A margin transaction is priced as its underlying security. */
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
/**
 * Samples the fitted Heston surfaces at (t, k) and builds the characteristic exponent.
 *
 * @throws OpenGammaRuntimeException if the surfaces are absent or in another currency
 */
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.irfutureoption;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Clock;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitorAdapter;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFuture;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionMarginTransaction;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumSecurity;
import com.opengamma.analytics.financial.interestrate.future.derivative.InterestRateFutureOptionPremiumTransaction;
import com.opengamma.analytics.financial.interestrate.future.method.InterestRateFutureDiscountingMethod;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.analytics.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.analytics.financial.model.option.pricing.fourier.FourierPricer;
import com.opengamma.analytics.financial.model.option.pricing.fourier.HestonCharacteristicExponent;
import com.opengamma.analytics.math.integration.RungeKuttaIntegrator1D;
import com.opengamma.analytics.math.surface.InterpolatedDoublesSurface;
import com.opengamma.core.config.ConfigSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.position.Trade;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.FixedIncomeConverterDataProvider;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionSecurityConverter;
import com.opengamma.financial.analytics.conversion.InterestRateFutureOptionTradeConverter;
import com.opengamma.financial.analytics.ircurve.calcconfig.ConfigDBCurveCalculationConfigSource;
import com.opengamma.financial.analytics.ircurve.calcconfig.MultiCurveCalculationConfig;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.YieldCurveFunctionUtils;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.analytics.volatility.fittedresults.HestonFittedSurfaces;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.IRFutureOptionSecurity;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
*/
public class InterestRateFutureOptionHestonPresentValueFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(InterestRateFutureOptionHestonPresentValueFunction.class);
private InterestRateFutureOptionTradeConverter _converter;
private FixedIncomeConverterDataProvider _dataConverter;
/**
 * One-time initialisation: resolves the sources needed to convert trades into
 * analytics instruments and builds the trade / time-series converters.
 */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
final HistoricalTimeSeriesResolver timeSeriesResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
_converter = new InterestRateFutureOptionTradeConverter(new InterestRateFutureOptionSecurityConverter(holidaySource, conventionSource, regionSource, securitySource));
_dataConverter = new FixedIncomeConverterDataProvider(conventionSource, timeSeriesResolver);
}
/**
 * Converts the target trade to an analytics derivative, prices it with the Fourier/Heston
 * visitor, and returns a single PRESENT_VALUE computed value tagged with the resolved
 * surface and curve-configuration properties.
 *
 * @throws OpenGammaRuntimeException if the named curve calculation configuration is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
// Only the first desired value is consulted; its constraints name the surface and curve config.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final Clock snapshotClock = executionContext.getValuationClock();
final ZonedDateTime now = snapshotClock.zonedDateTime();
final HistoricalTimeSeriesBundle timeSeries = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
// Unchecked cast: converter is trusted to return an IR future option definition for this trade.
final InstrumentDefinition<InstrumentDerivative> irFutureOptionDefinition = (InstrumentDefinition<InstrumentDerivative>) _converter.convert(target.getTrade());
final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String curveCalculationConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final ConfigSource configSource = OpenGammaExecutionContext.getConfigSource(executionContext);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
throw new OpenGammaRuntimeException("Could not find curve calculation configuration named " + curveCalculationConfigName);
}
final String[] curveNames = curveCalculationConfig.getYieldCurveNames();
final YieldCurveBundle curves = YieldCurveFunctionUtils.getYieldCurves(inputs, curveCalculationConfig);
final InstrumentDerivative irFutureOption = _dataConverter.convert(target.getTrade().getSecurity(), irFutureOptionDefinition, now, curveNames, timeSeries);
// Dispatch on derivative type; the visitor prices via Fourier under the fitted Heston surfaces.
final double price = irFutureOption.accept(new MyDerivativeVisitor(target, inputs, curves));
final ValueSpecification valueSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get());
return Sets.newHashSet(new ComputedValue(valueSpecification, price));
}
/** This function operates on trade-level computation targets. */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
/** Applicable only to trades whose security is an IR future option. */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  final Object security = target.getTrade().getSecurity();
  return security instanceof IRFutureOptionSecurity;
}
/**
 * Declares the inputs needed to price: the yield curves of the named curve calculation
 * configuration, the fitted Heston surfaces, and any fixing time series for conversion.
 * Returns {@code null} (engine convention for "cannot satisfy") when the surface or
 * curve-config constraint is absent/ambiguous, the config is unknown, or it targets a
 * different currency.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
final Trade trade = target.getTrade();
final Currency currency = FinancialSecurityUtils.getCurrency(trade.getSecurity());
// Exactly one surface name must be constrained.
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
return null;
}
// Surfaces are stored per future-option prefix, so qualify the name with it.
final String surfaceName = Iterables.getOnlyElement(surfaceNames) + "_" + IRFutureOptionFunctionHelper.getFutureOptionPrefix(target);
final Set<String> curveCalculationConfigNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveCalculationConfigNames == null || curveCalculationConfigNames.size() != 1) {
return null;
}
final String curveCalculationConfigName = curveCalculationConfigNames.iterator().next();
final ConfigSource configSource = OpenGammaCompilationContext.getConfigSource(context);
final ConfigDBCurveCalculationConfigSource curveCalculationConfigSource = new ConfigDBCurveCalculationConfigSource(configSource);
final MultiCurveCalculationConfig curveCalculationConfig = curveCalculationConfigSource.getConfig(curveCalculationConfigName);
if (curveCalculationConfig == null) {
s_logger.error("Could not find curve calculation configuration named " + curveCalculationConfigName);
return null;
}
// NOTE(review): assumes getUniqueId() is non-null here — confirm, or this would NPE.
final UniqueIdentifiable uniqueId = curveCalculationConfig.getUniqueId();
// Reject configs keyed to a different currency than the trade's.
if (Currency.OBJECT_SCHEME.equals(uniqueId.getUniqueId().getScheme()) && !(uniqueId.getUniqueId().getValue().equals(currency.getCode()))) {
return null;
}
requirements.addAll(YieldCurveFunctionUtils.getCurveRequirements(curveCalculationConfig, curveCalculationConfigSource));
requirements.add(getSurfaceRequirement(target, surfaceName));
final Set<ValueRequirement> timeSeriesRequirements = _dataConverter.getConversionTimeSeriesRequirements(trade.getSecurity(), _converter.convert(trade));
if (timeSeriesRequirements == null) {
return null;
}
requirements.addAll(timeSeriesRequirements);
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
<<<<<<< MINE
final ValueProperties resultProperties = createValueProperties()
=======
return Sets.newHashSet(getSpecification(target));
}
private ValueRequirement getSurfaceRequirement(final ComputationTarget target) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURRENCY, currency.getCode()).with(ValuePropertyNames.SURFACE, _surfaceName)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION).get();
return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, ComputationTargetSpecification.of(currency), properties);
}
private ValueSpecification getSpecification(final ComputationTarget target) {
return new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), createValueProperties()
>>>>>>> YOURS
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity()).getCode())
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.with(ValuePropertyNames.SMILE_FITTING_METHOD, "Heston")
.with(ValuePropertyNames.CALCULATION_METHOD, "Fourier").get();
final ValueSpecification resultSpecification = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), resultProperties);
return Sets.newHashSet(resultSpecification);
}
/**
 * Builds the requirement for the fitted Heston surfaces, keyed by the trade's currency
 * and the fully qualified surface name.
 */
private ValueRequirement getSurfaceRequirement(final ComputationTarget target, final String surfaceName) {
  final Currency tradeCurrency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  final ValueProperties surfaceProperties = ValueProperties
      .with(ValuePropertyNames.CURRENCY, tradeCurrency.getCode())
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.IR_FUTURE_OPTION)
      .get();
  return new ValueRequirement(ValueRequirementNames.HESTON_SURFACES, tradeCurrency.getUniqueId(), surfaceProperties);
}
private class MyDerivativeVisitor extends InstrumentDerivativeVisitorAdapter<YieldCurveBundle, Double> {
// Fourier integration contour shift.
private final double _alpha = -0.5;
// Integration tolerance passed to the Fourier pricer.
private final double _tolerance = 0.001;
// Prices the underlying future off the discounting curves.
private final InterestRateFutureDiscountingMethod _futurePricer = InterestRateFutureDiscountingMethod.getInstance();
// Fourier-based option pricer driven by a Runge-Kutta 1-D integrator.
private final FourierPricer _fourierPricer = new FourierPricer(new RungeKuttaIntegrator1D());
private final ComputationTarget _target;
private final FunctionInputs _inputs;
private final YieldCurveBundle _curves;
/**
 * Creates a visitor that prices interest-rate future options with the Fourier / Heston method.
 *
 * @param target the computation target, expected to hold a trade
 * @param inputs the function inputs containing the fitted Heston surfaces
 * @param curves the yield curve bundle for the underlying future price
 */
public MyDerivativeVisitor(final ComputationTarget target, final FunctionInputs inputs, final YieldCurveBundle curves) {
_target = target;
_inputs = inputs;
_curves = curves;
}
/**
 * Prices a premium-style IR future option security with the Fourier pricer under the
 * Heston characteristic exponent sampled at the option's expiry and strike.
 */
@Override
public Double visitInterestRateFutureOptionPremiumSecurity(final InterestRateFutureOptionPremiumSecurity option) {
  // Forward rate implied by the underlying future quote (price = 1 - rate).
  final InterestRateFuture underlyingFuture = option.getUnderlyingFuture();
  final double forward = 1 - _futurePricer.price(underlyingFuture, _curves);
  final double expiry = option.getExpirationTime();
  final double strike = option.getStrike();
  // Numeraire 1 and zero Black vol: the volatility dynamics come from the Heston exponent.
  final BlackFunctionData marketData = new BlackFunctionData(forward, 1, 0);
  final EuropeanVanillaOption vanilla = new EuropeanVanillaOption(strike, expiry, option.isCall());
  final HestonCharacteristicExponent exponent = getModelParameters(_target, _inputs, expiry, strike);
  return _fourierPricer.price(marketData, vanilla, exponent, _alpha, _tolerance, true);
}
/** A premium transaction is priced as its underlying premium security. */
@Override
public Double visitInterestRateFutureOptionPremiumTransaction(final InterestRateFutureOptionPremiumTransaction option) {
  final InterestRateFutureOptionPremiumSecurity underlying = option.getUnderlyingOption();
  return visitInterestRateFutureOptionPremiumSecurity(underlying);
}
/**
 * Prices a margined IR future option security with the Fourier pricer under the
 * Heston characteristic exponent sampled at the option's expiry and strike.
 */
@Override
public Double visitInterestRateFutureOptionMarginSecurity(final InterestRateFutureOptionMarginSecurity option) {
  // Forward rate implied by the underlying future quote (price = 1 - rate).
  final InterestRateFuture underlyingFuture = option.getUnderlyingFuture();
  final double forward = 1 - _futurePricer.price(underlyingFuture, _curves);
  final double expiry = option.getExpirationTime();
  final double strike = option.getStrike();
  // NOTE(review): uses sigma = 1e-6 where the premium variant uses 0 — presumably to avoid a
  // degenerate Black vol; confirm the asymmetry between the two visitors is intentional.
  final BlackFunctionData marketData = new BlackFunctionData(forward, 1, 1e-6);
  final EuropeanVanillaOption vanilla = new EuropeanVanillaOption(strike, expiry, option.isCall());
  final HestonCharacteristicExponent exponent = getModelParameters(_target, _inputs, expiry, strike);
  return _fourierPricer.price(marketData, vanilla, exponent, _alpha, _tolerance, true);
}
@Override
public Double visitInterestRateFutureOptionMarginTransaction(final InterestRateFutureOptionMarginTransaction option) {
return visitInterestRateFutureOptionMarginSecurity(option.getUnderlyingOption());
}
private HestonCharacteristicExponent getModelParameters(final ComputationTarget target, final FunctionInputs inputs, final double t, final double k) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
@SuppressWarnings("synthetic-access")
final Object surfacesObject = inputs.getValue(ValueRequirementNames.HESTON_SURFACES);
if (surfacesObject == null) {
throw new OpenGammaRuntimeException("Could not get heston surface");
}
final HestonFittedSurfaces surfaces = (HestonFittedSurfaces) surfacesObject;
if (!surfaces.getCurrency().equals(currency)) {
throw new OpenGammaRuntimeException("Currency mismatch between heston curves and trade");
}
final InterpolatedDoublesSurface kappaSurface = surfaces.getKappaSurface();
final InterpolatedDoublesSurface thetaSurface = surfaces.getThetaSurface();
final InterpolatedDoublesSurface vol0Surface = surfaces.getVol0Surface();
final InterpolatedDoublesSurface omegaSurface = surfaces.getOmegaSurface();
final InterpolatedDoublesSurface rhoSurface = surfaces.getRhoSurface();
return new HestonCharacteristicExponent(kappaSurface.getZValue(t, k), thetaSurface.getZValue(t, k), vol0Surface.getZValue(t, k), omegaSurface.getZValue(t, k), rhoSurface.getZValue(t, k));
}
}
}
Diff Result
No diff
Case 53 - java_ogplatform.rev_33740_8dd22..PortfolioCompiler.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ExecutorService;
import com.opengamma.DataNotFoundException;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.PositionSource;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Compiles Portfolio requirements into the dependency graphs.
*/
/**
 * Compiles Portfolio requirements into the dependency graphs.
 */
public final class PortfolioCompiler {

  // Utility class - not intended to be instantiated.
  private PortfolioCompiler() {
  }

  // --------------------------------------------------------------------------
  /**
   * Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
   *
   * @param compilationContext the context of the view definition compilation
   * @param versionCorrection the version-correction at which to operate, not null
   * @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
   * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
   */
  protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection, boolean forcePortfolioResolution) {
    // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
    // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
    // really has them.
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
      // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
      // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
      // targets to add to the dependency graph.
      return null;
    }
    Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
    for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
      if (calcConfig.getAllPortfolioRequirements().size() == 0) {
        // No portfolio requirements for this calculation configuration - avoid further processing.
        continue;
      }
      // Actually need the portfolio now
      if (portfolio == null) {
        portfolio = getPortfolio(compilationContext, versionCorrection);
      }
      // Add portfolio requirements to the dependency graph
      final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
      final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
      PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
      // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
      try {
        builder.waitForDependencyGraphBuild();
      } catch (InterruptedException e) {
        // Restore the interrupt status before translating to an unchecked exception so callers
        // higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new OpenGammaRuntimeException("Interrupted", e);
      }
    }
    return portfolio;
  }

  /**
   * Tests whether the view has portfolio outputs enabled.
   *
   * @param viewDefinition the view definition
   * @return true if there is at least one portfolio target
   */
  private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
    ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
    return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
  }

  /**
   * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
   * {@link Security} objects for each {@link Position} within the portfolio. Note however that
   * any underlying or related data referenced by a security will not be resolved at this stage.
   *
   * @param compilationContext the compilation context containing the view being compiled, not null
   * @param versionCorrection the version-correction at which the portfolio is required, not null
   * @return the resolved portfolio, not null
   * @throws OpenGammaRuntimeException if the view has no portfolio reference, no position source
   *         is available, or the portfolio cannot be found
   */
  private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
    UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
    if (portfolioId == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
    }
    PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
    if (positionSource == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
          + "' contains required portfolio outputs, but the compiler does not have access to a position source.");
    }
    // NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
    // follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
    // exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
    // portfolio should be loaded independently of the cycle version correction, so latest always means latest?
    Portfolio portfolio;
    try {
      if (portfolioId.isVersioned()) {
        portfolio = positionSource.getPortfolio(portfolioId);
      } else {
        portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
      }
    } catch (DataNotFoundException ex) {
      throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '" + positionSource +
          "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
    }
    return portfolio;
  }

  /**
   * Resolves the securities in the portfolio at the latest version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
  }

  /**
   * Resolves the securities in the portfolio at the given version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @param versionCorrection the version-correction for security resolution, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
    Portfolio cloned = new SimplePortfolio(portfolio);
    new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
    return cloned;
  }
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ExecutorService;
import com.opengamma.DataNotFoundException;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.PositionSource;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Compiles Portfolio requirements into the dependency graphs.
*/
/**
 * Compiles Portfolio requirements into the dependency graphs.
 */
public final class PortfolioCompiler {

  // Utility class - not intended to be instantiated.
  private PortfolioCompiler() {
  }

  // --------------------------------------------------------------------------
  /**
   * Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
   *
   * @param compilationContext the context of the view definition compilation
   * @param versionCorrection the version-correction at which to operate, not null
   * @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
   * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
   */
  protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection, boolean forcePortfolioResolution) {
    // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
    // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
    // really has them.
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
      // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
      // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
      // targets to add to the dependency graph.
      return null;
    }
    Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
    for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
      if (calcConfig.getAllPortfolioRequirements().size() == 0) {
        // No portfolio requirements for this calculation configuration - avoid further processing.
        continue;
      }
      // Actually need the portfolio now
      if (portfolio == null) {
        portfolio = getPortfolio(compilationContext, versionCorrection);
      }
      // Add portfolio requirements to the dependency graph
      final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
      final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
      PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
      // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
      try {
        builder.waitForDependencyGraphBuild();
      } catch (InterruptedException e) {
        // Restore the interrupt status before translating to an unchecked exception so callers
        // higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new OpenGammaRuntimeException("Interrupted", e);
      }
    }
    return portfolio;
  }

  /**
   * Tests whether the view has portfolio outputs enabled.
   *
   * @param viewDefinition the view definition
   * @return true if there is at least one portfolio target
   */
  private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
    ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
    return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
  }

  /**
   * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
   * {@link Security} objects for each {@link Position} within the portfolio. Note however that
   * any underlying or related data referenced by a security will not be resolved at this stage.
   *
   * @param compilationContext the compilation context containing the view being compiled, not null
   * @param versionCorrection the version-correction at which the portfolio is required, not null
   * @return the resolved portfolio, not null
   * @throws OpenGammaRuntimeException if the view has no portfolio reference, no position source
   *         is available, or the portfolio cannot be found
   */
  private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
    UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
    if (portfolioId == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
    }
    PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
    if (positionSource == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
          + "' contains required portfolio outputs, but the compiler does not have access to a position source.");
    }
    // NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
    // follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
    // exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
    // portfolio should be loaded independently of the cycle version correction, so latest always means latest?
    Portfolio portfolio;
    try {
      if (portfolioId.isVersioned()) {
        portfolio = positionSource.getPortfolio(portfolioId);
      } else {
        portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
      }
    } catch (DataNotFoundException ex) {
      throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '" + positionSource +
          "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
    }
    return portfolio;
  }

  /**
   * Resolves the securities in the portfolio at the latest version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
  }

  /**
   * Resolves the securities in the portfolio at the given version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @param versionCorrection the version-correction for security resolution, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
    Portfolio cloned = new SimplePortfolio(portfolio);
    new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
    return cloned;
  }
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ExecutorService;
import com.opengamma.DataNotFoundException;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.PositionSource;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {

  // Utility class - not intended to be instantiated.
  private PortfolioCompiler() {
  }

  /**
   * Resolves the securities in the portfolio at the latest version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
      final SecuritySource securitySource) {
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
  }

  /**
   * Resolves the securities in the portfolio at the given version-correction.
   *
   * @param portfolio the portfolio to resolve, not null
   * @param executorService the threading service, not null
   * @param securitySource the security source, not null
   * @param versionCorrection the version-correction for security resolution, not null
   * @return the resolved portfolio, not null
   */
  public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
      final SecuritySource securitySource, final VersionCorrection versionCorrection) {
    Portfolio cloned = new SimplePortfolio(portfolio);
    new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
    return cloned;
  }

  // --------------------------------------------------------------------------
  /**
   * Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
   *
   * @param compilationContext the context of the view definition compilation
   * @param versionCorrection the version-correction at which to operate, not null
   * @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
   * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
   */
  protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
      boolean forcePortfolioResolution) {
    // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
    // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
    // really has them.
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
      // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
      // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
      // targets to add to the dependency graph.
      return null;
    }
    Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
    // For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
    // start the graph building job
    for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
      if (calcConfig.getAllPortfolioRequirements().size() == 0) {
        // No portfolio requirements for this calculation configuration - avoid further processing.
        continue;
      }
      // Actually need the portfolio now
      if (portfolio == null) {
        portfolio = getPortfolio(compilationContext, versionCorrection);
      }
      // Add portfolio requirements to the dependency graph:
      // Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
      // PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
      // and any related graph building may immediately proceed in the background
      final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
      final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
      PortfolioNodeTraverser.parallel(traversalCallback,
          compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
      // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
      // Wait for the current config's dependency graph to be built before moving to the next view calc config
      try {
        builder.waitForDependencyGraphBuild();
      } catch (InterruptedException e) {
        // Restore the interrupt status before translating to an unchecked exception so callers
        // higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new OpenGammaRuntimeException("Interrupted", e);
      }
    }
    return portfolio;
  }

  /**
   * Tests whether the view has portfolio outputs enabled.
   *
   * @param viewDefinition the view definition
   * @return true if there is at least one portfolio target
   */
  private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
    ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
    return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
        || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
  }

  /**
   * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
   * {@link Security} objects for each {@link Position} within the portfolio. Note however that
   * any underlying or related data referenced by a security will not be resolved at this stage.
   *
   * @param compilationContext the compilation context containing the view being compiled, not null
   * @param versionCorrection the version-correction at which the portfolio is required, not null
   * @return the resolved portfolio, not null
   * @throws OpenGammaRuntimeException if the view has no portfolio reference, no position source
   *         is available, or the portfolio cannot be found
   */
  private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
    // Get the portfolio ID from the view definition
    UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
    if (portfolioId == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
          + "' contains required portfolio outputs, but it does not reference a portfolio.");
    }
    // Get the position source from the compilation context
    PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
    if (positionSource == null) {
      throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
          + "' contains required portfolio outputs, but the compiler does not have access to a position source.");
    }
    // Resolve the portfolio
    // NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
    // follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
    // exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
    // portfolio should be loaded independently of the cycle version correction, so latest always means latest?
    Portfolio portfolio;
    try {
      if (portfolioId.isVersioned()) {
        portfolio = positionSource.getPortfolio(portfolioId);
      } else {
        portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
      }
    } catch (DataNotFoundException ex) {
      throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
          + positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
    }
    return portfolio;
  }
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ExecutorService;
import com.opengamma.DataNotFoundException;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.PositionSource;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext, versionCorrection);
}
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
 * Tests whether the view produces any portfolio-level output at all.
 *
 * @param viewDefinition the view definition
 * @return true if there is at least one portfolio target
 */
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
    ResultModelDefinition resultModel = viewDefinition.getResultModelDefinition();
    boolean positionsEnabled = resultModel.getPositionOutputMode() != ResultOutputMode.NONE;
    boolean aggregatesEnabled = resultModel.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
    return positionsEnabled || aggregatesEnabled;
}
/**
 * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
 * {@link Security} objects for each {@link Position} within the portfolio. Note however that
 * any underlying or related data referenced by a security will not be resolved at this stage.
 *
 * @param compilationContext the compilation context containing the view being compiled, not null
 * @param versionCorrection the version-correction at which the portfolio is required, not null
 * @return the resolved portfolio, not null
 * @throws OpenGammaRuntimeException if the view definition does not reference a portfolio, if no
 *         position source is available, or if the portfolio cannot be found in the position source
 */
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Get the position source from the compilation context
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
}
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
// An explicit portfolio version was requested - honour it exactly, ignoring the cycle version-correction.
portfolio = positionSource.getPortfolio(portfolioId);
} else {
// No explicit version - resolve the object identifier at the supplied (cycle) version-correction.
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
// Surface the lookup failure with enough context (portfolio, source, view) to diagnose it.
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
}
return portfolio;
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Compiles Portfolio requirements into the dependency graphs.
*/
public final class PortfolioCompiler {
// Static utility class; the constructor is hidden so it cannot be instantiated.
private PortfolioCompiler() {
}
// --------------------------------------------------------------------------
/**
 * Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
 *
 * @param compilationContext the context of the view definition compilation
 * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
 * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
 */
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
// Resolved lazily below: only the first calculation configuration with portfolio requirements pays the cost.
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
// Record the portfolio resolution so a later compilation can detect when the portfolio changes.
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
// Add portfolio requirements to the dependency graph; the traversal callback feeds discovered value
// requirements to the graph builder, which may start building in the background immediately.
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
// NOTE(review): the interrupt status is not re-asserted before throwing - confirm callers expect this.
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
 * Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolves the portfolio structure.
 *
 * @param compilationContext the context of the view definition compiler
 * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
 * @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
 */
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
if (isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
Portfolio portfolio = null;
// Resolve the portfolio only when at least one configuration actually has portfolio requirements.
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (!calcConfig.getAllPortfolioRequirements().isEmpty()) {
portfolio = getPortfolio(compilationContext);
break;
}
}
if (portfolio != null) {
// Record the portfolio resolution so later compilations can detect a change of portfolio version.
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
return portfolio;
} else {
return null;
}
}
/**
 * Tests whether the view has portfolio outputs enabled.
 *
 * @param viewDefinition the view definition
 * @return true if there is at least one portfolio target
 */
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
}
/**
 * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
 * underlying or related data referenced by a security will not be resolved at this stage.
 *
 * @param compilationContext the compilation context containing the view being compiled, not null
 * @return the resolved portfolio, not null
 * @throws OpenGammaRuntimeException if the view references no portfolio, or the portfolio cannot be identified or resolved
 */
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Resolve via the raw computation target resolver at the compilation's version-correction.
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
 * Resolves the securities in the portfolio at the latest version-correction.
 *
 * @param portfolio the portfolio to resolve, not null
 * @param executorService the threading service, not null
 * @param securitySource the security source, not null
 * @return the resolved portfolio, not null
 */
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
 * Resolves the securities in the portfolio at the given version-correction.
 *
 * @param portfolio the portfolio to resolve, not null
 * @param executorService the threading service, not null
 * @param securitySource the security source, not null
 * @param versionCorrection the version-correction for security resolution, not null
 * @return the resolved portfolio, not null
 */
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
// Work on a copy so the caller's portfolio instance is left untouched.
final Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Compiles Portfolio requirements into the dependency graphs.
*/
public final class PortfolioCompiler {

    /** Static utility class - not instantiable. */
    private PortfolioCompiler() {
    }

    // --------------------------------------------------------------------------

    /**
     * Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
     *
     * @param compilationContext the context of the view definition compilation
     * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
     * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
     */
    protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
        // Resolution (of the portfolio, positions, securities) is deferred for as long as possible so that a
        // view without portfolio-level output never touches a position or security master.
        if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
            // Nothing is output at the portfolio level, so there are no portfolio targets to add to the
            // dependency graph - it doesn't even matter whether the portfolio could be resolved.
            return null;
        }
        Portfolio resolved = null;
        for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
            if (calcConfig.getAllPortfolioRequirements().size() == 0) {
                // This configuration has no portfolio requirements - skip it entirely.
                continue;
            }
            if (resolved == null) {
                // First configuration that needs the portfolio - resolve it now and record the resolution.
                resolved = getPortfolio(compilationContext);
                resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
            }
            // Walk the portfolio tree in parallel; the callback feeds value requirements into the graph builder.
            final DependencyGraphBuilder graphBuilder = compilationContext.getBuilder(calcConfig.getName());
            final PortfolioCompilerTraversalCallback callback = new PortfolioCompilerTraversalCallback(calcConfig, graphBuilder, resolutions);
            PortfolioNodeTraverser.parallel(callback, compilationContext.getServices().getExecutorService()).traverse(resolved.getRootNode());
            // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
            try {
                graphBuilder.waitForDependencyGraphBuild();
            } catch (final InterruptedException e) {
                throw new OpenGammaRuntimeException("Interrupted", e);
            }
        }
        return resolved;
    }

    /**
     * Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolves the portfolio structure.
     *
     * @param compilationContext the context of the view definition compiler
     * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
     * @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
     */
    protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
        if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
            return null;
        }
        Portfolio resolved = null;
        // Resolve the portfolio only if some configuration actually carries portfolio requirements.
        for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
            if (!calcConfig.getAllPortfolioRequirements().isEmpty()) {
                resolved = getPortfolio(compilationContext);
                break;
            }
        }
        if (resolved != null) {
            resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
        }
        return resolved;
    }

    /**
     * Tests whether the view has portfolio outputs enabled.
     *
     * @param viewDefinition the view definition
     * @return true if there is at least one portfolio target
     */
    private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
        final ResultModelDefinition resultModel = viewDefinition.getResultModelDefinition();
        if (resultModel.getPositionOutputMode() != ResultOutputMode.NONE) {
            return true;
        }
        return resultModel.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
    }

    /**
     * Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within
     * the portfolio. Note however that any underlying or related data referenced by a security will not be resolved at this stage.
     *
     * @param compilationContext the compilation context containing the view being compiled, not null
     * @return the resolved portfolio, not null
     */
    private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
        final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
        if (portfolioId == null) {
            throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
        }
        final ComputationTargetResolver rawResolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
        final ComputationTargetResolver.AtVersionCorrection atVersion = rawResolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
        final ComputationTargetSpecification specification = atVersion.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
        if (specification == null) {
            throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
        }
        final ComputationTarget target = atVersion.resolve(specification);
        if (target == null) {
            throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
        }
        return target.getValue(ComputationTargetType.PORTFOLIO);
    }

    /**
     * Resolves the securities in the portfolio at the latest version-correction.
     *
     * @param portfolio the portfolio to resolve, not null
     * @param executorService the threading service, not null
     * @param securitySource the security source, not null
     * @return the resolved portfolio, not null
     */
    public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
        return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
    }

    /**
     * Resolves the securities in the portfolio at the given version-correction.
     *
     * @param portfolio the portfolio to resolve, not null
     * @param executorService the threading service, not null
     * @param securitySource the security source, not null
     * @param versionCorrection the version-correction for security resolution, not null
     * @return the resolved portfolio, not null
     */
    public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
        // Copy first so the caller's instance is never mutated.
        final Portfolio copy = new SimplePortfolio(portfolio);
        final SecurityLinkResolver linkResolver = new SecurityLinkResolver(executorService, securitySource, versionCorrection);
        linkResolver.resolveSecurities(copy.getRootNode());
        return copy;
    }
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext, versionCorrection);
}
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolved the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
if (isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (!calcConfig.getAllPortfolioRequirements().isEmpty()) {
portfolio = getPortfolio(compilationContext);
break;
}
}
if (portfolio != null) {
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
return portfolio;
} else {
return null;
}
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
* {@link Security} objects for each {@link Position} within the portfolio. Note however that
* any underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @param versionCorrection the version-correction at which the portfolio is required, not null
*/
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Get the position source from the compilation context
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
}
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
portfolio = positionSource.getPortfolio(portfolioId);
} else {
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
}
return portfolio;
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
}
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
 * Resolves the securities in the portfolio, using {@link VersionCorrection#LATEST} for the
 * security resolution.
 *
 * @param portfolio the portfolio to resolve, not null
 * @param executorService the threading service, not null
 * @param securitySource the security source, not null
 * @return the resolved portfolio, not null
 */
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
    // Delegate to the version-correction aware overload.
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.DataNotFoundException;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.PositionSource;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
// Private constructor: static utility class, never instantiated.
private PortfolioCompiler() {
}
// --------------------------------------------------------------------------
/**
 * Adds portfolio targets to the dependency graphs as required by a full compilation, and fully
 * resolves the portfolio structure.
 *
 * @param compilationContext the context of the view definition compilation, not null
 * @param resolutions the resolutions within the portfolio structure (for example the position
 *        object identifiers and underlying security references), not null
 * @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
 */
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
    // BUG FIX: the previous body referenced 'forcePortfolioResolution' and 'versionCorrection',
    // neither of which is a parameter of this method (a mis-merge); reconstructed from the
    // intact implementation of executeFull found elsewhere in this file.
    // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
    // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
    // really has them.
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
        // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
        // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
        // targets to add to the dependency graph.
        return null;
    }
    Portfolio portfolio = null;
    for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
        if (calcConfig.getAllPortfolioRequirements().size() == 0) {
            // No portfolio requirements for this calculation configuration - avoid further processing.
            continue;
        }
        // Actually need the portfolio now; resolve lazily and record the resolution for incremental recompiles.
        if (portfolio == null) {
            portfolio = getPortfolio(compilationContext);
            resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
        }
        // Add portfolio requirements to the dependency graph; graph building proceeds in the background.
        final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
        final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
        PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
        // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
        try {
            builder.waitForDependencyGraphBuild();
        } catch (final InterruptedException e) {
            throw new OpenGammaRuntimeException("Interrupted", e);
        }
    }
    return portfolio;
}
/**
 * Handles portfolio work for an incremental compilation. No new portfolio targets are added to
 * the dependency graphs; the portfolio structure is resolved (and the resolution recorded) only
 * if some calculation configuration declares portfolio requirements.
 *
 * @param compilationContext the context of the view definition compiler
 * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
 * @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
 */
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
        // No portfolio level outputs - the portfolio is never needed.
        return null;
    }
    Portfolio resolved = null;
    for (final ViewCalculationConfiguration config : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
        if (!config.getAllPortfolioRequirements().isEmpty()) {
            // At least one configuration needs the portfolio - resolve it once and stop looking.
            resolved = getPortfolio(compilationContext);
            break;
        }
    }
    if (resolved != null) {
        // Record the portfolio resolution so later compilations can detect structural changes.
        resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
    }
    return resolved;
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
* {@link Security} objects for each {@link Position} within the portfolio. Note however that
* any underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @param versionCorrection the version-correction at which the portfolio is required, not null
* @return the resolved portfolio, not null
* @throws OpenGammaRuntimeException if the view references no portfolio, no position source is available, or the portfolio cannot be resolved
*/
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Get the position source from the compilation context
// NOTE(review): PositionSource and DataNotFoundException do not appear in this file's import
// list - verify the imports are present or this will not compile.
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
}
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
// An explicit version was requested - honour it exactly.
portfolio = positionSource.getPortfolio(portfolioId);
} else {
// No explicit version - follow the cycle's version-correction.
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
}
return portfolio;
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Resolve through the raw computation target resolver at the compile-time version-correction.
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
// Map the portfolio reference to a concrete target specification before resolving it.
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
// Narrow the resolved target to a Portfolio.
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
 * Resolves the securities in the portfolio, using {@link VersionCorrection#LATEST} for the
 * security resolution.
 *
 * @param portfolio the portfolio to resolve, not null
 * @param executorService the threading service, not null
 * @param securitySource the security source, not null
 * @return the resolved portfolio, not null
 */
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
    // Delegate to the version-correction aware overload.
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
// KeepBothMethods -- merge-tool residue; not valid Java. Remove once the duplicated file copies in this artefact are split apart.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
// Private constructor: static utility class, never instantiated.
private PortfolioCompiler() {
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
// Resolve eagerly only when external portfolio targets force it; otherwise resolve lazily inside the loop.
Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext, versionCorrection);
}
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
// Record the portfolio resolution so a later incremental compile can detect structural changes.
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
// Add portfolio requirements to the dependency graph
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for this configuration's graph to build before starting the next (sequential builds).
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
 * Handles portfolio work for an incremental compilation. No new portfolio targets are added to
 * the dependency graphs; the portfolio structure is resolved (and the resolution recorded) only
 * if some calculation configuration declares portfolio requirements.
 *
 * @param compilationContext the context of the view definition compiler
 * @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
 * @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
 */
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
    if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
        // No portfolio level outputs - the portfolio is never needed.
        return null;
    }
    Portfolio resolved = null;
    for (final ViewCalculationConfiguration config : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
        if (!config.getAllPortfolioRequirements().isEmpty()) {
            // At least one configuration needs the portfolio - resolve it once and stop looking.
            resolved = getPortfolio(compilationContext);
            break;
        }
    }
    if (resolved != null) {
        // Record the portfolio resolution so later compilations can detect structural changes.
        resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
    }
    return resolved;
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
* {@link Security} objects for each {@link Position} within the portfolio. Note however that
* any underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @param versionCorrection the version-correction at which the portfolio is required, not null
* @return the resolved portfolio, not null
* @throws OpenGammaRuntimeException if the view references no portfolio, no position source is available, or the portfolio cannot be resolved
*/
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Get the position source from the compilation context
// NOTE(review): PositionSource and DataNotFoundException do not appear in this file's import
// list - verify the imports are present or this will not compile.
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
}
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
// An explicit version was requested - honour it exactly.
portfolio = positionSource.getPortfolio(portfolioId);
} else {
// No explicit version - follow the cycle's version-correction.
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
}
return portfolio;
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Resolve through the raw computation target resolver at the compile-time version-correction.
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
// Map the portfolio reference to a concrete target specification before resolving it.
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
// Narrow the resolved target to a Portfolio.
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
 * Resolves the securities in the portfolio, using {@link VersionCorrection#LATEST} for the
 * security resolution.
 *
 * @param portfolio the portfolio to resolve, not null
 * @param executorService the threading service, not null
 * @param securitySource the security source, not null
 * @return the resolved portfolio, not null
 */
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
    // Delegate to the version-correction aware overload.
    return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
// Private constructor: static utility class, never instantiated.
private PortfolioCompiler() {
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
    boolean forcePortfolioResolution) {
  // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
  // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
  // really has them.
  if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
    // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
    // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
    // targets to add to the dependency graph.
    return null;
  }
  // Resolve eagerly only if external portfolio targets force it; otherwise resolve lazily on first use below.
  Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
  // For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
  // start the graph building job
  for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
    if (calcConfig.getAllPortfolioRequirements().isEmpty()) {
      // No portfolio requirements for this calculation configuration - avoid further processing.
      continue;
    }
    // Actually need the portfolio now
    if (portfolio == null) {
      portfolio = getPortfolio(compilationContext, versionCorrection);
    }
    // Add portfolio requirements to the dependency graph:
    // Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
    // PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
    // and any related graph building may immediately proceed in the background
    final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
    final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
    PortfolioNodeTraverser.parallel(traversalCallback,
        compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
    // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
    // Wait for the current config's dependency graph to be built before moving to the next view calc config
    try {
      builder.waitForDependencyGraphBuild();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers further up the stack can observe the interruption.
      Thread.currentThread().interrupt();
      throw new OpenGammaRuntimeException("Interrupted", e);
    }
  }
  return portfolio;
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
  // Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
  // wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
  // really has them.
  if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
    // Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
    // (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
    // targets to add to the dependency graph.
    return null;
  }
  Portfolio portfolio = null;
  for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
    if (calcConfig.getAllPortfolioRequirements().isEmpty()) {
      // No portfolio requirements for this calculation configuration - avoid further processing.
      continue;
    }
    // Resolve the portfolio lazily, on first use, and record the resolution so an incremental
    // recompilation can later detect whether the portfolio has changed.
    if (portfolio == null) {
      portfolio = getPortfolio(compilationContext);
      resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
    }
    // Add portfolio requirements to the dependency graph
    final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
    final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
    PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
    // TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
    try {
      builder.waitForDependencyGraphBuild();
    } catch (final InterruptedException e) {
      // Restore the interrupt flag so callers further up the stack can observe the interruption.
      Thread.currentThread().interrupt();
      throw new OpenGammaRuntimeException("Interrupted", e);
    }
  }
  return portfolio;
}
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
  // Guard clause: without portfolio-level outputs there is nothing to resolve.
  if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
    return null;
  }
  // Resolve the portfolio only if at least one calculation configuration declares portfolio requirements.
  Portfolio resolved = null;
  for (final ViewCalculationConfiguration configuration : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
    if (!configuration.getAllPortfolioRequirements().isEmpty()) {
      resolved = getPortfolio(compilationContext);
      break;
    }
  }
  if (resolved == null) {
    return null;
  }
  // Record the portfolio resolution so a later incremental compilation can detect changes to it.
  resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
  return resolved;
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
* {@link Security} objects for each {@link Position} within the portfolio. Note however that
* any underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @param versionCorrection the version-correction at which the portfolio is required, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
  // The view definition must name a portfolio for portfolio outputs to be produced.
  final ViewDefinition viewDefinition = compilationContext.getViewDefinition();
  final UniqueId portfolioId = viewDefinition.getPortfolioId();
  if (portfolioId == null) {
    throw new OpenGammaRuntimeException("The view definition '" + viewDefinition.getName()
        + "' contains required portfolio outputs, but it does not reference a portfolio.");
  }
  // A position source is needed to load the portfolio structure.
  final PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
  if (positionSource == null) {
    throw new OpenGammaRuntimeException("The view definition '" + viewDefinition.getName()
        + "' contains required portfolio outputs, but the compiler does not have access to a position source.");
  }
  // NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
  // follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
  // exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
  // portfolio should be loaded independently of the cycle version correction, so latest always means latest?
  try {
    // An explicitly versioned identifier pins the portfolio; otherwise follow the cycle's version-correction.
    return portfolioId.isVersioned()
        ? positionSource.getPortfolio(portfolioId)
        : positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
  } catch (DataNotFoundException ex) {
    throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
        + positionSource + "' used by view definition '" + viewDefinition.getName() + "'", ex);
  }
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
  // Identify the portfolio named by the view definition; without one, portfolio outputs cannot be produced.
  final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
  if (portfolioId == null) {
    throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
  }
  // Resolve through the raw computation target resolver, pinned at the compilation's resolver version-correction.
  final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
  final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
  // First map the (possibly unversioned) portfolio reference to a concrete target specification ...
  final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
  if (specification == null) {
    throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
  }
  // ... then resolve that specification to the actual portfolio object.
  final ComputationTarget target = versioned.resolve(specification);
  if (target == null) {
    throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
  }
  return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
    final SecuritySource securitySource) {
  // Delegate to the version-correction-aware overload, resolving at LATEST.
  return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
  // Static utility class - prevents instantiation.
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
<<<<<<< MINE
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
// Add portfolio requirements to the dependency graph
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
=======
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext, versionCorrection);
}
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
>>>>>>> YOURS
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
  // Guard clause: without portfolio-level outputs there is nothing to resolve.
  if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
    return null;
  }
  // Resolve the portfolio only if at least one calculation configuration declares portfolio requirements.
  Portfolio resolved = null;
  for (final ViewCalculationConfiguration configuration : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
    if (!configuration.getAllPortfolioRequirements().isEmpty()) {
      resolved = getPortfolio(compilationContext);
      break;
    }
  }
  if (resolved == null) {
    return null;
  }
  // Record the portfolio resolution so a later incremental compilation can detect changes to it.
  resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
  return resolved;
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved
* {@link Security} objects for each {@link Position} within the portfolio. Note however that
* any underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @param versionCorrection the version-correction at which the portfolio is required, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
=======
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Get the position source from the compilation context
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
}
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
portfolio = positionSource.getPortfolio(portfolioId);
} else {
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
}
return portfolio;
}
>>>>>>> YOURS
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
  // Identify the portfolio named by the view definition; without one, portfolio outputs cannot be produced.
  final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
  if (portfolioId == null) {
    throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
  }
  // Resolve through the raw computation target resolver, pinned at the compilation's resolver version-correction.
  final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
  final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
  // First map the (possibly unversioned) portfolio reference to a concrete target specification ...
  final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
  if (specification == null) {
    throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
  }
  // ... then resolve that specification to the actual portfolio object.
  final ComputationTarget target = versioned.resolve(specification);
  if (target == null) {
    throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
  }
  return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
    final SecuritySource securitySource) {
  // Delegate to the version-correction-aware overload, resolving at LATEST.
  return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
  // Static utility class - prevents instantiation.
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param versionCorrection the version-correction at which to operate, not null
* @param forcePortfolioResolution true if there are external portfolio targets, false otherwise
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
<<<<<<< MINE
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
// Add portfolio requirements to the dependency graph
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
=======
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = forcePortfolioResolution ? getPortfolio(compilationContext, versionCorrection) : null;
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext, versionCorrection);
}
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
>>>>>>> YOURS
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolved the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
// Guard clause: a view with no portfolio outputs needs no portfolio resolution at all.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
return null;
}
Portfolio resolved = null;
// Resolve the portfolio only if at least one calculation configuration actually has portfolio requirements.
for (final ViewCalculationConfiguration config : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (!config.getAllPortfolioRequirements().isEmpty()) {
resolved = getPortfolio(compilationContext);
break;
}
}
if (resolved != null) {
// Record the portfolio's resolved unique identifier in the shared resolutions map (first writer wins).
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), resolved.getUniqueId());
}
return resolved;
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
// Resolved merge conflict: the getPortfolio(context, versionCorrection) overload was removed on the
// retained branch (the conflict's MINE side is empty). Its body also referenced PositionSource and
// DataNotFoundException, neither of which is imported by this file, and all callers in this class use
// the single-argument getPortfolio(context) defined below, which takes the version-correction from
// the compilation context's resolver instead.
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
// A view that requires portfolio outputs must reference a portfolio by unique identifier.
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName() + "' contains required portfolio outputs, but it does not reference a portfolio.");
}
// Resolve through the raw computation target resolver, pinned to the compilation's resolver version-correction.
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
// Step 1: map the portfolio id to a concrete target specification ...
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
// ... Step 2: resolve that specification to the actual portfolio target.
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
}
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource) {
// Convenience overload: delegates with the latest version-correction.
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
=======
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
>>>>>>> YOURS
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
<<<<<<< MINE
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
=======
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
>>>>>>> YOURS
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = null;
<<<<<<< MINE
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
=======
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
>>>>>>> YOURS
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
<<<<<<< MINE
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
=======
// Add portfolio requirements to the dependency graph
>>>>>>> YOURS
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
<<<<<<< MINE
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
=======
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
>>>>>>> YOURS
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolved the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
if (isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
Portfolio portfolio = null;
// Resolve the portfolio only if at least one calculation configuration actually has portfolio requirements.
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (!calcConfig.getAllPortfolioRequirements().isEmpty()) {
portfolio = getPortfolio(compilationContext);
break;
}
}
if (portfolio != null) {
// Record the portfolio's resolved unique identifier in the shared resolutions map (first writer wins).
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
return portfolio;
} else {
// No portfolio outputs requested - nothing to resolve or record.
return null;
}
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
=======
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
>>>>>>> YOURS
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
<<<<<<< MINE
// Get the position source from the compilation context
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
=======
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
>>>>>>> YOURS
}
<<<<<<< MINE
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
portfolio = positionSource.getPortfolio(portfolioId);
} else {
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
=======
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
>>>>>>> YOURS
}
<<<<<<< MINE
return portfolio;
=======
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
>>>>>>> YOURS
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.impl.PortfolioNodeTraverser;
import com.opengamma.core.position.impl.SimplePortfolio;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.view.ResultModelDefinition;
import com.opengamma.engine.view.ResultOutputMode;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Resolves the specified portfolio's securities and adds value requirements (targets) to the graph builder in the
* compilation context, thus triggering the compilation of the dependency graphs. The identification of value
* requirements is done through a parallel traversal on the portfolio nodes using PortfolioCompilerTraversalCallback,
* which actually produces the value requirements and adds them to the graph builder.
*/
public final class PortfolioCompiler {
private PortfolioCompiler() {
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService,
final SecuritySource securitySource, final VersionCorrection versionCorrection) {
Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
}
// --------------------------------------------------------------------------
/**
* Adds portfolio targets to the dependency graphs as required by a full compilation, and fully resolves the portfolio structure.
*
* @param compilationContext the context of the view definition compilation
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets were required, null otherwise.
*/
<<<<<<< MINE
protected static Portfolio execute(ViewCompilationContext compilationContext, VersionCorrection versionCorrection,
boolean forcePortfolioResolution) {
=======
protected static Portfolio executeFull(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
>>>>>>> YOURS
// Everything we do here is geared towards the avoidance of resolution (of portfolios, positions, securities)
// wherever possible, to prevent needless dependencies (on a position master, security master) when a view never
// really has them.
if (!isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
// Doesn't even matter if the portfolio can't be resolved - we're not outputting anything at the portfolio level
// (which might be because the user knows the portfolio can't be resolved right now) so there are no portfolio
// targets to add to the dependency graph.
return null;
}
Portfolio portfolio = null;
<<<<<<< MINE
// For each configuration in the view def, add portfolio requirements to dep graph, resolve the portfolio and
// start the graph building job
for (ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
=======
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
>>>>>>> YOURS
if (calcConfig.getAllPortfolioRequirements().size() == 0) {
// No portfolio requirements for this calculation configuration - avoid further processing.
continue;
}
// Actually need the portfolio now
if (portfolio == null) {
portfolio = getPortfolio(compilationContext);
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
<<<<<<< MINE
// Add portfolio requirements to the dependency graph:
// Use PortfolioNodeTraverser to traverse the portfolio tree looking for value requirements.
// PortfolioCompilerTraversalCallback passes any found value requirements to the dep graph builder,
// and any related graph building may immediately proceed in the background
=======
// Add portfolio requirements to the dependency graph
>>>>>>> YOURS
final DependencyGraphBuilder builder = compilationContext.getBuilder(calcConfig.getName());
<<<<<<< MINE
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder);
PortfolioNodeTraverser.parallel(traversalCallback,
compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
=======
final PortfolioCompilerTraversalCallback traversalCallback = new PortfolioCompilerTraversalCallback(calcConfig, builder, resolutions);
PortfolioNodeTraverser.parallel(traversalCallback, compilationContext.getServices().getExecutorService()).traverse(portfolio.getRootNode());
>>>>>>> YOURS
// TODO: Use a heuristic to decide whether to let the graph builds run in parallel, or sequentially. We will force sequential builds for the time being.
// Wait for the current config's dependency graph to be built before moving to the next view calc config
try {
builder.waitForDependencyGraphBuild();
} catch (final InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
return portfolio;
}
/**
* Adds portfolio targets to dependency graphs as required by an incremental compilation (nothing), and fully resolved the portfolio structure.
*
* @param compilationContext the context of the view definition compiler
* @param resolutions the resolutions within the portfolio structure (for example the position object identifiers and underlying security references)
* @return the fully-resolved portfolio structure if any portfolio targets are required, null otherwise
*/
protected static Portfolio executeIncremental(final ViewCompilationContext compilationContext, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
if (isPortfolioOutputEnabled(compilationContext.getViewDefinition())) {
Portfolio portfolio = null;
for (final ViewCalculationConfiguration calcConfig : compilationContext.getViewDefinition().getAllCalculationConfigurations()) {
if (!calcConfig.getAllPortfolioRequirements().isEmpty()) {
portfolio = getPortfolio(compilationContext);
break;
}
}
if (portfolio != null) {
resolutions.putIfAbsent(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, compilationContext.getViewDefinition().getPortfolioId()), portfolio.getUniqueId());
}
return portfolio;
} else {
return null;
}
}
/**
* Tests whether the view has portfolio outputs enabled.
*
* @param viewDefinition the view definition
* @return true if there is at least one portfolio target
*/
<<<<<<< MINE
private static boolean isPortfolioOutputEnabled(ViewDefinition viewDefinition) {
ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE
|| resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
=======
private static boolean isPortfolioOutputEnabled(final ViewDefinition viewDefinition) {
final ResultModelDefinition resultModelDefinition = viewDefinition.getResultModelDefinition();
return resultModelDefinition.getPositionOutputMode() != ResultOutputMode.NONE || resultModelDefinition.getAggregatePositionOutputMode() != ResultOutputMode.NONE;
>>>>>>> YOURS
}
/**
* Fully resolves the portfolio structure for a view. A fully resolved structure has resolved {@link Security} objects for each {@link Position} within the portfolio. Note however that any
* underlying or related data referenced by a security will not be resolved at this stage.
*
* @param compilationContext the compilation context containing the view being compiled, not null
* @return the resolved portfolio, not null
*/
<<<<<<< MINE
private static Portfolio getPortfolio(ViewCompilationContext compilationContext, VersionCorrection versionCorrection) {
// Get the portfolio ID from the view definition
UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
=======
private static Portfolio getPortfolio(final ViewCompilationContext compilationContext) {
final UniqueId portfolioId = compilationContext.getViewDefinition().getPortfolioId();
>>>>>>> YOURS
if (portfolioId == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but it does not reference a portfolio.");
}
<<<<<<< MINE
// Get the position source from the compilation context
PositionSource positionSource = compilationContext.getServices().getComputationTargetResolver().getPositionSource();
if (positionSource == null) {
throw new OpenGammaRuntimeException("The view definition '" + compilationContext.getViewDefinition().getName()
+ "' contains required portfolio outputs, but the compiler does not have access to a position source.");
=======
final ComputationTargetResolver resolver = compilationContext.getServices().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetResolver.AtVersionCorrection versioned = resolver.atVersionCorrection(compilationContext.getResolverVersionCorrection());
final ComputationTargetSpecification specification = versioned.getSpecificationResolver().getTargetSpecification(new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, portfolioId));
if (specification == null) {
throw new OpenGammaRuntimeException("Unable to identify portfolio '" + portfolioId + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
>>>>>>> YOURS
}
<<<<<<< MINE
// Resolve the portfolio
// NOTE jonathan 2011-11-11 -- not sure what the right thing to do is here. Reasonable compromise seems to be to
// follow the cycle VersionCorrection if no specific portfolio version has been specified, otherwise to use the
// exact portfolio version requested (which is an important requirement for e.g. PnL Explain). Perhaps the
// portfolio should be loaded independently of the cycle version correction, so latest always means latest?
Portfolio portfolio;
try {
if (portfolioId.isVersioned()) {
portfolio = positionSource.getPortfolio(portfolioId);
} else {
portfolio = positionSource.getPortfolio(portfolioId.getObjectId(), versionCorrection);
}
} catch (DataNotFoundException ex) {
throw new OpenGammaRuntimeException("Unable to resolve portfolio '" + portfolioId + "' in position source '"
+ positionSource + "' used by view definition '" + compilationContext.getViewDefinition().getName() + "'", ex);
=======
final ComputationTarget target = versioned.resolve(specification);
if (target == null) {
throw new OpenGammaRuntimeException("Unable to resolve '" + specification + "' for view '" + compilationContext.getViewDefinition().getName() + "'");
>>>>>>> YOURS
}
<<<<<<< MINE
return portfolio;
=======
return target.getValue(ComputationTargetType.PORTFOLIO);
}
/**
* Resolves the securities in the portfolio at the latest version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource) {
return resolvePortfolio(portfolio, executorService, securitySource, VersionCorrection.LATEST);
}
/**
* Resolves the securities in the portfolio at the given version-correction.
*
* @param portfolio the portfolio to resolve, not null
* @param executorService the threading service, not null
* @param securitySource the security source, not null
* @param versionCorrection the version-correction for security resolution, not null
* @return the resolved portfolio, not null
*/
public static Portfolio resolvePortfolio(final Portfolio portfolio, final ExecutorService executorService, final SecuritySource securitySource, final VersionCorrection versionCorrection) {
final Portfolio cloned = new SimplePortfolio(portfolio);
new SecurityLinkResolver(executorService, securitySource, versionCorrection).resolveSecurities(cloned.getRootNode());
return cloned;
>>>>>>> YOURS
}
}
Diff Result
No diff
Case 54 - java_ogplatform.rev_412e2_f4b6f..InMemoryConfigMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifiables;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectIdentifier, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final MasterChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueIdentifier uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
UniqueIdentifiables.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueIdentifier uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueIdentifier uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifiables;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectIdentifier, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final MasterChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueIdentifier uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
UniqueIdentifiables.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueIdentifier uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueIdentifier uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifiables;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectIdentifier, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueIdentifier uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
UniqueIdentifiables.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the configuration document for the specified identifier and fires a
 * REMOVED change event.
 *
 * @param uniqueId  the identifier of the document to remove, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public void remove(UniqueIdentifier uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final ConfigDocument<?> removed = _store.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a configuration document. This in-memory master does not version
 * documents, so a correction is identical to an update.
 *
 * @param <T>  the configuration element type
 * @param document  the document to correct, not null
 * @return the corrected document, not null
 */
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager that broadcasts add/update/remove events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by identifier, checked against the expected type.
 *
 * @param <T>  the configuration element type
 * @param uniqueId  the document identifier, not null
 * @param clazz  the expected type of the stored value, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if no document exists or the value has a different type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueIdentifier uniqueId, Class<T> clazz) {
  ArgumentChecker.notNull(clazz, "clazz");
  final ConfigDocument<?> untyped = get(uniqueId);
  if (clazz.isInstance(untyped.getValue())) {
    return (ConfigDocument<T>) untyped;
  }
  // A type mismatch is reported the same way as a missing document.
  throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
//-------------------------------------------------------------------------
/**
 * Gets meta-data about this master, currently the distinct set of stored
 * configuration value types (when requested).
 *
 * @param request  the meta-data request, not null
 * @return the populated meta-data result, not null
 */
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes()) {
    // Collect into a set first so each value class appears at most once.
    final Set<Class<?>> distinctTypes = Sets.newHashSet();
    for (final ConfigDocument<?> stored : _store.values()) {
      distinctTypes.add(stored.getValue().getClass());
    }
    result.getConfigTypes().addAll(distinctTypes);
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches the stored documents, returning the page of matches selected by the
 * request's paging settings.
 *
 * @param <T>  the configuration element type
 * @param request  the search request, not null
 * @return the paged search result, not null
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  final List<ConfigDocument<T>> matches = new ArrayList<ConfigDocument<T>>();
  for (final ConfigDocument<?> candidate : _store.values()) {
    if (request.matches(candidate)) {
      matches.add((ConfigDocument<T>) candidate);
    }
  }
  final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
  // Paging reflects the full match count; only the selected page is returned.
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a document. This master is unversioned, so the history
 * contains at most the single latest document.
 *
 * @param <T>  the configuration element type
 * @param request  the history request, not null (objectId and type required)
 * @return the history result holding the latest document, not null
 */
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  ArgumentChecker.notNull(request.getType(), "request.configClazz");
  final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
  final ConfigDocument<T> latest = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
  if (latest != null) {
    result.getDocuments().add(latest);
  }
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by object identifier and version-correction,
 * checked against the expected type.
 *
 * @param <T>  the configuration element type
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if no document exists or the value has a different type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  ArgumentChecker.notNull(clazz, "clazz");
  final ConfigDocument<?> untyped = get(objectId, versionCorrection);
  if (clazz.isInstance(untyped.getValue())) {
    return (ConfigDocument<T>) untyped;
  }
  // A type mismatch is reported the same way as a missing document.
  throw new DataNotFoundException("Config not found: " + objectId);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifiables;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemCfg";

  /**
   * A cache of config documents by object identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectIdentifier, ConfigDocument<?>>();
  /**
   * The supplier of object identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default identifier scheme.
   */
  public InMemoryConfigMaster() {
    this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final ChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by unique identifier. This master is unversioned, so any
   * version resolves to the latest document.
   *
   * @param uniqueId  the document identifier, not null
   * @return the document, not null
   */
  @Override
  public ConfigDocument<?> get(UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier. The version-correction is validated
   * but ignored, as this master stores only the latest version.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @return the document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ConfigDocument<?> document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a document, assigning a freshly generated identifier and firing an
   * ADDED change event.
   *
   * @param <T>  the configuration element type
   * @param document  the document to add, not null (name and value required)
   * @return the stored document with identifier and version set, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getName(), "document.name");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final Object value = document.getValue();
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final Instant now = Instant.now();
    UniqueIdentifiables.setInto(value, uniqueId);
    final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
    doc.setName(document.getName());
    doc.setValue(value);
    doc.setUniqueId(uniqueId);
    doc.setVersionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return (ConfigDocument<T>) doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a stored document, stamping it with the current instant and
   * swapping it in via an atomic compare-and-set.
   *
   * @param <T>  the configuration element type
   * @param document  the document to update, not null (uniqueId and value required)
   * @return the updated document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Idiomatic negation instead of '== false'; replace() is the map's atomic CAS.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes the document for the specified identifier, firing a REMOVED event.
   *
   * @param uniqueId  the identifier of the document to remove, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document. This master is unversioned, so a correction is
   * identical to an update.
   *
   * @param <T>  the configuration element type
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager that broadcasts add/update/remove events.
   *
   * @return the change manager, not null
   */
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by identifier, checked against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param uniqueId  the document identifier, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(UniqueIdentifier uniqueId, Class<T> clazz) {
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(uniqueId);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
    }
    return (ConfigDocument<T>) document;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about this master, currently the distinct set of stored
   * configuration value types (when requested).
   *
   * @param request  the meta-data request, not null
   * @return the populated meta-data result, not null
   */
  @Override
  public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    final ConfigMetaDataResult result = new ConfigMetaDataResult();
    if (request.isConfigTypes()) {
      final Set<Class<?>> types = Sets.newHashSet();
      for (final ConfigDocument<?> doc : _store.values()) {
        types.add(doc.getValue().getClass());
      }
      result.getConfigTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches the stored documents, returning the page selected by the request.
   *
   * @param <T>  the configuration element type
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
    for (final ConfigDocument<?> doc : _store.values()) {
      if (request.matches(doc)) {
        list.add((ConfigDocument<T>) doc);
      }
    }
    final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
    // Paging reflects the full match count; only the selected page is returned.
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a document. Being unversioned, the history contains at
   * most the single latest document.
   *
   * @param <T>  the configuration element type
   * @param request  the history request, not null (objectId and type required)
   * @return the history result holding the latest document, not null
   */
  @Override
  public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    ArgumentChecker.notNull(request.getType(), "request.configClazz");
    final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
    final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier and version-correction, checked
   * against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(objectId, versionCorrection);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return (ConfigDocument<T>) document;
  }

}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemCfg";

  /**
   * A cache of config documents by object identifier.
   */
  private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
  /**
   * The supplier of object identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default identifier scheme.
   */
  public InMemoryConfigMaster() {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by unique identifier. This master is unversioned, so any
   * version resolves to the latest document.
   *
   * @param uniqueId  the document identifier, not null
   * @return the document, not null
   */
  @Override
  public ConfigDocument<?> get(UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier. The version-correction is validated
   * but ignored, as this master stores only the latest version.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @return the document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ConfigDocument<?> document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a document, assigning a freshly generated identifier and firing an
   * ADDED change event.
   *
   * @param <T>  the configuration element type
   * @param document  the document to add, not null (name and value required)
   * @return the stored document with identifier and version set, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getName(), "document.name");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final Object value = document.getValue();
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final Instant now = Instant.now();
    IdUtils.setInto(value, uniqueId);
    final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
    doc.setName(document.getName());
    doc.setValue(value);
    doc.setUniqueId(uniqueId);
    doc.setVersionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return (ConfigDocument<T>) doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a stored document, stamping it with the current instant and
   * swapping it in via an atomic compare-and-set.
   *
   * @param <T>  the configuration element type
   * @param document  the document to update, not null (uniqueId and value required)
   * @return the updated document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Idiomatic negation instead of '== false'; replace() is the map's atomic CAS.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes the document for the specified identifier, firing a REMOVED event.
   *
   * @param uniqueId  the identifier of the document to remove, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document. This master is unversioned, so a correction is
   * identical to an update.
   *
   * @param <T>  the configuration element type
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager that broadcasts add/update/remove events.
   *
   * @return the change manager, not null
   */
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by identifier, checked against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param uniqueId  the document identifier, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(uniqueId);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
    }
    return (ConfigDocument<T>) document;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about this master, currently the distinct set of stored
   * configuration value types (when requested).
   *
   * @param request  the meta-data request, not null
   * @return the populated meta-data result, not null
   */
  @Override
  public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    final ConfigMetaDataResult result = new ConfigMetaDataResult();
    if (request.isConfigTypes()) {
      final Set<Class<?>> types = Sets.newHashSet();
      for (final ConfigDocument<?> doc : _store.values()) {
        types.add(doc.getValue().getClass());
      }
      result.getConfigTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches the stored documents, returning the page selected by the request.
   *
   * @param <T>  the configuration element type
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
    for (final ConfigDocument<?> doc : _store.values()) {
      if (request.matches(doc)) {
        list.add((ConfigDocument<T>) doc);
      }
    }
    final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
    // Paging reflects the full match count; only the selected page is returned.
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a document. Being unversioned, the history contains at
   * most the single latest document.
   *
   * @param <T>  the configuration element type
   * @param request  the history request, not null (objectId and type required)
   * @return the history result holding the latest document, not null
   */
  @Override
  public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    ArgumentChecker.notNull(request.getType(), "request.configClazz");
    final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
    final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier and version-correction, checked
   * against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(objectId, versionCorrection);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return (ConfigDocument<T>) document;
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemCfg";

  /**
   * A cache of config documents by object identifier.
   */
  private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
  /**
   * The supplier of object identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default identifier scheme.
   */
  public InMemoryConfigMaster() {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by unique identifier. This master is unversioned, so any
   * version resolves to the latest document.
   *
   * @param uniqueId  the document identifier, not null
   * @return the document, not null
   */
  @Override
  public ConfigDocument<?> get(UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier. The version-correction is validated
   * but ignored, as this master stores only the latest version.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @return the document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ConfigDocument<?> document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a document, assigning a freshly generated identifier and firing an
   * ADDED change event.
   *
   * @param <T>  the configuration element type
   * @param document  the document to add, not null (name and value required)
   * @return the stored document with identifier and version set, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getName(), "document.name");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final Object value = document.getValue();
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final Instant now = Instant.now();
    IdUtils.setInto(value, uniqueId);
    final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
    doc.setName(document.getName());
    doc.setValue(value);
    doc.setUniqueId(uniqueId);
    doc.setVersionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return (ConfigDocument<T>) doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a stored document, stamping it with the current instant and
   * swapping it in via an atomic compare-and-set.
   *
   * @param <T>  the configuration element type
   * @param document  the document to update, not null (uniqueId and value required)
   * @return the updated document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Idiomatic negation instead of '== false'; replace() is the map's atomic CAS.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes the document for the specified identifier, firing a REMOVED event.
   *
   * @param uniqueId  the identifier of the document to remove, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document. This master is unversioned, so a correction is
   * identical to an update.
   *
   * @param <T>  the configuration element type
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager that broadcasts add/update/remove events.
   *
   * @return the change manager, not null
   */
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by identifier, checked against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param uniqueId  the document identifier, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(uniqueId);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
    }
    return (ConfigDocument<T>) document;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about this master, currently the distinct set of stored
   * configuration value types (when requested).
   *
   * @param request  the meta-data request, not null
   * @return the populated meta-data result, not null
   */
  @Override
  public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    final ConfigMetaDataResult result = new ConfigMetaDataResult();
    if (request.isConfigTypes()) {
      final Set<Class<?>> types = Sets.newHashSet();
      for (final ConfigDocument<?> doc : _store.values()) {
        types.add(doc.getValue().getClass());
      }
      result.getConfigTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches the stored documents, returning the page selected by the request.
   *
   * @param <T>  the configuration element type
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
    for (final ConfigDocument<?> doc : _store.values()) {
      if (request.matches(doc)) {
        list.add((ConfigDocument<T>) doc);
      }
    }
    final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
    // Paging reflects the full match count; only the selected page is returned.
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a document. Being unversioned, the history contains at
   * most the single latest document.
   *
   * @param <T>  the configuration element type
   * @param request  the history request, not null (objectId and type required)
   * @return the history result holding the latest document, not null
   */
  @Override
  public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    ArgumentChecker.notNull(request.getType(), "request.configClazz");
    final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
    final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier and version-correction, checked
   * against the expected value type.
   *
   * @param <T>  the configuration element type
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction locator, not null
   * @param clazz  the expected type of the stored value, not null
   * @return the typed document, not null
   * @throws DataNotFoundException if no document exists or the value has a different type
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    ArgumentChecker.notNull(clazz, "clazz");
    final ConfigDocument<?> document = get(objectId, versionCorrection);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return (ConfigDocument<T>) document;
  }

}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration documents by object identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: YOURS renamed ObjectIdentifier -> ObjectId, MINE renamed
// MasterChangeManager -> ChangeManager. The combined signature must use the
// new names on both axes to match the imports and the field declarations
// (_objectIdSupplier is Supplier<ObjectId>, _changeManager is ChangeManager).
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by unique identifier.
 * <p>
 * Delegates to the object-id/version-correction variant using the latest
 * version-correction; this master keeps only a single (latest) version.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
  return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by object identifier.
 * <p>
 * The version-correction argument is null-checked but otherwise ignored:
 * this in-memory master stores only the latest version of each document.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction, not null (unused beyond validation)
 * @return the stored document, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  final ConfigDocument<?> document = _store.get(objectId.getObjectId());
  if (document == null) {
    throw new DataNotFoundException("Config not found: " + objectId);
  }
  return document;
}
//-------------------------------------------------------------------------
/**
 * Adds a configuration document, allocating a fresh object identifier.
 * <p>
 * A new internal document is created rather than storing the caller's
 * instance; the caller's value object has the new unique identifier set
 * into it via {@code IdUtils.setInto}. An ADDED change event is fired.
 *
 * @param <T>  the configuration element type
 * @param document  the document to add; name and value must be non-null
 * @return the stored document with identifier and version instant set, not null
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getName(), "document.name");
  ArgumentChecker.notNull(document.getValue(), "document.value");
  final Object value = document.getValue();
  final ObjectId objectId = _objectIdSupplier.get();
  // Version string is empty because this master does not version documents.
  final UniqueId uniqueId = objectId.atVersion("");
  final Instant now = Instant.now();
  IdUtils.setInto(value, uniqueId);
  final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
  doc.setName(document.getName());
  doc.setValue(value);
  doc.setUniqueId(uniqueId);
  doc.setVersionFromInstant(now);
  _store.put(objectId, doc);
  _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
  return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
/**
 * Updates a configuration document in place via compare-and-swap.
 * <p>
 * The caller's document is mutated (version/correction instants reset) and
 * then swapped against the currently stored document. An UPDATED change
 * event is fired on success.
 * <p>
 * NOTE(review): the instants are set on {@code document} before the CAS, so
 * if the replace fails the caller's document has already been modified.
 * NOTE(review): {@code ConcurrentMap.replace} compares the stored document
 * by {@code equals} — presumably ConfigDocument uses identity or full-state
 * equality; verify, as this determines how reliably concurrent updates are
 * detected.
 *
 * @param <T>  the configuration element type
 * @param document  the document to update; uniqueId and value must be non-null
 * @return the updated document (same instance as the argument), not null
 * @throws DataNotFoundException if no document exists for the identifier
 * @throws IllegalArgumentException if a concurrent modification is detected
 */
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getValue(), "document.value");
  final UniqueId uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the configuration document with the given identifier and fires a
 * REMOVED change event.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a configuration document.
 * <p>
 * Since this master does not version documents, a correction is
 * indistinguishable from an update and simply delegates to it.
 *
 * @param <T>  the configuration element type
 * @param document  the document to correct, not null
 * @return the corrected document, not null
 */
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
  return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager that broadcasts add/update/remove events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
  return _changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by unique identifier, checked against an
 * expected element type.
 *
 * @param <T>  the configuration element type
 * @param uniqueId  the unique identifier, not null
 * @param clazz  the expected type of the document value, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if no document exists or its value is not of the expected type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
  ArgumentChecker.notNull(clazz, "clazz");
  ConfigDocument<?> document = get(uniqueId);
  // A type mismatch is reported as not-found, same as a missing document.
  if (!clazz.isInstance(document.getValue())) {
    throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
  }
  return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
/**
 * Returns meta-data about the stored configuration.
 * <p>
 * When the request asks for configuration types, the distinct runtime
 * classes of all stored values are collected into the result.
 *
 * @param request  the meta-data request, not null
 * @return the meta-data result, not null
 */
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes() == false) {
    return result;
  }
  final Set<Class<?>> seenTypes = Sets.newHashSet();
  for (final ConfigDocument<?> stored : _store.values()) {
    seenTypes.add(stored.getValue().getClass());
  }
  result.getConfigTypes().addAll(seenTypes);
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for configuration documents matching the request, with paging.
 * <p>
 * Performs a linear scan of the store, filtering via
 * {@code request.matches}, then applies the request's paging to the
 * matched list.
 *
 * @param <T>  the configuration element type
 * @param request  the search request, not null
 * @return the paged search result, not null
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
  for (ConfigDocument<?> doc : _store.values()) {
    if (request.matches(doc)) {
      list.add((ConfigDocument<T>) doc);
    }
  }
  final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
  // Paging reflects the full match count; only the selected page is returned.
  result.setPaging(Paging.of(request.getPagingRequest(), list));
  List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
  result.getDocuments().addAll(select);
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a configuration document.
 * <p>
 * As this master does not version documents, the "history" is at most the
 * single latest document.
 * <p>
 * NOTE(review): the typed {@code get} below throws DataNotFoundException
 * rather than returning null, so a missing document propagates an exception
 * instead of yielding an empty history result — confirm this matches the
 * ConfigMaster contract.
 *
 * @param <T>  the configuration element type
 * @param request  the history request; objectId and type must be non-null
 * @return the history result containing at most one document, not null
 */
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  ArgumentChecker.notNull(request.getType(), "request.configClazz");
  final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
  final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
  if (doc != null) {
    result.getDocuments().add(doc);
  }
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a configuration document by object identifier and version-correction,
 * checked against an expected element type.
 *
 * @param <T>  the configuration element type
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction, not null
 * @param clazz  the expected type of the document value, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if no document exists or its value is not of the expected type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  ArgumentChecker.notNull(clazz, "clazz");
  ConfigDocument<?> document = get(objectId, versionCorrection);
  // A type mismatch is reported as not-found, same as a missing document.
  if (!clazz.isInstance(document.getValue())) {
    throw new DataNotFoundException("Config not found: " + objectId);
  }
  return (ConfigDocument<T>) document;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration documents by object identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: YOURS renamed ObjectIdentifier -> ObjectId, MINE renamed
// MasterChangeManager -> ChangeManager. The combined signature must use the
// new names on both axes to match the imports and the field declarations
// (_objectIdSupplier is Supplier<ObjectId>, _changeManager is ChangeManager).
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration documents by object identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 *
 * @param changeManager  the change manager, not null
 */
public InMemoryConfigMaster(final ChangeManager changeManager) {
  // ObjectIdSupplier is the imported supplier type; the previous
  // ObjectIdentifierSupplier/MasterChangeManager variants referenced types
  // that no longer exist after the id/change-manager renames.
  this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 */
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * <p>
 * Note: a keep-both merge previously declared overloads taking
 * {@code Supplier<ObjectIdentifier>} and {@code Supplier<ObjectId>}; those have
 * the same erasure ({@code Supplier}) and cannot coexist, so only the
 * post-rename {@code ObjectId} form is retained.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 * @param changeManager  the change manager, not null
 */
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the configuration document with the given identifier and fires a
 * REMOVED change event.
 * <p>
 * Note: a keep-both merge previously retained a duplicate
 * {@code remove(UniqueIdentifier)} overload that referenced the pre-rename
 * {@code UniqueIdentifier} type and the removed
 * {@code masterChanged(MasterChangedType, ...)} API; only the post-rename
 * {@code UniqueId}/{@code entityChanged} form compiles against the imports.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemCfg";

  /**
   * A cache of configuration documents by object identifier.
   */
  private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme and a basic change manager.
   */
  public InMemoryConfigMaster() {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final ChangeManager changeManager) {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public ConfigDocument<?> get(UniqueId uniqueId) {
    // versioning is not supported, so the latest version is always returned
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  @Override
  public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ConfigDocument<?> document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getName(), "document.name");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final Object value = document.getValue();
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final Instant now = Instant.now();
    IdUtils.setInto(value, uniqueId);
    final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
    doc.setName(document.getName());
    doc.setValue(value);
    doc.setUniqueId(uniqueId);
    doc.setVersionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return (ConfigDocument<T>) doc;
  }

  //-------------------------------------------------------------------------
  @Override
  public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getValue(), "document.value");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-set guards against a concurrent update of the same document
    if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
    // corrections behave as updates because versioning is not supported
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
    ArgumentChecker.notNull(clazz, "clazz");
    ConfigDocument<?> document = get(uniqueId);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
    }
    return (ConfigDocument<T>) document;
  }

  //-------------------------------------------------------------------------
  @Override
  public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    ConfigMetaDataResult result = new ConfigMetaDataResult();
    if (request.isConfigTypes()) {
      // collect the distinct configuration value classes currently stored
      Set<Class<?>> types = Sets.newHashSet();
      for (ConfigDocument<?> doc : _store.values()) {
        types.add(doc.getValue().getClass());
      }
      result.getConfigTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
    for (ConfigDocument<?> doc : _store.values()) {
      if (request.matches(doc)) {
        list.add((ConfigDocument<T>) doc);
      }
    }
    final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
    result.getDocuments().addAll(select);
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    ArgumentChecker.notNull(request.getType(), "request.configClazz");
    final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
    // versioning is not supported, so history is at most the single latest document
    final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    ArgumentChecker.notNull(clazz, "clazz");
    ConfigDocument<?> document = get(objectId, versionCorrection);
    if (!clazz.isInstance(document.getValue())) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return (ConfigDocument<T>) document;
  }

}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
=======
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
>>>>>>> YOURS
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
=======
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
>>>>>>> YOURS
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueIdentifier uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
=======
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
>>>>>>> YOURS
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
=======
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
>>>>>>> YOURS
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueIdentifier uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  ArgumentChecker.notNull(clazz, "clazz");
  // Look up untyped, then narrow; a type mismatch is reported as not-found.
  final ConfigDocument<?> untyped = get(objectId, versionCorrection);
  if (clazz.isInstance(untyped.getValue()) == false) {
    throw new DataNotFoundException("Config not found: " + objectId);
  }
  return (ConfigDocument<T>) untyped;
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
=======
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.IdUtils;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
=======
public InMemoryConfigMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryConfigMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
=======
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
}
Diff Result
No diff
Case 55 - java_ogplatform.rev_412e2_f4b6f..InMemoryHistoricalTimeSeriesMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectIdentifier, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final MasterChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
  // Default: "MemHts"-scheme identifiers and a basic change manager (via the chained ctors).
  this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 * <p>
 * NOTE(review): this class uses the older {@code MasterChangeManager}/{@code ObjectIdentifier}
 * API, as imported at the top of this file.
 *
 * @param changeManager  the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final MasterChangeManager changeManager) {
  this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
  this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * All other constructors delegate here.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 * @param changeManager  the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  // Each requested dimension is answered by one pass over the stored info documents,
  // de-duplicating values through a HashSet.
  if (request.isDataFields()) {
    final Set<String> dataFields = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataFields.add(stored.getInfo().getDataField());
    }
    result.getDataFields().addAll(dataFields);
  }
  if (request.isDataSources()) {
    final Set<String> dataSources = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataSources.add(stored.getInfo().getDataSource());
    }
    result.getDataSources().addAll(dataSources);
  }
  if (request.isDataProviders()) {
    final Set<String> dataProviders = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataProviders.add(stored.getInfo().getDataProvider());
    }
    result.getDataProviders().addAll(dataProviders);
  }
  if (request.isObservationTimes()) {
    final Set<String> observationTimes = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      observationTimes.add(stored.getInfo().getObservationTime());
    }
    result.getObservationTimes().addAll(observationTimes);
  }
  return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Linear scan of the info store, keeping every document the request matches.
  final List<HistoricalTimeSeriesInfoDocument> matches = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  for (HistoricalTimeSeriesInfoDocument candidate : _storeInfo.values()) {
    if (request.matches(candidate)) {
      matches.add(candidate);
    }
  }
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueIdentifier uniqueId) {
  // Delegate to the version-correction overload; only LATEST is meaningful here.
  return get(uniqueId, VersionCorrection.LATEST);
}

//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
  validateId(objectKey);
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  final ObjectIdentifier oid = objectKey.getObjectId();
  final HistoricalTimeSeriesInfoDocument stored = _storeInfo.get(oid);
  if (stored == null) {
    throw new DataNotFoundException("Historical time-series not found: " + oid);
  }
  return stored;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
  validateDocument(document);
  // Assign a fresh object id and store a clone so the caller's document is untouched.
  final ObjectIdentifier assignedId = _objectIdSupplier.get();
  final UniqueIdentifier uniqueId = assignedId.atVersion("");
  final HistoricalTimeSeriesInfoDocument stored = JodaBeanUtils.clone(document);
  final ManageableHistoricalTimeSeriesInfo info = stored.getInfo();
  info.setUniqueId(uniqueId);
  final Instant now = Instant.now();
  stored.setVersionFromInstant(now);
  stored.setCorrectionFromInstant(now);
  stored.getInfo().setTimeSeriesObjectId(assignedId);
  _storeInfo.put(assignedId, stored);
  _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
  return stored;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
  // Replaces the stored info document in place; no version history is kept.
  // NOTE(review): mutates the caller's document (version/correction instants) — confirm intended.
  validateDocument(document);
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  final UniqueIdentifier uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  // Open-ended version/correction range starting now.
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  // Compare-and-set: fails if another thread replaced the document since the get above.
  if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueIdentifier uniqueId) {
  validateId(uniqueId);
  // remove() returns the previous mapping; null means nothing was stored under this id.
  final HistoricalTimeSeriesInfoDocument removed = _storeInfo.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}

//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
  // No correction history is kept; a correction is simply an update.
  return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  // This master keeps no versions, so the "history" is at most the latest document.
  final HistoricalTimeSeriesInfoDocument latest = get(request.getObjectId(), VersionCorrection.LATEST);
  if (latest != null) {
    result.getDocuments().add(latest);
  }
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueIdentifier uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  // Version/correction are not tracked by this in-memory implementation; always LATEST.
  return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}

//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  validateId(objectKey);
  // Null bounds are widened to sentinel min/max dates.
  fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
  toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
  ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
  final ObjectIdentifier objectId = objectKey.getObjectId();
  final Instant now = Instant.now();
  LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
  if (existingSeries == null) {
    // Only fail if the series itself is unknown; a known series with no points
    // yet is served as an empty series.
    if (_storeInfo.get(objectId) == null) {
      throw new DataNotFoundException("Historical time-series not found: " + objectId);
    }
    existingSeries = new ArrayLocalDateDoubleTimeSeries();
  }
  // NOTE(review): assumes subSeries(from, to) treats both bounds as inclusive —
  // confirm against the LocalDateDoubleTimeSeries contract.
  final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
  final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
  result.setUniqueId(objectId.atLatestVersion());
  result.setTimeSeries(subSeries);
  // Earliest/latest reflect the FULL stored series, not the returned sub-range.
  result.setEarliest(existingSeries.getEarliestTime());
  result.setLatest(existingSeries.getLatestTime());
  result.setVersionInstant(now);
  result.setCorrectionInstant(now);
  return result;
}
//-------------------------------------------------------------------------
/**
 * Appends new data points; the new series must not overlap the dates
 * already stored for this object.
 */
@Override
public UniqueIdentifier updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
// Reject series that would overlap the stored data.
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
// Compare-and-swap: fails if another thread changed the series since the read above.
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Corrects data points by union-merging the supplied series over the stored one.
 */
@Override
public UniqueIdentifier correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
// SECOND_OPERATOR: on colliding dates the value from the second operand (the
// corrections) is presumably taken — confirm the operator's semantics.
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
// Compare-and-swap to detect a concurrent writer.
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes all data points whose dates fall inside the inclusive range.
 */
@Override
public UniqueIdentifier removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
// Null bounds mean "unbounded"; substitute sentinel extreme dates.
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectIdentifier objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// Nothing stored: removal is a no-op.
return objectId.atLatestVersion();
}
// Mutate a copy, deleting points inside [from, to], then swap it back in.
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// Compare-and-swap to detect a concurrent writer.
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
// NOTE(review): unlike update/correct, no change event is fired here — confirm intended.
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager used to broadcast master events.
 */
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the identifier is non-null and has a numeric value.
 *
 * @param objectId  the identifier to validate, not null
 * @return the numeric value of the identifier
 * @throws IllegalArgumentException if the identifier value is not numeric
 */
private long validateId(ObjectIdentifiable objectId) {
  ArgumentChecker.notNull(objectId, "objectId");
  try {
    return Long.parseLong(objectId.getObjectId().getValue());
  } catch (NumberFormatException ex) {
    // Chain the cause so the original parse failure is not lost from the stack trace.
    throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
  }
}
/**
 * Validates a document before add/update: a non-null info payload with at
 * least one identifier and non-blank source/provider/field/observation-time.
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
// The uniqueId is optional (absent on add) but must be numeric when present.
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getIdentifiers(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getIdentifiers().asIdentifierBundle().getIdentifiers().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectIdentifier, LocalDateDoubleTimeSeries>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final MasterChangeManager _changeManager;
/**
 * Creates an instance.
 */
public InMemoryHistoricalTimeSeriesMaster() {
// Use the default scheme for generated object identifiers.
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 *
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final MasterChangeManager changeManager) {
// Default identifier scheme, caller-supplied change manager.
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
// Caller-supplied identifier supplier, default change manager.
this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
// Both collaborators are mandatory; fail fast on null.
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Builds meta-data by scanning every stored info document and collecting
 * the distinct values for each field the request asks for.
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
// Distinct data fields.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
// Distinct data sources.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
// Distinct data providers.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
// Distinct observation times.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Collect every stored document that satisfies the request criteria.
  final List<HistoricalTimeSeriesInfoDocument> matches = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  for (final HistoricalTimeSeriesInfoDocument candidate : _storeInfo.values()) {
    if (request.matches(candidate)) {
      matches.add(candidate);
    }
  }
  // Page the matches according to the request and assemble the result.
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier at the latest version-correction.
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueIdentifier uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
  // Identifier must be valid (numeric) for this master.
  validateId(objectKey);
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  // Only the latest state is stored, so the lookup ignores the version-correction value.
  final ObjectIdentifier oid = objectKey.getObjectId();
  final HistoricalTimeSeriesInfoDocument stored = _storeInfo.get(oid);
  if (stored == null) {
    throw new DataNotFoundException("Historical time-series not found: " + oid);
  }
  return stored;
}
//-------------------------------------------------------------------------
/**
 * Adds a document, assigning a fresh object identifier. The caller's
 * document is not modified — a clone is stamped, stored and returned.
 */
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
// Clone so later caller-side mutation cannot affect the stored copy.
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
/**
 * Updates the stored document, stamping fresh version/correction instants.
 * The passed-in document instance is mutated and stored directly (no clone).
 */
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// Compare-and-swap against the previously read document to detect races.
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueIdentifier uniqueId) {
  // Identifier must be valid (numeric) before we touch the store.
  validateId(uniqueId);
  // Atomically take the document out of the info store; null means it was never present.
  final HistoricalTimeSeriesInfoDocument removed = _storeInfo.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  // Tell listeners the entity has gone.
  _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a document; in this in-memory master a correction is
 * indistinguishable from an update, so it simply delegates to {@code update}.
 */
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Returns the version history of a document. Only the latest version is
 * kept in memory, so the result holds at most one document.
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
// NOTE(review): get(...) throws DataNotFoundException rather than returning null,
// so the null-check below looks unreachable — confirm whether an unknown
// objectId should yield an empty history instead of an exception.
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series points for a unique identifier, delegating to the
 * object-identifier overload at the latest version-correction.
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueIdentifier uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Gets the stored data points clipped to the requested inclusive date range.
 * The versionCorrection argument is not used — only the latest state is stored.
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
// Null bounds mean "unbounded"; substitute sentinel extreme dates.
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectIdentifier objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// No points yet: only valid if the info document exists; then return an empty series.
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
// Clip to the requested inclusive range.
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
// Earliest/latest reflect the full stored series, not the clipped sub-series.
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
/**
 * Appends new data points; the new series must not overlap the dates
 * already stored for this object.
 */
@Override
public UniqueIdentifier updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
// Reject series that would overlap the stored data.
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
// Compare-and-swap: fails if another thread changed the series since the read above.
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Corrects data points by union-merging the supplied series over the stored one.
 */
@Override
public UniqueIdentifier correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
// SECOND_OPERATOR: on colliding dates the value from the second operand (the
// corrections) is presumably taken — confirm the operator's semantics.
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
// Compare-and-swap to detect a concurrent writer.
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes all data points whose dates fall inside the inclusive range.
 */
@Override
public UniqueIdentifier removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
// Null bounds mean "unbounded"; substitute sentinel extreme dates.
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectIdentifier objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// Nothing stored: removal is a no-op.
return objectId.atLatestVersion();
}
// Mutate a copy, deleting points inside [from, to], then swap it back in.
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// Compare-and-swap to detect a concurrent writer.
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
// NOTE(review): unlike update/correct, no change event is fired here — confirm intended.
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager used to broadcast master events.
 */
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the identifier is non-null and has a numeric value.
 *
 * @param objectId  the identifier to validate, not null
 * @return the numeric value of the identifier
 * @throws IllegalArgumentException if the identifier value is not numeric
 */
private long validateId(ObjectIdentifiable objectId) {
  ArgumentChecker.notNull(objectId, "objectId");
  try {
    return Long.parseLong(objectId.getObjectId().getValue());
  } catch (NumberFormatException ex) {
    // Chain the cause so the original parse failure is not lost from the stack trace.
    throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
  }
}
/**
 * Validates a document before add/update: a non-null info payload with at
 * least one identifier and non-blank source/provider/field/observation-time.
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
// The uniqueId is optional (absent on add) but must be numeric when present.
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getIdentifiers(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getIdentifiers().asIdentifierBundle().getIdentifiers().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectIdentifier, LocalDateDoubleTimeSeries>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
 * Creates an instance.
 */
public InMemoryHistoricalTimeSeriesMaster() {
// Use the default scheme for generated object identifiers.
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 *
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
// Default identifier scheme, caller-supplied change manager.
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
// Caller-supplied identifier supplier, default change manager.
this(objectIdSupplier, new BasicChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
// Both collaborators are mandatory; fail fast on null.
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Builds meta-data by scanning every stored info document and collecting
 * the distinct values for each field the request asks for.
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
// Distinct data fields.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
// Distinct data sources.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
// Distinct data providers.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
// Distinct observation times.
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Collect every stored document that satisfies the request criteria.
  final List<HistoricalTimeSeriesInfoDocument> matches = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  for (final HistoricalTimeSeriesInfoDocument candidate : _storeInfo.values()) {
    if (request.matches(candidate)) {
      matches.add(candidate);
    }
  }
  // Page the matches according to the request and assemble the result.
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier at the latest version-correction.
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueIdentifier uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
  // Identifier must be valid (numeric) for this master.
  validateId(objectKey);
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  // Only the latest state is stored, so the lookup ignores the version-correction value.
  final ObjectIdentifier oid = objectKey.getObjectId();
  final HistoricalTimeSeriesInfoDocument stored = _storeInfo.get(oid);
  if (stored == null) {
    throw new DataNotFoundException("Historical time-series not found: " + oid);
  }
  return stored;
}
//-------------------------------------------------------------------------
/**
 * Adds a document, assigning a fresh object identifier. The caller's
 * document is not modified — a clone is stamped, stored and returned.
 */
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
// Clone so later caller-side mutation cannot affect the stored copy.
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
/**
 * Updates the stored document, stamping fresh version/correction instants.
 * The passed-in document instance is mutated and stored directly (no clone).
 */
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// Compare-and-swap against the previously read document to detect races.
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the time-series info document for the given identifier.
 *
 * @param uniqueId  the identifier to remove, not null, value must parse as a long
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public void remove(final UniqueIdentifier uniqueId) {
  validateId(uniqueId);
  final HistoricalTimeSeriesInfoDocument removed = _storeInfo.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a document. In this in-memory master a correction is
 * indistinguishable from an update, so this simply delegates.
 * NOTE(review): as a consequence the change event fired is UPDATED, not
 * CORRECTED - confirm this is acceptable for listeners.
 */
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a document. This in-memory master keeps only the latest
 * version, so the result holds at most that single document.
 *
 * @param request  the history request, not null, with a non-null object identifier
 * @return the history result containing the latest document, not null
 * @throws DataNotFoundException if the document does not exist (propagated from get)
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  // get() throws DataNotFoundException rather than returning null, so the
  // previous "doc != null" guard was dead code and has been removed
  result.getDocuments().add(get(request.getObjectId(), VersionCorrection.LATEST));
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets time-series points by unique identifier, delegating to the object-id
 * variant with the LATEST version-correction.
 * NOTE(review): uniqueId is dereferenced without a null-check - a null argument
 * fails with NullPointerException rather than IllegalArgumentException.
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueIdentifier uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Gets the stored data points for an object identifier, clipped to the
 * inclusive date range. Null range bounds are clamped to wide sentinel dates.
 * <p>
 * NOTE(review): versionCorrection is accepted but never used - only the latest
 * version is stored. Also, getEarliestTime()/getLatestTime() behaviour on an
 * empty series is unverified from here - TODO confirm.
 *
 * @throws DataNotFoundException if neither points nor info exist for the identifier
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectIdentifier objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// no points stored: valid only if the info document exists; serve an empty series
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
/**
 * Appends data points to the stored series; if none are stored yet, the input
 * series becomes the stored series.
 * <p>
 * NOTE(review): getEarliestTime()/getLatestTime() behaviour on an empty series
 * is unverified from here - TODO confirm empty input is handled upstream.
 *
 * @return the unique identifier of the affected series, not null
 * @throws IllegalArgumentException if the new dates overlap existing ones,
 *  or on concurrent modification
 */
@Override
public UniqueIdentifier updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
// incoming points must not start before the last stored date
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
// compare-and-swap to detect concurrent writers
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Corrects data points by taking the union of the stored series and the
 * incoming series. SECOND_OPERATOR presumably makes the incoming value win on
 * date collisions - TODO confirm against DoubleTimeSeriesOperators.
 *
 * @return the unique identifier of the affected series, not null
 * @throws IllegalArgumentException on concurrent modification
 */
@Override
public UniqueIdentifier correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectIdentifier objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
// compare-and-swap to detect concurrent writers
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes all data points within the inclusive date range from the stored
 * series. A missing range bound is clamped to a wide sentinel date.
 *
 * @return the unique identifier of the affected series, not null
 * @throws IllegalArgumentException on concurrent modification
 */
@Override
public UniqueIdentifier removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  ArgumentChecker.notNull(objectKey, "objectKey");
  fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
  toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
  ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
  final ObjectIdentifier objectId = objectKey.getObjectId();
  final LocalDateDoubleTimeSeries original = _storePoints.get(objectId);
  if (original == null) {
    // nothing stored: removal is a no-op
    return objectId.atLatestVersion();
  }
  final MutableLocalDateDoubleTimeSeries trimmed = original.toMutableLocalDateDoubleTimeSeries();
  final Iterator<LocalDate> dates = trimmed.timeIterator();
  while (dates.hasNext()) {
    final LocalDate current = dates.next();
    // inclusive range test: keep points strictly outside [from, to]
    final boolean inRange = !current.isBefore(fromDateInclusive) && !current.isAfter(toDateInclusive);
    if (inRange) {
      dates.remove();
    }
  }
  if (!_storePoints.replace(objectId, original, trimmed.toLocalDateDoubleTimeSeries())) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Returns the change manager used to broadcast add/update/remove events.
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the object identifier is not null and has a numeric value.
 *
 * @param objectId  the identifier to validate, not null
 * @return the identifier value parsed as a long
 * @throws IllegalArgumentException if the value is not a valid long
 */
private long validateId(ObjectIdentifiable objectId) {
  ArgumentChecker.notNull(objectId, "objectId");
  try {
    return Long.parseLong(objectId.getObjectId().getValue());
  } catch (NumberFormatException ex) {
    // chain the cause for diagnosability (it was previously dropped)
    throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
  }
}
/**
 * Validates a document before add/update: the info payload must be present,
 * carry at least one identifier, and have non-blank data source, provider,
 * field and observation time.
 * NOTE(review): the error messages say "document.series" while the accessor is
 * getInfo() - presumably legacy naming; confirm before changing the messages.
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getIdentifiers(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getIdentifiers().asIdentifierBundle().getIdentifiers().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
 * An in-memory implementation of a historical time-series master.
 * <p>
 * Only the latest version of each document is held; there is no version
 * history. Mutating operations use {@code ConcurrentMap} compare-and-swap
 * calls (replace / putIfAbsent) to detect concurrent modification.
 */
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {

  /**
   * The default scheme used for each {@link UniqueIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemHts";

  /**
   * A cache of time-series info by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectIdentifier, HistoricalTimeSeriesInfoDocument>();
  /**
   * A cache of time-series points by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectIdentifier, LocalDateDoubleTimeSeries>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default object identifier scheme.
   */
  public InMemoryHistoricalTimeSeriesMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Collects the distinct meta-data values (data fields, sources, providers,
   * observation times) present in the stored documents, as requested.
   */
  @Override
  public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
    if (request.isDataFields()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataField());
      }
      result.getDataFields().addAll(types);
    }
    if (request.isDataSources()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataSource());
      }
      result.getDataSources().addAll(types);
    }
    if (request.isDataProviders()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataProvider());
      }
      result.getDataProviders().addAll(types);
    }
    if (request.isObservationTimes()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getObservationTime());
      }
      result.getObservationTimes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for documents matching the request, applying the request's paging.
   */
  @Override
  public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
    for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by unique identifier, delegating with the LATEST version-correction.
   */
  @Override
  public HistoricalTimeSeriesInfoDocument get(final UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier. The versionCorrection is validated
   * but otherwise ignored, as only the latest version is stored.
   *
   * @throws DataNotFoundException if the document does not exist
   */
  @Override
  public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
    validateId(objectKey);
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ObjectIdentifier objectId = objectKey.getObjectId();
    final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
    if (document == null) {
      throw new DataNotFoundException("Historical time-series not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a document, allocating a fresh identifier. The input is cloned, so
   * the caller's instance is not modified; the stored clone is returned.
   */
  @Override
  public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
    final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
    info.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    cloned.setVersionFromInstant(now);
    cloned.setCorrectionFromInstant(now);
    cloned.getInfo().setTimeSeriesObjectId(objectId);
    _storeInfo.put(objectId, cloned);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return cloned;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates an existing document, replacing the stored latest version.
   * NOTE(review): the instants are written into the caller's document before
   * the compare-and-swap; a failed CAS leaves the input mutated.
   *
   * @throws DataNotFoundException if the document is not currently stored
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic replace detects concurrent writers
    if (!_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes the document for the given identifier.
   *
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(final UniqueIdentifier uniqueId) {
    validateId(uniqueId);
    if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document; indistinguishable from an update in this in-memory
   * master, so this delegates (and hence fires an UPDATED event).
   */
  @Override
  public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a document; at most the single latest version is held.
   *
   * @throws DataNotFoundException if the document does not exist (propagated from get)
   */
  @Override
  public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
    // get() throws DataNotFoundException rather than returning null, so the
    // previous "doc != null" guard was dead code and has been removed
    result.getDocuments().add(get(request.getObjectId(), VersionCorrection.LATEST));
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets time-series points by unique identifier, delegating with the LATEST
   * version-correction.
   */
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(UniqueIdentifier uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the stored data points, clipped to the inclusive date range. Null
   * range bounds are clamped to wide sentinel dates. The versionCorrection is
   * accepted but unused, as only the latest version is stored.
   *
   * @throws DataNotFoundException if neither points nor info exist for the identifier
   */
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    validateId(objectKey);
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectIdentifier objectId = objectKey.getObjectId();
    final Instant now = Instant.now();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      // no points stored: valid only if the info document exists; serve an empty series
      if (_storeInfo.get(objectId) == null) {
        throw new DataNotFoundException("Historical time-series not found: " + objectId);
      }
      existingSeries = new ArrayLocalDateDoubleTimeSeries();
    }
    final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
    final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
    result.setUniqueId(objectId.atLatestVersion());
    result.setTimeSeries(subSeries);
    result.setEarliest(existingSeries.getEarliestTime());
    result.setLatest(existingSeries.getLatestTime());
    result.setVersionInstant(now);
    result.setCorrectionInstant(now);
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Appends data points; new points must not start before the last stored date.
   *
   * @throws IllegalArgumentException if dates overlap or on concurrent modification
   */
  @Override
  public UniqueIdentifier updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectIdentifier objectId = objectKey.getObjectId();
    final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
        throw new IllegalArgumentException("Unable to add time-series as dates overlap");
      }
      LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
      // compare-and-swap to detect concurrent writers
      if (!_storePoints.replace(objectId, existingSeries, newSeries)) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    final UniqueIdentifier uniqueId = objectId.atLatestVersion();
    changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects data points via a union of stored and incoming series
   * (SECOND_OPERATOR resolves date collisions).
   *
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public UniqueIdentifier correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectIdentifier objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
      if (!_storePoints.replace(objectId, existingSeries, newSeries)) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    final UniqueIdentifier uniqueId = objectId.atLatestVersion();
    changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes all data points within the inclusive date range. Null range bounds
   * are clamped to wide sentinel dates.
   *
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public UniqueIdentifier removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectIdentifier objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      return objectId.atLatestVersion();
    }
    MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
    for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
      LocalDate date = it.next();
      // inclusive range test: remove points inside [from, to]
      if (!date.isBefore(fromDateInclusive) && !date.isAfter(toDateInclusive)) {
        it.remove();
      }
    }
    if (!_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries())) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    return objectId.atLatestVersion();
  }

  //-------------------------------------------------------------------------
  /**
   * Returns the change manager used to broadcast events.
   */
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Validates that the object identifier is not null and has a numeric value.
   *
   * @return the identifier value parsed as a long
   * @throws IllegalArgumentException if the value is not a valid long
   */
  private long validateId(ObjectIdentifiable objectId) {
    ArgumentChecker.notNull(objectId, "objectId");
    try {
      return Long.parseLong(objectId.getObjectId().getValue());
    } catch (NumberFormatException ex) {
      // chain the cause for diagnosability (it was previously dropped)
      throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
    }
  }

  /**
   * Validates a document before add/update: the info payload must be present,
   * carry at least one identifier, and have non-blank data source, provider,
   * field and observation time.
   */
  private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
    ArgumentChecker.notNull(document, "document");
    if (document.getUniqueId() != null) {
      validateId(document.getUniqueId());
    }
    ArgumentChecker.notNull(document.getInfo(), "document.series");
    ArgumentChecker.notNull(document.getInfo().getIdentifiers(), "document.series.identifiers");
    ArgumentChecker.isTrue(document.getInfo().getIdentifiers().asIdentifierBundle().getIdentifiers().size() > 0, "document.series.identifiers must not be empty");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
  }

}
// ===== "Right" revision follows: ObjectIdentifier/UniqueIdentifier renamed to ObjectId/UniqueId, ChangeManager replaced by MasterChangeManager =====
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
 * The default scheme used for each {@link UniqueId}.
 */
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
 * A cache of time-series info by identifier.
 */
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
 * A cache of time-series points by identifier.
 */
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
 * The supplier of identifiers.
 */
private final Supplier<ObjectId> _objectIdSupplier;
/**
 * The change manager.
 */
private final MasterChangeManager _changeManager;
/**
 * Creates an instance using the default object identifier scheme.
 */
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 * Delegates to the two-argument constructor with the default scheme supplier.
 *
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 * A default {@link BasicMasterChangeManager} is used for change events.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * Arguments are validated fail-fast before assignment.
 *
 * @param objectIdSupplier the supplier of object identifiers, not null
 * @param changeManager the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Collects the distinct meta-data values (data fields, sources, providers,
 * observation times) present in the stored documents, as requested.
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  if (request.isDataFields()) {
    final Set<String> dataFields = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataFields.add(stored.getInfo().getDataField());
    }
    result.getDataFields().addAll(dataFields);
  }
  if (request.isDataSources()) {
    final Set<String> dataSources = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataSources.add(stored.getInfo().getDataSource());
    }
    result.getDataSources().addAll(dataSources);
  }
  if (request.isDataProviders()) {
    final Set<String> dataProviders = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      dataProviders.add(stored.getInfo().getDataProvider());
    }
    result.getDataProviders().addAll(dataProviders);
  }
  if (request.isObservationTimes()) {
    final Set<String> observationTimes = new HashSet<String>();
    for (HistoricalTimeSeriesInfoDocument stored : _storeInfo.values()) {
      observationTimes.add(stored.getInfo().getObservationTime());
    }
    result.getObservationTimes().addAll(observationTimes);
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches the stored documents, returning the page selected by the request.
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  final List<HistoricalTimeSeriesInfoDocument> matched = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  for (HistoricalTimeSeriesInfoDocument candidate : _storeInfo.values()) {
    if (request.matches(candidate)) {
      matched.add(candidate);
    }
  }
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matched));
  result.getDocuments().addAll(request.getPagingRequest().select(matched));
  return result;
}
//-------------------------------------------------------------------------
/**
* Gets the latest version of the document, delegating to
* {@link #get(ObjectIdentifiable, VersionCorrection)}.
*
* @param uniqueId the unique identifier, not null
* @return the document, not null
*/
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
/**
* Gets the stored document for the object identifier.
* The version-correction is null-checked but otherwise ignored:
* the in-memory store holds only the latest version.
*
* @param objectKey the object identifier, not null, must have a numeric value
* @param versionCorrection the version-correction, not null
* @return the document, not null
* @throws DataNotFoundException if no document exists for the identifier
*/
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
/**
* Adds a new document, storing a clone of the input so the caller's
* instance is not retained. A fresh object identifier is supplied and
* the unique identifier uses an empty version string.
*
* @param document the document to add, not null and valid
* @return the stored clone with identifiers and instants set, not null
*/
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
/**
* Updates an existing document in place.
* NOTE(review): unlike {@code add}, the caller's document is mutated
* (version/correction instants reset) and stored directly without
* cloning - confirm this asymmetry is intended.
*
* @param document the document to update, not null, with unique identifier set
* @return the updated document, not null
* @throws DataNotFoundException if no document exists for the identifier
* @throws IllegalArgumentException if another thread replaced the document concurrently
*/
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// compare-and-swap: fails if the stored document changed since the get above
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
* Removes the document for the identifier and fires a REMOVED event.
* Note: the points store is not cleared here; only the info document is removed.
*
* @param uniqueId the unique identifier to remove, not null, numeric value
* @throws DataNotFoundException if no document exists for the identifier
*/
@Override
public void remove(final UniqueId uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
* Corrects a document; in this single-version store a correction is
* implemented as a full update.
*
* @param document the document to correct, not null
* @return the corrected document, not null
*/
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Returns the document history. The in-memory master keeps a single
 * version, so the result contains at most one document.
 *
 * @param request  the history request, not null, with object identifier set
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  final HistoricalTimeSeriesInfoDocument latest = get(request.getObjectId(), VersionCorrection.LATEST);
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  if (latest != null) {
    result.getDocuments().add(latest);
  }
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
/**
* Gets the latest time-series points for the unique identifier,
* delegating to the full overload. Assumes {@code uniqueId} is non-null.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the inclusive start date, null for unbounded
* @param toDateInclusive the inclusive end date, null for unbounded
* @return the time-series, not null
*/
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
* Gets the stored points restricted to the requested date range.
* Null bounds are replaced with sentinel dates (years 1000 and 9999).
* If no points exist but the info document does, an empty series is used.
*
* @param objectKey the object identifier, not null, numeric value
* @param versionCorrection the version-correction (ignored; single-version store)
* @param fromDateInclusive the inclusive start date, null for unbounded
* @param toDateInclusive the inclusive end date, null for unbounded
* @return the time-series with earliest/latest metadata, not null
* @throws DataNotFoundException if neither points nor info exist for the identifier
*/
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
// NOTE(review): behavior of getEarliestTime/getLatestTime on an empty
// series is not visible here - confirm it does not throw.
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
/**
* Appends data points after the end of any existing series.
* Rejects series whose earliest date precedes the stored latest date.
* NOTE(review): {@code isBefore} allows a series starting exactly on the
* stored latest date through to {@code noIntersectionOperation} - confirm
* that operation rejects the duplicate date.
*
* @param objectKey the object identifier, not null
* @param series the points to append, not null
* @return the unique identifier of the series, not null
* @throws IllegalArgumentException if dates overlap or on concurrent modification
*/
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
// compare-and-swap: fails if another thread changed the points meanwhile
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
* Corrects data points by union-merging the supplied series with the
* stored one using {@code SECOND_OPERATOR} - presumably the correction
* value wins where dates overlap; confirm the operator semantics.
*
* @param objectKey the object identifier, not null
* @param series the correcting points, not null
* @return the unique identifier of the series, not null
* @throws IllegalArgumentException on concurrent modification
*/
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
// compare-and-swap: fails if another thread changed the points meanwhile
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
return uniqueId;
}
//-------------------------------------------------------------------------
/**
* Removes all points whose date lies in the inclusive range
* [fromDateInclusive, toDateInclusive]. Null bounds default to sentinel
* dates (years 1000 and 9999). A missing series is treated as a no-op.
*
* @param objectKey the object identifier, not null
* @param fromDateInclusive the inclusive start date, null for unbounded
* @param toDateInclusive the inclusive end date, null for unbounded
* @return the unique identifier of the series, not null
* @throws IllegalArgumentException on concurrent modification
*/
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
// not-before-from and not-after-to == inside the inclusive range
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// compare-and-swap: fails if another thread changed the points meanwhile
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
* Gets the change manager used to broadcast master events.
*
* @return the change manager, not null
*/
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
* Validates that the object identifier is non-null and has a numeric value.
*
* @param objectId the object identifier to validate, not null
* @return the numeric value of the identifier
* @throws IllegalArgumentException if the identifier value is not numeric
*/
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
// chain the parse failure so the root cause is not discarded
throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
}
}
/**
* Validates a document before add/update: non-null info, a non-empty
* identifier bundle, and non-blank data source, provider, field and
* observation time. A unique identifier, if present, must be numeric.
*
* @param document the document to validate, not null
* @throws IllegalArgumentException if any check fails
*/
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {

  /**
   * The default scheme used for each {@link UniqueId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemHts";

  /**
   * A cache of time-series info by identifier.
   */
  private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
  /**
   * A cache of time-series points by identifier.
   */
  private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default object identifier scheme.
   */
  public InMemoryHistoricalTimeSeriesMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Collects the distinct data fields, sources, providers and observation
   * times present in the store, for each category enabled on the request.
   *
   * @param request  the metadata request, not null
   * @return the populated metadata result, not null
   */
  @Override
  public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
    if (request.isDataFields()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataField());
      }
      result.getDataFields().addAll(types);
    }
    if (request.isDataSources()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataSource());
      }
      result.getDataSources().addAll(types);
    }
    if (request.isDataProviders()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataProvider());
      }
      result.getDataProviders().addAll(types);
    }
    if (request.isObservationTimes()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getObservationTime());
      }
      result.getObservationTimes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches the store by linear scan, returning the requested page.
   *
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @Override
  public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
    for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the latest version of the document.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the document, not null
   */
  @Override
  public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the stored document. The version-correction is null-checked but
   * otherwise ignored: the store holds only the latest version.
   *
   * @param objectKey  the object identifier, not null, numeric value
   * @param versionCorrection  the version-correction, not null
   * @return the document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
    validateId(objectKey);
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ObjectId objectId = objectKey.getObjectId();
    final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
    if (document == null) {
      throw new DataNotFoundException("Historical time-series not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a new document, storing a clone of the input with a fresh object
   * identifier and an empty version string.
   *
   * @param document  the document to add, not null and valid
   * @return the stored clone, not null
   */
  @Override
  public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
    final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
    info.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    cloned.setVersionFromInstant(now);
    cloned.setCorrectionFromInstant(now);
    cloned.getInfo().setTimeSeriesObjectId(objectId);
    _storeInfo.put(objectId, cloned);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return cloned;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates an existing document in place.
   * NOTE(review): the caller's document is mutated and stored without
   * cloning, unlike {@code add} - confirm this asymmetry is intended.
   *
   * @param document  the document to update, not null, with unique identifier set
   * @return the updated document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // compare-and-swap: fails if the stored document changed since the get above
    if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes the document and fires a REMOVED event.
   * Note: the points store is not cleared here.
   *
   * @param uniqueId  the unique identifier, not null, numeric value
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(final UniqueId uniqueId) {
    validateId(uniqueId);
    if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document; implemented as a full update in this single-version store.
   *
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Returns the document history; at most one document since only the
   * latest version is kept.
   *
   * @param request  the history request, not null, with object identifier set
   * @return the history result, not null
   */
  @Override
  public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
    final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the latest time-series points, delegating to the full overload.
   *
   * @param uniqueId  the unique identifier, not null
   * @param fromDateInclusive  the inclusive start date, null for unbounded
   * @param toDateInclusive  the inclusive end date, null for unbounded
   * @return the time-series, not null
   */
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the stored points restricted to the requested date range.
   * Null bounds are replaced with sentinel dates (years 1000 and 9999).
   *
   * @param objectKey  the object identifier, not null, numeric value
   * @param versionCorrection  the version-correction (ignored; single-version store)
   * @param fromDateInclusive  the inclusive start date, null for unbounded
   * @param toDateInclusive  the inclusive end date, null for unbounded
   * @return the time-series, not null
   * @throws DataNotFoundException if neither points nor info exist
   */
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    validateId(objectKey);
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectId objectId = objectKey.getObjectId();
    final Instant now = Instant.now();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      if (_storeInfo.get(objectId) == null) {
        throw new DataNotFoundException("Historical time-series not found: " + objectId);
      }
      existingSeries = new ArrayLocalDateDoubleTimeSeries();
    }
    final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
    final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
    result.setUniqueId(objectId.atLatestVersion());
    result.setTimeSeries(subSeries);
    result.setEarliest(existingSeries.getEarliestTime());
    result.setLatest(existingSeries.getLatestTime());
    result.setVersionInstant(now);
    result.setCorrectionInstant(now);
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Appends data points after the end of any existing series.
   *
   * @param objectKey  the object identifier, not null
   * @param series  the points to append, not null
   * @return the unique identifier of the series, not null
   * @throws IllegalArgumentException if dates overlap or on concurrent modification
   */
  @Override
  public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectId objectId = objectKey.getObjectId();
    final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
        throw new IllegalArgumentException("Unable to add time-series as dates overlap");
      }
      LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
      // compare-and-swap: fails if another thread changed the points meanwhile
      if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    final UniqueId uniqueId = objectId.atLatestVersion();
    changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects data points by union-merge with {@code SECOND_OPERATOR}.
   *
   * @param objectKey  the object identifier, not null
   * @param series  the correcting points, not null
   * @return the unique identifier of the series, not null
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectId objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
      if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    final UniqueId uniqueId = objectId.atLatestVersion();
    changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes all points in the inclusive range [fromDateInclusive, toDateInclusive].
   * A missing series is treated as a no-op.
   *
   * @param objectKey  the object identifier, not null
   * @param fromDateInclusive  the inclusive start date, null for unbounded
   * @param toDateInclusive  the inclusive end date, null for unbounded
   * @return the unique identifier of the series, not null
   * @throws IllegalArgumentException on concurrent modification
   */
  @Override
  public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectId objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      return objectId.atLatestVersion();
    }
    MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
    // not-before-from and not-after-to == inside the inclusive range
    for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
      LocalDate date = it.next();
      if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
        it.remove();
      }
    }
    if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    return objectId.atLatestVersion();
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast master events.
   *
   * @return the change manager, not null
   */
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Validates that the object identifier is non-null and has a numeric value.
   *
   * @param objectId  the object identifier to validate, not null
   * @return the numeric value of the identifier
   * @throws IllegalArgumentException if the identifier value is not numeric
   */
  private long validateId(ObjectIdentifiable objectId) {
    ArgumentChecker.notNull(objectId, "objectId");
    try {
      return Long.parseLong(objectId.getObjectId().getValue());
    } catch (NumberFormatException ex) {
      // chain the parse failure so the root cause is not discarded
      throw new IllegalArgumentException("Invalid objectId " + objectId, ex);
    }
  }

  /**
   * Validates a document before add/update: non-null info, a non-empty
   * identifier bundle, and non-blank source, provider, field and observation time.
   *
   * @param document  the document to validate, not null
   * @throws IllegalArgumentException if any check fails
   */
  private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
    ArgumentChecker.notNull(document, "document");
    if (document.getUniqueId() != null) {
      validateId(document.getUniqueId());
    }
    ArgumentChecker.notNull(document.getInfo(), "document.series");
    ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
    ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
  }
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance using the default scheme ({@code DEFAULT_OID_SCHEME})
* for object identifiers.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager, using the default
* scheme for object identifiers.
*
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers,
* using a new {@code BasicChangeManager} for change events.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
=======
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueIdentifier uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 * <p>
 * Collapses the former duplicate overloads: the variant taking the removed
 * {@code MasterChangeManager} type did not compile and has been dropped, and
 * the identifier supplier is the supported {@code ObjectIdSupplier}.
 *
 * @param changeManager  the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}

/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * <p>
 * The former {@code Supplier<ObjectIdentifier>} and {@code MasterChangeManager}
 * overloads referenced types that no longer exist (and would clash after type
 * erasure), so only the {@code ObjectId}/{@code ChangeManager} forms remain.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 * @param changeManager  the change manager, not null
 */
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the info document for the given identifier.
 * <p>
 * The former duplicate {@code remove(UniqueIdentifier)} overload used a
 * removed type and a nonexistent {@code masterChanged} API; only the
 * {@link UniqueId} form firing {@code entityChanged} is kept.
 * Stored data points are left untouched.
 */
@Override
public void remove(final UniqueId uniqueId) {
  validateId(uniqueId);
  if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes all points within the inclusive date range from the stored series.
 * Null bounds default to a very wide range. A missing series is a no-op.
 *
 * @param objectKey the series object identifier, not null
 * @param fromDateInclusive the start date to remove from, null for open-ended
 * @param toDateInclusive the end date to remove to, null for open-ended
 * @return the latest-version unique identifier of the series, not null
 * @throws IllegalArgumentException if a concurrent update intervened
 */
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// Nothing stored: treat as a no-op rather than an error.
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
// Iterator.remove is the only safe way to delete while iterating.
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// Atomic compare-and-set: fails if another thread changed the points concurrently.
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager used to broadcast entity change events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the identifier's value part is numeric, as required by this master.
 *
 * @param objectId the identifier to validate, not null
 * @return the identifier value parsed as a long
 * @throws IllegalArgumentException if the value is not a valid long
 */
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
/**
 * Validates a document before storing: the unique identifier (if set) must be
 * numeric, and the info must carry a non-empty identifier bundle plus non-blank
 * data source, provider, field and observation time.
 *
 * @param document the document to validate, not null
 * @throws IllegalArgumentException if any required field is missing or blank
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolved: take ChangeManager (current type, see imports) from YOURS and
// ObjectIdSupplier (current supplier class, see imports) from MINE.
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolved: take Supplier<ObjectId> (field type, see _objectIdSupplier) from
// MINE and BasicChangeManager (imported change-manager impl) from YOURS.
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolved: Supplier<ObjectId> (matches the _objectIdSupplier field) and
// ChangeManager (matches the _changeManager field and imports).
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Queries the distinct meta-data values present across all stored series info.
 * Only the categories flagged in the request are computed and populated.
 *
 * @param request the meta-data request, not null
 * @return the populated meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  final boolean wantFields = request.isDataFields();
  final boolean wantSources = request.isDataSources();
  final boolean wantProviders = request.isDataProviders();
  final boolean wantTimes = request.isObservationTimes();
  final Set<String> fields = new HashSet<String>();
  final Set<String> sources = new HashSet<String>();
  final Set<String> providers = new HashSet<String>();
  final Set<String> times = new HashSet<String>();
  // Single pass over the store; sets deduplicate each category.
  for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
    if (wantFields) {
      fields.add(doc.getInfo().getDataField());
    }
    if (wantSources) {
      sources.add(doc.getInfo().getDataSource());
    }
    if (wantProviders) {
      providers.add(doc.getInfo().getDataProvider());
    }
    if (wantTimes) {
      times.add(doc.getInfo().getObservationTime());
    }
  }
  if (wantFields) {
    result.getDataFields().addAll(fields);
  }
  if (wantSources) {
    result.getDataSources().addAll(sources);
  }
  if (wantProviders) {
    result.getDataProviders().addAll(providers);
  }
  if (wantTimes) {
    result.getObservationTimes().addAll(times);
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for series info documents matching the request, applying paging.
 *
 * @param request the search request, not null
 * @return the paged search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
// Linear scan; matching is delegated to the request object.
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets the stored info document for a unique identifier.
 * This in-memory master always serves the latest state.
 *
 * @param uniqueId the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
  final VersionCorrection latest = VersionCorrection.LATEST;
  return get(uniqueId, latest);
}
//-------------------------------------------------------------------------
/**
 * Gets the stored info document for an object identifier.
 * NOTE(review): versionCorrection is validated but otherwise unused — this
 * in-memory master keeps only the latest version.
 *
 * @param objectKey the object identifier, not null
 * @param versionCorrection the version-correction, not null (ignored)
 * @return the document, not null
 * @throws DataNotFoundException if no document is stored for the identifier
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Adds a new series info document, assigning a fresh object identifier.
 * The input document is cloned so the caller's instance is not mutated.
 *
 * @param document the document to add, not null
 * @return the stored (cloned) document with identifier and instants set, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
// Clone to keep the stored copy independent of the caller's document.
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
/**
 * Updates the stored info document, replacing the previous version.
 *
 * @param document the document to update, must have a unique identifier, not null
 * @return the updated document with fresh version/correction instants, not null
 * @throws DataNotFoundException if no document is stored for the identifier
 * @throws IllegalArgumentException if a concurrent update intervened
 */
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// Atomic compare-and-set: fails if another thread replaced the document concurrently.
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the series info and signals the removal to listeners.
 * <p>
 * Merge resolved: the conflicted merge left BOTH overloads as sibling methods —
 * one taking the stale {@code UniqueIdentifier} type, the other calling the stale
 * {@code masterChanged(MasterChangedType...)} API. Neither name is imported.
 * Collapsed to a single method using the current {@code UniqueId} type and
 * {@code ChangeManager.entityChanged(ChangeType...)}, consistent with add/update.
 *
 * @param uniqueId the unique identifier to remove, not null
 * @throws DataNotFoundException if no document is stored for the identifier
 */
@Override
public void remove(final UniqueId uniqueId) {
  validateId(uniqueId);
  if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects the stored info document. In this in-memory master a correction
 * is identical to an update, so this simply delegates.
 *
 * @param document the document to correct, not null
 * @return the corrected document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a series info document. Since this master keeps only the
 * latest version, the result contains at most that one document.
 *
 * @param request the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
// NOTE(review): get() throws rather than returning null, so this check looks
// defensive only — confirm whether a missing document should yield an empty result.
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets the time-series points for a unique identifier over an inclusive date range.
 * Delegates to the object-id overload at the latest version-correction.
 *
 * @param uniqueId the unique identifier of the series, not null
 * @param fromDateInclusive the start date, null for open-ended
 * @param toDateInclusive the end date, null for open-ended
 * @return the series restricted to the range, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  final ObjectId objectId = uniqueId.getObjectId();
  return getTimeSeries(objectId, VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series points for an object identifier over an inclusive date range.
 * <p>
 * Null date bounds default to a very wide range. If no points are stored but the
 * series info exists, an empty series is returned; if neither exists this throws.
 * NOTE(review): the versionCorrection parameter is not used by this in-memory
 * implementation — the latest state is always served.
 *
 * @param objectKey the series object identifier, not null
 * @param versionCorrection ignored by this implementation
 * @param fromDateInclusive the start date, null for open-ended
 * @param toDateInclusive the end date, null for open-ended
 * @return the series restricted to the range, not null
 * @throws DataNotFoundException if the series info is not stored
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
// Substitute wide sentinel dates for null bounds (JSR-310 has no MIN/MAX here yet).
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// No points stored: only valid if the series info itself exists.
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
// Earliest/latest reflect the full stored series, not just the requested window.
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes all points within the inclusive date range from the stored series.
 * Null bounds default to a very wide range. A missing series is a no-op.
 *
 * @param objectKey the series object identifier, not null
 * @param fromDateInclusive the start date to remove from, null for open-ended
 * @param toDateInclusive the end date to remove to, null for open-ended
 * @return the latest-version unique identifier of the series, not null
 * @throws IllegalArgumentException if a concurrent update intervened
 */
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// Nothing stored: treat as a no-op rather than an error.
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
// Iterator.remove is the only safe way to delete while iterating.
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// Atomic compare-and-set: fails if another thread changed the points concurrently.
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager used to broadcast entity change events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the identifier's value part is numeric, as required by this master.
 *
 * @param objectId the identifier to validate, not null
 * @return the identifier value parsed as a long
 * @throws IllegalArgumentException if the value is not a valid long
 */
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
/**
 * Validates a document before storing: the unique identifier (if set) must be
 * numeric, and the info must carry a non-empty identifier bundle plus non-blank
 * data source, provider, field and observation time.
 *
 * @param document the document to validate, not null
 * @throws IllegalArgumentException if any required field is missing or blank
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
* The default scheme used for each {@link UniqueIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
* A cache of time-series info by identifier.
*/
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
* A cache of time-series points by identifier.
*/
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolved: take ChangeManager (current type, see imports) from YOURS and
// ObjectIdSupplier (current supplier class, see imports) from MINE.
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolved: take Supplier<ObjectId> (field type, see _objectIdSupplier) from
// MINE and BasicChangeManager (imported change-manager impl) from YOURS.
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolved: Supplier<ObjectId> (matches the _objectIdSupplier field) and
// ChangeManager (matches the _changeManager field and imports).
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Adds a time-series info document, assigning a fresh object identifier.
 * The stored document is a clone of the input with version/correction
 * instants set to now; an ADDED change event is fired.
 */
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
// Fresh identity from the configured supplier; version string is empty.
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
// Clone so later mutations by the caller do not affect the stored copy.
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
/**
 * Updates an existing document, replacing the stored copy with the caller's
 * document (not a clone) and firing an UPDATED change event.
 */
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
// Mark the caller's document as the new latest version/correction.
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// Optimistic concurrency: fail if another writer changed the entry meanwhile.
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes a time-series info document by unique identifier, firing a
 * REMOVED change event.
 * <p>
 * Merge-conflict residue resolved: the file previously contained two
 * overloads -- {@code remove(UniqueIdentifier)} and {@code remove(UniqueId)}
 * calling {@code masterChanged(MasterChangedType...)} -- neither of which
 * resolves against this file's imports ({@code UniqueId}, {@code ChangeType},
 * {@code ChangeManager}). They are merged into the single correct method.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document exists for the identifier
 */
@Override
public void remove(final UniqueId uniqueId) {
  validateId(uniqueId);
  if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a document. This in-memory master keeps no version history,
 * so a correction is implemented as a plain update.
 */
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Returns the version history of a document. Since only the latest version
 * is stored, the result holds at most that single document.
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  final HistoricalTimeSeriesInfoDocument latest = get(request.getObjectId(), VersionCorrection.LATEST);
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  if (latest != null) {
    result.getDocuments().add(latest);
  }
  result.setPaging(Paging.of(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets the data points by unique identifier, delegating to the
 * object-identifier variant at the latest version-correction.
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Gets the stored data points for a series, clipped to the requested
 * inclusive date range.
 * <p>
 * NOTE(review): versionCorrection is accepted but not used -- only the
 * latest data is stored in memory; confirm this matches the interface contract.
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
// Null bounds default to a very wide range standing in for min/max dates.
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// No points stored yet: the info document must still exist, else 404.
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
// Earliest/latest reflect the full stored series, not the clipped window.
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Removes data points whose dates fall within the inclusive range.
 * Returns the latest-version identifier; a missing series is a no-op.
 */
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
// Null bounds default to a very wide range standing in for min/max dates.
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
// Nothing stored: treat as a no-op.
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
// Delete via the iterator: date in [fromDateInclusive, toDateInclusive].
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
// Optimistic concurrency: fail if another writer changed the series.
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager used to broadcast document change events.
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
/**
 * Validates that the identifier's value is numeric (the form issued by the
 * configured supplier), returning it as a long.
 */
private long validateId(ObjectIdentifiable objectId) {
  ArgumentChecker.notNull(objectId, "objectId");
  final String value = objectId.getObjectId().getValue();
  try {
    return Long.parseLong(value);
  } catch (NumberFormatException ex) {
    throw new IllegalArgumentException("Invalid objectId " + objectId);
  }
}
/**
 * Validates the mandatory fields of an info document prior to storage:
 * a non-empty identifier bundle and non-blank data source/provider/field
 * and observation time.
 */
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
// A unique identifier is optional (absent on add), but must be valid if set.
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
// Unstructured: residue separator between concatenated copies of this file
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {

  /**
   * The default scheme used for each {@link UniqueId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemHts";

  /**
   * A cache of time-series info by identifier.
   */
  private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
  /**
   * A cache of time-series points by identifier.
   */
  private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme and a {@link BasicChangeManager}.
   */
  public InMemoryHistoricalTimeSeriesMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   * Merge-conflict resolved: uses {@link ChangeManager} and
   * {@link ObjectIdSupplier}, matching this file's imports.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   * Merge-conflict resolved: uses {@code Supplier<ObjectId>} and
   * {@link BasicChangeManager}, matching this file's imports.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
    // Each flag requests the distinct set of one attribute across all documents.
    if (request.isDataFields()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataField());
      }
      result.getDataFields().addAll(types);
    }
    if (request.isDataSources()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataSource());
      }
      result.getDataSources().addAll(types);
    }
    if (request.isDataProviders()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getDataProvider());
      }
      result.getDataProviders().addAll(types);
    }
    if (request.isObservationTimes()) {
      Set<String> types = new HashSet<String>();
      for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
        types.add(doc.getInfo().getObservationTime());
      }
      result.getObservationTimes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
    for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
    validateId(objectKey);
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ObjectId objectId = objectKey.getObjectId();
    final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
    if (document == null) {
      throw new DataNotFoundException("Historical time-series not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    // Clone so later mutations by the caller do not affect the stored copy.
    final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
    final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
    info.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    cloned.setVersionFromInstant(now);
    cloned.setCorrectionFromInstant(now);
    cloned.getInfo().setTimeSeriesObjectId(objectId);
    _storeInfo.put(objectId, cloned);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return cloned;
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
    validateDocument(document);
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Optimistic concurrency: fail if another writer changed the entry meanwhile.
    if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(final UniqueId uniqueId) {
    validateId(uniqueId);
    if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
    // No version history in memory; a correction is a plain update.
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
    // Only the latest version is stored, so history has at most one entry.
    final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
  }

  //-------------------------------------------------------------------------
  @Override
  public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    validateId(objectKey);
    // Null bounds default to a very wide range standing in for min/max dates.
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectId objectId = objectKey.getObjectId();
    final Instant now = Instant.now();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      if (_storeInfo.get(objectId) == null) {
        throw new DataNotFoundException("Historical time-series not found: " + objectId);
      }
      existingSeries = new ArrayLocalDateDoubleTimeSeries();
    }
    final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
    final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
    result.setUniqueId(objectId.atLatestVersion());
    result.setTimeSeries(subSeries);
    // Earliest/latest reflect the full stored series, not the clipped window.
    result.setEarliest(existingSeries.getEarliestTime());
    result.setLatest(existingSeries.getLatestTime());
    result.setVersionInstant(now);
    result.setCorrectionInstant(now);
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectId objectId = objectKey.getObjectId();
    final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      // Only strictly-later points may be appended.
      if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
        throw new IllegalArgumentException("Unable to add time-series as dates overlap");
      }
      LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
      if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    // Merge-conflict resolved: UniqueId + ChangeManager.entityChanged, per imports.
    final UniqueId uniqueId = objectId.atLatestVersion();
    changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  @Override
  public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    ArgumentChecker.notNull(series, "series");
    final ObjectId objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries != null) {
      // SECOND_OPERATOR: where dates collide, the correction value wins.
      LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
      if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    } else {
      if (_storePoints.putIfAbsent(objectId, series) != null) {
        throw new IllegalArgumentException("Concurrent modification");
      }
    }
    final Instant now = Instant.now();
    // Merge-conflict resolved: UniqueId + ChangeManager.entityChanged, per imports.
    final UniqueId uniqueId = objectId.atLatestVersion();
    changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
    return uniqueId;
  }

  //-------------------------------------------------------------------------
  @Override
  public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    ArgumentChecker.notNull(objectKey, "objectKey");
    fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
    toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    final ObjectId objectId = objectKey.getObjectId();
    LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
    if (existingSeries == null) {
      return objectId.atLatestVersion();
    }
    MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
    // Delete via the iterator: date in [fromDateInclusive, toDateInclusive].
    for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
      LocalDate date = it.next();
      if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
        it.remove();
      }
    }
    if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    return objectId.atLatestVersion();
  }

  //-------------------------------------------------------------------------
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Validates that the identifier's value is numeric, returning it as a long.
   */
  private long validateId(ObjectIdentifiable objectId) {
    ArgumentChecker.notNull(objectId, "objectId");
    try {
      return Long.parseLong(objectId.getObjectId().getValue());
    } catch (NumberFormatException ex) {
      throw new IllegalArgumentException("Invalid objectId " + objectId);
    }
  }

  /**
   * Validates the mandatory fields of an info document prior to storage.
   */
  private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
    ArgumentChecker.notNull(document, "document");
    if (document.getUniqueId() != null) {
      validateId(document.getUniqueId());
    }
    ArgumentChecker.notNull(document.getInfo(), "document.series");
    ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
    ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
    ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.historicaltimeseries.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import org.apache.commons.lang.StringUtils;
import org.joda.beans.JodaBeanUtils;
import com.google.common.base.Objects;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
import com.opengamma.util.timeseries.DoubleTimeSeriesOperators;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.MutableLocalDateDoubleTimeSeries;
/**
* An in-memory implementation of a historical time-series master.
*/
public class InMemoryHistoricalTimeSeriesMaster implements HistoricalTimeSeriesMaster {
/**
 * The default scheme used for each {@link UniqueId}.
 */
public static final String DEFAULT_OID_SCHEME = "MemHts";
/**
 * A cache of time-series info by identifier.
 */
private final ConcurrentMap<ObjectId, HistoricalTimeSeriesInfoDocument> _storeInfo = new ConcurrentHashMap<ObjectId, HistoricalTimeSeriesInfoDocument>();
/**
 * A cache of time-series points by identifier.
 */
private final ConcurrentMap<ObjectId, LocalDateDoubleTimeSeries> _storePoints = new ConcurrentHashMap<ObjectId, LocalDateDoubleTimeSeries>();
/**
 * The supplier of identifiers.
 */
private final Supplier<ObjectId> _objectIdSupplier;
/**
 * The change manager used to broadcast document change events.
 */
private final ChangeManager _changeManager;
/**
 * Creates an instance using the default identifier scheme.
 */
public InMemoryHistoricalTimeSeriesMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge-conflict resolved: ChangeManager parameter with ObjectIdSupplier,
// matching this file's imports (MasterChangeManager/ObjectIdentifierSupplier
// do not resolve here).
public InMemoryHistoricalTimeSeriesMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge-conflict resolved: Supplier<ObjectId> parameter with BasicChangeManager,
// matching this file's imports (ObjectIdentifier/BasicMasterChangeManager
// do not resolve here).
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
=======
public InMemoryHistoricalTimeSeriesMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataField());
}
result.getDataFields().addAll(types);
}
if (request.isDataSources()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataSource());
}
result.getDataSources().addAll(types);
}
if (request.isDataProviders()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getDataProvider());
}
result.getDataProviders().addAll(types);
}
if (request.isObservationTimes()) {
Set<String> types = new HashSet<String>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
types.add(doc.getInfo().getObservationTime());
}
result.getObservationTimes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<HistoricalTimeSeriesInfoDocument> list = new ArrayList<HistoricalTimeSeriesInfoDocument>();
for (HistoricalTimeSeriesInfoDocument doc : _storeInfo.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectKey, VersionCorrection versionCorrection) {
validateId(objectKey);
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ObjectId objectId = objectKey.getObjectId();
final HistoricalTimeSeriesInfoDocument document = _storeInfo.get(objectId);
if (document == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final HistoricalTimeSeriesInfoDocument cloned = JodaBeanUtils.clone(document);
final ManageableHistoricalTimeSeriesInfo info = cloned.getInfo();
info.setUniqueId(uniqueId);
final Instant now = Instant.now();
cloned.setVersionFromInstant(now);
cloned.setCorrectionFromInstant(now);
cloned.getInfo().setTimeSeriesObjectId(objectId);
_storeInfo.put(objectId, cloned);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return cloned;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument update(final HistoricalTimeSeriesInfoDocument document) {
validateDocument(document);
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final HistoricalTimeSeriesInfoDocument storedDocument = _storeInfo.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_storeInfo.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
validateId(uniqueId);
if (_storeInfo.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Historical time-series not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument correct(final HistoricalTimeSeriesInfoDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(HistoricalTimeSeriesInfoHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
final HistoricalTimeSeriesInfoDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getTimeSeries(uniqueId.getObjectId(), VersionCorrection.LATEST, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectKey, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
validateId(objectKey);
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
final Instant now = Instant.now();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
if (_storeInfo.get(objectId) == null) {
throw new DataNotFoundException("Historical time-series not found: " + objectId);
}
existingSeries = new ArrayLocalDateDoubleTimeSeries();
}
final LocalDateDoubleTimeSeries subSeries = existingSeries.subSeries(fromDateInclusive, toDateInclusive).toLocalDateDoubleTimeSeries();
final ManageableHistoricalTimeSeries result = new ManageableHistoricalTimeSeries();
result.setUniqueId(objectId.atLatestVersion());
result.setTimeSeries(subSeries);
result.setEarliest(existingSeries.getEarliestTime());
result.setLatest(existingSeries.getLatestTime());
result.setVersionInstant(now);
result.setCorrectionInstant(now);
return result;
}
//-------------------------------------------------------------------------
@Override
public UniqueId updateTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
final LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
if (series.getEarliestTime().isBefore(existingSeries.getLatestTime())) {
throw new IllegalArgumentException("Unable to add time-series as dates overlap");
}
LocalDateDoubleTimeSeries newSeries = existingSeries.noIntersectionOperation(series).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.UPDATED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId correctTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectKey, "objectKey");
ArgumentChecker.notNull(series, "series");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries != null) {
LocalDateDoubleTimeSeries newSeries = existingSeries.unionOperate(series, DoubleTimeSeriesOperators.SECOND_OPERATOR).toLocalDateDoubleTimeSeries();
if (_storePoints.replace(objectId, existingSeries, newSeries) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
} else {
if (_storePoints.putIfAbsent(objectId, series) != null) {
throw new IllegalArgumentException("Concurrent modification");
}
}
final Instant now = Instant.now();
<<<<<<< MINE
final UniqueIdentifier uniqueId = objectId.atLatestVersion();
changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, uniqueId, now);
=======
final UniqueId uniqueId = objectId.atLatestVersion();
changeManager().masterChanged(MasterChangedType.CORRECTED, uniqueId, uniqueId, now);
>>>>>>> YOURS
return uniqueId;
}
//-------------------------------------------------------------------------
@Override
public UniqueId removeTimeSeriesDataPoints(ObjectIdentifiable objectKey, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectKey, "objectKey");
fromDateInclusive = Objects.firstNonNull(fromDateInclusive, LocalDate.of(1000, 1, 1)); // TODO: JSR-310 min/max date
toDateInclusive = Objects.firstNonNull(toDateInclusive, LocalDate.of(9999, 1, 1));
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
final ObjectId objectId = objectKey.getObjectId();
LocalDateDoubleTimeSeries existingSeries = _storePoints.get(objectId);
if (existingSeries == null) {
return objectId.atLatestVersion();
}
MutableLocalDateDoubleTimeSeries mutableTS = existingSeries.toMutableLocalDateDoubleTimeSeries();
for (Iterator<LocalDate> it = mutableTS.timeIterator(); it.hasNext(); ) {
LocalDate date = it.next();
if (date.isBefore(fromDateInclusive) == false && date.isAfter(toDateInclusive) == false) {
it.remove();
}
}
if (_storePoints.replace(objectId, existingSeries, mutableTS.toLocalDateDoubleTimeSeries()) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return objectId.atLatestVersion();
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
private long validateId(ObjectIdentifiable objectId) {
ArgumentChecker.notNull(objectId, "objectId");
try {
return Long.parseLong(objectId.getObjectId().getValue());
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid objectId " + objectId);
}
}
private void validateDocument(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
if (document.getUniqueId() != null) {
validateId(document.getUniqueId());
}
ArgumentChecker.notNull(document.getInfo(), "document.series");
ArgumentChecker.notNull(document.getInfo().getExternalIdBundle(), "document.series.identifiers");
ArgumentChecker.isTrue(document.getInfo().getExternalIdBundle().toBundle().getExternalIds().size() > 0, "document.series.identifiers must not be empty");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataSource()), "document.series.dataSource must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataProvider()), "document.series.dataProvider must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getDataField()), "document.series.dataField must not be blank");
ArgumentChecker.isTrue(StringUtils.isNotBlank(document.getInfo().getObservationTime()), "document.series.observationTime must not be blank");
}
}
Diff Result
No diff
Case 56 - java_ogplatform.rev_412e2_f4b6f..InMemorySecurityMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * The stored security documents, keyed by object identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, SecurityDocument> _store = new ConcurrentHashMap<ObjectIdentifier, SecurityDocument>();
  /**
   * The source of new object identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The manager used to broadcast change events.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // collect the distinct security types across all stored documents
      Set<String> securityTypes = new HashSet<String>();
      for (SecurityDocument storedDoc : _store.values()) {
        securityTypes.add(storedDoc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(securityTypes);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    // linear scan; the request decides what matches and how to page it
    final List<SecurityDocument> matched = new ArrayList<SecurityDocument>();
    for (SecurityDocument candidate : _store.values()) {
      if (request.matches(candidate)) {
        matched.add(candidate);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), matched));
    result.getDocuments().addAll(request.getPagingRequest().select(matched));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument get(final UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document != null) {
      return document;
    }
    throw new DataNotFoundException("Security not found: " + objectId);
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final Instant now = Instant.now();
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final SecurityDocument stored = new SecurityDocument(security);
    stored.setVersionFromInstant(now);
    stored.setCorrectionFromInstant(now);
    _store.put(objectId, stored);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return stored;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // compare-and-set replace detects a writer racing between get() and here
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(final UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final SecurityDocument removed = _store.remove(uniqueId.getObjectId());
    if (removed == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    // unversioned master: correcting is the same operation as updating
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    // no versioning, so the history is at most the single latest document
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument latest = get(request.getObjectId(), VersionCorrection.LATEST);
    if (latest != null) {
      result.getDocuments().add(latest);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * A cache of securities by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, SecurityDocument> _store = new ConcurrentHashMap<ObjectIdentifier, SecurityDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // gather the distinct security types across all stored documents
      Set<String> types = new HashSet<String>();
      for (SecurityDocument doc : _store.values()) {
        types.add(doc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    // linear scan: the request decides which documents match; paging is applied after
    final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
    for (SecurityDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument get(final UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is only null-checked: this master is not versioned
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Security not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    // a fresh document wrapper is stored; the security object itself is shared with the caller
    final SecurityDocument doc = new SecurityDocument(security);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // compare-and-set replace detects a concurrent writer between get() and here
    if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(final UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    // unversioned master: correcting is the same operation as updating
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    // no versioning, so the history contains at most the single latest document
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code SecurityMaster}.
 * <p>
 * This security master does not support versioning of securities:
 * the latest state is always stored and returned.
 */
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * A cache of securities by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, SecurityDocument> _store = new ConcurrentHashMap<ObjectIdentifier, SecurityDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme for object identifiers.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final ChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about the master, currently the distinct security types.
   *
   * @param request  the meta-data request, not null
   * @return the meta-data result, not null
   */
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // de-duplicate the security types across the whole store
      Set<String> types = new HashSet<String>();
      for (SecurityDocument doc : _store.values()) {
        types.add(doc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for securities matching the request, applying paging.
   *
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
    for (SecurityDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    // paging is computed over the full match list, then the page is selected
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by unique identifier, always returning the latest state.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by object identifier.
   * The version-correction is validated but otherwise ignored as this
   * master is unversioned.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Security not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a security, allocating a new object identifier.
   *
   * @param document  the document to add, not null, must contain a security
   * @return the stored document with identifier and instants set, not null
   */
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final SecurityDocument doc = new SecurityDocument(security);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a security, replacing the previously stored document.
   *
   * @param document  the document to update, not null, must contain a security and unique identifier
   * @return the updated document, not null
   * @throws DataNotFoundException if the security is not found
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-replace detects a concurrent change of the stored document
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes a security by unique identifier.
   *
   * @param uniqueId  the unique identifier to remove, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public void remove(final UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a security. As this master is unversioned, a correction is
   * identical to an update.
   *
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a security. As this master is unversioned, the
   * result contains at most the single latest document.
   *
   * @param request  the history request, not null, must contain an object identifier
   * @return the history result, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast changes made through this master.
   *
   * @return the change manager, not null
   */
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code SecurityMaster}.
 * <p>
 * This security master does not support versioning of securities:
 * the latest state is always stored and returned.
 */
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * A cache of securities by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, SecurityDocument> _store = new ConcurrentHashMap<ObjectIdentifier, SecurityDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme for object identifiers.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final ChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about the master, currently the distinct security types.
   *
   * @param request  the meta-data request, not null
   * @return the meta-data result, not null
   */
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // de-duplicate the security types across the whole store
      Set<String> types = new HashSet<String>();
      for (SecurityDocument doc : _store.values()) {
        types.add(doc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for securities matching the request, applying paging.
   *
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
    for (SecurityDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    // paging is computed over the full match list, then the page is selected
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by unique identifier, always returning the latest state.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final UniqueIdentifier uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by object identifier.
   * The version-correction is validated but otherwise ignored as this
   * master is unversioned.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Security not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a security, allocating a new object identifier.
   *
   * @param document  the document to add, not null, must contain a security
   * @return the stored document with identifier and instants set, not null
   */
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final SecurityDocument doc = new SecurityDocument(security);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a security, replacing the previously stored document.
   *
   * @param document  the document to update, not null, must contain a security and unique identifier
   * @return the updated document, not null
   * @throws DataNotFoundException if the security is not found
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-replace detects a concurrent change of the stored document
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes a security by unique identifier.
   *
   * @param uniqueId  the unique identifier to remove, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public void remove(final UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a security. As this master is unversioned, a correction is
   * identical to an update.
   *
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a security. As this master is unversioned, the
   * result contains at most the single latest document.
   *
   * @param request  the history request, not null, must contain an object identifier
   * @return the history result, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast changes made through this master.
   *
   * @return the change manager, not null
   */
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code SecurityMaster}.
 * <p>
 * This security master does not support versioning of securities:
 * the latest state is always stored and returned.
 */
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * A cache of securities by identifier.
   */
  private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme for object identifiers.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about the master, currently the distinct security types.
   *
   * @param request  the meta-data request, not null
   * @return the meta-data result, not null
   */
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // de-duplicate the security types across the whole store
      Set<String> types = new HashSet<String>();
      for (SecurityDocument doc : _store.values()) {
        types.add(doc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for securities matching the request, applying paging.
   *
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
    for (SecurityDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    // paging is computed over the full match list, then the page is selected
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by unique identifier, always returning the latest state.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by object identifier.
   * The version-correction is validated but otherwise ignored as this
   * master is unversioned.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Security not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a security, allocating a new object identifier.
   *
   * @param document  the document to add, not null, must contain a security
   * @return the stored document with identifier and instants set, not null
   */
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final SecurityDocument doc = new SecurityDocument(security);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a security, replacing the previously stored document.
   *
   * @param document  the document to update, not null, must contain a security and unique identifier
   * @return the updated document, not null
   * @throws DataNotFoundException if the security is not found
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-replace detects a concurrent change of the stored document
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes a security by unique identifier.
   *
   * @param uniqueId  the unique identifier to remove, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public void remove(final UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a security. As this master is unversioned, a correction is
   * identical to an update.
   *
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a security. As this master is unversioned, the
   * result contains at most the single latest document.
   *
   * @param request  the history request, not null, must contain an object identifier
   * @return the history result, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast changes made through this master.
   *
   * @return the change manager, not null
   */
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code SecurityMaster}.
 * <p>
 * This security master does not support versioning of securities:
 * the latest state is always stored and returned.
 */
public class InMemorySecurityMaster implements SecurityMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSec";

  /**
   * A cache of securities by identifier.
   */
  private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default scheme for object identifiers.
   */
  public InMemorySecurityMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets meta-data about the master, currently the distinct security types.
   *
   * @param request  the meta-data request, not null
   * @return the meta-data result, not null
   */
  @Override
  public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    SecurityMetaDataResult result = new SecurityMetaDataResult();
    if (request.isSecurityTypes()) {
      // de-duplicate the security types across the whole store
      Set<String> types = new HashSet<String>();
      for (SecurityDocument doc : _store.values()) {
        types.add(doc.getSecurity().getSecurityType());
      }
      result.getSecurityTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for securities matching the request, applying paging.
   *
   * @param request  the search request, not null
   * @return the paged search result, not null
   */
  @Override
  public SecuritySearchResult search(final SecuritySearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
    for (SecurityDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    SecuritySearchResult result = new SecuritySearchResult();
    // paging is computed over the full match list, then the page is selected
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by unique identifier, always returning the latest state.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final UniqueId uniqueId) {
    return get(uniqueId, VersionCorrection.LATEST);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a security by object identifier.
   * The version-correction is validated but otherwise ignored as this
   * master is unversioned.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction, not null
   * @return the security document, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final SecurityDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Security not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a security, allocating a new object identifier.
   *
   * @param document  the document to add, not null, must contain a security
   * @return the stored document with identifier and instants set, not null
   */
  @Override
  public SecurityDocument add(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final ManageableSecurity security = document.getSecurity();
    security.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final SecurityDocument doc = new SecurityDocument(security);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates a security, replacing the previously stored document.
   *
   * @param document  the document to update, not null, must contain a security and unique identifier
   * @return the updated document, not null
   * @throws DataNotFoundException if the security is not found
   * @throws IllegalArgumentException if the document was concurrently modified
   */
  @Override
  public SecurityDocument update(final SecurityDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSecurity(), "document.security");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-replace detects a concurrent change of the stored document
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes a security by unique identifier.
   *
   * @param uniqueId  the unique identifier to remove, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public void remove(final UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Security not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a security. As this master is unversioned, a correction is
   * identical to an update.
   *
   * @param document  the document to correct, not null
   * @return the corrected document, not null
   */
  @Override
  public SecurityDocument correct(final SecurityDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of a security. As this master is unversioned, the
   * result contains at most the single latest document.
   *
   * @param request  the history request, not null, must contain an object identifier
   * @return the history result, not null
   * @throws DataNotFoundException if the security is not found
   */
  @Override
  public SecurityHistoryResult history(final SecurityHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final SecurityHistoryResult result = new SecurityHistoryResult();
    final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
    if (doc != null) {
      result.getDocuments().add(doc);
    }
    result.setPaging(Paging.of(result.getDocuments()));
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast changes made through this master.
   *
   * @return the change manager, not null
   */
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }

}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
/*
 * Merge-conflict resolution: the MINE side declared Supplier<ObjectIdentifier> and
 * the YOURS side declared MasterChangeManager, but the fields assigned here are
 * declared above as Supplier<ObjectId> _objectIdSupplier and ChangeManager
 * _changeManager, and the other constructors delegate to this one with those
 * types. The merged signature below is the only one that type-checks.
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Gets metadata about the securities held in this master.
 * When the request asks for security types, the distinct set of stored
 * security types is collected into the result.
 *
 * @param request  the metadata request, not null
 * @return the populated metadata result, not null
 */
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  SecurityMetaDataResult result = new SecurityMetaDataResult();
  if (request.isSecurityTypes()) {
    // de-duplicate via a set before copying into the result
    final Set<String> distinctTypes = new HashSet<String>();
    for (final SecurityDocument stored : _store.values()) {
      distinctTypes.add(stored.getSecurity().getSecurityType());
    }
    result.getSecurityTypes().addAll(distinctTypes);
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches the stored documents for those matching the request, applying
 * the request's paging to the matched list.
 *
 * @param request  the search request, not null
 * @return the paged search result, not null
 */
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  final List<SecurityDocument> matched = new ArrayList<SecurityDocument>();
  for (final SecurityDocument candidate : _store.values()) {
    if (request.matches(candidate)) {
      matched.add(candidate);
    }
  }
  SecuritySearchResult result = new SecuritySearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matched));
  result.getDocuments().addAll(request.getPagingRequest().select(matched));
  return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets a security document by unique identifier, delegating to the
 * version-correction overload with LATEST.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the matched document, not null
 * @throws DataNotFoundException if the security is not found
 */
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
/**
 * Gets a security document by object identifier.
 * The version-correction is validated but otherwise unused, as this
 * in-memory master holds only the latest version.
 *
 * @param objectId  the object identifiable, not null
 * @param versionCorrection  the version-correction, not null
 * @return the matched document, not null
 * @throws DataNotFoundException if the security is not found
 */
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  final SecurityDocument stored = _store.get(objectId.getObjectId());
  if (stored != null) {
    return stored;
  }
  throw new DataNotFoundException("Security not found: " + objectId);
}
//-------------------------------------------------------------------------
/**
 * Adds a security to the master, allocating a fresh object identifier.
 *
 * @param document  the document to add, with a non-null security, not null
 * @return the stored document carrying the new unique identifier, not null
 */
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
// allocate a new object id; the version string is fixed to "" (no versioning)
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
// the stored document wraps the caller's security instance (not a copy),
// so external mutation after add is visible in the store
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
// ADDED events carry null as the "before" identifier
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
/**
 * Updates the document for an existing security.
 * The replacement is performed with a compare-and-swap against the
 * previously read document so a concurrent update is detected rather
 * than silently overwritten.
 *
 * @param document  the document to update, with unique id and security, not null
 * @return the updated document, not null
 * @throws DataNotFoundException if the security is not found
 * @throws IllegalArgumentException if the stored document changed concurrently
 */
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
// NOTE(review): the caller's document is mutated before the CAS below;
// on a failed replace the document is left with updated instants.
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// ConcurrentMap.replace(key, expected, new) returns false if another
// thread swapped the value since the read above
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the security from the in-memory store and fires a REMOVED event.
 *
 * @param uniqueId  the unique identifier of the security to remove, not null
 * @throws DataNotFoundException if the security is not found
 */
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final SecurityDocument removed = _store.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  // the "after" identifier is null as the entity no longer exists
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a document. This master has no versioning, so a correction
 * is implemented as a plain update.
 *
 * @param document  the document to correct, not null
 * @return the corrected document
 */
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Returns the "history" of a security. As this master does not version
 * securities, the result contains at most the single latest document.
 *
 * @param request  the history request with a non-null object id, not null
 * @return the history result holding zero or one document
 */
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
// NOTE(review): get(...) throws DataNotFoundException when missing (see above),
// so this null check looks defensive/dead - confirm before relying on it.
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
/**
 * Gets the change manager that broadcasts entity change events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
/*
 * Merge-conflict resolution: the MINE side declared Supplier<ObjectIdentifier> and
 * the YOURS side declared MasterChangeManager, but the fields assigned here are
 * declared above as Supplier<ObjectId> _objectIdSupplier and ChangeManager
 * _changeManager, and the other constructors delegate to this one with those
 * types. The merged signature below is the only one that type-checks.
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 *
 * @param changeManager  the change manager, not null
 */
public InMemorySecurityMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * <p>
 * Merge-fix: the previous "keep both" resolution retained constructor pairs whose
 * parameters differ only in generic type ({@code Supplier<ObjectIdentifier>} vs
 * {@code Supplier<ObjectId>}) and so clash after erasure, and referenced types that
 * are neither imported nor part of the renamed API ({@code ObjectIdentifierSupplier},
 * {@code MasterChangeManager}, {@code BasicMasterChangeManager}). Only the variants
 * matching the declared field types are kept.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 * @param changeManager  the change manager, not null
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the security from the in-memory store and fires a REMOVED event.
 * <p>
 * Merge-fix: the "keep both" resolution retained two remove overloads; the
 * {@code UniqueIdentifier} variant referenced an unimported legacy type, and the
 * other called {@code masterChanged}/{@code MasterChangedType}, which do not exist
 * on {@link ChangeManager}. The single UniqueId/entityChanged form used by the
 * rest of this class is kept.
 *
 * @param uniqueId  the unique identifier of the security to remove, not null
 * @throws DataNotFoundException if the security is not found
 */
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
 * Creates an instance specifying the change manager.
 *
 * @param changeManager  the change manager, not null
 */
public InMemorySecurityMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
 * Creates an instance specifying the supplier of object identifiers.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
 * Creates an instance specifying the supplier of object identifiers and change manager.
 * <p>
 * Merge-fix: the previous "keep both" resolution retained constructor pairs whose
 * parameters differ only in generic type ({@code Supplier<ObjectIdentifier>} vs
 * {@code Supplier<ObjectId>}) and so clash after erasure, and referenced types that
 * are neither imported nor part of the renamed API ({@code ObjectIdentifierSupplier},
 * {@code MasterChangeManager}, {@code BasicMasterChangeManager}). Only the variants
 * matching the declared field types are kept.
 *
 * @param objectIdSupplier  the supplier of object identifiers, not null
 * @param changeManager  the change manager, not null
 */
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
  // Versioning is not supported by this master, so always resolve LATEST.
  return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  // NOTE(review): versionCorrection is validated but otherwise ignored -
  // the store keeps only the latest version of each security.
  final SecurityDocument document = _store.get(objectId.getObjectId());
  if (document == null) {
    throw new DataNotFoundException("Security not found: " + objectId);
  }
  return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getSecurity(), "document.security");
  // Allocate a fresh object identifier and stamp it (with an empty version
  // string) onto the security before storing.
  final ObjectId newObjectId = _objectIdSupplier.get();
  final UniqueId newUniqueId = newObjectId.atVersion("");
  final ManageableSecurity storedSecurity = document.getSecurity();
  storedSecurity.setUniqueId(newUniqueId);
  final Instant addedAt = Instant.now();
  final SecurityDocument storedDoc = new SecurityDocument(storedSecurity);
  storedDoc.setVersionFromInstant(addedAt);
  storedDoc.setCorrectionFromInstant(addedAt);
  _store.put(newObjectId, storedDoc);
  // Notify listeners after the store is updated.
  _changeManager.entityChanged(ChangeType.ADDED, null, newUniqueId, addedAt);
  return storedDoc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getSecurity(), "document.security");
  final UniqueId updatedId = document.getUniqueId();
  final Instant updatedAt = Instant.now();
  final SecurityDocument existing = _store.get(updatedId.getObjectId());
  if (existing == null) {
    throw new DataNotFoundException("Security not found: " + updatedId);
  }
  // Re-stamp the incoming document as the sole (latest) version/correction.
  document.setVersionFromInstant(updatedAt);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(updatedAt);
  document.setCorrectionToInstant(null);
  // Atomic compare-and-replace guards against a concurrent update of the
  // same security between the get above and this write.
  if (!_store.replace(updatedId.getObjectId(), existing, document)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, updatedId, document.getUniqueId(), updatedAt);
  return document;
}
//-------------------------------------------------------------------------
@Override
// NOTE(review): UniqueIdentifier is not imported in any visible copy of this
// file and appears to be the legacy name of UniqueId; this overload likely
// predates the rename and duplicates remove(UniqueId) below - confirm and
// consolidate to a single overload.
public void remove(final UniqueIdentifier uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // Remove returns null when no mapping existed, i.e. unknown identifier.
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  // Fixed: was _changeManager.masterChanged(MasterChangedType.REMOVED, ...),
  // a legacy API that does not exist on ChangeManager (the field's type, per
  // changeManager() in this class); use entityChanged/ChangeType like the
  // other mutating methods.
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
  // Without versioning, a correction is indistinguishable from an update.
  return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  // Only the latest version exists, so "history" contains at most one document.
  final SecurityHistoryResult historyResult = new SecurityHistoryResult();
  final SecurityDocument latest = get(request.getObjectId(), VersionCorrection.LATEST);
  if (latest != null) {
    historyResult.getDocuments().add(latest);
  }
  historyResult.setPaging(Paging.of(historyResult.getDocuments()));
  return historyResult;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
  // Exposes the manager used to broadcast add/update/remove events.
  return _changeManager;
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final ChangeManager changeManager) {
  // Conflict resolved: each side mixed one renamed and one legacy identifier.
  // ChangeManager and ObjectIdSupplier are the types actually imported by this
  // file (com.opengamma.core.change.ChangeManager, com.opengamma.id.ObjectIdSupplier);
  // MasterChangeManager and ObjectIdentifierSupplier are the pre-rename names.
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
  // Conflict resolved: Supplier<ObjectId> matches the _objectIdSupplier field,
  // and BasicChangeManager is the imported default ChangeManager implementation;
  // ObjectIdentifier and BasicMasterChangeManager are the legacy names.
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  // Conflict resolved: take Supplier<ObjectId> (legacy ObjectIdentifier is not
  // imported) and ChangeManager (legacy MasterChangeManager is not imported),
  // matching the declared fields _objectIdSupplier and _changeManager.
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
// NOTE(review): UniqueIdentifier is not among this file's imports and appears
// to be the legacy name of UniqueId; this overload duplicates remove(UniqueId)
// below - confirm and consolidate to a single overload.
public void remove(final UniqueIdentifier uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // Remove returns null when no mapping existed, i.e. unknown identifier.
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  // Fixed: was _changeManager.masterChanged(MasterChangedType.REMOVED, ...);
  // neither masterChanged nor MasterChangedType exists in the imported
  // ChangeManager/ChangeType API used by every other mutator in this class.
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final ChangeManager changeManager) {
  // Conflict resolved: ChangeManager and ObjectIdSupplier are the types this
  // file imports; MasterChangeManager and ObjectIdentifierSupplier are the
  // pre-rename names and are not in scope.
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
  // Conflict resolved: Supplier<ObjectId> matches the _objectIdSupplier field;
  // BasicChangeManager is the imported default implementation (legacy
  // BasicMasterChangeManager is not in scope).
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  // Conflict resolved: Supplier<ObjectId> and ChangeManager match the declared
  // fields and this file's imports; ObjectIdentifier and MasterChangeManager
  // are the legacy names.
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
// NOTE(review): UniqueIdentifier is not among this file's imports and appears
// to be the legacy name of UniqueId; this overload duplicates remove(UniqueId)
// below - confirm and consolidate to a single overload.
public void remove(final UniqueIdentifier uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // Remove returns null when no mapping existed, i.e. unknown identifier.
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Security not found: " + uniqueId);
  }
  // Fixed: was _changeManager.masterChanged(MasterChangedType.REMOVED, ...);
  // neither masterChanged nor MasterChangedType exists in the imported
  // ChangeManager/ChangeType API used by every other mutator in this class.
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final ChangeManager changeManager) {
  // Conflict resolved: imported ChangeManager together with the imported
  // ObjectIdSupplier; ObjectIdentifierSupplier and MasterChangeManager are
  // legacy names not in scope.
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
  // Conflict resolved: Supplier<ObjectId> matches the _objectIdSupplier field;
  // default to the imported BasicChangeManager (legacy BasicMasterChangeManager
  // is not in scope).
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  // Conflict resolved: Supplier<ObjectId> and ChangeManager match the declared
  // fields and this file's imports; ObjectIdentifier and MasterChangeManager
  // are the legacy names.
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.security.impl;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.security.ManageableSecurity;
import com.opengamma.master.security.SecurityDocument;
import com.opengamma.master.security.SecurityHistoryRequest;
import com.opengamma.master.security.SecurityHistoryResult;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.master.security.SecurityMetaDataRequest;
import com.opengamma.master.security.SecurityMetaDataResult;
import com.opengamma.master.security.SecuritySearchRequest;
import com.opengamma.master.security.SecuritySearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code SecurityMaster}.
* <p>
* This security master does not support versioning of securities.
*/
public class InMemorySecurityMaster implements SecurityMaster {
// TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSec";
/**
* A cache of securities by identifier.
*/
private final ConcurrentMap<ObjectId, SecurityDocument> _store = new ConcurrentHashMap<ObjectId, SecurityDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySecurityMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemorySecurityMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
=======
public InMemorySecurityMaster(final MasterChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
<<<<<<< MINE
public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
=======
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicMasterChangeManager());
>>>>>>> YOURS
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemorySecurityMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
=======
public InMemorySecurityMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public SecurityMetaDataResult metaData(SecurityMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
SecurityMetaDataResult result = new SecurityMetaDataResult();
if (request.isSecurityTypes()) {
Set<String> types = new HashSet<String>();
for (SecurityDocument doc : _store.values()) {
types.add(doc.getSecurity().getSecurityType());
}
result.getSecurityTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@Override
public SecuritySearchResult search(final SecuritySearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<SecurityDocument> list = new ArrayList<SecurityDocument>();
for (SecurityDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
SecuritySearchResult result = new SecuritySearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument get(final ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final SecurityDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Security not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument add(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableSecurity security = document.getSecurity();
security.setUniqueId(uniqueId);
final Instant now = Instant.now();
final SecurityDocument doc = new SecurityDocument(security);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument update(final SecurityDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSecurity(), "document.security");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final SecurityDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public SecurityDocument correct(final SecurityDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public SecurityHistoryResult history(final SecurityHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final SecurityHistoryResult result = new SecurityHistoryResult();
final SecurityDocument doc = get(request.getObjectId(), VersionCorrection.LATEST);
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.of(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
Diff Result
No diff
Case 57 - java_ogplatform.rev_412e2_f4b6f..InMemorySnapshotMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
 * <p>
 * This snapshot master does not support versioning of snapshots.
 */
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
  //TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object
  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSnap";
  /**
   * A cache of snapshots by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectIdentifier, MarketDataSnapshotDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;
  /**
   * Creates an instance.
   */
  public InMemorySnapshotMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }
  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }
  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }
  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
    for (MarketDataSnapshotDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
    if (document == null || !document.getUniqueId().equals(uniqueId)) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is ignored beyond validation: this master keeps one version.
    final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Snapshot not found: " + objectId);
    }
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
    snapshot.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Compare-and-swap so a concurrent update is detected rather than silently lost.
    if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      // Fixed copy-paste bug: message previously said "Security not found".
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
    // No versioning: a correction is identical to an update.
    return update(document);
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
    final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(list);
    return result;
  }
  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
 * A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
 * <p>
 * This snapshot master does not support versioning of snapshots.
 */
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
  //TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object
  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSnap";
  /**
   * A cache of snapshots by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectIdentifier, MarketDataSnapshotDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;
  /**
   * Creates an instance.
   */
  public InMemorySnapshotMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }
  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }
  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }
  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
    for (MarketDataSnapshotDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
    if (document == null || !document.getUniqueId().equals(uniqueId)) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is ignored beyond validation: this master keeps one version.
    final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Snapshot not found: " + objectId);
    }
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
    snapshot.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Compare-and-swap so a concurrent update is detected rather than silently lost.
    if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }
  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
      // Fixed copy-paste bug: message previously said "Security not found".
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
    // No versioning: a correction is identical to an update.
    return update(document);
  }
  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
    final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(list);
    return result;
  }
  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectIdentifier, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectIdentifier, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectIdentifier> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySnapshotMaster(final ChangeManager changeManager) {
this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
for (MarketDataSnapshotDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueIdentifier uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
if (document == null || !document.getUniqueId().equals(uniqueId)) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final ObjectIdentifier objectId = _objectIdSupplier.get();
final UniqueIdentifier uniqueId = objectId.atVersion("");
final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
snapshot.setUniqueId(uniqueId);
final Instant now = Instant.now();
final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final UniqueIdentifier uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueIdentifier uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // Fixed copy-paste error: the message previously said "Security not found".
  if (_store.remove(uniqueId.getObjectId()) == null) {
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
// This master keeps no version history, so a correction is identical to an update.
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  // Without versioning, the history is at most the single latest document.
  final MarketDataSnapshotDocument latest = _store.get(request.getObjectId());
  final List<MarketDataSnapshotDocument> history;
  if (latest == null) {
    history = Collections.emptyList();
  } else {
    history = Collections.singletonList(latest);
  }
  final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
  result.setPaging(Paging.of(request.getPagingRequest(), history));
  result.getDocuments().addAll(history);
  return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
// Exposes the change manager so callers can subscribe to entity-change events.
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectIdentifier;
import com.opengamma.id.ObjectIdentifierSupplier;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectIdentifier}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSnap";

  /**
   * A cache of snapshots by identifier.
   */
  private final ConcurrentMap<ObjectIdentifier, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectIdentifier, MarketDataSnapshotDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectIdentifier> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance using the default object identifier scheme.
   */
  public InMemorySnapshotMaster() {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final ChangeManager changeManager) {
    this(new ObjectIdentifierSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    // Linear scan of the store; acceptable for an in-memory test implementation.
    final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
    for (MarketDataSnapshotDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    final MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
    // Only the latest version is stored, so the requested version must match exactly.
    if (document == null || !document.getUniqueId().equals(uniqueId)) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is validated but otherwise ignored: this master has no versioning.
    final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Snapshot not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    // Allocate a fresh identifier and stamp it onto the snapshot.
    final ObjectIdentifier objectId = _objectIdSupplier.get();
    final UniqueIdentifier uniqueId = objectId.atVersion("");
    final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
    snapshot.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueIdentifier uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    // Re-stamp the incoming document as the latest (and only) version.
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Atomic replace guards against a concurrent update/remove between get() and here.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueIdentifier uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    // Fixed copy-paste error: the message previously said "Security not found".
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
    // No version history is kept, so a correction is identical to an update.
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    // Without versioning, the history is at most the single latest document.
    final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
    final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(list);
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSnap";

  /**
   * A cache of snapshots by identifier.
   */
  private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default object identifier scheme.
   */
  public InMemorySnapshotMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    // Linear scan of the store; acceptable for an in-memory test implementation.
    final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
    for (MarketDataSnapshotDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    final MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
    // Only the latest version is stored, so the requested version must match exactly.
    if (document == null || !document.getUniqueId().equals(uniqueId)) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is validated but otherwise ignored: this master has no versioning.
    final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Snapshot not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    // Allocate a fresh identifier and stamp it onto the snapshot.
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
    snapshot.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    // Re-stamp the incoming document as the latest (and only) version.
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Atomic replace guards against a concurrent update/remove between get() and here.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    // Fixed copy-paste error: the message previously said "Security not found".
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
    // No version history is kept, so a correction is identical to an update.
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    // Without versioning, the history is at most the single latest document.
    final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
    final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(list);
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.listener.BasicMasterChangeManager;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
  // TODO: This is not hardened for production, as the data in the master can
  // be altered from outside as it is the same object

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemSnap";

  /**
   * A cache of snapshots by identifier.
   */
  private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final MasterChangeManager _changeManager;

  /**
   * Creates an instance using the default object identifier scheme.
   */
  public InMemorySnapshotMaster() {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final MasterChangeManager changeManager) {
    this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicMasterChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    // Linear scan of the store; acceptable for an in-memory test implementation.
    final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
    for (MarketDataSnapshotDocument doc : _store.values()) {
      if (request.matches(doc)) {
        list.add(doc);
      }
    }
    final MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
    // Only the latest version is stored, so the requested version must match exactly.
    if (document == null || !document.getUniqueId().equals(uniqueId)) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    // versionCorrection is validated but otherwise ignored: this master has no versioning.
    final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Snapshot not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    // Allocate a fresh identifier and stamp it onto the snapshot.
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
    snapshot.setUniqueId(uniqueId);
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
    doc.setVersionFromInstant(now);
    doc.setCorrectionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.masterChanged(MasterChangedType.ADDED, null, uniqueId, now);
    return doc;
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    // Re-stamp the incoming document as the latest (and only) version.
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Atomic replace guards against a concurrent update/remove between get() and here.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.masterChanged(MasterChangedType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  @Override
  public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    // Fixed copy-paste error: the message previously said "Security not found".
    if (_store.remove(uniqueId.getObjectId()) == null) {
      throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.masterChanged(MasterChangedType.REMOVED, uniqueId, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
    // No version history is kept, so a correction is identical to an update.
    return update(document);
  }

  //-------------------------------------------------------------------------
  @Override
  public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    // Without versioning, the history is at most the single latest document.
    final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
    final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(list);
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public MasterChangeManager changeManager() {
    return _changeManager;
  }
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
// Delegates to the supplier-based constructor using the default scheme.
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySnapshotMaster(final ChangeManager changeManager) {
// Uses the default scheme while allowing the change manager to be injected.
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
// Uses a plain BasicChangeManager when no change manager is supplied.
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge conflict resolved: the field declarations are Supplier<ObjectId> and
// ChangeManager, so the constructor takes ObjectId from one side and
// ChangeManager from the other.
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Collect every stored document that the request accepts.
  final List<MarketDataSnapshotDocument> matches = new ArrayList<MarketDataSnapshotDocument>();
  for (final MarketDataSnapshotDocument candidate : _store.values()) {
    if (request.matches(candidate)) {
      matches.add(candidate);
    }
  }
  MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // Look up by object id, then require the version to match exactly.
  final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
  final boolean found = document != null && document.getUniqueId().equals(uniqueId);
  if (!found) {
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  // versionCorrection is validated but otherwise unused: only the latest
  // version of each snapshot is kept.
  final MarketDataSnapshotDocument stored = _store.get(objectId.getObjectId());
  if (stored == null) {
    throw new DataNotFoundException("Snapshot not found: " + objectId);
  }
  return stored;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
  // Allocate a fresh identifier and stamp it onto the snapshot.
  final ObjectId objectId = _objectIdSupplier.get();
  final UniqueId uniqueId = objectId.atVersion("");
  final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
  snapshot.setUniqueId(uniqueId);
  // Store a new document stamped with the current instant and notify listeners.
  final Instant now = Instant.now();
  final MarketDataSnapshotDocument stored = new MarketDataSnapshotDocument(snapshot);
  stored.setVersionFromInstant(now);
  stored.setCorrectionFromInstant(now);
  _store.put(objectId, stored);
  _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
  return stored;
}
//-------------------------------------------------------------------------
/**
 * Updates an existing snapshot document in place.
 * <p>
 * The version and correction instants are reset to "now" with open-ended
 * to-instants, as this master does not retain history.
 *
 * @param document  the document to update, with non-null unique identifier and snapshot, not null
 * @return the updated document, not null
 * @throws DataNotFoundException if no document exists for the identifier
 * @throws IllegalArgumentException if the document was concurrently modified
 */
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
        throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // atomic compare-and-set detects a concurrent update of the same snapshot
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
        throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
    return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the snapshot with the given unique identifier.
 *
 * @param uniqueId  the identifier of the snapshot to remove, not null
 * @throws DataNotFoundException if no snapshot exists for the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    if (_store.remove(uniqueId.getObjectId()) == null) {
        // was "Security not found" - copy-paste from the security master
        throw new DataNotFoundException("Snapshot not found: " + uniqueId);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a snapshot document.
 * <p>
 * Versioning is not supported, so a correction is handled identically to an update.
 *
 * @param document the document to correct, not null
 * @return the corrected document, not null
 */
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a snapshot.
 * <p>
 * Only the latest version is stored, so the history contains at most one document.
 *
 * @param request  the history request, not null, with a non-null object identifier
 * @return the paged history result, not null
 */
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
    final MarketDataSnapshotDocument stored = _store.get(request.getObjectId());
    final List<MarketDataSnapshotDocument> docs;
    if (stored == null) {
        docs = Collections.<MarketDataSnapshotDocument>emptyList();
    } else {
        docs = Collections.singletonList(stored);
    }
    final MarketDataSnapshotHistoryResult historyResult = new MarketDataSnapshotHistoryResult();
    historyResult.setPaging(Paging.of(request.getPagingRequest(), docs));
    historyResult.getDocuments().addAll(docs);
    return historyResult;
}
//-------------------------------------------------------------------------
/**
 * Returns the change manager that broadcasts add/update/remove events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemorySnapshotMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
<<<<<<< MINE
public InMemorySnapshotMaster(final Supplier<ObjectIdentifier> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
=======
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final MasterChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
>>>>>>> YOURS
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
for (MarketDataSnapshotDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
if (document == null || !document.getUniqueId().equals(uniqueId)) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
snapshot.setUniqueId(uniqueId);
final Instant now = Instant.now();
final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(list);
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
// NOTE(review): stray merge-resolution marker "KeepBothMethods" left between file variants - remove
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
    // TODO: This is not hardened for production, as the data in the master can
    // be altered from outside as it is the same object.

    /**
     * The default scheme used for each {@link ObjectId}.
     */
    public static final String DEFAULT_OID_SCHEME = "MemSnap";

    /**
     * A cache of snapshots by identifier.
     */
    private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
    /**
     * The supplier of identifiers.
     */
    private final Supplier<ObjectId> _objectIdSupplier;
    /**
     * The change manager.
     */
    private final ChangeManager _changeManager;

    /**
     * Creates an instance using the default scheme for object identifiers.
     */
    public InMemorySnapshotMaster() {
        this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
    }

    /**
     * Creates an instance specifying the change manager.
     *
     * @param changeManager  the change manager, not null
     */
    public InMemorySnapshotMaster(final ChangeManager changeManager) {
        this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
    }

    /**
     * Creates an instance specifying the supplier of object identifiers.
     *
     * @param objectIdSupplier  the supplier of object identifiers, not null
     */
    public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
        this(objectIdSupplier, new BasicChangeManager());
    }

    /**
     * Creates an instance specifying the supplier of object identifiers and change manager.
     *
     * @param objectIdSupplier  the supplier of object identifiers, not null
     * @param changeManager  the change manager, not null
     */
    public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
        ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
        ArgumentChecker.notNull(changeManager, "changeManager");
        _objectIdSupplier = objectIdSupplier;
        _changeManager = changeManager;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
        ArgumentChecker.notNull(request, "request");
        // collect every stored document that the request filter accepts
        final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
        for (MarketDataSnapshotDocument doc : _store.values()) {
            if (request.matches(doc)) {
                list.add(doc);
            }
        }
        final MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
        result.setPaging(Paging.of(request.getPagingRequest(), list));
        result.getDocuments().addAll(request.getPagingRequest().select(list));
        return result;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument get(UniqueId uniqueId) {
        ArgumentChecker.notNull(uniqueId, "uniqueId");
        // the store is keyed by object id; the stored unique id must also match exactly
        final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
        if (document == null || !document.getUniqueId().equals(uniqueId)) {
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
        ArgumentChecker.notNull(objectId, "objectId");
        ArgumentChecker.notNull(versionCorrection, "versionCorrection");
        // versioning is not supported: the version-correction is validated but otherwise ignored
        final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
        if (document == null) {
            throw new DataNotFoundException("Snapshot not found: " + objectId);
        }
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
        ArgumentChecker.notNull(document, "document");
        ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
        final ObjectId objectId = _objectIdSupplier.get();
        // versioning is not supported, so the version string is left empty
        final UniqueId uniqueId = objectId.atVersion("");
        final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
        snapshot.setUniqueId(uniqueId);
        final Instant now = Instant.now();
        final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
        doc.setVersionFromInstant(now);
        doc.setCorrectionFromInstant(now);
        _store.put(objectId, doc);
        // notify listeners only after the document is visible in the store
        _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
        return doc;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
        ArgumentChecker.notNull(document, "document");
        ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
        ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
        final UniqueId uniqueId = document.getUniqueId();
        final Instant now = Instant.now();
        final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
        if (storedDocument == null) {
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        document.setVersionFromInstant(now);
        document.setVersionToInstant(null);
        document.setCorrectionFromInstant(now);
        document.setCorrectionToInstant(null);
        // atomic compare-and-set detects a concurrent update of the same snapshot
        if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
            throw new IllegalArgumentException("Concurrent modification");
        }
        _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public void remove(UniqueId uniqueId) {
        ArgumentChecker.notNull(uniqueId, "uniqueId");
        if (_store.remove(uniqueId.getObjectId()) == null) {
            // was "Security not found" - copy-paste from the security master
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
        // versioning is not supported, so a correction is identical to an update
        return update(document);
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
        ArgumentChecker.notNull(request, "request");
        ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
        // only the latest version is stored, so history is at most one document
        final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
        final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
        final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
        result.setPaging(Paging.of(request.getPagingRequest(), list));
        result.getDocuments().addAll(list);
        return result;
    }

    //-------------------------------------------------------------------------
    @Override
    public ChangeManager changeManager() {
        return _changeManager;
    }

}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
    // TODO: This is not hardened for production, as the data in the master can
    // be altered from outside as it is the same object.

    /**
     * The default scheme used for each {@link ObjectId}.
     */
    public static final String DEFAULT_OID_SCHEME = "MemSnap";

    /**
     * A cache of snapshots by identifier.
     */
    private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
    /**
     * The supplier of identifiers.
     */
    private final Supplier<ObjectId> _objectIdSupplier;
    /**
     * The change manager.
     */
    private final ChangeManager _changeManager;

    /**
     * Creates an instance using the default scheme for object identifiers.
     */
    public InMemorySnapshotMaster() {
        this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
    }

    /**
     * Creates an instance specifying the change manager.
     *
     * @param changeManager  the change manager, not null
     */
    public InMemorySnapshotMaster(final ChangeManager changeManager) {
        this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
    }

    /**
     * Creates an instance specifying the supplier of object identifiers.
     *
     * @param objectIdSupplier  the supplier of object identifiers, not null
     */
    public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
        this(objectIdSupplier, new BasicChangeManager());
    }

    /**
     * Creates an instance specifying the supplier of object identifiers and change manager.
     *
     * @param objectIdSupplier  the supplier of object identifiers, not null
     * @param changeManager  the change manager, not null
     */
    public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
        ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
        ArgumentChecker.notNull(changeManager, "changeManager");
        _objectIdSupplier = objectIdSupplier;
        _changeManager = changeManager;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
        ArgumentChecker.notNull(request, "request");
        // collect every stored document that the request filter accepts
        final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
        for (MarketDataSnapshotDocument doc : _store.values()) {
            if (request.matches(doc)) {
                list.add(doc);
            }
        }
        final MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
        result.setPaging(Paging.of(request.getPagingRequest(), list));
        result.getDocuments().addAll(request.getPagingRequest().select(list));
        return result;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument get(UniqueId uniqueId) {
        ArgumentChecker.notNull(uniqueId, "uniqueId");
        // the store is keyed by object id; the stored unique id must also match exactly
        final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
        if (document == null || !document.getUniqueId().equals(uniqueId)) {
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
        ArgumentChecker.notNull(objectId, "objectId");
        ArgumentChecker.notNull(versionCorrection, "versionCorrection");
        // versioning is not supported: the version-correction is validated but otherwise ignored
        final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
        if (document == null) {
            throw new DataNotFoundException("Snapshot not found: " + objectId);
        }
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
        ArgumentChecker.notNull(document, "document");
        ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
        final ObjectId objectId = _objectIdSupplier.get();
        // versioning is not supported, so the version string is left empty
        final UniqueId uniqueId = objectId.atVersion("");
        final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
        snapshot.setUniqueId(uniqueId);
        final Instant now = Instant.now();
        final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
        doc.setVersionFromInstant(now);
        doc.setCorrectionFromInstant(now);
        _store.put(objectId, doc);
        // notify listeners only after the document is visible in the store
        _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
        return doc;
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
        ArgumentChecker.notNull(document, "document");
        ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
        ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
        final UniqueId uniqueId = document.getUniqueId();
        final Instant now = Instant.now();
        final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
        if (storedDocument == null) {
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        document.setVersionFromInstant(now);
        document.setVersionToInstant(null);
        document.setCorrectionFromInstant(now);
        document.setCorrectionToInstant(null);
        // atomic compare-and-set detects a concurrent update of the same snapshot
        if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
            throw new IllegalArgumentException("Concurrent modification");
        }
        _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
        return document;
    }

    //-------------------------------------------------------------------------
    @Override
    public void remove(UniqueId uniqueId) {
        ArgumentChecker.notNull(uniqueId, "uniqueId");
        if (_store.remove(uniqueId.getObjectId()) == null) {
            // was "Security not found" - copy-paste from the security master
            throw new DataNotFoundException("Snapshot not found: " + uniqueId);
        }
        _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
        // versioning is not supported, so a correction is identical to an update
        return update(document);
    }

    //-------------------------------------------------------------------------
    @Override
    public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
        ArgumentChecker.notNull(request, "request");
        ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
        // only the latest version is stored, so history is at most one document
        final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
        final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
        final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
        result.setPaging(Paging.of(request.getPagingRequest(), list));
        result.getDocuments().addAll(list);
        return result;
    }

    //-------------------------------------------------------------------------
    @Override
    public ChangeManager changeManager() {
        return _changeManager;
    }

}
// NOTE(review): stray merge-resolution marker "Safe" left between file variants - remove
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
// Delegates to the supplier-based constructor with the default "MemSnap" scheme.
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolution: the stored field is a ChangeManager (YOURS) while the
// identifier scheme uses the imported ObjectIdSupplier (MINE); combine both.
public InMemorySnapshotMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolution: the supplier field is Supplier&lt;ObjectId&gt; (MINE) and the
// default change manager is the imported BasicChangeManager (YOURS).
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: match the declared fields — Supplier<ObjectId> and
// ChangeManager. ObjectIdentifier/MasterChangeManager are the obsolete names.
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
/**
 * Searches for snapshot documents matching the request, applying paging.
 *
 * @param request  the search request, not null
 * @return the paged search result, not null
 */
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Collect every stored document that satisfies the request filter.
  final List<MarketDataSnapshotDocument> matched = new ArrayList<MarketDataSnapshotDocument>();
  for (final MarketDataSnapshotDocument candidate : _store.values()) {
    if (request.matches(candidate)) {
      matched.add(candidate);
    }
  }
  final MarketDataSnapshotSearchResult searchResult = new MarketDataSnapshotSearchResult();
  searchResult.setPaging(Paging.of(request.getPagingRequest(), matched));
  searchResult.getDocuments().addAll(request.getPagingRequest().select(matched));
  return searchResult;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Gets a snapshot document by unique identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the matching document, not null
 * @throws DataNotFoundException if no document matches the identifier and version
 */
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final MarketDataSnapshotDocument stored = _store.get(uniqueId.getObjectId());
  // Reject both a missing document and a stale version of an existing one.
  if (stored != null && stored.getUniqueId().equals(uniqueId)) {
    return stored;
  }
  throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
// versionCorrection is validated but otherwise ignored: this in-memory
// master keeps only the latest document per object id (no versioning).
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Adds a snapshot document, assigning it a fresh object identifier.
 *
 * @param document  the document to add, with a non-null snapshot, not null
 * @return the stored document with identifier and instants set, not null
 */
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
  final Instant now = Instant.now();
  final ObjectId assignedId = _objectIdSupplier.get();
  // This master is unversioned, so the version string is empty.
  final UniqueId assignedUid = assignedId.atVersion("");
  final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
  snapshot.setUniqueId(assignedUid);
  final MarketDataSnapshotDocument stored = new MarketDataSnapshotDocument(snapshot);
  stored.setVersionFromInstant(now);
  stored.setCorrectionFromInstant(now);
  _store.put(assignedId, stored);
  _changeManager.entityChanged(ChangeType.ADDED, null, assignedUid, now);
  return stored;
}
//-------------------------------------------------------------------------
/**
 * Updates a snapshot document, replacing the stored version.
 *
 * @param document  the document to update, with unique identifier and snapshot, not null
 * @return the updated document, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 * @throws IllegalArgumentException if a concurrent update replaced the document first
 */
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
  final UniqueId uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  // Atomic compare-and-replace detects a racing update; idiomatic '!' replaces '== false'.
  if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
/**
 * Removes a snapshot document by unique identifier.
 * <p>
 * Merge resolution: the two conflicting overloads (one taking the obsolete
 * {@code UniqueIdentifier}, one firing the obsolete {@code masterChanged})
 * are collapsed into this single method using the current API.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    // Fixed copy-paste: this is a snapshot master, not a security master.
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
// This master does not version documents, so a "correction" is applied
// exactly like an update of the latest (only) version.
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a snapshot.
 * <p>
 * This unversioned master returns at most one entry: the current document.
 *
 * @param request  the history request with object identifier, not null
 * @return the paged history result, not null
 */
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  final MarketDataSnapshotDocument stored = _store.get(request.getObjectId());
  final List<MarketDataSnapshotDocument> docs;
  if (stored == null) {
    docs = Collections.<MarketDataSnapshotDocument>emptyList();
  } else {
    docs = Collections.singletonList(stored);
  }
  final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
  result.setPaging(Paging.of(request.getPagingRequest(), docs));
  result.getDocuments().addAll(docs);
  return result;
}
//-------------------------------------------------------------------------
@Override
// Exposes the change manager so callers can subscribe to entity change events.
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectIdentifier}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolution: the stored field is a ChangeManager (YOURS) while the
// identifier scheme uses the imported ObjectIdSupplier (MINE); combine both.
public InMemorySnapshotMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolution: the supplier field is Supplier&lt;ObjectId&gt; (MINE) and the
// default change manager is the imported BasicChangeManager (YOURS).
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: match the declared fields — Supplier<ObjectId> and
// ChangeManager. ObjectIdentifier/MasterChangeManager are the obsolete names.
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
for (MarketDataSnapshotDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
if (document == null || !document.getUniqueId().equals(uniqueId)) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
snapshot.setUniqueId(uniqueId);
final Instant now = Instant.now();
final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
/**
 * Updates a snapshot document, replacing the stored version.
 *
 * @param document  the document to update, with unique identifier and snapshot, not null
 * @return the updated document, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 * @throws IllegalArgumentException if a concurrent update replaced the document first
 */
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
  final UniqueId uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  // Atomic compare-and-replace detects a racing update; idiomatic '!' replaces '== false'.
  if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
/**
 * Removes a snapshot document by unique identifier.
 * <p>
 * Merge resolution: the two conflicting overloads (one taking the obsolete
 * {@code UniqueIdentifier}, one firing the obsolete {@code masterChanged})
 * are collapsed into this single method using the current API.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    // Fixed copy-paste: this is a snapshot master, not a security master.
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(list);
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
// ===== Variant: Unstructured =====
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolution: the stored field is a ChangeManager (MINE) while the
// identifier scheme uses the imported ObjectIdSupplier (YOURS); combine both.
public InMemorySnapshotMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolution: the supplier field is Supplier&lt;ObjectId&gt; (YOURS) and the
// default change manager is the imported BasicChangeManager (MINE).
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: match the declared fields — Supplier<ObjectId> and
// ChangeManager. ObjectIdentifier/MasterChangeManager are the obsolete names.
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
for (MarketDataSnapshotDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
if (document == null || !document.getUniqueId().equals(uniqueId)) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
snapshot.setUniqueId(uniqueId);
final Instant now = Instant.now();
final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
/**
 * Updates a snapshot document, replacing the stored version.
 *
 * @param document  the document to update, with unique identifier and snapshot, not null
 * @return the updated document, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 * @throws IllegalArgumentException if a concurrent update replaced the document first
 */
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
  final UniqueId uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  // Atomic compare-and-replace detects a racing update; idiomatic '!' replaces '== false'.
  if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
/**
 * Removes a snapshot document by unique identifier.
 *
 * @param uniqueId  the unique identifier to remove, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 */
@Override
public void remove(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  if (_store.remove(uniqueId.getObjectId()) == null) {
    // Fixed copy-paste: this is a snapshot master, not a security master.
    throw new DataNotFoundException("Snapshot not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(list);
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.marketdatasnapshot.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.marketdatasnapshot.ManageableMarketDataSnapshot;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotDocument;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotHistoryResult;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchRequest;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.Paging;
/**
* A simple, in-memory implementation of {@code MarketDataSnapshotMaster}.
* <p>
* This snapshot master does not support versioning of snapshots.
*/
public class InMemorySnapshotMaster implements MarketDataSnapshotMaster {
//TODO: This is not hardened for production, as the data in the master can
// be altered from outside as it is the same object
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemSnap";
/**
* A cache of snapshots by identifier.
*/
private final ConcurrentMap<ObjectId, MarketDataSnapshotDocument> _store = new ConcurrentHashMap<ObjectId, MarketDataSnapshotDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemorySnapshotMaster() {
this(new ObjectIdSupplier(DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
// Merge resolution: the stored field is a ChangeManager (MINE) while the
// identifier scheme uses the imported ObjectIdSupplier (YOURS); combine both.
public InMemorySnapshotMaster(final ChangeManager changeManager) {
  this(new ObjectIdSupplier(DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
// Merge resolution: the supplier field is Supplier&lt;ObjectId&gt; (YOURS) and the
// default change manager is the imported BasicChangeManager (MINE).
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier) {
  this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
// Merge resolution: match the declared fields — Supplier<ObjectId> and
// ChangeManager. ObjectIdentifier/MasterChangeManager are the obsolete names.
public InMemorySnapshotMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
  ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
  ArgumentChecker.notNull(changeManager, "changeManager");
  _objectIdSupplier = objectIdSupplier;
  _changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotSearchResult search(MarketDataSnapshotSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<MarketDataSnapshotDocument> list = new ArrayList<MarketDataSnapshotDocument>();
for (MarketDataSnapshotDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
MarketDataSnapshotSearchResult result = new MarketDataSnapshotSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
final MarketDataSnapshotDocument document = _store.get(uniqueId.getObjectId());
if (document == null || !document.getUniqueId().equals(uniqueId)) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final MarketDataSnapshotDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Snapshot not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument add(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final ManageableMarketDataSnapshot snapshot = document.getSnapshot();
snapshot.setUniqueId(uniqueId);
final Instant now = Instant.now();
final MarketDataSnapshotDocument doc = new MarketDataSnapshotDocument(snapshot);
doc.setVersionFromInstant(now);
doc.setCorrectionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument update(MarketDataSnapshotDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getSnapshot(), "document.snapshot");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final MarketDataSnapshotDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Snapshot not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Security not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotDocument correct(MarketDataSnapshotDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public MarketDataSnapshotHistoryResult history(MarketDataSnapshotHistoryRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
final MarketDataSnapshotDocument doc = _store.get(request.getObjectId());
final List<MarketDataSnapshotDocument> list = (doc != null) ? Collections.singletonList(doc) : Collections.<MarketDataSnapshotDocument>emptyList();
final MarketDataSnapshotHistoryResult result = new MarketDataSnapshotHistoryResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(list);
return result;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
Diff Result
No diff
Case 58 - java_ogplatform.rev_412e2_f4b6f..ViewProcessorManagerTest.java
Base
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.master.listener.MasterChangeListener;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChanged;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.listener.NotifyingMaster;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueIdentifier.of("Test", "Watched"));
}
});
}
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
@Override
public UniqueIdentifier getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueIdentifier viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueIdentifier clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements MasterChangeManager {
private MasterChangeListener _listener;
@Override
public void addChangeListener(MasterChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(MasterChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void masterChanged(MasterChangedType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
public void notifyListenerUnwatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
}
public void notifyListenerWatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
private static class MockNotifyingMaster implements NotifyingMaster {
private MasterChangeManager _changeManager = new MockChangeManager();
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.master.listener.MasterChangeListener;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChanged;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.listener.NotifyingMaster;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueIdentifier.of("Test", "Watched"));
}
});
}
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
@Override
public UniqueIdentifier getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueIdentifier viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueIdentifier clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements MasterChangeManager {
private MasterChangeListener _listener;
@Override
public void addChangeListener(MasterChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(MasterChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void masterChanged(MasterChangedType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
public void notifyListenerUnwatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
}
public void notifyListenerWatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
private static class MockNotifyingMaster implements NotifyingMaster {
private MasterChangeManager _changeManager = new MockChangeManager();
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
Left
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueIdentifier.of("Test", "Watched"));
}
});
}
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
@Override
public UniqueIdentifier getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueIdentifier viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueIdentifier clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
public void notifyListenerUnwatchedIdentifier() {
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
}
public void notifyListenerWatchedIdentifier() {
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
private static class MockNotifyingMaster implements ChangeProvider {
private ChangeManager _changeManager = new MockChangeManager();
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
// Mock view processor that tracks its running state with assertions and
// publishes suspend/resume transitions onto a queue so the test can await them.
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
// Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; polled by isSuspended().
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// Register a function that asks to be reinitialized whenever the
// "Test"/"Watched" identifier changes; the test triggers that change below.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueIdentifier.of("Test", "Watched"));
}
});
}
// Suspends asynchronously via submit(Runnable, T): the first Runnable performs
// the suspension; the Future's result is the second Runnable, which resumes
// the processor when run by the caller.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Returns the next suspend-state transition, or null if none occurs within
// the given timeout (milliseconds).
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// Remaining ViewProcessorInternal methods are unused by this test and return null.
@Override
public UniqueIdentifier getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueIdentifier viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueIdentifier clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
// Change manager that captures the single registered listener so the test
// can fire change notifications at it directly.
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
// Fires a change for an identifier the mock function does NOT watch.
public void notifyListenerUnwatchedIdentifier() {
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
}
// Fires a change for the identifier the mock function watches.
public void notifyListenerWatchedIdentifier() {
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
// Master stub whose only behavior is exposing the MockChangeManager.
private static class MockNotifyingMaster implements ChangeProvider {
private ChangeManager _changeManager = new MockChangeManager();
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
// Versioned source that queues each version/correction it is given so the
// test can await the manager's updates with a timeout.
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
// End-to-end check: startup wires the change listener, an unwatched change
// is ignored, a watched change suspends the processor, advances the source
// version and bumps the function init id, and shutdown unwires everything.
// NOTE(review): "changeManger" is a long-standing typo for "changeManager".
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// (sleep ensures the new version instant is strictly after the initial one)
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
// Poll for the asynchronous reinitialization to bump the function init id.
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
Right
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.master.listener.MasterChangeListener;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChanged;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.listener.NotifyingMaster;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
// Mock view processor that tracks its running state with assertions and
// publishes suspend/resume transitions onto a queue so the test can await them.
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
// Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; polled by isSuspended().
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// Register a function that asks to be reinitialized whenever the
// "Test"/"Watched" identifier changes; the test triggers that change below.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
// Suspends asynchronously via submit(Runnable, T): the first Runnable performs
// the suspension; the Future's result is the second Runnable, which resumes
// the processor when run by the caller.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Returns the next suspend-state transition, or null if none occurs within
// the given timeout (milliseconds).
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// Remaining ViewProcessorInternal methods are unused by this test and return null.
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
// Change manager that captures the single registered listener so the test
// can fire master-change notifications at it directly.
private static final class MockChangeManager implements MasterChangeManager {
private MasterChangeListener _listener;
@Override
public void addChangeListener(MasterChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(MasterChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
// Fires a change for an identifier the mock function does NOT watch.
public void notifyListenerUnwatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
}
// Fires a change for the identifier the mock function watches.
public void notifyListenerWatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
// Master stub whose only behavior is exposing the MockChangeManager.
private static class MockNotifyingMaster implements NotifyingMaster {
private MasterChangeManager _changeManager = new MockChangeManager();
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
// Versioned source that queues each version/correction it is given so the
// test can await the manager's updates with a timeout.
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
// End-to-end check: startup wires the change listener, an unwatched change
// is ignored, a watched change suspends the processor, advances the source
// version and bumps the function init id, and shutdown unwires everything.
// NOTE(review): "changeManger" is a long-standing typo for "changeManager".
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// (sleep ensures the new version instant is strictly after the initial one)
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
// Poll for the asynchronous reinitialization to bump the function init id.
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.master.listener.MasterChangeListener;
import com.opengamma.master.listener.MasterChangeManager;
import com.opengamma.master.listener.MasterChanged;
import com.opengamma.master.listener.MasterChangedType;
import com.opengamma.master.listener.NotifyingMaster;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
// Mock view processor that tracks its running state with assertions and
// publishes suspend/resume transitions onto a queue so the test can await them.
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
// Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; polled by isSuspended().
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// Register a function that asks to be reinitialized whenever the
// "Test"/"Watched" identifier changes; the test triggers that change below.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
// Suspends asynchronously via submit(Runnable, T): the first Runnable performs
// the suspension; the Future's result is the second Runnable, which resumes
// the processor when run by the caller.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Returns the next suspend-state transition, or null if none occurs within
// the given timeout (milliseconds).
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// Remaining ViewProcessorInternal methods are unused by this test and return null.
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
// Change manager that captures the single registered listener so the test
// can fire master-change notifications at it directly.
private static final class MockChangeManager implements MasterChangeManager {
private MasterChangeListener _listener;
@Override
public void addChangeListener(MasterChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(MasterChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
// Fires a change for an identifier the mock function does NOT watch.
public void notifyListenerUnwatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
}
// Fires a change for the identifier the mock function watches.
public void notifyListenerWatchedIdentifier() {
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
}
}
//-------------------------------------------------------------------------
// Master stub whose only behavior is exposing the MockChangeManager.
private static class MockNotifyingMaster implements NotifyingMaster {
private MasterChangeManager _changeManager = new MockChangeManager();
@Override
public MasterChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
// Versioned source that queues each version/correction it is given so the
// test can await the manager's updates with a timeout.
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
// End-to-end check: startup wires the change listener, an unwatched change
// is ignored, a watched change suspends the processor, advances the source
// version and bumps the function init id, and shutdown unwires everything.
// NOTE(review): "changeManger" is a long-standing typo for "changeManager".
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// (sleep ensures the new version instant is strictly after the initial one)
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
// Poll for the asynchronous reinitialization to bump the function init id.
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
<<<<<<< MINE
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
=======
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
>>>>>>> YOURS
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
private static class MockNotifyingMaster implements ChangeProvider {
private ChangeManager _changeManager = new MockChangeManager();
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock {@link ViewProcessorInternal} that tracks its running/suspended state and
 * publishes suspend/resume transitions onto a queue for the test to observe.
 * Methods not exercised by ViewProcessorManager simply return null.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
  private final CompiledFunctionService _compiledFunctionService;
  // Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; the test polls this.
  private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
  private boolean _running;
  private boolean _suspended;
  public MockViewProcessor() {
    final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
    _compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
    functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
      @Override
      public void init(final FunctionCompilationContext context) {
        // Register "Test"/"Watched" so a change to that identifier forces re-initialization.
        context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
      }
    });
  }
  @Override
  public Future<Runnable> suspend(final ExecutorService executorService) {
    // submit(task, result) form: the first Runnable performs the suspension; the second
    // (the Future's value) is what the caller runs later to resume the processor.
    return executorService.submit(new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertFalse(_suspended);
          _suspended = true;
          _suspendState.add(Boolean.TRUE);
        }
      }
    }, (Runnable) new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertTrue(_suspended);
          _suspended = false;
          _suspendState.add(Boolean.FALSE);
        }
      }
    });
  }
  @Override
  public synchronized boolean isRunning() {
    return _running;
  }
  @Override
  public synchronized void start() {
    assertFalse(_running);
    _running = true;
  }
  @Override
  public synchronized void stop() {
    assertTrue(_running);
    _running = false;
  }
  /**
   * Polls the suspend-state queue.
   *
   * @param timeout maximum wait in milliseconds
   * @return TRUE after a suspend, FALSE after a resume, or null if no transition occurred in time
   */
  public Boolean isSuspended(final long timeout) throws InterruptedException {
    return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
  }
  // The remaining ViewProcessorInternal methods are not used by this test's code paths.
  @Override
  public UniqueId getUniqueId() {
    return null;
  }
  @Override
  public ViewDefinitionRepository getViewDefinitionRepository() {
    return null;
  }
  @Override
  public Collection<? extends ViewProcess> getViewProcesses() {
    return null;
  }
  @Override
  public ViewProcess getViewProcess(UniqueId viewProcessId) {
    return null;
  }
  @Override
  public Collection<ViewClient> getViewClients() {
    return null;
  }
  @Override
  public ViewClient createViewClient(UserPrincipal clientUser) {
    return null;
  }
  @Override
  public ViewClient getViewClient(UniqueId clientId) {
    return null;
  }
  @Override
  public CompiledFunctionService getFunctionCompilationService() {
    return _compiledFunctionService;
  }
  @Override
  public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
    return null;
  }
  @Override
  public EngineResourceManager<ViewCycle> getViewCycleManager() {
    return null;
  }
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
<<<<<<< MINE
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
=======
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
>>>>>>> YOURS
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal master mock: a ChangeProvider backed by a MockChangeManager. */
private static class MockNotifyingMaster implements ChangeProvider {
  private ChangeManager _changeManager = new MockChangeManager();
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }
}
//-------------------------------------------------------------------------
/**
 * Mock VersionedSource that queues every version/correction it is handed so the
 * test can confirm the manager pushed a new one.
 */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _versionCorrections.add(versionCorrection);
  }
  // Blocks up to the standard test timeout; null means nothing was set in time.
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
/**
 * End-to-end check of ViewProcessorManager: startup wires a change listener and
 * initializes functions; an unwatched change is ignored; a watched change
 * suspends the processor, advances the source version and re-initializes the
 * functions; shutdown unwires everything.
 */
@Test
public void testBasicOperation() throws InterruptedException {
  final ViewProcessorManager vpm = new ViewProcessorManager();
  final MockViewProcessor vp = new MockViewProcessor();
  vpm.setViewProcessor(vp);
  final MockNotifyingMaster master = new MockNotifyingMaster();
  final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
  final MockVersionedSource source = new MockVersionedSource();
  vpm.setMasterAndSource(master, source);
  // Check normal startup
  vpm.start();
  assertTrue(changeManger.hasListener());
  assertTrue(vpm.isRunning());
  assertTrue(vp.isRunning());
  Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  assertNotNull(initialId);
  VersionCorrection initialVersion = source.getVersionCorrection();
  // Notify it of a change to the master
  // NOTE(review): sleep presumably ensures the next version instant is strictly later — confirm.
  Thread.sleep(10);
  changeManger.notifyListenerUnwatchedIdentifier();
  // An unwatched identifier must not suspend the processor (poll times out -> null).
  assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
  changeManger.notifyListenerWatchedIdentifier();
  assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  VersionCorrection newVersion = source.getVersionCorrection();
  assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
  Long newId = 0L;
  // Poll for re-initialization; the id is re-read on each of the 10 slices (no early exit).
  for (int i = 0; i < 10; i++) {
    Thread.sleep(Timeout.standardTimeoutMillis() / 10);
    newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  }
  assertTrue(newId > initialId);
  assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  // Shutdown
  vpm.stop();
  assertFalse(vpm.isRunning());
  assertFalse(vp.isRunning());
  assertFalse(changeManger.hasListener());
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock {@link ViewProcessorInternal} that tracks its running/suspended state and
 * publishes suspend/resume transitions onto a queue for the test to observe.
 * Methods not exercised by ViewProcessorManager simply return null.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
  private final CompiledFunctionService _compiledFunctionService;
  // Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; the test polls this.
  private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
  private boolean _running;
  private boolean _suspended;
  public MockViewProcessor() {
    final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
    _compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
    functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
      @Override
      public void init(final FunctionCompilationContext context) {
        // Register "Test"/"Watched" so a change to that identifier forces re-initialization.
        context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
      }
    });
  }
  @Override
  public Future<Runnable> suspend(final ExecutorService executorService) {
    // submit(task, result) form: the first Runnable performs the suspension; the second
    // (the Future's value) is what the caller runs later to resume the processor.
    return executorService.submit(new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertFalse(_suspended);
          _suspended = true;
          _suspendState.add(Boolean.TRUE);
        }
      }
    }, (Runnable) new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertTrue(_suspended);
          _suspended = false;
          _suspendState.add(Boolean.FALSE);
        }
      }
    });
  }
  @Override
  public synchronized boolean isRunning() {
    return _running;
  }
  @Override
  public synchronized void start() {
    assertFalse(_running);
    _running = true;
  }
  @Override
  public synchronized void stop() {
    assertTrue(_running);
    _running = false;
  }
  /**
   * Polls the suspend-state queue.
   *
   * @param timeout maximum wait in milliseconds
   * @return TRUE after a suspend, FALSE after a resume, or null if no transition occurred in time
   */
  public Boolean isSuspended(final long timeout) throws InterruptedException {
    return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
  }
  // The remaining ViewProcessorInternal methods are not used by this test's code paths.
  @Override
  public UniqueId getUniqueId() {
    return null;
  }
  @Override
  public ViewDefinitionRepository getViewDefinitionRepository() {
    return null;
  }
  @Override
  public Collection<? extends ViewProcess> getViewProcesses() {
    return null;
  }
  @Override
  public ViewProcess getViewProcess(UniqueId viewProcessId) {
    return null;
  }
  @Override
  public Collection<ViewClient> getViewClients() {
    return null;
  }
  @Override
  public ViewClient createViewClient(UserPrincipal clientUser) {
    return null;
  }
  @Override
  public ViewClient getViewClient(UniqueId clientId) {
    return null;
  }
  @Override
  public CompiledFunctionService getFunctionCompilationService() {
    return _compiledFunctionService;
  }
  @Override
  public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
    return null;
  }
  @Override
  public EngineResourceManager<ViewCycle> getViewCycleManager() {
    return null;
  }
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal master mock: a ChangeProvider backed by a MockChangeManager. */
private static class MockNotifyingMaster implements ChangeProvider {
  private ChangeManager _changeManager = new MockChangeManager();
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }
}
//-------------------------------------------------------------------------
/**
 * Mock VersionedSource that queues every version/correction it is handed so the
 * test can confirm the manager pushed a new one.
 */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _versionCorrections.add(versionCorrection);
  }
  // Blocks up to the standard test timeout; null means nothing was set in time.
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
/**
 * End-to-end check of ViewProcessorManager: startup wires a change listener and
 * initializes functions; an unwatched change is ignored; a watched change
 * suspends the processor, advances the source version and re-initializes the
 * functions; shutdown unwires everything.
 */
@Test
public void testBasicOperation() throws InterruptedException {
  final ViewProcessorManager vpm = new ViewProcessorManager();
  final MockViewProcessor vp = new MockViewProcessor();
  vpm.setViewProcessor(vp);
  final MockNotifyingMaster master = new MockNotifyingMaster();
  final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
  final MockVersionedSource source = new MockVersionedSource();
  vpm.setMasterAndSource(master, source);
  // Check normal startup
  vpm.start();
  assertTrue(changeManger.hasListener());
  assertTrue(vpm.isRunning());
  assertTrue(vp.isRunning());
  Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  assertNotNull(initialId);
  VersionCorrection initialVersion = source.getVersionCorrection();
  // Notify it of a change to the master
  // NOTE(review): sleep presumably ensures the next version instant is strictly later — confirm.
  Thread.sleep(10);
  changeManger.notifyListenerUnwatchedIdentifier();
  // An unwatched identifier must not suspend the processor (poll times out -> null).
  assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
  changeManger.notifyListenerWatchedIdentifier();
  assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  VersionCorrection newVersion = source.getVersionCorrection();
  assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
  Long newId = 0L;
  // Poll for re-initialization; the id is re-read on each of the 10 slices (no early exit).
  for (int i = 0; i < 10; i++) {
    Thread.sleep(Timeout.standardTimeoutMillis() / 10);
    newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  }
  assertTrue(newId > initialId);
  assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  // Shutdown
  vpm.stop();
  assertFalse(vpm.isRunning());
  assertFalse(vp.isRunning());
  assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
private static class MockViewProcessor implements ViewProcessorInternal {
private final CompiledFunctionService _compiledFunctionService;
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
private static class MockNotifyingMaster implements ChangeProvider {
private ChangeManager _changeManager = new MockChangeManager();
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
//-------------------------------------------------------------------------
private static class MockVersionedSource implements VersionedSource {
private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
@Override
public void setVersionCorrection(VersionCorrection versionCorrection) {
_versionCorrections.add(versionCorrection);
}
public VersionCorrection getVersionCorrection() throws InterruptedException {
return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
}
}
//-------------------------------------------------------------------------
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
Thread.sleep(10);
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
Safe
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock {@link ViewProcessorInternal} that tracks its running/suspended state and
 * publishes suspend/resume transitions onto a queue for the test to observe.
 * Methods not exercised by ViewProcessorManager simply return null.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
  private final CompiledFunctionService _compiledFunctionService;
  // Receives Boolean.TRUE on suspend and Boolean.FALSE on resume; the test polls this.
  private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
  private boolean _running;
  private boolean _suspended;
  public MockViewProcessor() {
    final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
    _compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
    functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
      @Override
      public void init(final FunctionCompilationContext context) {
        // Register "Test"/"Watched" so a change to that identifier forces re-initialization.
        context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
      }
    });
  }
  @Override
  public Future<Runnable> suspend(final ExecutorService executorService) {
    // submit(task, result) form: the first Runnable performs the suspension; the second
    // (the Future's value) is what the caller runs later to resume the processor.
    return executorService.submit(new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertFalse(_suspended);
          _suspended = true;
          _suspendState.add(Boolean.TRUE);
        }
      }
    }, (Runnable) new Runnable() {
      @Override
      public void run() {
        synchronized (MockViewProcessor.this) {
          assertTrue(_running);
          assertTrue(_suspended);
          _suspended = false;
          _suspendState.add(Boolean.FALSE);
        }
      }
    });
  }
  @Override
  public synchronized boolean isRunning() {
    return _running;
  }
  @Override
  public synchronized void start() {
    assertFalse(_running);
    _running = true;
  }
  @Override
  public synchronized void stop() {
    assertTrue(_running);
    _running = false;
  }
  /**
   * Polls the suspend-state queue.
   *
   * @param timeout maximum wait in milliseconds
   * @return TRUE after a suspend, FALSE after a resume, or null if no transition occurred in time
   */
  public Boolean isSuspended(final long timeout) throws InterruptedException {
    return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
  }
  // The remaining ViewProcessorInternal methods are not used by this test's code paths.
  @Override
  public UniqueId getUniqueId() {
    return null;
  }
  @Override
  public ViewDefinitionRepository getViewDefinitionRepository() {
    return null;
  }
  @Override
  public Collection<? extends ViewProcess> getViewProcesses() {
    return null;
  }
  @Override
  public ViewProcess getViewProcess(UniqueId viewProcessId) {
    return null;
  }
  @Override
  public Collection<ViewClient> getViewClients() {
    return null;
  }
  @Override
  public ViewClient createViewClient(UserPrincipal clientUser) {
    return null;
  }
  @Override
  public ViewClient getViewClient(UniqueId clientId) {
    return null;
  }
  @Override
  public CompiledFunctionService getFunctionCompilationService() {
    return _compiledFunctionService;
  }
  @Override
  public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
    return null;
  }
  @Override
  public EngineResourceManager<ViewCycle> getViewCycleManager() {
    return null;
  }
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
<<<<<<< MINE
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
=======
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
>>>>>>> YOURS
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal master mock: a ChangeProvider backed by a MockChangeManager. */
private static class MockNotifyingMaster implements ChangeProvider {
  private ChangeManager _changeManager = new MockChangeManager();
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }
}
//-------------------------------------------------------------------------
/**
 * Mock VersionedSource that queues every version/correction it is handed so the
 * test can confirm the manager pushed a new one.
 */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _versionCorrections = new LinkedBlockingQueue<VersionCorrection>();
  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _versionCorrections.add(versionCorrection);
  }
  // Blocks up to the standard test timeout; null means nothing was set in time.
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    return _versionCorrections.poll(Timeout.standardTimeoutMillis(), TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
/**
 * End-to-end check of ViewProcessorManager: startup wires a change listener and
 * initializes functions; an unwatched change is ignored; a watched change
 * suspends the processor, advances the source version and re-initializes the
 * functions; shutdown unwires everything.
 */
@Test
public void testBasicOperation() throws InterruptedException {
  final ViewProcessorManager vpm = new ViewProcessorManager();
  final MockViewProcessor vp = new MockViewProcessor();
  vpm.setViewProcessor(vp);
  final MockNotifyingMaster master = new MockNotifyingMaster();
  final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
  final MockVersionedSource source = new MockVersionedSource();
  vpm.setMasterAndSource(master, source);
  // Check normal startup
  vpm.start();
  assertTrue(changeManger.hasListener());
  assertTrue(vpm.isRunning());
  assertTrue(vp.isRunning());
  Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  assertNotNull(initialId);
  VersionCorrection initialVersion = source.getVersionCorrection();
  // Notify it of a change to the master
  // NOTE(review): sleep presumably ensures the next version instant is strictly later — confirm.
  Thread.sleep(10);
  changeManger.notifyListenerUnwatchedIdentifier();
  // An unwatched identifier must not suspend the processor (poll times out -> null).
  assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
  changeManger.notifyListenerWatchedIdentifier();
  assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  VersionCorrection newVersion = source.getVersionCorrection();
  assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
  Long newId = 0L;
  // Poll for re-initialization; the id is re-read on each of the 10 slices (no early exit).
  for (int i = 0; i < 10; i++) {
    Thread.sleep(Timeout.standardTimeoutMillis() / 10);
    newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
  }
  assertTrue(newId > initialId);
  assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
  // Shutdown
  vpm.stop();
  assertFalse(vpm.isRunning());
  assertFalse(vp.isRunning());
  assertFalse(changeManger.hasListener());
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock ViewProcessorInternal that records suspend/resume calls and start/stop state.
 * suspend() completes its future after flagging the processor suspended; the Runnable
 * the future yields resumes it. Every transition is also published on _suspendState
 * so the test can block until it happens.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
// Real compilation service so the test can observe function (re)initialisation ids.
private final CompiledFunctionService _compiledFunctionService;
// Queue of suspend (TRUE) / resume (FALSE) transitions, in the order they occurred.
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
// Both guarded by this mock's monitor.
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// The mock function registers for reinitialisation on changes to UniqueId Test~Watched,
// which is the identifier the test later notifies.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
// Uses ExecutorService.submit(Runnable, T): the submitted task marks the processor
// suspended, and the future's result is a Runnable that undoes the suspension.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Blocks up to 'timeout' ms for the next suspend/resume transition; null on timeout.
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// The remaining ViewProcessorInternal methods are not exercised by this test.
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
<<<<<<< MINE
@Override
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
}
=======
@Override
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
}
>>>>>>> YOURS
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal ChangeProvider backed by a MockChangeManager instance. */
private static class MockNotifyingMaster implements ChangeProvider {
  private final ChangeManager _manager = new MockChangeManager();

  @Override
  public ChangeManager changeManager() {
    return _manager;
  }
}
//-------------------------------------------------------------------------
/** VersionedSource stub that queues each version/correction it is given for later inspection. */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _received = new LinkedBlockingQueue<VersionCorrection>();

  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _received.add(versionCorrection);
  }

  /** Waits up to the standard test timeout for the next pushed value; null if none arrives. */
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    final long waitMillis = Timeout.standardTimeoutMillis();
    return _received.poll(waitMillis, TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
// End-to-end check of ViewProcessorManager: start, react to a watched change
// (suspend -> bump version/correction -> reinitialise functions -> resume), stop.
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
// NOTE(review): local is misspelt ("changeManger"); harmless here, worth renaming.
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// Sleep so the new version instant is strictly after the initial one.
Thread.sleep(10);
// An unwatched identifier must NOT trigger a suspend/resume cycle...
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
// ...but the watched identifier must.
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
// Poll for the function init id to advance; reinitialisation appears to happen
// asynchronously after the resume, hence the repeated sampling.
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
Unstructured
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock ViewProcessorInternal that records suspend/resume calls and start/stop state.
 * suspend() completes its future after flagging the processor suspended; the Runnable
 * the future yields resumes it. Every transition is also published on _suspendState
 * so the test can block until it happens.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
// Real compilation service so the test can observe function (re)initialisation ids.
private final CompiledFunctionService _compiledFunctionService;
// Queue of suspend (TRUE) / resume (FALSE) transitions, in the order they occurred.
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
// Both guarded by this mock's monitor.
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// The mock function registers for reinitialisation on changes to UniqueId Test~Watched,
// which is the identifier the test later notifies.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
// Uses ExecutorService.submit(Runnable, T): the submitted task marks the processor
// suspended, and the future's result is a Runnable that undoes the suspension.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Blocks up to 'timeout' ms for the next suspend/resume transition; null on timeout.
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// The remaining ViewProcessorInternal methods are not exercised by this test.
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
<<<<<<< MINE
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
=======
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
>>>>>>> YOURS
}
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal ChangeProvider backed by a MockChangeManager instance. */
private static class MockNotifyingMaster implements ChangeProvider {
  private final ChangeManager _manager = new MockChangeManager();

  @Override
  public ChangeManager changeManager() {
    return _manager;
  }
}
//-------------------------------------------------------------------------
/** VersionedSource stub that queues each version/correction it is given for later inspection. */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _received = new LinkedBlockingQueue<VersionCorrection>();

  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _received.add(versionCorrection);
  }

  /** Waits up to the standard test timeout for the next pushed value; null if none arrives. */
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    final long waitMillis = Timeout.standardTimeoutMillis();
    return _received.poll(waitMillis, TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
// End-to-end check of ViewProcessorManager: start, react to a watched change
// (suspend -> bump version/correction -> reinitialise functions -> resume), stop.
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
// NOTE(review): local is misspelt ("changeManger"); harmless here, worth renaming.
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// Sleep so the new version instant is strictly after the initial one.
Thread.sleep(10);
// An unwatched identifier must NOT trigger a suspend/resume cycle...
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
// ...but the watched identifier must.
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
// Poll for the function init id to advance; reinitialisation appears to happen
// asynchronously after the resume, hence the repeated sampling.
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.view;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import javax.time.Instant;
import org.testng.annotations.Test;
import com.opengamma.core.change.ChangeEvent;
import com.opengamma.core.change.ChangeListener;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeProvider;
import com.opengamma.core.change.ChangeType;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.CachingFunctionRepositoryCompiler;
import com.opengamma.engine.function.CompiledFunctionService;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.InMemoryFunctionRepository;
import com.opengamma.engine.test.MockFunction;
import com.opengamma.engine.view.ViewDefinitionRepository;
import com.opengamma.engine.view.ViewProcess;
import com.opengamma.engine.view.ViewProcessorInternal;
import com.opengamma.engine.view.calc.EngineResourceManager;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.event.ViewProcessorEventListenerRegistry;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.master.VersionedSource;
import com.opengamma.util.test.Timeout;
/**
* Test the ViewProcessorManager class.
*/
public class ViewProcessorManagerTest {
//-------------------------------------------------------------------------
/**
 * Mock ViewProcessorInternal that records suspend/resume calls and start/stop state.
 * suspend() completes its future after flagging the processor suspended; the Runnable
 * the future yields resumes it. Every transition is also published on _suspendState
 * so the test can block until it happens.
 */
private static class MockViewProcessor implements ViewProcessorInternal {
// Real compilation service so the test can observe function (re)initialisation ids.
private final CompiledFunctionService _compiledFunctionService;
// Queue of suspend (TRUE) / resume (FALSE) transitions, in the order they occurred.
private final LinkedBlockingQueue<Boolean> _suspendState = new LinkedBlockingQueue<Boolean>();
// Both guarded by this mock's monitor.
private boolean _running;
private boolean _suspended;
public MockViewProcessor() {
final InMemoryFunctionRepository functions = new InMemoryFunctionRepository();
_compiledFunctionService = new CompiledFunctionService(functions, new CachingFunctionRepositoryCompiler(), new FunctionCompilationContext());
// The mock function registers for reinitialisation on changes to UniqueId Test~Watched,
// which is the identifier the test later notifies.
functions.addFunction(new MockFunction("mock", new ComputationTarget("Foo")) {
@Override
public void init(final FunctionCompilationContext context) {
context.getFunctionReinitializer().reinitializeFunction(getFunctionDefinition(), UniqueId.of("Test", "Watched"));
}
});
}
// Uses ExecutorService.submit(Runnable, T): the submitted task marks the processor
// suspended, and the future's result is a Runnable that undoes the suspension.
@Override
public Future<Runnable> suspend(final ExecutorService executorService) {
return executorService.submit(new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertFalse(_suspended);
_suspended = true;
_suspendState.add(Boolean.TRUE);
}
}
}, (Runnable) new Runnable() {
@Override
public void run() {
synchronized (MockViewProcessor.this) {
assertTrue(_running);
assertTrue(_suspended);
_suspended = false;
_suspendState.add(Boolean.FALSE);
}
}
});
}
@Override
public synchronized boolean isRunning() {
return _running;
}
@Override
public synchronized void start() {
assertFalse(_running);
_running = true;
}
@Override
public synchronized void stop() {
assertTrue(_running);
_running = false;
}
// Blocks up to 'timeout' ms for the next suspend/resume transition; null on timeout.
public Boolean isSuspended(final long timeout) throws InterruptedException {
return _suspendState.poll(timeout, TimeUnit.MILLISECONDS);
}
// The remaining ViewProcessorInternal methods are not exercised by this test.
@Override
public UniqueId getUniqueId() {
return null;
}
@Override
public ViewDefinitionRepository getViewDefinitionRepository() {
return null;
}
@Override
public Collection<? extends ViewProcess> getViewProcesses() {
return null;
}
@Override
public ViewProcess getViewProcess(UniqueId viewProcessId) {
return null;
}
@Override
public Collection<ViewClient> getViewClients() {
return null;
}
@Override
public ViewClient createViewClient(UserPrincipal clientUser) {
return null;
}
@Override
public ViewClient getViewClient(UniqueId clientId) {
return null;
}
@Override
public CompiledFunctionService getFunctionCompilationService() {
return _compiledFunctionService;
}
@Override
public ViewProcessorEventListenerRegistry getViewProcessorEventListenerRegistry() {
return null;
}
@Override
public EngineResourceManager<ViewCycle> getViewCycleManager() {
return null;
}
}
//-------------------------------------------------------------------------
private static final class MockChangeManager implements ChangeManager {
private ChangeListener _listener;
@Override
public void addChangeListener(ChangeListener listener) {
assertNull(_listener);
_listener = listener;
}
@Override
public void removeChangeListener(ChangeListener listener) {
assertEquals(listener, _listener);
_listener = null;
}
public boolean hasListener() {
return _listener != null;
}
@Override
<<<<<<< MINE
public void entityChanged(ChangeType type, UniqueIdentifier beforeId, UniqueIdentifier afterId, Instant versionInstant) {
=======
public void masterChanged(MasterChangedType type, UniqueId beforeId, UniqueId afterId, Instant versionInstant) {
>>>>>>> YOURS
}
public void notifyListenerUnwatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Unwatched"), UniqueIdentifier.of("Test", "UnwatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Unwatched"), UniqueId.of("Test", "UnwatchedNew"), Instant.now()));
>>>>>>> YOURS
}
public void notifyListenerWatchedIdentifier() {
<<<<<<< MINE
_listener.entityChanged(new ChangeEvent(ChangeType.UPDATED, UniqueIdentifier.of("Test", "Watched"), UniqueIdentifier.of("Test", "WatchedNew"), Instant.now()));
=======
_listener.masterChanged(new MasterChanged(MasterChangedType.UPDATED, UniqueId.of("Test", "Watched"), UniqueId.of("Test", "WatchedNew"), Instant.now()));
>>>>>>> YOURS
}
}
//-------------------------------------------------------------------------
/** Minimal ChangeProvider backed by a MockChangeManager instance. */
private static class MockNotifyingMaster implements ChangeProvider {
  private final ChangeManager _manager = new MockChangeManager();

  @Override
  public ChangeManager changeManager() {
    return _manager;
  }
}
//-------------------------------------------------------------------------
/** VersionedSource stub that queues each version/correction it is given for later inspection. */
private static class MockVersionedSource implements VersionedSource {
  private final LinkedBlockingQueue<VersionCorrection> _received = new LinkedBlockingQueue<VersionCorrection>();

  @Override
  public void setVersionCorrection(VersionCorrection versionCorrection) {
    _received.add(versionCorrection);
  }

  /** Waits up to the standard test timeout for the next pushed value; null if none arrives. */
  public VersionCorrection getVersionCorrection() throws InterruptedException {
    final long waitMillis = Timeout.standardTimeoutMillis();
    return _received.poll(waitMillis, TimeUnit.MILLISECONDS);
  }
}
//-------------------------------------------------------------------------
// End-to-end check of ViewProcessorManager: start, react to a watched change
// (suspend -> bump version/correction -> reinitialise functions -> resume), stop.
@Test
public void testBasicOperation() throws InterruptedException {
final ViewProcessorManager vpm = new ViewProcessorManager();
final MockViewProcessor vp = new MockViewProcessor();
vpm.setViewProcessor(vp);
final MockNotifyingMaster master = new MockNotifyingMaster();
// NOTE(review): local is misspelt ("changeManger"); harmless here, worth renaming.
final MockChangeManager changeManger = (MockChangeManager) master.changeManager();
final MockVersionedSource source = new MockVersionedSource();
vpm.setMasterAndSource(master, source);
// Check normal startup
vpm.start();
assertTrue(changeManger.hasListener());
assertTrue(vpm.isRunning());
assertTrue(vp.isRunning());
Long initialId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
assertNotNull(initialId);
VersionCorrection initialVersion = source.getVersionCorrection();
// Notify it of a change to the master
// Sleep so the new version instant is strictly after the initial one.
Thread.sleep(10);
// An unwatched identifier must NOT trigger a suspend/resume cycle...
changeManger.notifyListenerUnwatchedIdentifier();
assertNull(vp.isSuspended(Timeout.standardTimeoutMillis()));
// ...but the watched identifier must.
changeManger.notifyListenerWatchedIdentifier();
assertEquals(Boolean.TRUE, vp.isSuspended(Timeout.standardTimeoutMillis()));
VersionCorrection newVersion = source.getVersionCorrection();
assertTrue(newVersion.getVersionAsOf().isAfter(initialVersion.getVersionAsOf()));
// Poll for the function init id to advance; reinitialisation appears to happen
// asynchronously after the resume, hence the repeated sampling.
Long newId = 0L;
for (int i = 0; i < 10; i++) {
Thread.sleep(Timeout.standardTimeoutMillis() / 10);
newId = vp.getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
}
assertTrue(newId > initialId);
assertEquals(Boolean.FALSE, vp.isSuspended(Timeout.standardTimeoutMillis()));
// Shutdown
vpm.stop();
assertFalse(vpm.isRunning());
assertFalse(vp.isRunning());
assertFalse(changeManger.hasListener());
}
}
Diff Result
No diff
Case 59 - java_ogplatform.rev_46b89_94950..BlackFunctionData.java
Base
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
 * Immutable holder for the market data needed by the Black option-pricing formula:
 * forward, discount factor and Black (lognormal) volatility.
 */
public class BlackFunctionData {
  // Forward price of the underlying.
  private final double _forward;
  // Discount factor to the payment date; constrained to (0, 1].
  private final double _discountFactor;
  // Black (lognormal) volatility.
  private final double _volatility;

  /**
   * @param f the forward
   * @param df the discount factor, must lie in (0, 1]
   * @param sigma the Black volatility
   * @throws IllegalArgumentException if the discount factor is outside (0, 1]
   */
  public BlackFunctionData(final double f, final double df, final double sigma) {
    Validate.isTrue(df <= 1 && df > 0, "discount factor must be <= 1 and > 0");
    _forward = f;
    _discountFactor = df;
    _volatility = sigma;
  }

  public double getForward() {
    return _forward;
  }

  public double getDiscountFactor() {
    return _discountFactor;
  }

  public double getBlackVolatility() {
    return _volatility;
  }

  /**
   * Builds a BlackFunctionData from an option data bundle and a vanilla option definition,
   * reading the discount factor and volatility at the option's expiry and strike.
   *
   * @param bundle the market data bundle, not null
   * @param definition the option definition, not null
   * @return the populated data object
   */
  public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
    Validate.notNull(bundle, "bundle");
    Validate.notNull(definition, "definition");
    final double expiry = definition.getTimeToExpiry(bundle.getDate());
    final double strike = definition.getStrike();
    return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(expiry), bundle.getVolatility(expiry, strike));
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    // Field order (df, f, sigma) preserved so hash values match the previous implementation.
    result = prime * result + hashBits(_discountFactor);
    result = prime * result + hashBits(_forward);
    result = prime * result + hashBits(_volatility);
    return result;
  }

  // Same folding of the raw double bits used by the original hashCode.
  private static int hashBits(final double value) {
    final long bits = Double.doubleToLongBits(value);
    return (int) (bits ^ (bits >>> 32));
  }

  @Override
  public boolean equals(final Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    final BlackFunctionData other = (BlackFunctionData) obj;
    // Bitwise comparison, consistent with hashCode (treats NaN == NaN, -0.0 != 0.0).
    return Double.doubleToLongBits(_discountFactor) == Double.doubleToLongBits(other._discountFactor)
        && Double.doubleToLongBits(_forward) == Double.doubleToLongBits(other._forward)
        && Double.doubleToLongBits(_volatility) == Double.doubleToLongBits(other._volatility);
  }
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
 * Immutable data bundle for the Black option-pricing formula: forward, discount
 * factor and Black (lognormal) volatility.
 */
public class BlackFunctionData {
// Forward price of the underlying.
private final double _f;
// Discount factor to the payment date; validated to lie in (0, 1].
private final double _df;
// Black (lognormal) volatility.
private final double _sigma;
/**
 * @param f the forward
 * @param df the discount factor, must lie in (0, 1]
 * @param sigma the Black volatility
 * @throws IllegalArgumentException if the discount factor is outside (0, 1]
 */
public BlackFunctionData(final double f, final double df, final double sigma) {
Validate.isTrue(df <= 1 && df > 0, "discount factor must be <= 1 and > 0");
_f = f;
_df = df;
_sigma = sigma;
}
public double getForward() {
return _f;
}
public double getDiscountFactor() {
return _df;
}
public double getBlackVolatility() {
return _sigma;
}
/**
 * Builds a BlackFunctionData from an option data bundle and a vanilla option definition,
 * reading the discount factor and volatility at the option's expiry and strike.
 *
 * @param bundle the market data bundle, not null
 * @param definition the option definition, not null
 * @return the populated data object
 */
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
// Standard bit-pattern hash over (df, f, sigma), consistent with equals below.
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_df);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_f);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_sigma);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
// Bitwise double comparison (treats NaN == NaN, -0.0 != 0.0), matching hashCode.
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_df) != Double.doubleToLongBits(other._df)) {
return false;
}
if (Double.doubleToLongBits(_f) != Double.doubleToLongBits(other._f)) {
return false;
}
return Double.doubleToLongBits(_sigma) == Double.doubleToLongBits(other._sigma);
}
}
Left
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getBlackVolatility() {
return _blackVolatility;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getBlackVolatility() {
return _blackVolatility;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
Right
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _f;
private final double _df;
private final double _sigma;
public BlackFunctionData(final double f, final double df, final double sigma) {
Validate.isTrue(df <= 1 && df > 0, "discount factor must be <= 1 and > 0");
_f = f;
_df = df;
_sigma = sigma;
}
public double getForward() {
return _f;
}
public double getDiscountFactor() {
return _df;
}
public double getSimga() {
return _sigma;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_df);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_f);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_sigma);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_df) != Double.doubleToLongBits(other._df)) {
return false;
}
if (Double.doubleToLongBits(_f) != Double.doubleToLongBits(other._f)) {
return false;
}
return Double.doubleToLongBits(_sigma) == Double.doubleToLongBits(other._sigma);
}
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _f;
private final double _df;
private final double _sigma;
public BlackFunctionData(final double f, final double df, final double sigma) {
Validate.isTrue(df <= 1 && df > 0, "discount factor must be <= 1 and > 0");
_f = f;
_df = df;
_sigma = sigma;
}
public double getForward() {
return _f;
}
public double getDiscountFactor() {
return _df;
}
public double getSimga() {
return _sigma;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_df);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_f);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_sigma);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_df) != Double.doubleToLongBits(other._df)) {
return false;
}
if (Double.doubleToLongBits(_f) != Double.doubleToLongBits(other._f)) {
return false;
}
return Double.doubleToLongBits(_sigma) == Double.doubleToLongBits(other._sigma);
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getSimga() {
return _blackVolatility;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getSimga() {
return _blackVolatility;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getBlackVolatility() {
return _blackVolatility;
}
public double getSimga() {
return _sigma;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
public double getBlackVolatility() {
return _blackVolatility;
}
public double getSimga() {
return _sigma;
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
Safe
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
<<<<<<< MINE
public double getSimga() {
return _sigma;
}
=======
public double getBlackVolatility() {
return _blackVolatility;
}
>>>>>>> YOURS
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
<<<<<<< MINE
public double getSimga() {
return _sigma;
}
=======
public double getBlackVolatility() {
return _blackVolatility;
}
>>>>>>> YOURS
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
Unstructured
/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
<<<<<<< MINE
public double getBlackVolatility() {
return _blackVolatility;
=======
public double getSimga() {
return _sigma;
>>>>>>> YOURS
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}/**
* Copyright (C) 2009 - 2011 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.option.pricing.analytic.formula;
import org.apache.commons.lang.Validate;
import com.opengamma.financial.model.option.definition.BlackOptionDataBundle;
import com.opengamma.financial.model.option.definition.EuropeanVanillaOptionDefinition;
/**
*
*/
public class BlackFunctionData {
private final double _forward;
private final double _discountFactor;
private final double _blackVolatility;
public BlackFunctionData(final double forward, final double discountFactor) {
this(forward, discountFactor, 0);
}
public BlackFunctionData(final double forward, final double discountFactor, final double blackVolatility) {
Validate.isTrue(discountFactor <= 1 && discountFactor > 0, "discount factor must be <= 1 and > 0");
_forward = forward;
_discountFactor = discountFactor;
_blackVolatility = blackVolatility;
}
public double getForward() {
return _forward;
}
public double getDiscountFactor() {
return _discountFactor;
}
<<<<<<< MINE
public double getBlackVolatility() {
return _blackVolatility;
=======
public double getSimga() {
return _sigma;
>>>>>>> YOURS
}
public static BlackFunctionData fromDataBundle(final BlackOptionDataBundle bundle, final EuropeanVanillaOptionDefinition definition) {
Validate.notNull(bundle, "bundle");
Validate.notNull(definition, "definition");
final double t = definition.getTimeToExpiry(bundle.getDate());
final double k = definition.getStrike();
return new BlackFunctionData(bundle.getForward(), bundle.getDiscountFactor(t), bundle.getVolatility(t, k));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(_discountFactor);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_forward);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_blackVolatility);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final BlackFunctionData other = (BlackFunctionData) obj;
if (Double.doubleToLongBits(_discountFactor) != Double.doubleToLongBits(other._discountFactor)) {
return false;
}
if (Double.doubleToLongBits(_forward) != Double.doubleToLongBits(other._forward)) {
return false;
}
return Double.doubleToLongBits(_blackVolatility) == Double.doubleToLongBits(other._blackVolatility);
}
}
Diff Result
No diff
Case 60 - java_ogplatform.rev_5a7e9_a0e32..ArgumentCheckerTest.java
Base
/**
* Copyright (C) 2010 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util;
import static org.junit.Assert.*;
import org.junit.Test;
public class ArgumentCheckerTest {
@Test(expected=NullPointerException.class)
public void test_checkNotNull_null() {
try {
ArgumentChecker.checkNotNull(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertFalse(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNull_nonNull() {
ArgumentChecker.checkNotNull("Kirk", "name");
}
//-------------------------------------------------------------------------
@Test(expected=NullPointerException.class)
public void test_checkNotNullInjected_null() {
try {
ArgumentChecker.checkNotNullInjected(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertTrue(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNullInjected_nonNull() {
ArgumentChecker.checkNotNullInjected("Kirk", "name");
}
}
/**
* Copyright (C) 2010 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util;
import static org.junit.Assert.*;
import org.junit.Test;
public class ArgumentCheckerTest {
@Test(expected=NullPointerException.class)
public void test_checkNotNull_null() {
try {
ArgumentChecker.checkNotNull(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertFalse(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNull_nonNull() {
ArgumentChecker.checkNotNull("Kirk", "name");
}
//-------------------------------------------------------------------------
@Test(expected=NullPointerException.class)
public void test_checkNotNullInjected_null() {
try {
ArgumentChecker.checkNotNullInjected(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertTrue(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNullInjected_nonNull() {
ArgumentChecker.checkNotNullInjected("Kirk", "name");
}
}
Left
/**
* Copyright (C) 2010 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util;
import static org.junit.Assert.*;
import org.junit.Test;
public class ArgumentCheckerTest {
@Test(expected=NullPointerException.class)
public void test_checkNotNull_null() {
try {
ArgumentChecker.notNull(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertFalse(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNull_nonNull() {
ArgumentChecker.notNull("Kirk", "name");
}
//-------------------------------------------------------------------------
@Test(expected=NullPointerException.class)
public void test_checkNotNullInjected_null() {
try {
ArgumentChecker.notNullInjected(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertTrue(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNullInjected_nonNull() {
ArgumentChecker.notNullInjected("Kirk", "name");
}
}
/**
* Copyright (C) 2010 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util;
import static org.junit.Assert.*;
import org.junit.Test;
public class ArgumentCheckerTest {
@Test(expected=NullPointerException.class)
public void test_checkNotNull_null() {
try {
ArgumentChecker.notNull(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertFalse(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNull_nonNull() {
ArgumentChecker.notNull("Kirk", "name");
}
//-------------------------------------------------------------------------
@Test(expected=NullPointerException.class)
public void test_checkNotNullInjected_null() {
try {
ArgumentChecker.notNullInjected(null, "name");
} catch (NullPointerException ex) {
assertTrue(ex.getMessage().contains("'name'"));
assertTrue(ex.getMessage().contains("Injected"));
throw ex;
}
}
@Test
public void test_checkNotNullInjected_nonNull() {
ArgumentChecker.notNullInjected("Kirk", "name");
}
}
Case 61 - java_ogplatform.rev_5a7e9_a0e32..DBTest.java
Base
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest {
private final String _databaseType;
private final DBTool _dbtool;
protected DBTest(String databaseType) {
ArgumentChecker.checkNotNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
returnValue.add(new Object[] { db });
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest {
private final String _databaseType;
private final DBTool _dbtool;
protected DBTest(String databaseType) {
ArgumentChecker.checkNotNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
returnValue.add(new Object[] { db });
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
}
Left
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}
Right
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest {
private final String _databaseType;
private final DBTool _dbtool;
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
returnValue.add(new Object[] { db });
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest {
private final String _databaseType;
private final DBTool _dbtool;
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
returnValue.add(new Object[] { db });
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
}
MergeMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
public abstract class DBTest implements TableCreationCallback {
private static Map<String, String> s_databaseTypeVersion = new HashMap<String, String>();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init() {
String prevVersion = s_databaseTypeVersion.get(getDatabaseType());
if ((prevVersion == null) || !prevVersion.equals(getDatabaseVersion())) {
s_databaseTypeVersion.put(getDatabaseType(), getDatabaseVersion());
_dbtool.setCreateVersion(getDatabaseVersion());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
// If you run from Eclipse, use Derby only
databaseType = "derby";
}
if (previousVersionCountString == null) {
// If you run from Eclipse, use current version only
previousVersionCount = 0;
} else {
previousVersionCount = Integer.parseInt(previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool(db);
final String[] versions = dbTool.getDatabaseCreatableVersions();
for (int i = 0; i < versions.length; i++) {
returnValue.add(new Object[] { db, versions[i] });
if (i >= previousVersionCount)
break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
// avoids locking issues with Derby
_dbtool.shutdown();
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion() {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded(final String version) {
// No action
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
public abstract class DBTest implements TableCreationCallback {
private static Map<String, String> s_databaseTypeVersion = new HashMap<String, String>();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init() {
String prevVersion = s_databaseTypeVersion.get(getDatabaseType());
if ((prevVersion == null) || !prevVersion.equals(getDatabaseVersion())) {
s_databaseTypeVersion.put(getDatabaseType(), getDatabaseVersion());
_dbtool.setCreateVersion(getDatabaseVersion());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
// If you run from Eclipse, use Derby only
databaseType = "derby";
}
if (previousVersionCountString == null) {
// If you run from Eclipse, use current version only
previousVersionCount = 0;
} else {
previousVersionCount = Integer.parseInt(previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool(db);
final String[] versions = dbTool.getDatabaseCreatableVersions();
for (int i = 0; i < versions.length; i++) {
returnValue.add(new Object[] { db, versions[i] });
if (i >= previousVersionCount)
break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
// avoids locking issues with Derby
_dbtool.shutdown();
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion() {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded(final String version) {
// No action
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
public abstract class DBTest implements TableCreationCallback {
private static Map<String, String> s_databaseTypeVersion = new HashMap<String, String>();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init() {
String prevVersion = s_databaseTypeVersion.get(getDatabaseType());
if ((prevVersion == null) || !prevVersion.equals(getDatabaseVersion())) {
s_databaseTypeVersion.put(getDatabaseType(), getDatabaseVersion());
_dbtool.setCreateVersion(getDatabaseVersion());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
// If you run from Eclipse, use Derby only
databaseType = "derby";
}
if (previousVersionCountString == null) {
// If you run from Eclipse, use current version only
previousVersionCount = 0;
} else {
previousVersionCount = Integer.parseInt(previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool(db);
final String[] versions = dbTool.getDatabaseCreatableVersions();
for (int i = 0; i < versions.length; i++) {
returnValue.add(new Object[] { db, versions[i] });
if (i >= previousVersionCount)
break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
// avoids locking issues with Derby
_dbtool.shutdown();
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion() {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded(final String version) {
// No action
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
public abstract class DBTest implements TableCreationCallback {
private static Map<String, String> s_databaseTypeVersion = new HashMap<String, String>();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init() {
String prevVersion = s_databaseTypeVersion.get(getDatabaseType());
if ((prevVersion == null) || !prevVersion.equals(getDatabaseVersion())) {
s_databaseTypeVersion.put(getDatabaseType(), getDatabaseVersion());
_dbtool.setCreateVersion(getDatabaseVersion());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
// If you run from Eclipse, use Derby only
databaseType = "derby";
}
if (previousVersionCountString == null) {
// If you run from Eclipse, use current version only
previousVersionCount = 0;
} else {
previousVersionCount = Integer.parseInt(previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool(db);
final String[] versions = dbTool.getDatabaseCreatableVersions();
for (int i = 0; i < versions.length; i++) {
returnValue.add(new Object[] { db, versions[i] });
if (i >= previousVersionCount)
break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
// avoids locking issues with Derby
_dbtool.shutdown();
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion() {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded(final String version) {
// No action
}
}
Safe
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
<<<<<<< MINE
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
=======
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
>>>>>>> YOURS
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
<<<<<<< MINE
protected DBTest(String databaseType) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
}
=======
protected DBTest(String databaseType, String databaseVersion) {
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
>>>>>>> YOURS
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}
Unstructured
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
<<<<<<< MINE
protected DBTest(String databaseType, String databaseVersion) {
=======
protected DBTest(String databaseType) {
>>>>>>> YOURS
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.util.test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.test.DBTool.TableCreationCallback;
/**
*
*
* @author pietari
*/
@RunWith(Parameterized.class)
abstract public class DBTest implements TableCreationCallback {
private static Map<String,String> s_databaseTypeVersion = new HashMap<String,String> ();
private final String _databaseType;
private final String _databaseVersion;
private final DBTool _dbtool;
<<<<<<< MINE
protected DBTest(String databaseType, String databaseVersion) {
=======
protected DBTest(String databaseType) {
>>>>>>> YOURS
ArgumentChecker.notNull(databaseType, "Database type");
_databaseType = databaseType;
_dbtool = TestProperties.getDbTool(databaseType);
_databaseVersion = databaseVersion;
}
/**
* Initialise the database to the required version. This tracks the last initialised version
* in a static map to avoid duplicate DB operations on bigger test classes. This might not be
* such a good idea.
*/
@Before
public void init () {
String prevVersion = s_databaseTypeVersion.get (getDatabaseType ());
if ((prevVersion == null) || !prevVersion.equals (getDatabaseVersion ())) {
s_databaseTypeVersion.put (getDatabaseType (), getDatabaseVersion ());
_dbtool.setCreateVersion (getDatabaseVersion ());
_dbtool.dropTestSchema();
_dbtool.createTestSchema();
_dbtool.createTestTables(this);
}
}
@Parameters
public static Collection<Object[]> getDatabaseTypes() {
String databaseType = System.getProperty("test.database.type");
String previousVersionCountString = System.getProperty("test.database.previousVersions");
int previousVersionCount;
if (databaseType == null) {
databaseType = "derby"; // If you run from Eclipse, use Derby only
}
if (previousVersionCountString == null) {
previousVersionCount = 0; // If you run from Eclipse, use current version only
} else {
previousVersionCount = Integer.parseInt (previousVersionCountString);
}
ArrayList<Object[]> returnValue = new ArrayList<Object[]>();
for (String db : TestProperties.getDatabaseTypes(databaseType)) {
final DBTool dbTool = TestProperties.getDbTool (db);
final String[] versions = dbTool.getDatabaseCreatableVersions ();
for (int i = 0; i < versions.length; i++) {
returnValue.add (new Object[] { db, versions[i] });
if (i >= previousVersionCount) break;
}
}
return returnValue;
}
@Before
public void setUp() throws Exception {
_dbtool.clearTestTables();
}
@After
public void tearDown() throws Exception {
_dbtool.shutdown(); // avoids locking issues with Derby
}
public DBTool getDbTool() {
return _dbtool;
}
public String getDatabaseType() {
return _databaseType;
}
public String getDatabaseVersion () {
return _databaseVersion;
}
/**
* Override this if you wish to do something with the database while it is in its "upgrading" state - e.g. populate with test data
* at a particular version to test the data transformations on the next version upgrades.
*/
public void tablesCreatedOrUpgraded (final String version) {
// No action
}
}
Diff Result
No diff
Case 62 - java_ogplatform.rev_73c69_b21a5..UniqueIdentifierTest.java
Case 63 - java_ogplatform.rev_7b3fb_6494c..EquityFuturesFunction.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.EquityFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - Maybe we should reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private EquityFutureConverter _financialToAnalyticConverter;
private final EquityFuturesPricer _pricer;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
_pricer = EquityFuturePricerFactory.getMethod(pricingMethodName); // TODO: THIS FACTORY IS HOW ONE TAKES PRICER OUT OF THE CLASS. SEE YCNS
}
@Override
public void init(final FunctionCompilationContext context) {
_financialToAnalyticConverter = new EquityFutureConverter();
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.CALCULATION_METHOD);
return properties;
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
protected SimpleFutureDataBundle getEquityFutureDataBundle(final EquityFutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = getSpot(security, inputs);
switch(getPricingMethodEnum()) {
case MARK_TO_MARKET:
Double marketPrice = getMarketPrice(security, inputs);
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, security.getUnderlyingId()).getTimeSeries().getLatestValue();
dividendYield /= 100.0;
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final EquityFutureSecurity security = (EquityFutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
final EquityFutureDefinition definition = _financialToAnalyticConverter.visitEquityFutureTrade(trade, lastMarginPrice);
final EquityFuture derivative = definition.toDerivative(valuationTime);
// Build the DataBundle it requires
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getEquityFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
* Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
* @return the required value computed and scaled by the number of contracts
*/
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final double value;
if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
value = _pricer.presentValue(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
value = _pricer.spotDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
value = _pricer.ratesDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
value = _pricer.pv01(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
value = _pricer.spotPrice(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
value = _pricer.forwardPrice(derivative, bundle);
} else {
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
return value;
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof EquityFutureSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
final EquityFutureSecurity security = (EquityFutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Spot
final ValueRequirement marketValueReq = getMarketValueRequirement(context, security);
if (marketValueReq == null) {
return null;
}
requirements.add(marketValueReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
// Spot underlying
requirements.add(getSpotAssetRequirement(security));
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
break;
case COST_OF_CARRY:
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, EquityFutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
protected YieldAndDiscountCurve getYieldCurve(final EquityFutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
}
@SuppressWarnings("unused")
private Double getDividendYield(EquityFutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
return req;
}
protected Double getSpot(EquityFutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, security.getUnderlyingId());
}
protected Double getCostOfCarry(EquityFutureSecurity security, FunctionInputs inputs) {
ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
if (costOfCarryObject == null) {
throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
}
return (Double) costOfCarryObject;
}
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
if (marketPriceObject == null) {
throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
}
return (Double) marketPriceObject;
}
private ValueRequirement getMarketValueRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
ExternalIdBundle idBundle = security.getExternalIdBundle();
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getExternalIdBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
s_logger.warn("Failed to find time series for: " + idBundle.toString());
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(security.getUnderlyingId()), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
* Gets the valueRequirementName.
* @return the valueRequirementName
*/
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
* Gets the financialToAnalyticConverter.
* @return the financialToAnalyticConverter
*/
protected final EquityFutureConverter getFinancialToAnalyticConverter() {
return _financialToAnalyticConverter;
}
/**
* Gets the pricingMethod.
* @return the pricingMethod
*/
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
* Gets the pricingMethodName.
* @return the pricingMethodName
*/
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.EquityFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
  // TODO: Refactor - this is a field name, like PX_LAST - Maybe we should reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
  private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
  // Name of the single value requirement this function instance produces (fixed at construction).
  private final String _valueRequirementName;
  private final EquityFuturesPricingMethod _pricingMethod;
  // Not final: assigned in init() once function initialisation runs.
  private EquityFutureConverter _financialToAnalyticConverter;
  private final EquityFuturesPricer _pricer;
  private final String _pricingMethodName;
  /**
   * @param valueRequirementName String describes the value requested
   * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
   */
  public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
    Validate.notNull(valueRequirementName, "value requirement name");
    Validate.notNull(pricingMethodName, "pricing method name");
    // Fail fast on unsupported requirement names.
    Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
        || valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
        || valueRequirementName.equals(ValueRequirementNames.PV01)
        || valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
        || valueRequirementName.equals(ValueRequirementNames.SPOT)
        || valueRequirementName.equals(ValueRequirementNames.FORWARD)
        || valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
        "EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
    _valueRequirementName = valueRequirementName;
    Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
        || pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
        || pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
        "OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
    _pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
    _pricingMethodName = pricingMethodName;
    _pricer = EquityFuturePricerFactory.getMethod(pricingMethodName); // TODO: THIS FACTORY IS HOW ONE TAKES PRICER OUT OF THE CLASS. SEE YCNS
  }
  @Override
  public void init(final FunctionCompilationContext context) {
    // Converter is built at initialisation time rather than construction.
    _financialToAnalyticConverter = new EquityFutureConverter();
  }
  @Override
  public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
    return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
  }
  /**
   * Builds result properties with the currency fixed and curve/config/method left as wildcards.
   * @param target the computation target (a trade)
   * @return a property builder advertising the result
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties.Builder properties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, ccy.getCode())
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .withAny(ValuePropertyNames.CALCULATION_METHOD);
    return properties;
  }
  /**
   * Builds fully-specified result properties for a computed value.
   * @param target the computation target (a trade)
   * @param fundingCurveName curve name to pin
   * @param curveConfigName curve calculation configuration to pin
   * @param pricingMethodName calculation method to pin
   * @return a property builder with all properties fixed
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
      final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
    final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties.Builder properties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, ccy.getCode())
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
        .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
    return properties;
  }
  /**
   * Assembles the market data bundle required by the configured pricing method.
   */
  protected SimpleFutureDataBundle getEquityFutureDataBundle(final EquityFutureSecurity security, final FunctionInputs inputs,
      final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
    Double spotUnderlyer = getSpot(security, inputs);
    switch(getPricingMethodEnum()) {
      case MARK_TO_MARKET:
        Double marketPrice = getMarketPrice(security, inputs);
        return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
      case COST_OF_CARRY:
        Double costOfCarry = getCostOfCarry(security, inputs);
        return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
      case DIVIDEND_YIELD:
        // NOTE(review): get(...) may return null if the dividend-yield series is absent -- TODO confirm upstream guarantees.
        Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, security.getUnderlyingId()).getTimeSeries().getLatestValue();
        // Divided by 100 -- presumably the series is quoted in percent; verify against the data source.
        dividendYield /= 100.0;
        final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
        final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
        return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
      default:
        throw new OpenGammaRuntimeException("Unhandled pricingMethod");
    }
  }
  @Override
  public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
    final Trade trade = target.getTrade();
    final EquityFutureSecurity security = (EquityFutureSecurity) trade.getSecurity();
    // Get reference price: the latest value of the future's own market-value time series.
    final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
    // NOTE(review): get(...) may return null if the series is missing -- TODO confirm.
    final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
    // Build the analytic's version of the security - the derivative
    final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
    final EquityFutureDefinition definition = _financialToAnalyticConverter.visitEquityFutureTrade(trade, lastMarginPrice);
    final EquityFuture derivative = definition.toDerivative(valuationTime);
    // Build the DataBundle it requires
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final SimpleFutureDataBundle dataBundle = getEquityFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
    // Call OG-Analytics
    final double value = getComputedValue(derivative, dataBundle, trade);
    final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
        createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
    return Collections.singleton(new ComputedValue(specification, value));
  }
  /**
   * Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
   * @return the required value computed and scaled by the number of contracts
   */
  private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
    final double value;
    if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
      value = _pricer.presentValue(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
      value = _pricer.spotDelta(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
      value = _pricer.ratesDelta(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
      value = _pricer.pv01(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
      value = _pricer.spotPrice(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
      value = _pricer.forwardPrice(derivative, bundle);
    } else {
      // Constructor whitelist should make this unreachable.
      throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
    }
    return value;
  }
  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.TRADE;
  }
  @Override
  public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
    if (target.getType() != ComputationTargetType.TRADE) {
      return false;
    }
    return target.getTrade().getSecurity() instanceof EquityFutureSecurity;
  }
  /**
   * Declares the inputs needed for the configured pricing method; returns null when a
   * required constraint or time series cannot be resolved (engine convention for "cannot apply").
   */
  @Override
  public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
    final EquityFutureSecurity security = (EquityFutureSecurity) target.getTrade().getSecurity();
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    // Spot
    final ValueRequirement marketValueReq = getMarketValueRequirement(context, security);
    if (marketValueReq == null) {
      return null;
    }
    requirements.add(marketValueReq);
    // Funding curve
    final String fundingCurveName = getFundingCurveName(desiredValue);
    if (fundingCurveName == null) {
      return null;
    }
    // Curve configuration
    final String curveConfigName = getCurveConfigName(desiredValue);
    if (curveConfigName == null) {
      return null;
    }
    // Spot underlying: required for every pricing method in this revision.
    requirements.add(getSpotAssetRequirement(security));
    switch (getPricingMethodEnum()) {
      case MARK_TO_MARKET:
        requirements.add(getMarketPriceRequirement(security));
        break;
      case COST_OF_CARRY:
        requirements.add(getCostOfCarryRequirement(security));
        break;
      case DIVIDEND_YIELD:
        final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
        if (curveReq == null) {
          return null;
        }
        requirements.add(curveReq);
        final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
        if (dividendYieldReq == null) {
          return null;
        }
        requirements.add(dividendYieldReq);
        break;
      default:
        throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
    }
    return requirements;
  }
  // Extracts the single CURVE constraint, or null (after logging) when absent or ambiguous.
  protected String getFundingCurveName(ValueRequirement desiredValue) {
    final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    if (fundingCurves == null || fundingCurves.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
      return null;
    }
    final String fundingCurveName = fundingCurves.iterator().next();
    return fundingCurveName;
  }
  // Extracts the single CURVE_CALCULATION_CONFIG constraint, or null when absent or ambiguous.
  protected String getCurveConfigName(ValueRequirement desiredValue) {
    final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveConfigNames == null || curveConfigNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      return null;
    }
    final String curveConfigName = curveConfigNames.iterator().next();
    return curveConfigName;
  }
  // Yield-curve requirement on the security's currency, pinned to the given curve name and config.
  protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, EquityFutureSecurity security) {
    ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
        .get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
  }
  protected YieldAndDiscountCurve getYieldCurve(final EquityFutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
    final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    return (YieldAndDiscountCurve) curveObject;
  }
  // Dividend yield is keyed by the underlying asset's external id.
  private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
    ExternalId id = security.getUnderlyingId();
    return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
  }
  @SuppressWarnings("unused")
  private Double getDividendYield(EquityFutureSecurity security, FunctionInputs inputs) {
    ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
    final Object dividendObject = inputs.getValue(dividendRequirement);
    if (dividendObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
    }
    return (Double) dividendObject;
  }
  // Live market value of the underlying (spot) asset.
  private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
    ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
    return req;
  }
  protected Double getSpot(EquityFutureSecurity security, FunctionInputs inputs) {
    ValueRequirement spotRequirement = getSpotAssetRequirement(security);
    final Object spotObject = inputs.getValue(spotRequirement);
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
    }
    return (Double) spotObject;
  }
  private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
    return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, security.getUnderlyingId());
  }
  protected Double getCostOfCarry(EquityFutureSecurity security, FunctionInputs inputs) {
    ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
    final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
    if (costOfCarryObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
    }
    return (Double) costOfCarryObject;
  }
  // The future's own quoted market value, keyed by the security's unique id.
  private ValueRequirement getMarketPriceRequirement(Security security) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
  }
  protected Double getMarketPrice(Security security, FunctionInputs inputs) {
    ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
    final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
    if (marketPriceObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
    }
    return (Double) marketPriceObject;
  }
  // Resolves the future's market-value time series into an HTS requirement spanning the last 7 days.
  private ValueRequirement getMarketValueRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    ExternalIdBundle idBundle = security.getExternalIdBundle();
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getExternalIdBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
      s_logger.warn("Failed to find time series for: " + idBundle.toString());
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
  }
  // Resolves the underlying's dividend-yield series into an HTS requirement, or null if unresolvable.
  private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(security.getUnderlyingId()), null, null, null, DIVIDEND_YIELD_FIELD, null);
    if (timeSeries == null) {
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
  }
  /**
   * Gets the valueRequirementName.
   * @return the valueRequirementName
   */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }
  /**
   * Gets the financialToAnalyticConverter.
   * @return the financialToAnalyticConverter
   */
  protected final EquityFutureConverter getFinancialToAnalyticConverter() {
    return _financialToAnalyticConverter;
  }
  /**
   * Gets the pricingMethod.
   * @return the pricingMethod
   */
  protected final EquityFuturesPricingMethod getPricingMethodEnum() {
    return _pricingMethod;
  }
  /**
   * Gets the pricingMethodName.
   * @return the pricingMethodName
   */
  protected final String getPricingMethodName() {
    return _pricingMethodName;
  }
  private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
Left (revised version of the same file follows)
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter(); // TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
 * @param valueRequirementName String describes the value requested
 * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
 */
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
  Validate.notNull(valueRequirementName, "value requirement name");
  Validate.notNull(pricingMethodName, "pricing method name");
  // Only a fixed whitelist of value requirements is supported; fail fast otherwise.
  Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
      || valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
      || valueRequirementName.equals(ValueRequirementNames.PV01)
      || valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
      || valueRequirementName.equals(ValueRequirementNames.SPOT)
      || valueRequirementName.equals(ValueRequirementNames.FORWARD)
      || valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
      "EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
  _valueRequirementName = valueRequirementName;
  // The pricing-method string must match one of the factory's known methods.
  Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
      || pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
      || pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
      "OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
  _pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
  _pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
  // Advertise the single configured value requirement on this trade target.
  final ValueSpecification spec =
      new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get());
  return Collections.singleton(spec);
}
/**
 * Result properties with the currency and calculation method fixed and curve choices left open.
 * @param target the computation target (a trade)
 * @return a property builder advertising the result
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currency.getCode())
      .with(ValuePropertyNames.CALCULATION_METHOD, getPricingMethodName())
      .withAny(ValuePropertyNames.CURVE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
}
/**
 * Result properties for a computed value, with every property pinned.
 * @param target the computation target (a trade)
 * @param fundingCurveName curve name to pin
 * @param curveConfigName curve calculation configuration to pin
 * @param pricingMethodName calculation method to pin
 * @return a fully-specified property builder
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
    final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currency.getCode())
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
      .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
}
/**
 * Assembles the market data bundle required by the configured pricing method.
 * @param security the future being priced
 * @param inputs resolved function inputs
 * @param timeSeriesBundle historical series inputs (dividend yield for DIVIDEND_YIELD method)
 * @param desiredValue carries the curve/config constraints for DIVIDEND_YIELD
 * @return the data bundle for the pricer
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
  Double spotUnderlyer = null;
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      final Double marketPrice = getMarketPrice(security, inputs);
      // FIX: compare String contents with equals(); '==' tests reference identity and only
      // succeeds when both sides happen to be the same interned constant.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        spotUnderlyer = getSpot(security, inputs);
      }
      return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
    case COST_OF_CARRY:
      spotUnderlyer = getSpot(security, inputs);
      final Double costOfCarry = getCostOfCarry(security, inputs);
      return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
    case DIVIDEND_YIELD:
      spotUnderlyer = getSpot(security, inputs);
      // Divided by 100 -- presumably the series is quoted in percent; verify against the data source.
      Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
      dividendYield /= 100.0;
      final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
      final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
      return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
    default:
      throw new OpenGammaRuntimeException("Unhandled pricingMethod");
  }
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
  final Trade trade = target.getTrade();
  final FutureSecurity security = (FutureSecurity) trade.getSecurity();
  // Get reference price: latest value of the future's own market-value time series.
  final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
  // NOTE(review): get(...) may return null if the series is missing -- TODO confirm upstream guarantees.
  final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
  // Build the analytic's version of the security - the derivative
  final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
  // final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
  // Convert via the generic simple-future converter, then re-wrap as an equity future definition.
  final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
  final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
      simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
  final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
  // Build the DataBundle it requires
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
  // Call OG-Analytics
  final double value = getComputedValue(derivative, dataBundle, trade);
  final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
      createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
  return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Dispatches to the OG-Analytics pricer matching the configured pricing method and
 * evaluates the value requirement fixed at construction time.
 * @return the required value computed and scaled by the number of contracts
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
  final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
  final String name = _valueRequirementName;
  if (name.equals(ValueRequirementNames.PRESENT_VALUE)) {
    return pricer.presentValue(derivative, bundle);
  }
  if (name.equals(ValueRequirementNames.VALUE_DELTA)) {
    return pricer.spotDelta(derivative, bundle);
  }
  if (name.equals(ValueRequirementNames.VALUE_RHO)) {
    return pricer.ratesDelta(derivative, bundle);
  }
  if (name.equals(ValueRequirementNames.PV01)) {
    return pricer.pv01(derivative, bundle);
  }
  if (name.equals(ValueRequirementNames.SPOT)) {
    return pricer.spotPrice(derivative, bundle);
  }
  if (name.equals(ValueRequirementNames.FORWARD)) {
    return pricer.forwardPrice(derivative, bundle);
  }
  // The constructor whitelist should make this unreachable.
  throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
@Override
public ComputationTargetType getTargetType() {
  // This function operates on individual trades.
  return ComputationTargetType.TRADE;
}

@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
  // Applies to trades whose security is any future (previously restricted to equity futures).
  return target.getType() == ComputationTargetType.TRADE
      && target.getTrade().getSecurity() instanceof FutureSecurity;
}
/**
 * Declares the market data and curve inputs needed for the configured pricing method.
 * Returns null when a required constraint or time series cannot be resolved
 * (engine convention for "this function cannot apply").
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
  final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
  final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  // Reference price (historical time series); bail out if it cannot be resolved.
  final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
  if (refPriceReq == null) {
    return null;
  }
  requirements.add(refPriceReq);
  // Funding curve name must be a single, unambiguous constraint.
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  // Curve configuration
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      requirements.add(getMarketPriceRequirement(security));
      // FIX: use equals() rather than '==' for String comparison; '==' compares references
      // and only works here by accident of constant interning.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        requirements.add(getSpotAssetRequirement(security));
      }
      break;
    case COST_OF_CARRY:
      requirements.add(getSpotAssetRequirement(security));
      requirements.add(getCostOfCarryRequirement(security));
      break;
    case DIVIDEND_YIELD:
      final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
      if (curveReq == null) {
        return null;
      }
      requirements.add(curveReq);
      final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
      if (dividendYieldReq == null) {
        return null;
      }
      requirements.add(dividendYieldReq);
      requirements.add(getSpotAssetRequirement(security));
      break;
    default:
      throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
  }
  return requirements;
}
/**
 * Extracts the single CURVE constraint from the desired value.
 * @return the curve name, or null (after logging) when the constraint is absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curves != null && curves.size() == 1) {
    return curves.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single CURVE_CALCULATION_CONFIG constraint from the desired value.
 * @return the config name, or null (after logging) when the constraint is absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> configs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configs != null && configs.size() == 1) {
    return configs.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Yield-curve requirement on the security's currency, pinned to the given curve name and config.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Fetches the resolved funding curve from the inputs.
 * @throws OpenGammaRuntimeException if the curve was not supplied
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
  final ValueRequirement requirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
  final Object curve = inputs.getValue(requirement);
  if (curve == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (YieldAndDiscountCurve) curve;
}
// Dividend yield is quoted on the future's spot (underlying) asset.
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
  return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, getSpotAssetId(security));
}

@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getDividendYieldRequirement(security);
  final Object yieldValue = inputs.getValue(requirement);
  if (yieldValue == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) yieldValue;
}

// Live market value of the underlying (spot) asset.
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, getSpotAssetId(security));
}
/**
 * Resolves the external id of the spot (underlying) asset for a future.
 * Only equity futures expose an underlying id in this revision.
 * @param sec the future security
 * @return the underlying asset's external id
 * @throws OpenGammaRuntimeException if the future type has no spot asset
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
  // FIX: explicit type check instead of catching the ClassCastException from a blind cast --
  // exceptions should not be used for ordinary control flow, and the broad catch(Exception)
  // also discarded the original cause.
  if (sec instanceof EquityFutureSecurity) {
    return ((EquityFutureSecurity) sec).getUnderlyingId();
  }
  throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
      + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
      + "Contact Quant if spot asset should be available for this future.");
}
/**
 * Fetches the spot price of the underlying asset from the inputs.
 * @throws OpenGammaRuntimeException if the value was not supplied
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getSpotAssetRequirement(security);
  final Object spot = inputs.getValue(requirement);
  if (spot == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) spot;
}
/** Builds the cost-of-carry requirement, keyed off the future's underlying asset. */
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
  final ExternalId underlyingId = getSpotAssetId(security);
  return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, underlyingId);
}
/** Resolves the cost-of-carry of the underlying from the function inputs. */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getCostOfCarryRequirement(security);
  final Object value = inputs.getValue(requirement);
  if (value == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) value;
}
/** Builds the market-price requirement against the future security itself (not its underlying). */
private ValueRequirement getMarketPriceRequirement(Security security) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE,
      ComputationTargetType.SECURITY, security.getUniqueId());
}
/** Resolves the future's own market price from the function inputs. */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
  final ValueRequirement requirement = getMarketPriceRequirement(security);
  final Object value = inputs.getValue(requirement);
  if (value == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) value;
}
/**
 * Builds the historical time-series requirement used to source the reference (last margin) price.
 * Requests the last week of MARKET_VALUE data up to and including the valuation time.
 *
 * @param context the compilation context providing the time-series resolver
 * @param security the future security
 * @return the HTS requirement, or null if no series can be resolved (caller treats null as "cannot apply")
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  // FIX: reuse the local bundle instead of calling security.getExternalIdBundle() twice.
  final ExternalIdBundle idBundle = security.getExternalIdBundle();
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
  if (timeSeries == null) {
    // FIX: SLF4J parameterized logging - avoids eager string concatenation when WARN is disabled.
    s_logger.warn("Failed to find time series for: {}", idBundle);
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds the historical dividend-yield time-series requirement for the future's underlying.
 * Requests the last week of data up to and including the valuation time.
 *
 * @return the HTS requirement, or null if no series can be resolved
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle underlying = ExternalIdBundle.of(getSpotAssetId(security));
  final HistoricalTimeSeriesResolutionResult resolution = resolver.resolve(underlying, null, null, null, DIVIDEND_YIELD_FIELD, null);
  if (resolution == null) {
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolution, DIVIDEND_YIELD_FIELD,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the single value requirement name this function instance produces (e.g. PRESENT_VALUE).
 * @return the valueRequirementName, never null
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * Gets the pricing model selected at construction (MARK_TO_MARKET, COST_OF_CARRY or DIVIDEND_YIELD).
 * @return the pricingMethod enum, never null
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the string form of the pricing method, used as the CALCULATION_METHOD property value.
 * @return the pricingMethodName, never null
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
// Shared logger for this function implementation.
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
  // TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
  private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
  // Converts the FutureSecurity into the analytics (simple-future) definition.
  private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter(); // TODO: Had been EquityFutureConverter();
  // The single value requirement name this function instance produces (e.g. PRESENT_VALUE).
  private final String _valueRequirementName;
  // Pricing model selected at construction.
  private final EquityFuturesPricingMethod _pricingMethod;
  // String form of the pricing method; published as the CALCULATION_METHOD property.
  private final String _pricingMethodName;

  /**
   * @param valueRequirementName String describes the value requested
   * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
   */
  public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
    Validate.notNull(valueRequirementName, "value requirement name");
    Validate.notNull(pricingMethodName, "pricing method name");
    Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
        || valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
        || valueRequirementName.equals(ValueRequirementNames.PV01)
        || valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
        || valueRequirementName.equals(ValueRequirementNames.SPOT)
        || valueRequirementName.equals(ValueRequirementNames.FORWARD)
        || valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
        "EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
    _valueRequirementName = valueRequirementName;
    Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
        || pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
        || pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
        "OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
    _pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
    _pricingMethodName = pricingMethodName;
  }

  @Override
  public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
    // One result per function instance: the configured requirement name with loose constraints.
    return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
  }

  /**
   * Builds the loosely-constrained properties advertised from getResults(); curve and config are
   * wildcards until a desired value pins them.
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final String calcMethod = getPricingMethodName();
    final ValueProperties.Builder properties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, ccy.getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    return properties;
  }

  /**
   * Builds the fully-specified properties attached to the computed result.
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
      final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
    final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    final ValueProperties.Builder properties = createValueProperties()
        .with(ValuePropertyNames.CURRENCY, ccy.getCode())
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
        .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
    return properties;
  }

  /**
   * Assembles the market-data bundle required by the configured pricing method.
   *
   * @param security the future being priced
   * @param inputs the resolved function inputs
   * @param timeSeriesBundle historical series resolved in getRequirements()
   * @param desiredValue the desired value whose constraints name the funding curve/config
   * @return the data bundle for OG-Analytics
   */
  protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
      final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
    Double spotUnderlyer = null;
    switch (getPricingMethodEnum()) {
      case MARK_TO_MARKET:
        Double marketPrice = getMarketPrice(security, inputs);
        // FIX: compare String values with equals(), not ==. The configured name is only
        // guaranteed equal to the constant, not the same interned instance.
        if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
          spotUnderlyer = getSpot(security, inputs);
        }
        return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
      case COST_OF_CARRY:
        spotUnderlyer = getSpot(security, inputs);
        Double costOfCarry = getCostOfCarry(security, inputs);
        return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
      case DIVIDEND_YIELD:
        spotUnderlyer = getSpot(security, inputs);
        // NOTE(review): assumes the dividend-yield series resolved in getRequirements() is present
        // in the bundle and non-empty - confirm; a missing series would NPE here.
        Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
        dividendYield /= 100.0; // series is quoted in percent
        final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
        final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
        return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
      default:
        throw new OpenGammaRuntimeException("Unhandled pricingMethod");
    }
  }

  @Override
  public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
    final Trade trade = target.getTrade();
    final FutureSecurity security = (FutureSecurity) trade.getSecurity();
    // Get reference price from the resolved market-value time series.
    final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
    final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
    // Build the analytic's version of the security - the derivative
    final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
    // final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
    final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
    final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
        simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
    final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
    // Build the DataBundle it requires
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
    // Call OG-Analytics, then attach fully-specified properties to the result.
    final double value = getComputedValue(derivative, dataBundle, trade);
    final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
        createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
    return Collections.singleton(new ComputedValue(specification, value));
  }

  /**
   * Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
   * @return the required value computed and scaled by the number of contracts
   */
  private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
    final double value;
    EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
    if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
      value = pricer.presentValue(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
      value = pricer.spotDelta(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
      value = pricer.ratesDelta(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
      value = pricer.pv01(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
      value = pricer.spotPrice(derivative, bundle);
    } else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
      value = pricer.forwardPrice(derivative, bundle);
    } else {
      throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
    }
    return value;
  }

  @Override
  public ComputationTargetType getTargetType() {
    // Prices trades (positions in a single future), not bare securities.
    return ComputationTargetType.TRADE;
  }

  @Override
  public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
    if (target.getType() != ComputationTargetType.TRADE) {
      return false;
    }
    return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
  }

  @Override
  public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
    final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    // Reference price (historical market-value time series)
    final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
    if (refPriceReq == null) {
      return null;
    }
    requirements.add(refPriceReq);
    // Funding curve
    final String fundingCurveName = getFundingCurveName(desiredValue);
    if (fundingCurveName == null) {
      return null;
    }
    // Curve configuration
    final String curveConfigName = getCurveConfigName(desiredValue);
    if (curveConfigName == null) {
      return null;
    }
    switch (getPricingMethodEnum()) {
      case MARK_TO_MARKET:
        requirements.add(getMarketPriceRequirement(security));
        // FIX: compare String values with equals(), not == (see getFutureDataBundle).
        if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
          requirements.add(getSpotAssetRequirement(security));
        }
        break;
      case COST_OF_CARRY:
        requirements.add(getSpotAssetRequirement(security));
        requirements.add(getCostOfCarryRequirement(security));
        break;
      case DIVIDEND_YIELD:
        final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
        if (curveReq == null) {
          return null;
        }
        requirements.add(curveReq);
        final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
        if (dividendYieldReq == null) {
          return null;
        }
        requirements.add(dividendYieldReq);
        requirements.add(getSpotAssetRequirement(security));
        break;
      default:
        throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
    }
    return requirements;
  }

  /**
   * Extracts the single CURVE constraint from the desired value.
   * @return the funding curve name, or null if absent/ambiguous (caller defers to defaults)
   */
  protected String getFundingCurveName(ValueRequirement desiredValue) {
    final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    if (fundingCurves == null || fundingCurves.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
      return null;
    }
    final String fundingCurveName = fundingCurves.iterator().next();
    return fundingCurveName;
  }

  /**
   * Extracts the single CURVE_CALCULATION_CONFIG constraint from the desired value.
   * @return the config name, or null if absent/ambiguous (caller defers to defaults)
   */
  protected String getCurveConfigName(ValueRequirement desiredValue) {
    final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveConfigNames == null || curveConfigNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      return null;
    }
    final String curveConfigName = curveConfigNames.iterator().next();
    return curveConfigName;
  }

  /** Builds the yield-curve requirement in the future's currency for the named curve and config. */
  protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
    ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
        .get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
  }

  /** Fetches the funding/discount curve previously requested in getRequirements(). */
  protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
    final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    return (YieldAndDiscountCurve) curveObject;
  }

  /** Builds the live market-data dividend-yield requirement, keyed off the underlying asset. */
  private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
    ExternalId id = getSpotAssetId(security);
    return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
  }

  /** Resolves the live dividend yield from inputs. Currently unused; kept for the live-data variant. */
  @SuppressWarnings("unused")
  private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
    ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
    final Object dividendObject = inputs.getValue(dividendRequirement);
    if (dividendObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
    }
    return (Double) dividendObject;
  }

  /** Builds the spot (market value) requirement for the future's underlying asset. */
  private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
    ExternalId spotAssetId = getSpotAssetId(security);
    ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
    return req;
  }

  /**
   * Looks up the identifier of the future's spot underlying asset.
   * Only EquityFutureSecurity exposes an underlying id; other future types fail here.
   * @throws OpenGammaRuntimeException if no spot asset can be resolved
   */
  protected ExternalId getSpotAssetId(FutureSecurity sec) {
    try {
      ExternalId spotAssetId = ((EquityFutureSecurity) sec).getUnderlyingId();
      return spotAssetId;
    } catch (final Exception e) {
      // FIX: preserve the original exception as the cause; previously `e` was discarded.
      throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
          + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
          + "Contact Quant if spot asset should be available for this future.", e);
    }
  }

  /** Resolves the underlying asset's spot market value from the function inputs. */
  protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
    ValueRequirement spotRequirement = getSpotAssetRequirement(security);
    final Object spotObject = inputs.getValue(spotRequirement);
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
    }
    return (Double) spotObject;
  }

  /** Builds the cost-of-carry requirement, keyed off the underlying asset. */
  private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
    return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
  }

  /** Resolves the cost-of-carry of the underlying from the function inputs. */
  protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
    ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
    final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
    if (costOfCarryObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
    }
    return (Double) costOfCarryObject;
  }

  /** Builds the market-price requirement against the future security itself. */
  private ValueRequirement getMarketPriceRequirement(Security security) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
  }

  /** Resolves the future's own market price from the function inputs. */
  protected Double getMarketPrice(Security security, FunctionInputs inputs) {
    ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
    final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
    if (marketPriceObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
    }
    return (Double) marketPriceObject;
  }

  /**
   * Builds the historical time-series requirement sourcing the reference (last margin) price:
   * the last week of MARKET_VALUE data up to and including the valuation time.
   * @return the HTS requirement, or null if no series can be resolved
   */
  private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    // FIX: reuse the local bundle instead of calling security.getExternalIdBundle() twice.
    final ExternalIdBundle idBundle = security.getExternalIdBundle();
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
      // FIX: SLF4J parameterized logging - no eager concatenation when WARN is disabled.
      s_logger.warn("Failed to find time series for: {}", idBundle);
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Builds the historical dividend-yield time-series requirement for the underlying.
   * @return the HTS requirement, or null if no series can be resolved
   */
  private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
    if (timeSeries == null) {
      return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Gets the valueRequirementName.
   * @return the valueRequirementName
   */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }

  /**
   * Gets the pricingMethod.
   * @return the pricingMethod
   */
  protected final EquityFuturesPricingMethod getPricingMethodEnum() {
    return _pricingMethod;
  }

  /**
   * Gets the pricingMethodName.
   * @return the pricingMethodName
   */
  protected final String getPricingMethodName() {
    return _pricingMethodName;
  }

  private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
Right
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.EquityFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - Maybe we should reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
// The single value requirement name this function instance produces (e.g. PRESENT_VALUE).
private final String _valueRequirementName;
// Pricing model selected at construction.
private final EquityFuturesPricingMethod _pricingMethod;
// Converts trades into analytics definitions; created in init(), not final.
private EquityFutureConverter _financialToAnalyticConverter;
// Pricer looked up once from the factory for the configured method.
private final EquityFuturesPricer _pricer;
// String form of the pricing method; published as the CALCULATION_METHOD property.
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
_pricer = EquityFuturePricerFactory.getMethod(pricingMethodName); // TODO: THIS FACTORY IS HOW ONE TAKES PRICER OUT OF THE CLASS. SEE YCNS
}
@Override
public void init(final FunctionCompilationContext context) {
// Converter is created at compile-time init rather than construction.
_financialToAnalyticConverter = new EquityFutureConverter();
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
  // Single result: the configured requirement name with loosely-constrained properties.
  final ValueSpecification spec =
      new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get());
  return Collections.singleton(spec);
}
/**
 * Builds the loosely-constrained properties advertised from getResults(); curve, config and
 * calculation method remain wildcards until a desired value pins them.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
  final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, ccy.getCode())
      .withAny(ValuePropertyNames.CURVE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
      .withAny(ValuePropertyNames.CALCULATION_METHOD);
}
/**
 * Builds the fully-specified properties attached to a computed result, with concrete
 * curve, curve-config and calculation-method values.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
    final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
  final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, ccy.getCode())
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
      .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
}
/**
 * Assembles the market-data bundle required by the configured pricing method.
 * The underlying's spot is fetched for every method in this variant.
 */
protected SimpleFutureDataBundle getEquityFutureDataBundle(final EquityFutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
  final Double spotUnderlyer = getSpot(security, inputs);
  final EquityFuturesPricingMethod method = getPricingMethodEnum();
  if (method == EquityFuturesPricingMethod.MARK_TO_MARKET) {
    final Double marketPrice = getMarketPrice(security, inputs);
    return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
  }
  if (method == EquityFuturesPricingMethod.COST_OF_CARRY) {
    final Double costOfCarry = getCostOfCarry(security, inputs);
    return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
  }
  if (method == EquityFuturesPricingMethod.DIVIDEND_YIELD) {
    Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, security.getUnderlyingId()).getTimeSeries().getLatestValue();
    dividendYield /= 100.0; // series is quoted in percent
    final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
    final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
    return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
  }
  throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
  final Trade trade = target.getTrade();
  final EquityFutureSecurity security = (EquityFutureSecurity) trade.getSecurity();
  // Reference (last margin) price comes from the resolved market-value time series.
  final HistoricalTimeSeriesBundle tsBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
  final Double lastMarginPrice = tsBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
  // Convert the trade into the analytics derivative as of the valuation instant.
  final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
  final EquityFutureDefinition definition = _financialToAnalyticConverter.visitEquityFutureTrade(trade, lastMarginPrice);
  final EquityFuture derivative = definition.toDerivative(valuationTime);
  // Assemble the market data the configured pricing method requires.
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  final SimpleFutureDataBundle dataBundle = getEquityFutureDataBundle(security, inputs, tsBundle, desiredValue);
  // Price via OG-Analytics and attach fully-specified properties.
  final double value = getComputedValue(derivative, dataBundle, trade);
  final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
      createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
  return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
 *
 * @param derivative the analytics representation of the equity future
 * @param bundle the market data bundle matching the configured pricing method
 * @param trade the trade being priced (not used directly here)
 * @return the required value computed and scaled by the number of contracts
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
  // NOTE(review): the constructor also accepts YIELD_CURVE_NODE_SENSITIVITIES, but no
  // branch below handles it, so that request reaches the throw at the bottom - confirm
  // whether a subclass is expected to override this.
  final double value;
  if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
    value = _pricer.presentValue(derivative, bundle);
  } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
    value = _pricer.spotDelta(derivative, bundle);
  } else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
    value = _pricer.ratesDelta(derivative, bundle);
  } else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
    value = _pricer.pv01(derivative, bundle);
  } else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
    value = _pricer.spotPrice(derivative, bundle);
  } else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
    value = _pricer.forwardPrice(derivative, bundle);
  } else {
    // Fixed the previously malformed message ("_valueRequirementName," + name + ", unexpected.").
    throw new OpenGammaRuntimeException("Unexpected _valueRequirementName: " + _valueRequirementName + ". Should have been recognized in the constructor.");
  }
  return value;
}
@Override
public ComputationTargetType getTargetType() {
// This function operates at the trade level, not the security level.
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
  // Use the simple name; EquityFutureSecurity is already imported at the top of the file.
  return target.getTrade().getSecurity() instanceof EquityFutureSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
// Returning null tells the engine this function cannot satisfy the desired value
// with the constraints supplied; the engine may then fall back to defaults.
final EquityFutureSecurity security = (EquityFutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Spot
final ValueRequirement marketValueReq = getMarketValueRequirement(context, security);
if (marketValueReq == null) {
return null;
}
requirements.add(marketValueReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
// Spot underlying
requirements.add(getSpotAssetRequirement(security));
// Method-specific inputs beyond the common spot/market-value requirements.
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
break;
case COST_OF_CARRY:
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// NOTE(review): getDiscountCurveRequirement never returns null as written, so this
// guard looks dead - confirm before removing.
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
  // Require exactly one CURVE constraint; anything else defers to defaults.
  final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curveNames != null && curveNames.size() == 1) {
    return curveNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
  // Require exactly one CURVE_CALCULATION_CONFIG constraint; otherwise defer to defaults.
  final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configNames != null && configNames.size() == 1) {
    return configNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, EquityFutureSecurity security) {
  // Discounting curve requirement in the future's currency; never returns null.
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
protected YieldAndDiscountCurve getYieldCurve(final EquityFutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
  // Look up the curve resolved for the matching discount-curve requirement.
  final ValueRequirement requirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (YieldAndDiscountCurve) resolved;
}
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
  // Live dividend yield, keyed on the underlying asset's identifier.
  return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
@SuppressWarnings("unused")
private Double getDividendYield(EquityFutureSecurity security, FunctionInputs inputs) {
// Currently unused: getEquityFutureDataBundle takes the dividend yield from the
// historical time series (DIVIDEND_YIELD_FIELD) rather than this live requirement.
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
  // Spot value of the underlying asset (not of the future contract itself).
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
protected Double getSpot(EquityFutureSecurity security, FunctionInputs inputs) {
  // Resolve the underlying's spot from the function inputs; fail loudly if absent.
  final ValueRequirement requirement = getSpotAssetRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
// Cost of carry is keyed off the underlying asset's identifier, not the future's.
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
protected Double getCostOfCarry(EquityFutureSecurity security, FunctionInputs inputs) {
  // Resolve the underlying's cost of carry from the function inputs; fail loudly if absent.
  final ValueRequirement requirement = getCostOfCarryRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
private ValueRequirement getMarketPriceRequirement(Security security) {
// Market price of the future contract itself, so target the security's unique id.
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
  // Resolve the future's own market price from the function inputs; fail loudly if absent.
  final ValueRequirement requirement = getMarketPriceRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
private ValueRequirement getMarketValueRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle idBundle = security.getExternalIdBundle();
  // Reuse the cached bundle for the resolve call too (it was previously fetched twice).
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
  if (timeSeries == null) {
    // Parameterized SLF4J logging instead of eager string concatenation.
    s_logger.warn("Failed to find time series for: {}", idBundle);
    return null;
  }
  // Require a point within the last week up to (and including) the valuation time.
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle idBundle = ExternalIdBundle.of(security.getUnderlyingId());
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, DIVIDEND_YIELD_FIELD, null);
  if (timeSeries == null) {
    // Log for parity with getMarketValueRequirement; this previously failed silently,
    // making missing dividend-yield series hard to diagnose.
    s_logger.warn("Failed to find dividend yield time series for: {}", idBundle);
    return null;
  }
  // Require a point within the last week up to (and including) the valuation time.
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
* Gets the valueRequirementName.
* @return the name of the value requirement this function instance was constructed to produce
*/
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
* Gets the financialToAnalyticConverter.
* @return the converter from financial securities to analytics definitions (set in init())
*/
protected final EquityFutureConverter getFinancialToAnalyticConverter() {
return _financialToAnalyticConverter;
}
/**
* Gets the pricingMethod.
* @return the pricing method enum (MARK_TO_MARKET, COST_OF_CARRY or DIVIDEND_YIELD)
*/
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
* Gets the pricingMethodName.
* @return the pricing method name as supplied to the constructor
*/
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.EquityFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// NOTE(review): this class body is a near-identical second copy of the
// EquityFuturesFunction that appears earlier in this concatenated source dump -
// confirm which revision is authoritative before changing either copy.
// TODO: Refactor - this is a field name, like PX_LAST - Maybe we should reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
// Name of the single value this function instance produces (fixed at construction).
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
// Not final: assigned in init(FunctionCompilationContext), not in the constructor.
private EquityFutureConverter _financialToAnalyticConverter;
private final EquityFuturesPricer _pricer;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// NOTE(review): YIELD_CURVE_NODE_SENSITIVITIES passes this validation, but
// getComputedValue below has no branch for it - confirm a subclass handles it.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
_pricer = EquityFuturePricerFactory.getMethod(pricingMethodName); // TODO: THIS FACTORY IS HOW ONE TAKES PRICER OUT OF THE CLASS. SEE YCNS
}
@Override
public void init(final FunctionCompilationContext context) {
// Converter construction is deferred to engine initialization.
_financialToAnalyticConverter = new EquityFutureConverter();
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
// Advertise the single value this instance can produce, with wildcard properties.
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
// Wildcard properties, used before curve/config/method constraints are known.
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.CALCULATION_METHOD);
return properties;
}
// Fully-specified properties, used when building the final result in execute().
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
// Assembles the market data bundle appropriate to the configured pricing method.
protected SimpleFutureDataBundle getEquityFutureDataBundle(final EquityFutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = getSpot(security, inputs);
switch(getPricingMethodEnum()) {
case MARK_TO_MARKET:
Double marketPrice = getMarketPrice(security, inputs);
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, security.getUnderlyingId()).getTimeSeries().getLatestValue();
// Series appears to be quoted in percent; converted to a fraction here - confirm field convention.
dividendYield /= 100.0;
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final EquityFutureSecurity security = (EquityFutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
final EquityFutureDefinition definition = _financialToAnalyticConverter.visitEquityFutureTrade(trade, lastMarginPrice);
final EquityFuture derivative = definition.toDerivative(valuationTime);
// Build the DataBundle it requires
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getEquityFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
* Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
* @return the required value computed and scaled by the number of contracts
*/
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final double value;
if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
value = _pricer.presentValue(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
value = _pricer.spotDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
value = _pricer.ratesDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
value = _pricer.pv01(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
value = _pricer.spotPrice(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
value = _pricer.forwardPrice(derivative, bundle);
} else {
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
return value;
}
@Override
public ComputationTargetType getTargetType() {
// This function operates at the trade level, not the security level.
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
}
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
// Returning null tells the engine this function cannot satisfy the desired value
// with the constraints supplied; the engine may then fall back to defaults.
final EquityFutureSecurity security = (EquityFutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Spot
final ValueRequirement marketValueReq = getMarketValueRequirement(context, security);
if (marketValueReq == null) {
return null;
}
requirements.add(marketValueReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
// Spot underlying
requirements.add(getSpotAssetRequirement(security));
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
break;
case COST_OF_CARRY:
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
// Discounting curve requirement in the future's currency; never returns null as written.
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, EquityFutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
protected YieldAndDiscountCurve getYieldCurve(final EquityFutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
// Live dividend yield, keyed on the underlying asset's identifier.
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
@SuppressWarnings("unused")
private Double getDividendYield(EquityFutureSecurity security, FunctionInputs inputs) {
// Currently unused: getEquityFutureDataBundle takes the dividend yield from the
// historical time series (DIVIDEND_YIELD_FIELD) rather than this live requirement.
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
// Spot value of the underlying asset (not of the future contract itself).
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
protected Double getSpot(EquityFutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
// Cost of carry is keyed off the underlying asset's identifier, not the future's.
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
protected Double getCostOfCarry(EquityFutureSecurity security, FunctionInputs inputs) {
ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
if (costOfCarryObject == null) {
throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
}
return (Double) costOfCarryObject;
}
// Market price of the future contract itself, so target the security's unique id.
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
if (marketPriceObject == null) {
throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
}
return (Double) marketPriceObject;
}
private ValueRequirement getMarketValueRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
ExternalIdBundle idBundle = security.getExternalIdBundle();
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getExternalIdBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
s_logger.warn("Failed to find time series for: " + idBundle.toString());
return null;
}
// Require a point within the last week up to (and including) the valuation time.
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final EquityFutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(security.getUnderlyingId()), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
// NOTE(review): returns null silently here, unlike getMarketValueRequirement which warns.
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
* Gets the valueRequirementName.
* @return the valueRequirementName
*/
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
* Gets the financialToAnalyticConverter.
* @return the financialToAnalyticConverter
*/
protected final EquityFutureConverter getFinancialToAnalyticConverter() {
return _financialToAnalyticConverter;
}
/**
* Gets the pricingMethod.
* @return the pricingMethod
*/
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
* Gets the pricingMethodName.
* @return the pricingMethodName
*/
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
// Name of the single value this function instance produces (see constructor for allowed names).
private final String _valueRequirementName;
// Pricing method as both enum (for switch dispatch) and its String form (for ValueProperties).
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
 * Creates the function for a single value requirement and pricing method.
 *
 * @param valueRequirementName String describes the value requested; must be one of
 *        PRESENT_VALUE, VALUE_RHO, PV01, VALUE_DELTA, SPOT, FORWARD or YIELD_CURVE_NODE_SENSITIVITIES
 * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
 * @throws IllegalArgumentException if either argument is not one of the supported names
 */
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
// valueOf relies on the factory's String constants matching the enum names exactly.
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
// Single result: the configured value requirement, with open (withAny) curve constraints.
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the advertised result properties: currency, calculation method, and wildcard
 * curve / curve-config so the engine can narrow them via the desired value's constraints.
 *
 * @param target the computation target (a trade)
 * @return the properties builder
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final String calcMethod = getPricingMethodName();
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return properties;
}
/**
 * Builds fully-resolved result properties once the curve name and curve configuration
 * are known (used when constructing the final ValueSpecification in execute()).
 *
 * @param target the computation target (a trade)
 * @param fundingCurveName the resolved funding curve name
 * @param curveConfigName the resolved curve calculation configuration name
 * @param pricingMethodName the pricing method String
 * @return the properties builder
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
/**
 * Builds the SimpleFutureDataBundle of market data required by the configured pricing method:
 * MARK_TO_MARKET needs the market price (plus spot when SPOT is requested), COST_OF_CARRY needs
 * spot and cost-of-carry, DIVIDEND_YIELD needs spot, the dividend-yield series and a funding curve.
 *
 * @param security the future security being priced
 * @param inputs the resolved function inputs
 * @param timeSeriesBundle historical time series inputs (dividend yield lives here)
 * @param desiredValue the requested value, carrying curve/curve-config constraints
 * @return the populated data bundle
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = null;
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
final Double marketPrice = getMarketPrice(security, inputs);
// Fixed: compare Strings with equals(), not == (identity only works by interning coincidence).
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
spotUnderlyer = getSpot(security, inputs);
}
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
spotUnderlyer = getSpot(security, inputs);
final Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
spotUnderlyer = getSpot(security, inputs);
// NOTE(review): assumes the bundle contains the dividend-yield series for the spot asset;
// a missing series would NPE here - confirm getRequirements() guarantees it upstream.
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
dividendYield /= 100.0; // series is quoted in percent
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price (last margin price) from the historical time-series inputs.
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
// Convert via the generic SimpleFutureDefinition, then rebuild as an EquityFutureDefinition.
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires; only a single desired value is expected per invocation.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Dispatches to the OG-Analytics pricer for the value requirement configured at construction.
 *
 * @param derivative the analytics-side equity future
 * @param bundle the market data bundle
 * @param trade the trade (currently unused by the pricers)
 * @return the required value
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
final String requirement = _valueRequirementName;
if (requirement.equals(ValueRequirementNames.PRESENT_VALUE)) {
return pricer.presentValue(derivative, bundle);
}
if (requirement.equals(ValueRequirementNames.VALUE_DELTA)) {
return pricer.spotDelta(derivative, bundle);
}
if (requirement.equals(ValueRequirementNames.VALUE_RHO)) {
return pricer.ratesDelta(derivative, bundle);
}
if (requirement.equals(ValueRequirementNames.PV01)) {
return pricer.pv01(derivative, bundle);
}
if (requirement.equals(ValueRequirementNames.SPOT)) {
return pricer.spotPrice(derivative, bundle);
}
if (requirement.equals(ValueRequirementNames.FORWARD)) {
return pricer.forwardPrice(derivative, bundle);
}
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
@Override
public ComputationTargetType getTargetType() {
// This function family is applied per trade (see canApplyTo / getRequirements).
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Builds the set of input requirements for the configured pricing method. Returns null
 * (per engine convention) when any required input cannot be determined, signalling that
 * this function cannot produce the desired value for this target.
 *
 * @param context the compilation context
 * @param target the computation target (a trade on a FutureSecurity)
 * @param desiredValue the requested value, carrying curve/curve-config constraints
 * @return the requirements, or null if unavailable
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Reference price (historical time series) - required by every pricing method.
final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
if (refPriceReq == null) {
return null;
}
requirements.add(refPriceReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
// Fixed: compare Strings with equals(), not == (identity only works by interning coincidence).
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
requirements.add(getSpotAssetRequirement(security));
}
break;
case COST_OF_CARRY:
requirements.add(getSpotAssetRequirement(security));
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
requirements.add(getSpotAssetRequirement(security));
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
/**
 * Extracts the funding curve name from the desired value's constraints.
 *
 * @param desiredValue the requested value
 * @return the curve name, or null if absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
return getSingleConstraintValue(desiredValue, ValuePropertyNames.CURVE);
}
/**
 * Extracts the curve calculation configuration name from the desired value's constraints.
 *
 * @param desiredValue the requested value
 * @return the configuration name, or null if absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
return getSingleConstraintValue(desiredValue, ValuePropertyNames.CURVE_CALCULATION_CONFIG);
}
// Returns the single value of the named constraint, or null (with an info log) when the
// constraint is missing or carries more than one value.
private String getSingleConstraintValue(ValueRequirement desiredValue, String constraintName) {
final Set<String> values = desiredValue.getConstraints().getValues(constraintName);
if (values == null || values.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", constraintName);
return null;
}
return values.iterator().next();
}
/**
 * Builds the yield-curve requirement on the security's currency for the given curve and config.
 *
 * @param fundingCurveName the funding curve name
 * @param curveCalculationConfigName the curve calculation configuration name
 * @param security the future security (supplies the currency)
 * @return the yield-curve requirement
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the curve is not present in the inputs
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
// Retained but currently unused: fetches a live dividend yield from the inputs
// (the active path reads dividend yield from the historical time-series bundle instead).
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
// Market-value requirement for the future's underlying spot asset (no explicit target type).
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
return req;
}
/**
 * Returns the external id of the future's underlying spot asset. Only equity futures
 * expose an underlying id, hence the explicit type test.
 *
 * @param sec the future security
 * @return the underlying id
 * @throws OpenGammaRuntimeException if the security type has no spot asset
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
// Fixed: test the type explicitly rather than catching the ClassCastException -
// exceptions are not control flow, and the broad catch also swallowed the cause.
if (sec instanceof EquityFutureSecurity) {
return ((EquityFutureSecurity) sec).getUnderlyingId();
}
throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
+ "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
+ "Contact Quant if spot asset should be available for this future.");
}
/**
 * Fetches the spot price of the underlying asset from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the spot value is not present
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
final ValueRequirement requirement = getSpotAssetRequirement(security);
final Object resolved = inputs.getValue(requirement);
if (resolved == null) {
throw new OpenGammaRuntimeException("Could not get " + requirement);
}
return (Double) resolved;
}
/**
 * Fetches the cost-of-carry value from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the cost-of-carry value is not present
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
final ValueRequirement requirement = getCostOfCarryRequirement(security);
final Object resolved = inputs.getValue(requirement);
if (resolved == null) {
throw new OpenGammaRuntimeException("Could not get " + requirement);
}
return (Double) resolved;
}
// Live dividend-yield requirement keyed on the security's underlying id.
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
// Overload bound when the static type is EquityFutureSecurity; uses an explicit PRIMITIVE target.
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
// Cost-of-carry requirement keyed on the security's underlying id.
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
// Market-value requirement on the future security itself (not its underlying).
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Fetches the market price of the future security from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the market value is not present
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
final ValueRequirement requirement = getMarketPriceRequirement(security);
final Object resolved = inputs.getValue(requirement);
if (resolved == null) {
throw new OpenGammaRuntimeException("Could not get " + requirement);
}
return (Double) resolved;
}
/**
 * Resolves the historical market-value time series used as the reference (margin) price,
 * looking back up to seven days from the valuation time.
 *
 * @param context the compilation context (supplies the time-series resolver)
 * @param security the future security
 * @return the HTS requirement, or null if no series can be resolved
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final ExternalIdBundle idBundle = security.getExternalIdBundle();
// Fixed: reuse the local idBundle instead of calling security.getExternalIdBundle() twice.
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
// Parameterized logging avoids the string concatenation when WARN is disabled.
s_logger.warn("Failed to find time series for: {}", idBundle);
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Resolves the historical dividend-yield time series for the spot asset, looking back
 * up to seven days from the valuation time.
 *
 * @param context the compilation context (supplies the time-series resolver)
 * @param security the future security
 * @return the HTS requirement, or null if no series can be resolved
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName.
 * @return the valueRequirementName
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * Gets the pricingMethod enum used to select behaviour in getRequirements/getFutureDataBundle.
 * @return the pricingMethod
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricingMethodName (String form, used for ValueProperties and the pricer factory).
 * @return the pricingMethodName
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final String calcMethod = getPricingMethodName();
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return properties;
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
/**
 * Builds the SimpleFutureDataBundle of market data required by the configured pricing method:
 * MARK_TO_MARKET needs the market price (plus spot when SPOT is requested), COST_OF_CARRY needs
 * spot and cost-of-carry, DIVIDEND_YIELD needs spot, the dividend-yield series and a funding curve.
 *
 * @param security the future security being priced
 * @param inputs the resolved function inputs
 * @param timeSeriesBundle historical time series inputs (dividend yield lives here)
 * @param desiredValue the requested value, carrying curve/curve-config constraints
 * @return the populated data bundle
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = null;
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
final Double marketPrice = getMarketPrice(security, inputs);
// Fixed: compare Strings with equals(), not == (identity only works by interning coincidence).
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
spotUnderlyer = getSpot(security, inputs);
}
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
spotUnderlyer = getSpot(security, inputs);
final Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
spotUnderlyer = getSpot(security, inputs);
// NOTE(review): assumes the bundle contains the dividend-yield series for the spot asset;
// a missing series would NPE here - confirm getRequirements() guarantees it upstream.
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
dividendYield /= 100.0; // series is quoted in percent
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
* Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
* @return the required value computed and scaled by the number of contracts
*/
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final double value;
EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
value = pricer.presentValue(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
value = pricer.spotDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
value = pricer.ratesDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
value = pricer.pv01(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
value = pricer.spotPrice(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
value = pricer.forwardPrice(derivative, bundle);
} else {
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
return value;
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Builds the set of input requirements for the configured pricing method. Returns null
 * (per engine convention) when any required input cannot be determined, signalling that
 * this function cannot produce the desired value for this target.
 *
 * @param context the compilation context
 * @param target the computation target (a trade on a FutureSecurity)
 * @param desiredValue the requested value, carrying curve/curve-config constraints
 * @return the requirements, or null if unavailable
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Reference price (historical time series) - required by every pricing method.
final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
if (refPriceReq == null) {
return null;
}
requirements.add(refPriceReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
// Fixed: compare Strings with equals(), not == (identity only works by interning coincidence).
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
requirements.add(getSpotAssetRequirement(security));
}
break;
case COST_OF_CARRY:
requirements.add(getSpotAssetRequirement(security));
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
requirements.add(getSpotAssetRequirement(security));
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
/**
 * Builds the yield-curve requirement for the security's currency, constrained by
 * the given curve name and curve calculation configuration.
 *
 * @param fundingCurveName the funding curve name
 * @param curveCalculationConfigName the curve calculation configuration name
 * @param security the future whose currency selects the curve target
 * @return a YIELD_CURVE requirement on the currency's unique id
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 *
 * @return the curve object cast to YieldAndDiscountCurve
 * @throws OpenGammaRuntimeException if the curve was not provided in the inputs
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
// Currently unused: dividend yield is sourced from the time-series bundle in
// getFutureDataBundle instead of a live market-data requirement.
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
/**
 * Builds the live market-value requirement for the future's underlying spot asset.
 * NOTE(review): uses the two-arg ValueRequirement constructor (no explicit target type),
 * unlike the EquityFutureSecurity overload below — confirm both resolve equivalently.
 */
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
return req;
}
/**
 * Looks up the identifier of the future's underlying spot asset.
 * <p>
 * Only {@code EquityFutureSecurity} exposes an underlying id; any other future type
 * cannot be priced with the COST_OF_CARRY or DIVIDEND_YIELD models.
 *
 * @param sec the future security, expected to be an {@code EquityFutureSecurity}
 * @return the external id of the underlying spot asset
 * @throws OpenGammaRuntimeException if the security is not an equity future
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
// Test the type explicitly rather than catching the ClassCastException:
// exceptions are not control flow, and the former broad catch (Exception)
// also swallowed unrelated failures (e.g. NPEs) without preserving the cause.
if (sec instanceof EquityFutureSecurity) {
return ((EquityFutureSecurity) sec).getUnderlyingId();
}
throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
+ "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
+ "Contact Quant if spot asset should be available for this future.");
}
/**
 * Fetches the spot price of the underlying asset from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the spot value was not provided
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
/**
 * Fetches the cost-of-carry value from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the cost-of-carry value was not provided
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
if (costOfCarryObject == null) {
throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
}
return (Double) costOfCarryObject;
}
// Live dividend-yield requirement keyed on the future's underlying id.
// NOTE(review): this revision calls security.getUnderlyingId() directly on FutureSecurity,
// whereas the sibling revision below routes through getSpotAssetId — confirm which is current.
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
// Spot-asset market value requirement for an equity future (explicit PRIMITIVE target).
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
// Cost-of-carry requirement keyed on the future's underlying id.
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
// Market price of the future contract itself (SECURITY target, not the underlying).
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Fetches the future's own market price from the function inputs.
 *
 * @throws OpenGammaRuntimeException if the market price was not provided
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
if (marketPriceObject == null) {
throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
}
return (Double) marketPriceObject;
}
/**
 * Builds the historical time-series requirement supplying the future's reference
 * (margin) price: the MARKET_VALUE series over the 7 days up to valuation time.
 *
 * @param context the compilation context providing the time-series resolver
 * @param security the future whose series is resolved by its external id bundle
 * @return the HTS requirement, or null if no matching series resolves
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final ExternalIdBundle idBundle = security.getExternalIdBundle();
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
// SLF4J parameterized logging: message is only built when WARN is enabled.
s_logger.warn("Failed to find time series for: {}", idBundle);
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds the historical time-series requirement for the underlying's estimated
 * dividend yield (field EQY_DVD_YLD_EST) over the 7 days up to valuation time.
 *
 * @return the HTS requirement, or null if no matching series resolves
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName this function instance was constructed to produce.
 * @return the valueRequirementName
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/*
 * NOTE(review): stale Javadoc — the "financialToAnalyticConverter" accessor it documented
 * no longer exists in this class; remove this comment or restore the getter.
 */
/**
 * Gets the pricingMethod.
 * @return the pricingMethod enum selected at construction
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricingMethodName.
 * @return the pricingMethodName string selected at construction
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/* MERGE NOTE: conflict-resolution directive "KeepBothMethods" left in the file — everything
 * below is a second concatenated revision of EquityFuturesFunction. Exactly one revision of
 * the class must be kept before this file can compile. */
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// Only the value requirements this function knows how to compute are accepted.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
// The pricing method string must match one of the factory's supported methods;
// it is stored both as the raw name and as its enum form.
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
// Single result: the configured value requirement with wildcard curve properties.
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the advertised result properties: currency and calculation method are fixed,
 * curve and curve-config are left open for the engine to constrain.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final String calcMethod = getPricingMethodName();
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return properties;
}
/**
 * Builds the concrete result properties once curve, curve-config and pricing method
 * have been resolved (used when producing the computed value in execute).
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
/**
 * Assembles the market-data bundle the pricer needs for the configured pricing method:
 * MARK_TO_MARKET needs the future's own price (plus spot when SPOT is requested),
 * COST_OF_CARRY needs spot and cost-of-carry, DIVIDEND_YIELD needs spot, the
 * dividend-yield series and the funding curve.
 *
 * @param security the future being priced
 * @param inputs resolved function inputs
 * @param timeSeriesBundle historical series (dividend yield) resolved for this trade
 * @param desiredValue carries the CURVE / CURVE_CALCULATION_CONFIG constraints
 * @return the populated data bundle
 * @throws OpenGammaRuntimeException for an unhandled pricing method
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = null;
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
final Double marketPrice = getMarketPrice(security, inputs);
// BUG FIX: compare by value, not reference. The configured requirement name is an
// arbitrary equal String instance, so '==' against the constant could be false and
// silently skip the spot lookup.
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
spotUnderlyer = getSpot(security, inputs);
}
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
spotUnderlyer = getSpot(security, inputs);
final Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
spotUnderlyer = getSpot(security, inputs);
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
// Series is quoted in percent; convert to a fraction — TODO confirm against the data source.
dividendYield /= 100.0;
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price: last margin price from the resolved MARKET_VALUE series.
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
// Bridge: the generic SimpleFutureDefinition from the converter is re-wrapped as an
// EquityFutureDefinition so the equity pricers can consume it.
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires; exactly one desired value is expected here.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
 * Dispatches on the configured value requirement name to the matching pricer method.
 * @return the required value computed and scaled by the number of contracts
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final double value;
EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
value = pricer.presentValue(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
value = pricer.spotDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
value = pricer.ratesDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
value = pricer.pv01(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
value = pricer.spotPrice(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
value = pricer.forwardPrice(derivative, bundle);
} else {
// Unreachable for names accepted by the constructor; defensive guard.
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
return value;
}
@Override
public ComputationTargetType getTargetType() {
// This function operates on trades (the trade supplies contract count and security).
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Declares the inputs needed for the configured pricing method: the reference-price
 * time series plus, per method, the market price, spot asset, cost of carry,
 * funding curve and/or dividend-yield series. Returns null (per engine convention)
 * when any mandatory constraint or series cannot be resolved.
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Reference price (margin) time series is always required.
final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
if (refPriceReq == null) {
return null;
}
requirements.add(refPriceReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
// BUG FIX: value comparison instead of '=='; the configured name may be an
// equal-but-distinct String instance, which made the reference check unreliable.
if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
requirements.add(getSpotAssetRequirement(security));
}
break;
case COST_OF_CARRY:
requirements.add(getSpotAssetRequirement(security));
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
requirements.add(getSpotAssetRequirement(security));
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
/**
 * Extracts the single funding curve name from the desired value's constraints.
 * @return the curve name, or null when absent/ambiguous so a default can be applied
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
/**
 * Extracts the single curve calculation configuration name from the constraints.
 * @return the configuration name, or null when absent/ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
/**
 * Builds the yield-curve requirement for the security's currency under the given
 * curve name and calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 * @throws OpenGammaRuntimeException if the curve was not provided
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
// Live dividend-yield requirement keyed on the underlying spot asset id
// (two-arg constructor; no explicit target type in this revision).
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
}
// Currently unused: dividend yield is taken from the time series in getFutureDataBundle.
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
// Market-value requirement for the underlying spot asset of any future type.
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
return req;
}
/**
 * Looks up the underlying spot asset id; only equity futures carry one.
 * NOTE(review): the broad catch around the cast also swallows unrelated exceptions
 * and drops the cause — prefer an instanceof test.
 * @throws OpenGammaRuntimeException if the security has no underlying spot asset
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
try {
ExternalId spotAssetId = ((EquityFutureSecurity) sec).getUnderlyingId();
return spotAssetId;
} catch (final Exception e) {
throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
+ "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
+ "Contact Quant if spot asset should be available for this future.");
}
}
/**
 * Fetches the underlying's spot price from the function inputs.
 * @throws OpenGammaRuntimeException if the spot value was not provided
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
// Cost-of-carry requirement keyed on the underlying spot asset id (two-arg constructor).
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
}
/**
 * Fetches the cost-of-carry value from the function inputs.
 * @throws OpenGammaRuntimeException if the value was not provided
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
if (costOfCarryObject == null) {
throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
}
return (Double) costOfCarryObject;
}
// Equity-specific overloads below use the explicit PRIMITIVE target type.
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
// Market price of the future contract itself (SECURITY target, not the underlying).
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Fetches the future's own market price from the function inputs.
 * @throws OpenGammaRuntimeException if the market price was not provided
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
if (marketPriceObject == null) {
throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
}
return (Double) marketPriceObject;
}
/**
 * Historical MARKET_VALUE series requirement for the future's reference (margin) price,
 * spanning the 7 days up to valuation time; null if no series resolves.
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
ExternalIdBundle idBundle = security.getExternalIdBundle();
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(security.getExternalIdBundle(), null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
if (timeSeries == null) {
s_logger.warn("Failed to find time series for: " + idBundle.toString());
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Historical dividend-yield (EQY_DVD_YLD_EST) series requirement for the underlying,
 * spanning the 7 days up to valuation time; null if no series resolves.
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName this function instance was constructed to produce.
 * @return the valueRequirementName
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/*
 * NOTE(review): stale Javadoc — the "financialToAnalyticConverter" accessor it documented
 * no longer exists in this class; remove this comment or restore the getter.
 */
/**
 * Gets the pricingMethod.
 * @return the pricingMethod enum selected at construction
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricingMethodName.
 * @return the pricingMethodName string selected at construction
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// Only the value requirements this function knows how to compute are accepted.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
// The pricing method string must match a factory-supported method; it is stored
// both as the raw name and as its enum form.
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the result properties advertised by getResults: currency and calculation method
 * are fixed, while CURVE and CURVE_CALCULATION_CONFIG are left open (withAny) so the
 * engine can constrain them later.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final String calcMethod = getPricingMethodName();
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return properties;
}
/**
 * Builds fully-resolved result properties once the curve, curve configuration and
 * pricing method are known; used by execute when emitting the computed value.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
/**
 * Assembles the market-data bundle the pricer needs; the contents differ per pricing method:
 * MARK_TO_MARKET uses the market price (plus spot when SPOT itself is requested),
 * COST_OF_CARRY uses spot and cost of carry, DIVIDEND_YIELD uses spot, the dividend yield
 * time series and the funding curve named in the desired value's constraints.
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
  Double spotUnderlyer = null;
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      Double marketPrice = getMarketPrice(security, inputs);
      // FIX: compare Strings with equals, not ==. Reference equality only works for
      // interned constants and silently fails for equal-but-distinct String instances.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        spotUnderlyer = getSpot(security, inputs);
      }
      return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
    case COST_OF_CARRY:
      spotUnderlyer = getSpot(security, inputs);
      Double costOfCarry = getCostOfCarry(security, inputs);
      return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
    case DIVIDEND_YIELD:
      spotUnderlyer = getSpot(security, inputs);
      Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
      dividendYield /= 100.0; // series is quoted in percent; convert to a decimal fraction
      final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
      final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
      return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
    default:
      throw new OpenGammaRuntimeException("Unhandled pricingMethod");
  }
}
/**
 * Computes the configured value for the trade's future: resolves the latest margin price
 * from the time-series inputs, converts the security into its analytic derivative, builds
 * the pricing-method-specific data bundle and delegates to OG-Analytics.
 */
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
// NOTE(review): only the first desired value is consulted for constraints; assumes any
// others carry identical CURVE/CURVE_CALCULATION_CONFIG constraints - confirm with engine usage.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Dispatches to the OG-Analytics pricer selected by the pricing method, returning the
 * figure for the value requirement chosen at construction.
 * NOTE(review): despite the earlier javadoc wording, no per-contract scaling is applied
 * here, and the {@code trade} argument is currently unused - confirm intent.
 *
 * @return the value produced by the pricer for the configured requirement
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
  final String requirement = getValueRequirementName();
  final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
  if (ValueRequirementNames.PRESENT_VALUE.equals(requirement)) {
    return pricer.presentValue(derivative, bundle);
  }
  if (ValueRequirementNames.VALUE_DELTA.equals(requirement)) {
    return pricer.spotDelta(derivative, bundle);
  }
  if (ValueRequirementNames.VALUE_RHO.equals(requirement)) {
    return pricer.ratesDelta(derivative, bundle);
  }
  if (ValueRequirementNames.PV01.equals(requirement)) {
    return pricer.pv01(derivative, bundle);
  }
  if (ValueRequirementNames.SPOT.equals(requirement)) {
    return pricer.spotPrice(derivative, bundle);
  }
  if (ValueRequirementNames.FORWARD.equals(requirement)) {
    return pricer.forwardPrice(derivative, bundle);
  }
  throw new OpenGammaRuntimeException("_valueRequirementName," + requirement + ", unexpected. Should have been recognized in the constructor.");
}
@Override
public ComputationTargetType getTargetType() {
// This function prices trades (not bare securities or positions).
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Declares the inputs needed to price the trade's future. The reference-price time
 * series is always required; the remaining inputs depend on the pricing method.
 * Returns null (per engine convention) when any mandatory input cannot be resolved.
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
  final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
  final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  // Reference price (historical MARKET_VALUE series) is needed by every pricing method.
  final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
  if (refPriceReq == null) {
    return null;
  }
  requirements.add(refPriceReq);
  // Funding curve
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  // Curve configuration
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      requirements.add(getMarketPriceRequirement(security));
      // FIX: compare Strings with equals, not ==; reference equality only works for
      // interned constants and silently fails otherwise.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        requirements.add(getSpotAssetRequirement(security));
      }
      break;
    case COST_OF_CARRY:
      requirements.add(getSpotAssetRequirement(security));
      requirements.add(getCostOfCarryRequirement(security));
      break;
    case DIVIDEND_YIELD:
      final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
      if (curveReq == null) {
        return null;
      }
      requirements.add(curveReq);
      final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
      if (dividendYieldReq == null) {
        return null;
      }
      requirements.add(dividendYieldReq);
      requirements.add(getSpotAssetRequirement(security));
      break;
    default:
      throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
  }
  return requirements;
}
/**
 * Extracts the single CURVE constraint from the desired value.
 * @return the curve name, or null (after logging at info) when the constraint is absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curveNames != null && curveNames.size() == 1) {
    return curveNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single CURVE_CALCULATION_CONFIG constraint from the desired value.
 * @return the config name, or null (after logging at info) when the constraint is absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configNames != null && configNames.size() == 1) {
    return configNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Builds the YIELD_CURVE requirement (keyed on the future's currency) for the named
 * funding curve and curve calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 * @throws OpenGammaRuntimeException if the curve was not provided by the engine
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
// Dividend-yield market-data requirement keyed on the spot underlyer's external id.
// Overload for the general FutureSecurity; see the EquityFutureSecurity overload below.
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
}
// Currently unreferenced (kept for subclasses/future use, hence @SuppressWarnings).
// Fetches the resolved dividend yield from the inputs, failing loudly if absent.
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
// Live MARKET_VALUE requirement for the future's spot underlyer (FutureSecurity overload).
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
return req;
}
/**
 * Returns the external id of the spot underlyer for futures that have one.
 * FIX: replaced a catch-all try/catch around an unchecked cast with an explicit
 * instanceof check - exceptions are no longer used for control flow and unrelated
 * failures can no longer be masked by the generic error message.
 *
 * @throws OpenGammaRuntimeException if the security is not an EquityFutureSecurity
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
  if (sec instanceof EquityFutureSecurity) {
    return ((EquityFutureSecurity) sec).getUnderlyingId();
  }
  throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
      + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
      + "Contact Quant if spot asset should be available for this future.");
}
/**
 * Fetches the resolved spot price of the underlyer from the inputs.
 * @throws OpenGammaRuntimeException if the value was not provided by the engine
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
// COST_OF_CARRY market-data requirement keyed on the spot underlyer (FutureSecurity overload).
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
}
/**
 * Fetches the resolved cost of carry from the inputs.
 * @throws OpenGammaRuntimeException if the value was not provided by the engine
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement costOfCarryRequirement = getCostOfCarryRequirement(security);
final Object costOfCarryObject = inputs.getValue(costOfCarryRequirement);
if (costOfCarryObject == null) {
throw new OpenGammaRuntimeException("Could not get " + costOfCarryRequirement);
}
return (Double) costOfCarryObject;
}
// EquityFutureSecurity overload: reads the underlying id directly and targets PRIMITIVE.
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
// EquityFutureSecurity overload: spot MARKET_VALUE requirement targeting PRIMITIVE.
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
// EquityFutureSecurity overload: COST_OF_CARRY requirement targeting PRIMITIVE.
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
// Live MARKET_VALUE requirement for the future itself (SECURITY target, by unique id).
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Fetches the resolved market price of the future from the inputs.
 * @throws OpenGammaRuntimeException if the value was not provided by the engine
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
ValueRequirement marketPriceRequirement = getMarketPriceRequirement(security);
final Object marketPriceObject = inputs.getValue(marketPriceRequirement);
if (marketPriceObject == null) {
throw new OpenGammaRuntimeException("Could not get " + marketPriceRequirement);
}
return (Double) marketPriceObject;
}
/**
 * Builds the historical time-series requirement used as the reference (last margin)
 * price: the security's MARKET_VALUE over the last 7 days up to valuation time.
 * FIX: the previously computed idBundle local was ignored and the bundle re-fetched
 * in the resolve call; the captured value is now used consistently.
 *
 * @return the HTS requirement, or null when no series can be resolved for the security
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle idBundle = security.getExternalIdBundle();
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
  if (timeSeries == null) {
    s_logger.warn("Failed to find time series for: " + idBundle.toString());
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds the historical dividend-yield time-series requirement for the spot underlyer
 * (last 7 days up to valuation time), or null if no series can be resolved.
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName.
 * @return the value requirement name chosen at construction, never null
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * Gets the pricingMethod.
 * FIX: removed a dangling javadoc block ("Gets the financialToAnalyticConverter")
 * that documented a member which no longer exists.
 *
 * @return the pricing method enum chosen at construction, never null
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
  return _pricingMethod;
}
/**
 * Gets the pricingMethodName.
 * @return the pricing method name chosen at construction, never null
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
* @param valueRequirementName String describes the value requested
* @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
*/
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final String calcMethod = getPricingMethodName();
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CALCULATION_METHOD, calcMethod)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return properties;
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
final Currency ccy = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
final ValueProperties.Builder properties = createValueProperties()
.with(ValuePropertyNames.CURRENCY, ccy.getCode())
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
return properties;
}
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
Double spotUnderlyer = null;
switch(getPricingMethodEnum()) {
case MARK_TO_MARKET:
Double marketPrice = getMarketPrice(security, inputs);
if (getValueRequirementName() == ValueRequirementNames.SPOT) {
spotUnderlyer = getSpot(security, inputs);
}
return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
case COST_OF_CARRY:
spotUnderlyer = getSpot(security, inputs);
Double costOfCarry = getCostOfCarry(security, inputs);
return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
case DIVIDEND_YIELD:
spotUnderlyer = getSpot(security, inputs);
Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
dividendYield /= 100.0;
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
default:
throw new OpenGammaRuntimeException("Unhandled pricingMethod");
}
}
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
* Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
* @return the required value computed and scaled by the number of contracts
*/
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
final double value;
EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
value = pricer.presentValue(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
value = pricer.spotDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
value = pricer.ratesDelta(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
value = pricer.pv01(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
value = pricer.spotPrice(derivative, bundle);
} else if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
value = pricer.forwardPrice(derivative, bundle);
} else {
throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
return value;
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
// Spot
final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
if (refPriceReq == null) {
return null;
}
requirements.add(refPriceReq);
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
switch (getPricingMethodEnum()) {
case MARK_TO_MARKET:
requirements.add(getMarketPriceRequirement(security));
if (getValueRequirementName() == ValueRequirementNames.SPOT) {
requirements.add(getSpotAssetRequirement(security));
}
break;
case COST_OF_CARRY:
requirements.add(getSpotAssetRequirement(security));
requirements.add(getCostOfCarryRequirement(security));
break;
case DIVIDEND_YIELD:
final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
if (curveReq == null) {
return null;
}
requirements.add(curveReq);
final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
if (dividendYieldReq == null) {
return null;
}
requirements.add(dividendYieldReq);
requirements.add(getSpotAssetRequirement(security));
break;
default:
throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
}
return requirements;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), properties);
}
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
final ValueRequirement curveRequirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
return (YieldAndDiscountCurve) curveObject;
}
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement dividendRequirement = getDividendYieldRequirement(security);
final Object dividendObject = inputs.getValue(dividendRequirement);
if (dividendObject == null) {
throw new OpenGammaRuntimeException("Could not get " + dividendRequirement);
}
return (Double) dividendObject;
}
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
return req;
}
/**
 * Returns the external id of the spot underlyer for futures that have one.
 * FIX: replaced a catch-all try/catch around an unchecked cast with an explicit
 * instanceof check - exceptions are no longer used for control flow and unrelated
 * failures can no longer be masked by the generic error message.
 *
 * @throws OpenGammaRuntimeException if the security is not an EquityFutureSecurity
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
  if (sec instanceof EquityFutureSecurity) {
    return ((EquityFutureSecurity) sec).getUnderlyingId();
  }
  throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
      + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
      + "Contact Quant if spot asset should be available for this future.");
}
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
ValueRequirement spotRequirement = getSpotAssetRequirement(security);
final Object spotObject = inputs.getValue(spotRequirement);
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get " + spotRequirement);
}
return (Double) spotObject;
}
/**
 * Resolves the COST_OF_CARRY market data input for the future's spot asset.
 *
 * @throws OpenGammaRuntimeException if the value is not available
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
    final ValueRequirement requirement = getCostOfCarryRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
=======
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
}
>>>>>>> YOURS
<<<<<<< MINE
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
=======
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
}
>>>>>>> YOURS
// Market value of the future contract itself, keyed on the security's unique id.
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Resolves the market price of the future contract itself from the inputs.
 *
 * @throws OpenGammaRuntimeException if the market value is not available
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
    final ValueRequirement requirement = getMarketPriceRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
/**
 * Resolves a historical time-series requirement for the future's own market value
 * (used as the reference/margin price). Returns null when no series can be found.
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final ExternalIdBundle idBundle = security.getExternalIdBundle();
    // FIX: reuse the bundle computed above instead of fetching it a second time.
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
        // Parameterized logging avoids the string concatenation on the hot path.
        s_logger.warn("Failed to find time series for: {}", idBundle);
        return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Resolves a historical time-series requirement for the estimated dividend yield
 * of the future's spot asset; returns null when no series can be resolved.
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final ExternalIdBundle spotBundle = ExternalIdBundle.of(getSpotAssetId(security));
    final HistoricalTimeSeriesResolutionResult resolution = resolver.resolve(spotBundle, null, null, null, DIVIDEND_YIELD_FIELD, null);
    if (resolution == null) {
        return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolution, DIVIDEND_YIELD_FIELD,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the value requirement name supplied at construction.
 * @return the valueRequirementName
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * NOTE(review): stale Javadoc — it documented a "financialToAnalyticConverter"
 * getter that no longer exists in this class; safe to delete.
 */
/**
 * Gets the pricing method as its enum value.
 * @return the pricingMethod
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricing method name supplied at construction.
 * @return the pricingMethodName
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
 * This function produces all value requirements that an EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts.
 */
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter();
// TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
 * Creates the function for a single value requirement / pricing method pair.
 *
 * @param valueRequirementName the value to produce; one of PRESENT_VALUE, VALUE_RHO,
 *        PV01, VALUE_DELTA, SPOT, FORWARD or YIELD_CURVE_NODE_SENSITIVITIES
 * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
 * @throws IllegalArgumentException if either argument is null or not in the supported set
 */
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// NOTE(review): YIELD_CURVE_NODE_SENSITIVITIES is accepted below but not mentioned
// in the failure message — confirm whether the message should list it.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
// valueOf is safe here: the Validate above restricts the name to valid enum constants.
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
// Advertises a single result: _valueRequirementName with the open (constraint-free) properties.
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Result properties advertised before constraints are known: currency and
 * calculation method are fixed, curve and curve-config are left open.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, getPricingMethodName())
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
}
/**
 * Result properties once the curve name, curve configuration and pricing
 * method have been resolved from the desired value's constraints.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
    final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
        .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
}
/**
 * Assembles the market data bundle required by the configured pricing method.
 *
 * @param security the future being priced
 * @param inputs resolved function inputs
 * @param timeSeriesBundle historical series (dividend yield for DIVIDEND_YIELD method)
 * @param desiredValue carries the CURVE / CURVE_CALCULATION_CONFIG constraints
 * @return the data bundle for the OG-Analytics pricer
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
    Double spotUnderlyer = null;
    switch (getPricingMethodEnum()) {
        case MARK_TO_MARKET:
            Double marketPrice = getMarketPrice(security, inputs);
            // FIX: compare strings with equals(), not == (reference identity only
            // happens to work for interned literals and is not guaranteed here).
            if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
                spotUnderlyer = getSpot(security, inputs);
            }
            return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
        case COST_OF_CARRY:
            spotUnderlyer = getSpot(security, inputs);
            Double costOfCarry = getCostOfCarry(security, inputs);
            return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
        case DIVIDEND_YIELD:
            spotUnderlyer = getSpot(security, inputs);
            // Series value is a percentage; convert to a fraction for the analytics library.
            Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
            dividendYield /= 100.0;
            final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
            final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
            YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
            return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
        default:
            throw new OpenGammaRuntimeException("Unhandled pricingMethod");
    }
}
@Override
// Prices the trade's future: (1) read last margin price from the time series,
// (2) convert the security into an analytics derivative, (3) build the data
// bundle for the configured pricing method, (4) compute and wrap the result.
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
// NOTE(review): assumes desiredValues is non-empty and all entries share the same
// constraints — only the first is consulted; confirm against the engine contract.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Given _valueRequirement and _pricingMethod supplied, this calls to OG-Analytics.
 *
 * @param derivative the analytics-side future
 * @param bundle market data required by the pricing method
 * @param trade the trade being priced (not read directly here)
 * @return the required value computed and scaled by the number of contracts
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
    final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
    final String name = _valueRequirementName;
    if (name.equals(ValueRequirementNames.PRESENT_VALUE)) {
        return pricer.presentValue(derivative, bundle);
    }
    if (name.equals(ValueRequirementNames.VALUE_DELTA)) {
        return pricer.spotDelta(derivative, bundle);
    }
    if (name.equals(ValueRequirementNames.VALUE_RHO)) {
        return pricer.ratesDelta(derivative, bundle);
    }
    if (name.equals(ValueRequirementNames.PV01)) {
        return pricer.pv01(derivative, bundle);
    }
    if (name.equals(ValueRequirementNames.SPOT)) {
        return pricer.spotPrice(derivative, bundle);
    }
    if (name.equals(ValueRequirementNames.FORWARD)) {
        return pricer.forwardPrice(derivative, bundle);
    }
    throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
@Override
// This function operates on trades (see canApplyTo / execute, which read target.getTrade()).
public ComputationTargetType getTargetType() {
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
@Override
// Declares the inputs needed for the configured pricing method: the reference
// price series always, plus method-specific market data and curves. Returns
// null (engine convention for "cannot satisfy") when a constraint is missing.
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
    final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
    final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    // Reference price time series
    final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
    if (refPriceReq == null) {
        return null;
    }
    requirements.add(refPriceReq);
    // Funding curve
    final String fundingCurveName = getFundingCurveName(desiredValue);
    if (fundingCurveName == null) {
        return null;
    }
    // Curve configuration
    final String curveConfigName = getCurveConfigName(desiredValue);
    if (curveConfigName == null) {
        return null;
    }
    switch (getPricingMethodEnum()) {
        case MARK_TO_MARKET:
            requirements.add(getMarketPriceRequirement(security));
            // FIX: compare strings with equals(), not == (reference identity is not
            // guaranteed for these constants at runtime).
            if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
                requirements.add(getSpotAssetRequirement(security));
            }
            break;
        case COST_OF_CARRY:
            requirements.add(getSpotAssetRequirement(security));
            requirements.add(getCostOfCarryRequirement(security));
            break;
        case DIVIDEND_YIELD:
            final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
            if (curveReq == null) {
                return null;
            }
            requirements.add(curveReq);
            final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
            if (dividendYieldReq == null) {
                return null;
            }
            requirements.add(dividendYieldReq);
            requirements.add(getSpotAssetRequirement(security));
            break;
        default:
            throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
    }
    return requirements;
}
/**
 * Extracts the single CURVE constraint from the desired value.
 *
 * @return the curve name, or null when the constraint is absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
    final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    if (curveNames != null && curveNames.size() == 1) {
        return curveNames.iterator().next();
    }
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
    return null;
}
/**
 * Extracts the single CURVE_CALCULATION_CONFIG constraint from the desired value.
 *
 * @return the configuration name, or null when absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
    final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (configNames != null && configNames.size() == 1) {
        return configNames.iterator().next();
    }
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    return null;
}
/** YIELD_CURVE requirement keyed on the future's currency, constrained to the given curve/config. */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
    final ValueProperties curveProperties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
        .get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Looks up the funding curve that was requested via {@link #getDiscountCurveRequirement}.
 *
 * @throws OpenGammaRuntimeException if the curve is missing from the inputs
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
    final ValueRequirement requirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (YieldAndDiscountCurve) resolved;
}
/**
 * Resolves the DIVIDEND_YIELD market data input for the security's spot asset.
 * Currently unused by the execute path (the yield is read from a time series instead).
 */
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
    final ValueRequirement requirement = getDividendYieldRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
/** Market value requirement for the future's spot (underlying) asset. */
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, getSpotAssetId(security));
}
/**
 * Returns the external id of the spot (underlying) asset of the future.
 * Only equity futures expose an underlying id; any other future type fails here.
 *
 * @param sec the future security
 * @return the underlying asset's external id
 * @throws OpenGammaRuntimeException if the security is not an EquityFutureSecurity
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
    try {
        return ((EquityFutureSecurity) sec).getUnderlyingId();
    } catch (final Exception e) {
        // FIX: preserve the original cause (previously dropped), so the underlying
        // ClassCastException is visible in logs and stack traces.
        throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
            + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
            + "Contact Quant if spot asset should be available for this future.", e);
    }
}
/**
 * Resolves the spot price of the future's underlying asset from the inputs.
 *
 * @throws OpenGammaRuntimeException if the market value is not available
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
    final ValueRequirement requirement = getSpotAssetRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
/**
 * Resolves the COST_OF_CARRY market data input for the future's spot asset.
 *
 * @throws OpenGammaRuntimeException if the value is not available
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
    final ValueRequirement requirement = getCostOfCarryRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
}
=======
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
}
>>>>>>> YOURS
<<<<<<< MINE
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
return req;
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
}
=======
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
}
>>>>>>> YOURS
// Market value of the future contract itself, keyed on the security's unique id.
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Resolves the market price of the future contract itself from the inputs.
 *
 * @throws OpenGammaRuntimeException if the market value is not available
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
    final ValueRequirement requirement = getMarketPriceRequirement(security);
    final Object resolved = inputs.getValue(requirement);
    if (resolved == null) {
        throw new OpenGammaRuntimeException("Could not get " + requirement);
    }
    return (Double) resolved;
}
/**
 * Resolves a historical time-series requirement for the future's own market value
 * (used as the reference/margin price). Returns null when no series can be found.
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final ExternalIdBundle idBundle = security.getExternalIdBundle();
    // FIX: reuse the bundle computed above instead of fetching it a second time.
    final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
    if (timeSeries == null) {
        // Parameterized logging avoids the string concatenation on the hot path.
        s_logger.warn("Failed to find time series for: {}", idBundle);
        return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Resolves a historical time-series requirement for the estimated dividend yield
 * of the future's spot asset; returns null when no series can be resolved.
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
    final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
    final ExternalIdBundle spotBundle = ExternalIdBundle.of(getSpotAssetId(security));
    final HistoricalTimeSeriesResolutionResult resolution = resolver.resolve(spotBundle, null, null, null, DIVIDEND_YIELD_FIELD, null);
    if (resolution == null) {
        return null;
    }
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolution, DIVIDEND_YIELD_FIELD,
        DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the value requirement name supplied at construction.
 * @return the valueRequirementName
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * NOTE(review): stale Javadoc — it documented a "financialToAnalyticConverter"
 * getter that no longer exists in this class; safe to delete.
 */
/**
 * Gets the pricing method as its enum value.
 * @return the pricingMethod
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricing method name supplied at construction.
 * @return the pricingMethodName
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
 * This function produces all value requirements that an EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts.
 */
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter(); // TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
 * Creates the function for a single value requirement / pricing method pair.
 *
 * @param valueRequirementName the value to produce; one of PRESENT_VALUE, VALUE_RHO,
 *        PV01, VALUE_DELTA, SPOT, FORWARD or YIELD_CURVE_NODE_SENSITIVITIES
 * @param pricingMethodName String corresponding to enum EquityFuturesPricingMethod {MARK_TO_MARKET or COST_OF_CARRY, DIVIDEND_YIELD}
 * @throws IllegalArgumentException if either argument is null or not in the supported set
 */
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// NOTE(review): YIELD_CURVE_NODE_SENSITIVITIES is accepted below but not mentioned
// in the failure message — confirm whether the message should list it.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
// valueOf is safe here: the Validate above restricts the name to valid enum constants.
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
// Advertises a single result: _valueRequirementName with the open (constraint-free) properties.
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Result properties advertised before constraints are known: currency and
 * calculation method are fixed, curve and curve-config are left open.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.CALCULATION_METHOD, getPricingMethodName())
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
}
/**
 * Result properties once the curve name, curve configuration and pricing
 * method have been resolved from the desired value's constraints.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
    final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currency.getCode())
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
        .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
}
/**
 * Builds the market-data bundle required by the pricer for the configured method:
 * MARK_TO_MARKET uses the market price (plus underlying spot when SPOT is requested);
 * COST_OF_CARRY uses underlying spot and cost of carry; DIVIDEND_YIELD uses underlying
 * spot, the latest dividend yield (scaled from percent) and the funding curve.
 * @param security the future being priced
 * @param inputs resolved function inputs
 * @param timeSeriesBundle historical series inputs (dividend yield series lives here)
 * @param desiredValue carries the curve name/config constraints for DIVIDEND_YIELD
 * @return the populated data bundle
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
  Double spotUnderlyer = null;
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      final Double marketPrice = getMarketPrice(security, inputs);
      // FIX: compare requirement names with equals(), not reference identity (==),
      // which only worked when callers passed the interned constant.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        spotUnderlyer = getSpot(security, inputs);
      }
      return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
    case COST_OF_CARRY:
      spotUnderlyer = getSpot(security, inputs);
      final Double costOfCarry = getCostOfCarry(security, inputs);
      return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
    case DIVIDEND_YIELD:
      spotUnderlyer = getSpot(security, inputs);
      // Latest estimated dividend yield, quoted in percent; scale to a fraction.
      Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
      dividendYield /= 100.0;
      final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
      final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
      return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
    default:
      throw new OpenGammaRuntimeException("Unhandled pricingMethod");
  }
}
/**
 * Prices the trade's future: fetches the latest margin (reference) price from the
 * historical series, converts the security to an OG-Analytics derivative, builds the
 * pricing data bundle for the first desired value, and returns the single computed result.
 */
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
// Rebuild an EquityFutureDefinition from the generic SimpleFutureDefinition fields.
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
// NOTE(review): only the first desired value is honoured - confirm single-valued invocation.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Dispatches to the OG-Analytics pricer for the configured pricing method and computes
 * the measure named by {@code _valueRequirementName}.
 * @return the requested measure for the given derivative and data bundle
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
  final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
  if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
    return pricer.presentValue(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
    return pricer.spotDelta(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
    return pricer.ratesDelta(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
    return pricer.pv01(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
    return pricer.spotPrice(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
    return pricer.forwardPrice(derivative, bundle);
  }
  throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
@Override
public ComputationTargetType getTargetType() {
// This function operates on trades; the security filter lives in canApplyTo.
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Assembles the inputs needed to price the future under the configured pricing method.
 * Returns null (per engine convention) when any required constraint or time series
 * cannot be resolved, so the graph builder can fall back to defaults.
 * @param context compilation context, used to resolve time series
 * @param target the trade target; its security must be a FutureSecurity
 * @param desiredValue carries the CURVE and CURVE_CALCULATION_CONFIG constraints
 * @return the set of input requirements, or null if they cannot be satisfied
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
  final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
  final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  // Reference-price time series, consumed by execute().
  final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
  if (refPriceReq == null) {
    return null;
  }
  requirements.add(refPriceReq);
  // Funding curve name and curve configuration must each be uniquely constrained.
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      requirements.add(getMarketPriceRequirement(security));
      // FIX: compare requirement names with equals(), not reference identity (==).
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        requirements.add(getSpotAssetRequirement(security));
      }
      break;
    case COST_OF_CARRY:
      requirements.add(getSpotAssetRequirement(security));
      requirements.add(getCostOfCarryRequirement(security));
      break;
    case DIVIDEND_YIELD:
      final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
      if (curveReq == null) {
        return null;
      }
      requirements.add(curveReq);
      final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
      if (dividendYieldReq == null) {
        return null;
      }
      requirements.add(dividendYieldReq);
      requirements.add(getSpotAssetRequirement(security));
      break;
    default:
      throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
  }
  return requirements;
}
/**
 * Extracts the single funding-curve name constrained on the desired value.
 * @param desiredValue the requested value carrying constraints
 * @return the curve name, or null when the constraint is absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curveNames != null && curveNames.size() == 1) {
    return curveNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single curve-calculation-config name constrained on the desired value.
 * @param desiredValue the requested value carrying constraints
 * @return the configuration name, or null when the constraint is absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configNames != null && configNames.size() == 1) {
    return configNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Requirement for the funding yield curve in the security's currency, constrained by
 * curve name and calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 * @throws OpenGammaRuntimeException if the curve input was not provided
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
  final ValueRequirement requirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (YieldAndDiscountCurve) resolved;
}
<<<<<<< MINE
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
=======
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
>>>>>>> YOURS
}
/**
 * Looks up the resolved dividend yield for the future's underlying asset.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getDividendYieldRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
=======
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
>>>>>>> YOURS
return req;
}
/**
 * Returns the identifier of the future's underlying spot asset. Only equity futures
 * carry an underlying id; other future types cannot be priced with the COST_OF_CARRY
 * or DIVIDEND_YIELD models.
 * FIX: replaced catch-all exception control flow (which discarded the cause and could
 * mask unrelated failures such as NPEs) with an explicit type check.
 * @throws OpenGammaRuntimeException if the security has no spot underlier
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
  if (sec instanceof EquityFutureSecurity) {
    return ((EquityFutureSecurity) sec).getUnderlyingId();
  }
  throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
      + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
      + "Contact Quant if spot asset should be available for this future.");
}
/**
 * Looks up the resolved spot price of the future's underlying asset.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getSpotAssetRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
=======
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
>>>>>>> YOURS
}
/**
 * Looks up the resolved cost of carry for the future's underlying asset.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getCostOfCarryRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
// Live market value of the future itself, keyed by the security's unique id.
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Looks up the resolved market price of the future itself.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
  final ValueRequirement requirement = getMarketPriceRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
/**
 * Requests the last week's market-value series for the future, used as its reference
 * (margin) price. Returns null (engine convention) when no series can be resolved.
 * FIX: the local idBundle was computed but then ignored in the resolve(...) call,
 * which re-fetched the bundle; use the local consistently.
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle idBundle = security.getExternalIdBundle();
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
  if (timeSeries == null) {
    s_logger.warn("Failed to find time series for: " + idBundle.toString());
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
// Requests the last week's dividend-yield series for the future's underlying spot asset.
// Returns null (engine convention) when no series resolves; note getSpotAssetId throws
// for futures without a spot underlier.
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(ExternalIdBundle.of(getSpotAssetId(security)), null, null, null, DIVIDEND_YIELD_FIELD, null);
if (timeSeries == null) {
return null;
}
return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, DIVIDEND_YIELD_FIELD,
DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName.
 * @return the name of the value requirement this function was constructed to produce
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * Gets the pricingMethod.
 * @return the pricing method as an {@link EquityFuturesPricingMethod} constant
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricingMethodName.
 * @return the pricing method name as originally supplied to the constructor
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.futures;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.time.calendar.Period;
import javax.time.calendar.ZonedDateTime;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.future.definition.EquityFutureDefinition;
import com.opengamma.analytics.financial.equity.future.derivative.EquityFuture;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturePricerFactory;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricer;
import com.opengamma.analytics.financial.equity.future.pricing.EquityFuturesPricingMethod;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.financial.simpleinstruments.definition.SimpleFutureDefinition;
import com.opengamma.analytics.financial.simpleinstruments.pricing.SimpleFutureDataBundle;
import com.opengamma.core.position.Trade;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.conversion.SimpleFutureConverter;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesBundle;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.future.EquityFutureSecurity;
import com.opengamma.financial.security.future.FutureSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.util.money.Currency;
/**
* This function will produce all valueRequirements that the EquityFutureSecurity offers. A trade may produce additional generic ones, e.g. date and number of contracts..
*/
public class EquityFuturesFunction extends AbstractFunction.NonCompiledInvoker {
// TODO: Refactor - this is a field name, like PX_LAST - We can't reference BloombergConstants.BBG_FIELD_DIVIDEND_YIELD here
private static final String DIVIDEND_YIELD_FIELD = "EQY_DVD_YLD_EST";
private static final SimpleFutureConverter CONVERTER = new SimpleFutureConverter(); // TODO: Had been EquityFutureConverter();
private final String _valueRequirementName;
private final EquityFuturesPricingMethod _pricingMethod;
private final String _pricingMethodName;
/**
 * Creates the function, failing fast on unsupported inputs.
 * @param valueRequirementName the value to produce; must be one of the names validated below
 * @param pricingMethodName name of an {@code EquityFuturesPricingMethod} constant:
 *        MARK_TO_MARKET, COST_OF_CARRY or DIVIDEND_YIELD
 */
public EquityFuturesFunction(final String valueRequirementName, final String pricingMethodName) {
Validate.notNull(valueRequirementName, "value requirement name");
Validate.notNull(pricingMethodName, "pricing method name");
// Whitelist of producible values; anything else is a configuration error.
// NOTE(review): the message below omits YIELD_CURVE_NODE_SENSITIVITIES although the
// check accepts it - confirm whether that value is really supported downstream.
Validate.isTrue(valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)
|| valueRequirementName.equals(ValueRequirementNames.PV01)
|| valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)
|| valueRequirementName.equals(ValueRequirementNames.SPOT)
|| valueRequirementName.equals(ValueRequirementNames.FORWARD)
|| valueRequirementName.equals(ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES),
"EquityFuturesFunction provides the following values PRESENT_VALUE, VALUE_DELTA, VALUE_RHO, PV01, FORWARD, and SPOT. Please choose one.");
_valueRequirementName = valueRequirementName;
// The factory names match the enum constant names, so valueOf below is safe once validated.
Validate.isTrue(pricingMethodName.equals(EquityFuturePricerFactory.MARK_TO_MARKET)
|| pricingMethodName.equals(EquityFuturePricerFactory.COST_OF_CARRY)
|| pricingMethodName.equals(EquityFuturePricerFactory.DIVIDEND_YIELD),
"OG-Analytics provides the following pricing methods for EquityFutureSecurity: MARK_TO_MARKET, DIVIDEND_YIELD and COST_OF_CARRY. Please choose one.");
_pricingMethod = EquityFuturesPricingMethod.valueOf(pricingMethodName);
_pricingMethodName = pricingMethodName;
}
@Override
public Set<ValueSpecification> getResults(FunctionCompilationContext context, ComputationTarget target) {
// Advertises a single result named _valueRequirementName on this trade, with curve and
// curve-calculation-config left open until constraints are resolved.
return Collections.singleton(new ValueSpecification(_valueRequirementName, target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the generic result properties advertised before constraints are known:
 * currency and calculation method are fixed; curve and curve-calculation-config stay open.
 * @param target the trade-level computation target
 * @return a builder carrying the generic result properties
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currency.getCode())
      .with(ValuePropertyNames.CALCULATION_METHOD, getPricingMethodName())
      .withAny(ValuePropertyNames.CURVE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
}
/**
 * Builds fully-specified result properties once the curve name and curve configuration
 * have been resolved from the desired value's constraints.
 * @param target the trade-level computation target
 * @param fundingCurveName resolved funding curve name
 * @param curveConfigName resolved curve calculation configuration name
 * @param pricingMethodName the pricing method label to advertise
 * @return a builder carrying the concrete result properties
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target,
    final String fundingCurveName, final String curveConfigName, final String pricingMethodName) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getTrade().getSecurity());
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currency.getCode())
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
      .with(ValuePropertyNames.CALCULATION_METHOD, pricingMethodName);
}
/**
 * Builds the market-data bundle required by the pricer for the configured method:
 * MARK_TO_MARKET uses the market price (plus underlying spot when SPOT is requested);
 * COST_OF_CARRY uses underlying spot and cost of carry; DIVIDEND_YIELD uses underlying
 * spot, the latest dividend yield (scaled from percent) and the funding curve.
 * @param security the future being priced
 * @param inputs resolved function inputs
 * @param timeSeriesBundle historical series inputs (dividend yield series lives here)
 * @param desiredValue carries the curve name/config constraints for DIVIDEND_YIELD
 * @return the populated data bundle
 */
protected SimpleFutureDataBundle getFutureDataBundle(final FutureSecurity security, final FunctionInputs inputs,
    final HistoricalTimeSeriesBundle timeSeriesBundle, final ValueRequirement desiredValue) {
  Double spotUnderlyer = null;
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      final Double marketPrice = getMarketPrice(security, inputs);
      // FIX: compare requirement names with equals(), not reference identity (==),
      // which only worked when callers passed the interned constant.
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        spotUnderlyer = getSpot(security, inputs);
      }
      return new SimpleFutureDataBundle(null, marketPrice, spotUnderlyer, null, null);
    case COST_OF_CARRY:
      spotUnderlyer = getSpot(security, inputs);
      final Double costOfCarry = getCostOfCarry(security, inputs);
      return new SimpleFutureDataBundle(null, null, spotUnderlyer, null, costOfCarry);
    case DIVIDEND_YIELD:
      spotUnderlyer = getSpot(security, inputs);
      // Latest estimated dividend yield, quoted in percent; scale to a fraction.
      Double dividendYield = timeSeriesBundle.get(DIVIDEND_YIELD_FIELD, getSpotAssetId(security)).getTimeSeries().getLatestValue();
      dividendYield /= 100.0;
      final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
      final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      final YieldAndDiscountCurve fundingCurve = getYieldCurve(security, inputs, fundingCurveName, curveConfigName);
      return new SimpleFutureDataBundle(fundingCurve, null, spotUnderlyer, dividendYield, null);
    default:
      throw new OpenGammaRuntimeException("Unhandled pricingMethod");
  }
}
/**
 * Prices the trade's future: fetches the latest margin (reference) price from the
 * historical series, converts the security to an OG-Analytics derivative, builds the
 * pricing data bundle for the first desired value, and returns the single computed result.
 */
@Override
public Set<ComputedValue> execute(FunctionExecutionContext executionContext, FunctionInputs inputs, ComputationTarget target, Set<ValueRequirement> desiredValues) {
final Trade trade = target.getTrade();
final FutureSecurity security = (FutureSecurity) trade.getSecurity();
// Get reference price
final HistoricalTimeSeriesBundle timeSeriesBundle = HistoricalTimeSeriesFunctionUtils.getHistoricalTimeSeriesInputs(executionContext, inputs);
final Double lastMarginPrice = timeSeriesBundle.get(MarketDataRequirementNames.MARKET_VALUE, security.getExternalIdBundle()).getTimeSeries().getLatestValue();
// Build the analytic's version of the security - the derivative
final ZonedDateTime valuationTime = executionContext.getValuationClock().zonedDateTime();
// final EquityFutureDefinition definition = CONVERTER.visitEquityFutureTrade(trade, lastMarginPrice); // TODO: Clean this up
// Rebuild an EquityFutureDefinition from the generic SimpleFutureDefinition fields.
final SimpleFutureDefinition simpleDefn = (SimpleFutureDefinition) security.accept(CONVERTER);
final EquityFutureDefinition defn = new EquityFutureDefinition(simpleDefn.getExpiry(), simpleDefn.getSettlementDate(),
simpleDefn.getReferencePrice(), simpleDefn.getCurrency(), simpleDefn.getUnitAmount());
final EquityFuture derivative = defn.toDerivative(valuationTime, lastMarginPrice);
// Build the DataBundle it requires
// NOTE(review): only the first desired value is honoured - confirm single-valued invocation.
final ValueRequirement desiredValue = desiredValues.iterator().next();
final SimpleFutureDataBundle dataBundle = getFutureDataBundle(security, inputs, timeSeriesBundle, desiredValue);
// Call OG-Analytics
final double value = getComputedValue(derivative, dataBundle, trade);
final ValueSpecification specification = new ValueSpecification(_valueRequirementName, target.toSpecification(),
createValueProperties(target, getFundingCurveName(desiredValue), getCurveConfigName(desiredValue), getPricingMethodName()).get());
return Collections.singleton(new ComputedValue(specification, value));
}
/**
 * Dispatches to the OG-Analytics pricer for the configured pricing method and computes
 * the measure named by {@code _valueRequirementName}.
 * @return the requested measure for the given derivative and data bundle
 */
private double getComputedValue(EquityFuture derivative, SimpleFutureDataBundle bundle, Trade trade) {
  final EquityFuturesPricer pricer = EquityFuturePricerFactory.getMethod(getPricingMethodName());
  if (_valueRequirementName.equals(ValueRequirementNames.PRESENT_VALUE)) {
    return pricer.presentValue(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.VALUE_DELTA)) {
    return pricer.spotDelta(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.VALUE_RHO)) {
    return pricer.ratesDelta(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.PV01)) {
    return pricer.pv01(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.SPOT)) {
    return pricer.spotPrice(derivative, bundle);
  }
  if (_valueRequirementName.equals(ValueRequirementNames.FORWARD)) {
    return pricer.forwardPrice(derivative, bundle);
  }
  throw new OpenGammaRuntimeException("_valueRequirementName," + _valueRequirementName + ", unexpected. Should have been recognized in the constructor.");
}
@Override
public ComputationTargetType getTargetType() {
// This function operates on trades; the security filter lives in canApplyTo.
return ComputationTargetType.TRADE;
}
@Override
public boolean canApplyTo(FunctionCompilationContext context, ComputationTarget target) {
<<<<<<< MINE
if (target.getType() != ComputationTargetType.TRADE) {
return false;
}
return target.getTrade().getSecurity() instanceof FutureSecurity; // was: EquityFutureSecurity;
=======
return target.getTrade().getSecurity() instanceof com.opengamma.financial.security.future.EquityFutureSecurity;
>>>>>>> YOURS
}
/**
 * Assembles the inputs needed to price the future under the configured pricing method.
 * Returns null (per engine convention) when any required constraint or time series
 * cannot be resolved, so the graph builder can fall back to defaults.
 * @param context compilation context, used to resolve time series
 * @param target the trade target; its security must be a FutureSecurity
 * @param desiredValue carries the CURVE and CURVE_CALCULATION_CONFIG constraints
 * @return the set of input requirements, or null if they cannot be satisfied
 */
@Override
public Set<ValueRequirement> getRequirements(FunctionCompilationContext context, ComputationTarget target, ValueRequirement desiredValue) {
  final FutureSecurity security = (FutureSecurity) target.getTrade().getSecurity();
  final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  // Reference-price time series, consumed by execute().
  final ValueRequirement refPriceReq = getReferencePriceRequirement(context, security);
  if (refPriceReq == null) {
    return null;
  }
  requirements.add(refPriceReq);
  // Funding curve name and curve configuration must each be uniquely constrained.
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  switch (getPricingMethodEnum()) {
    case MARK_TO_MARKET:
      requirements.add(getMarketPriceRequirement(security));
      // FIX: compare requirement names with equals(), not reference identity (==).
      if (ValueRequirementNames.SPOT.equals(getValueRequirementName())) {
        requirements.add(getSpotAssetRequirement(security));
      }
      break;
    case COST_OF_CARRY:
      requirements.add(getSpotAssetRequirement(security));
      requirements.add(getCostOfCarryRequirement(security));
      break;
    case DIVIDEND_YIELD:
      final ValueRequirement curveReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
      if (curveReq == null) {
        return null;
      }
      requirements.add(curveReq);
      final ValueRequirement dividendYieldReq = getDividendYieldRequirement(context, security);
      if (dividendYieldReq == null) {
        return null;
      }
      requirements.add(dividendYieldReq);
      requirements.add(getSpotAssetRequirement(security));
      break;
    default:
      throw new OpenGammaRuntimeException("Unhandled _pricingMethod=" + _pricingMethod);
  }
  return requirements;
}
/**
 * Extracts the single funding-curve name constrained on the desired value.
 * @param desiredValue the requested value carrying constraints
 * @return the curve name, or null when the constraint is absent or ambiguous
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curveNames != null && curveNames.size() == 1) {
    return curveNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single curve-calculation-config name constrained on the desired value.
 * @param desiredValue the requested value carrying constraints
 * @return the configuration name, or null when the constraint is absent or ambiguous
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configNames != null && configNames.size() == 1) {
    return configNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Requirement for the funding yield curve in the security's currency, constrained by
 * curve name and calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, final String curveCalculationConfigName, FutureSecurity security) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, security.getCurrency().getUniqueId(), curveProperties);
}
/**
 * Fetches the resolved funding curve from the function inputs.
 * @throws OpenGammaRuntimeException if the curve input was not provided
 */
protected YieldAndDiscountCurve getYieldCurve(final FutureSecurity security, final FunctionInputs inputs, final String fundingCurveName, final String curveCalculationConfigName) {
  final ValueRequirement requirement = getDiscountCurveRequirement(fundingCurveName, curveCalculationConfigName, security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (YieldAndDiscountCurve) resolved;
}
<<<<<<< MINE
private ValueRequirement getDividendYieldRequirement(FutureSecurity security) {
ExternalId id = getSpotAssetId(security);
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, id);
=======
private ValueRequirement getDividendYieldRequirement(EquityFutureSecurity security) {
ExternalId id = security.getUnderlyingId();
return new ValueRequirement(MarketDataRequirementNames.DIVIDEND_YIELD, ComputationTargetType.PRIMITIVE, id);
>>>>>>> YOURS
}
/**
 * Looks up the resolved dividend yield for the future's underlying asset.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
@SuppressWarnings("unused")
private Double getDividendYield(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getDividendYieldRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getSpotAssetRequirement(FutureSecurity security) {
ExternalId spotAssetId = getSpotAssetId(security);
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, spotAssetId);
=======
private ValueRequirement getSpotAssetRequirement(EquityFutureSecurity security) {
ValueRequirement req = new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
>>>>>>> YOURS
return req;
}
/**
 * Returns the identifier of the future's underlying spot asset. Only equity futures
 * carry an underlying id; other future types cannot be priced with the COST_OF_CARRY
 * or DIVIDEND_YIELD models.
 * FIX: replaced catch-all exception control flow (which discarded the cause and could
 * mask unrelated failures such as NPEs) with an explicit type check.
 * @throws OpenGammaRuntimeException if the security has no spot underlier
 */
protected ExternalId getSpotAssetId(FutureSecurity sec) {
  if (sec instanceof EquityFutureSecurity) {
    return ((EquityFutureSecurity) sec).getUnderlyingId();
  }
  throw new OpenGammaRuntimeException(sec.getContractCategory() + " failed to find spot asset. "
      + "COST_OF_CARRY and DIVIDEND_YIELD models are only available to Futures where Spot asset prices are available. "
      + "Contact Quant if spot asset should be available for this future.");
}
/**
 * Looks up the resolved spot price of the future's underlying asset.
 * @throws OpenGammaRuntimeException if the input was not provided
 */
protected Double getSpot(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getSpotAssetRequirement(security);
  final Object resolved = inputs.getValue(requirement);
  if (resolved == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) resolved;
}
<<<<<<< MINE
private ValueRequirement getCostOfCarryRequirement(FutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, getSpotAssetId(security));
=======
private ValueRequirement getCostOfCarryRequirement(EquityFutureSecurity security) {
return new ValueRequirement(MarketDataRequirementNames.COST_OF_CARRY, ComputationTargetType.PRIMITIVE, security.getUnderlyingId());
>>>>>>> YOURS
}
/**
 * Fetches the cost-of-carry value for the future's spot asset from the supplied inputs.
 */
protected Double getCostOfCarry(FutureSecurity security, FunctionInputs inputs) {
  final ValueRequirement requirement = getCostOfCarryRequirement(security);
  final Object value = inputs.getValue(requirement);
  if (value == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) value;
}
/**
 * Builds the market-value requirement for the security itself,
 * targeted at the security's own UniqueId (SECURITY target type),
 * unlike the spot/carry requirements which target the PRIMITIVE underlier.
 */
private ValueRequirement getMarketPriceRequirement(Security security) {
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.SECURITY, security.getUniqueId());
}
/**
 * Fetches the security's own market price from the supplied inputs.
 */
protected Double getMarketPrice(Security security, FunctionInputs inputs) {
  final ValueRequirement requirement = getMarketPriceRequirement(security);
  final Object value = inputs.getValue(requirement);
  if (value == null) {
    throw new OpenGammaRuntimeException("Could not get " + requirement);
  }
  return (Double) value;
}
/**
 * Builds a historical time-series requirement for the future's reference (last market) price,
 * covering the week up to the valuation time.
 *
 * @param context  the compilation context providing the time-series resolver
 * @param security the future whose price series is required
 * @return the HTS requirement, or null if no series can be resolved (engine will retry/default)
 */
private ValueRequirement getReferencePriceRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle idBundle = security.getExternalIdBundle();
  // Reuse idBundle instead of re-fetching it from the security for the resolve call.
  final HistoricalTimeSeriesResolutionResult timeSeries = resolver.resolve(idBundle, null, null, null, MarketDataRequirementNames.MARKET_VALUE, null);
  if (timeSeries == null) {
    s_logger.warn("Failed to find time series for: {}", idBundle);
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(timeSeries, MarketDataRequirementNames.MARKET_VALUE,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds a historical time-series requirement for the dividend yield of the future's
 * spot asset, covering the week up to the valuation time. Returns null when no series
 * resolves, letting the engine look for a default.
 */
private ValueRequirement getDividendYieldRequirement(final FunctionCompilationContext context, final FutureSecurity security) {
  final HistoricalTimeSeriesResolver resolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  final ExternalIdBundle bundle = ExternalIdBundle.of(getSpotAssetId(security));
  final HistoricalTimeSeriesResolutionResult result = resolver.resolve(bundle, null, null, null, DIVIDEND_YIELD_FIELD, null);
  if (result == null) {
    return null;
  }
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(result, DIVIDEND_YIELD_FIELD,
      DateConstraint.VALUATION_TIME.minus(Period.ofDays(7)), true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Gets the valueRequirementName — the name of the value this function produces.
 * @return the valueRequirementName, never null (checked in the constructor)
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
/**
 * Gets the pricingMethod as its enum constant.
 * @return the pricingMethod enum used to select the pricing model
 */
protected final EquityFuturesPricingMethod getPricingMethodEnum() {
return _pricingMethod;
}
/**
 * Gets the pricingMethodName — the string form of the pricing method,
 * as advertised in value properties.
 * @return the pricingMethodName
 */
protected final String getPricingMethodName() {
return _pricingMethodName;
}
private static final Logger s_logger = LoggerFactory.getLogger(EquityFuturesFunction.class);
}
Diff Result
No diff
Case 64 - java_ogplatform.rev_7b3fb_6494c..EquityIndexOptionFunction.java
Base
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
/**
*
*/
/**
 * Base class for functions that price equity index options (and, via reuse, vanilla
 * equity barrier options) with the Black method. It assembles a
 * {@link StaticReplicationDataBundle} from a spot value, a funding yield curve and a
 * Black volatility surface, and delegates the actual calculation to
 * {@link #computeValues(EquityIndexOption, StaticReplicationDataBundle)}.
 */
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
  /** The name of the value this function produces. */
  private final String _valueRequirementName;
  private EquityIndexOptionConverter _converter; // set in init(), not constructor

  // Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
  protected final EquityIndexOptionConverter getConverter() {
    return _converter;
  }

  /**
   * @param valueRequirementName the name of the value produced by this function, not null
   */
  public EquityIndexOptionFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }

  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    _converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
      final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
    // 1. Build the analytic derivative to be priced
    final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
    final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
    final ExternalId underlyingId = security.getUnderlyingId();
    final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
    final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
    if (derivative.getTimeToSettlement() < 0.0) {
      throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
    }
    // 2. Build up the market data bundle
    final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
    // 3. The Calculation - what we came here to do
    final Object results = computeValues(derivative, market);
    // 4. Create Result's Specification that matches the properties promised and Return
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
    return Collections.singleton(new ComputedValue(spec, results));
  }

  /**
   * Assembles the Black-pricing market data bundle: spot, funding curve, volatility
   * surface and a forward curve (taken from the surface when it is moneyness-based,
   * otherwise built from spot and the funding curve).
   * This is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call.
   */
  protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
      final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Security security = target.getSecurity();
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    // a. The Spot Index
    final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    // b. The Funding Curve
    final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(security, fundingCurveName));
    if (fundingObject == null) {
      throw new OpenGammaRuntimeException("Could not get Funding Curve");
    }
    if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve fundingCurve = (YieldCurve) fundingObject;
    // c. The Vol Surface
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
        smileInterpolator, fundingCurveName, underlyingId));
    // instanceof is false for null, so a separate null check is unnecessary
    if (!(volSurfaceObject instanceof BlackVolatilitySurface)) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
    // d. Forward Curve
    final ForwardCurve forwardCurve;
    if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
      forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
    } else {
      forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
    }
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
    return market;
  }

  /** Performs the actual pricing/risk calculation for the derivative under the given market. */
  protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.SECURITY;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getSecurity() instanceof EquityIndexOptionSecurity;
  }

  protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
    return (EquityIndexOptionSecurity) target.getSecurity();
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
  }

  /** Advertised (wildcard) properties used before constraints are known. */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    return createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .withAny(YieldCurveFunction.PROPERTY_FUNDING_CURVE)
        .withAny(ValuePropertyNames.SURFACE)
        .withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
  }

  /** Concrete properties once the desired value's constraints have been resolved. */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
    final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    ValueProperties.Builder builder = createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
        .with(ValuePropertyNames.SURFACE, volSurfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
    return builder;
  }

  /**
   * Get the set of ValueRequirements.
   * If null, engine will attempt to find a default, and call function again.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    // Get security and its underlying's ExternalId.
    final Security security = target.getSecurity();
    final ExternalId underlyingId;
    if (security instanceof EquityIndexOptionSecurity) {
      underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
    } else if (security instanceof EquityBarrierOptionSecurity) {
      underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
    } else {
      throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
    }
    // Spot Index Requirement
    final ValueRequirement spotReq = getSpotRequirement(underlyingId);
    // Funding Curve Requirement
    final Set<String> fundingCurves = constraints.getValues(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    if (fundingCurves == null || fundingCurves.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", YieldCurveFunction.PROPERTY_FUNDING_CURVE);
      return null;
    }
    final String fundingCurveName = fundingCurves.iterator().next();
    final ValueRequirement fundingReq = getDiscountCurveRequirement(security, fundingCurveName);
    // Volatility Surface Requirement
    // Surface Name
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
      return null;
    }
    final String volSurfaceName = surfaceNames.iterator().next();
    // Interpolator Name
    final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    if (interpolators == null || interpolators.size() != 1) {
      return null;
    }
    final String smileInterpolator = interpolators.iterator().next();
    // TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
    final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security, volSurfaceName, smileInterpolator, fundingCurveName,
        underlyingId);
    // Return the set
    return Sets.newHashSet(volReq, fundingReq, spotReq);
  }

  /**
   * Builds the Black volatility surface requirement. Targets for equity vol surfaces
   * are the underlying tickers, hence the Bloomberg-ticker lookup.
   */
  protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
      final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
    // Targets for equity vol surfaces are the underlying tickers
    String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
    final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
    // Set Forward Curve Currency Property
    final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }

  /*
   * Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
   * This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
   * TODO: Find a more appropriate way.
   */
  protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
    if (tsSource == null || underlyingBuid == null) {
      throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
    } else {
      final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
      if (historicalTimeSeries == null) {
        throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
      }
      final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
      final ExternalId ticker = idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
      // Fail with a clear message instead of an NPE when the bundle has no Bloomberg ticker.
      if (ticker == null) {
        throw new OpenGammaRuntimeException("Time series for " + underlyingBuid + " does not contain a Bloomberg ticker");
      }
      return ticker.getValue();
    }
  }

  protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
  }

  protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
  }

  /** @return the valueRequirementName this function produces */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter; // set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// This is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(security, fundingCurveName));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.SECURITY;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getSecurity() instanceof EquityIndexOptionSecurity;
}
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(YieldCurveFunction.PROPERTY_FUNDING_CURVE)
.withAny(ValuePropertyNames.SURFACE)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
/**
 * Gets the set of ValueRequirements needed to produce the desired value.
 * <p>
 * Returns null when any required constraint (funding curve, surface name, smile interpolator)
 * is missing or ambiguous; the engine will then attempt to find a default and call this
 * function again.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  // Get security and its underlying's ExternalId.
  final Security security = target.getSecurity();
  final ExternalId underlyingId;
  if (security instanceof EquityIndexOptionSecurity) {
    underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
  } else if (security instanceof EquityBarrierOptionSecurity) {
    underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
  } else {
    throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
  }
  // Spot Index Requirement
  final ValueRequirement spotReq = getSpotRequirement(underlyingId);
  // Funding Curve Requirement (read from the already-extracted constraints for consistency)
  final Set<String> fundingCurves = constraints.getValues(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
  if (fundingCurves == null || fundingCurves.size() != 1) {
    s_logger.info("Could not find {} requirement. Looking for a default..", YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    return null;
  }
  final String fundingCurveName = fundingCurves.iterator().next();
  final ValueRequirement fundingReq = getDiscountCurveRequirement(security, fundingCurveName);
  // Volatility Surface Requirement: surface name
  final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
  if (surfaceNames == null || surfaceNames.size() != 1) {
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
    return null;
  }
  final String volSurfaceName = surfaceNames.iterator().next();
  // Interpolator Name
  final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  if (interpolators == null || interpolators.size() != 1) {
    // Log here too, for consistency with the other constraint checks above
    s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    return null;
  }
  final String smileInterpolator = interpolators.iterator().next();
  // TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
  final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security, volSurfaceName, smileInterpolator, fundingCurveName,
      underlyingId);
  // Return the set
  return Sets.newHashSet(volReq, fundingReq, spotReq);
}
/**
 * Builds the Black volatility surface requirement for the option's underlying.
 * The computation target is a weak Bloomberg ticker resolved from the underlying BUID.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
    final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
  // Targets for equity vol surfaces are the underlying tickers
  final String ticker = getBloombergTicker(tsSource, underlyingBuid);
  final UniqueId volTargetId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), ticker);
  // Forward-curve currency property comes from the security's currency
  final String currencyName = FinancialSecurityUtils.getCurrency(security).toString();
  final ValueProperties.Builder surfaceProperties = ValueProperties.builder();
  surfaceProperties.with(ValuePropertyNames.SURFACE, surfaceName);
  surfaceProperties.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator);
  surfaceProperties.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName);
  surfaceProperties.with(ValuePropertyNames.CURVE_CURRENCY, currencyName);
  surfaceProperties.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION);
  return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, volTargetId, surfaceProperties.get());
}
/**
 * Gets the Bloomberg ticker from the BUID via the HistoricalTimeSeriesSource.
 * <p>
 * This might seem like a strange way to do it. It is. The reason is that only the tsSource
 * appeared to contain the ExternalIdBundle!
 * TODO: Find a more appropriate way.
 *
 * @throws OpenGammaRuntimeException if the sources are null, the time series is absent,
 *         or no Bloomberg ticker is present in the resolved id bundle
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
  if (tsSource == null || underlyingBuid == null) {
    throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
  }
  final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
  if (historicalTimeSeries == null) {
    throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
  }
  final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
  // Guard the lookup chain: previously a missing bundle or ticker produced a bare NPE
  final ExternalId tickerId = idBundle == null ? null : idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
  if (tickerId == null) {
    throw new OpenGammaRuntimeException("No Bloomberg ticker found in the id bundle for " + underlyingBuid);
  }
  return tickerId.getValue();
}
/** Builds the market-value requirement for the option's underlying index. */
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
  final UniqueId underlyingUid = UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue());
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingUid);
}
/** Builds the yield-curve requirement on the security's currency for the named funding curve. */
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
  final ValueProperties curveProperties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
  final UniqueId currencyId = FinancialSecurityUtils.getCurrency(security).getUniqueId();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currencyId, curveProperties);
}
/**
 * Returns the name of the value requirement this function produces, as supplied at construction.
 */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
Left
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
  /** Logger for this class. */
  private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
  /** Name of the value requirement this function produces; fixed at construction. */
  private final String _valueRequirementName;
  /** Converts the option security into its analytic definition; set in init(), not the constructor. */
  private EquityIndexOptionConverter _converter;

  // TODO: Get rid of this accessor, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
  protected final EquityIndexOptionConverter getConverter() {
    return _converter;
  }

  /**
   * @param valueRequirementName the name of the value requirement this function produces, not null
   */
  public EquityIndexOptionFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }

  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    _converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
  }

  /**
   * Builds the analytic derivative, assembles the market data, delegates the calculation to
   * {@link #computeValues} and wraps the result in a specification matching the promised properties.
   */
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
      final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
    // 1. Build the analytic derivative to be priced
    final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
    final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
    final ExternalId underlyingId = security.getUnderlyingId();
    final EquityIndexOptionDefinition defn = getConverter().visitEquityIndexOptionSecurity(security);
    final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
    if (derivative.getTimeToSettlement() < 0.0) {
      throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
    }
    // 2. Build up the market data bundle
    final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
    // 3. The Calculation - what we came here to do
    final Object results = computeValues(derivative, market);
    // 4. Create a result specification that matches the properties promised, and return
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
    return Collections.singleton(new ComputedValue(spec, results));
  }

  /**
   * Assembles the market data bundle: spot, funding curve, volatility surface and forward curve.
   * Re-used by EquityIndexVanillaBarrierOptionFunction, hence available to call.
   */
  protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
      final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Security security = target.getSecurity();
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    // a. The Spot Index
    final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    // b. The Funding Curve
    final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
    final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
    if (fundingObject == null) {
      throw new OpenGammaRuntimeException("Could not get Funding Curve");
    }
    if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve fundingCurve = (YieldCurve) fundingObject;
    // c. The Vol Surface. instanceof is false for null, so no separate null check is needed.
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
        smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
    if (!(volSurfaceObject instanceof BlackVolatilitySurface)) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
    // d. Forward Curve
    final ForwardCurve forwardCurve;
    if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
      forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
    } else {
      forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
    }
    return new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
  }

  /**
   * Computes the required value(s) for the given derivative and market data. Implemented by subclasses.
   *
   * @param derivative the analytic representation of the option
   * @param market the bundle of funding curve, forward curve and volatility surface
   * @return the computed result
   */
  protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.SECURITY;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    return target.getSecurity() instanceof EquityIndexOptionSecurity;
  }

  /** Casts the target's security; only safe after {@link #canApplyTo} has accepted the target. */
  protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
    return (EquityIndexOptionSecurity) target.getSecurity();
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
  }

  /**
   * Advertised result properties: calculation method is Black; surface, curve, curve config
   * and smile interpolator stay open ("any") until constraints are resolved.
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    return createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .withAny(ValuePropertyNames.SURFACE)
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
        .withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
  }

  /**
   * Builds concrete result properties from the constraints of the resolved desired value.
   * NOTE(review): executionContext is currently unused here - confirm whether subclasses rely on it.
   */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
    final String fundingCurveName = getFundingCurveName(desiredValue);
    final String curveConfigName = getCurveConfigName(desiredValue);
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    return createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
        .with(ValuePropertyNames.SURFACE, volSurfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
  }

  /**
   * Extracts the single funding curve name from the desired value's constraints, or logs and
   * returns null so the engine can look for a default.
   */
  protected String getFundingCurveName(ValueRequirement desiredValue) {
    final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    if (fundingCurves == null || fundingCurves.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
      return null;
    }
    return fundingCurves.iterator().next();
  }

  /**
   * Extracts the single curve-calculation-config name from the desired value's constraints, or
   * logs and returns null so the engine can look for a default.
   */
  protected String getCurveConfigName(ValueRequirement desiredValue) {
    final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (curveConfigNames == null || curveConfigNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      return null;
    }
    return curveConfigNames.iterator().next();
  }

  /** Builds the yield-curve requirement on the security's currency. */
  protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
    ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
        .get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
  }

  /**
   * Gets the set of ValueRequirements.
   * <p>
   * Returns null when any required constraint is missing or ambiguous; the engine will then
   * attempt to find a default and call this function again.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    // Get security and its underlying's ExternalId.
    final Security security = target.getSecurity();
    final ExternalId underlyingId;
    if (security instanceof EquityIndexOptionSecurity) {
      underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
    } else if (security instanceof EquityBarrierOptionSecurity) {
      underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
    } else {
      throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
    }
    // 1. Spot Index Requirement
    final ValueRequirement spotReq = getSpotRequirement(underlyingId);
    // 2. Funding Curve Requirement
    final String fundingCurveName = getFundingCurveName(desiredValue);
    if (fundingCurveName == null) {
      return null;
    }
    final String curveConfigName = getCurveConfigName(desiredValue);
    if (curveConfigName == null) {
      return null;
    }
    final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
    // 3. Volatility Surface Requirement: surface name
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
      return null;
    }
    final String volSurfaceName = surfaceNames.iterator().next();
    // Interpolator Name
    final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    if (interpolators == null || interpolators.size() != 1) {
      // Log here too, for consistency with the other constraint checks above
      s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
      return null;
    }
    final String smileInterpolator = interpolators.iterator().next();
    // TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
    final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
        volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
    // Return the set
    return Sets.newHashSet(spotReq, fundingReq, volReq);
  }

  // TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
  /**
   * Builds the Black volatility surface requirement; the target is a weak Bloomberg ticker
   * derived from the underlying BUID.
   */
  protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
      final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
    // Targets for equity vol surfaces are the underlying tickers
    String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
    final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
    // Set Forward Curve Currency Property
    final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
        .with(ValuePropertyNames.CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
        .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }

  /**
   * Gets the Bloomberg ticker from the BUID via the HistoricalTimeSeriesSource.
   * <p>
   * This might seem like a strange way to do it. It is. The reason is that only the tsSource
   * appeared to contain the ExternalIdBundle!
   * TODO: Find a more appropriate way.
   *
   * @throws OpenGammaRuntimeException if the sources are null, the time series is absent,
   *         or no Bloomberg ticker is present in the resolved id bundle
   */
  protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
    if (tsSource == null || underlyingBuid == null) {
      throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
    }
    final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
    if (historicalTimeSeries == null) {
      throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
    }
    final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
    // Guard the lookup chain: previously a missing bundle or ticker produced a bare NPE
    final ExternalId tickerId = idBundle == null ? null : idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
    if (tickerId == null) {
      throw new OpenGammaRuntimeException("No Bloomberg ticker found in the id bundle for " + underlyingBuid);
    }
    return tickerId.getValue();
  }

  /** Builds the market-value requirement for the option's underlying index. */
  protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
    // Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
  }

  /** Returns the name of the value requirement this function produces. */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
/** Logger for this class. */
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
/** Name of the value requirement this function produces; fixed at construction. */
private final String _valueRequirementName;
/** Converts the option security into its analytic definition. */
private EquityIndexOptionConverter _converter; // set in init(), not constructor
// TODO: Get rid of this accessor, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
/**
 * Returns the security-to-definition converter (initialised in init(), not the constructor).
 */
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
/**
 * @param valueRequirementName the name of the value requirement this function produces, not null
 */
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the security converter from the holiday, region and convention-bundle sources
 * available in the compilation context.
 */
@Override
public void init(final FunctionCompilationContext context) {
  final HolidaySource holidays = OpenGammaCompilationContext.getHolidaySource(context);
  final RegionSource regions = OpenGammaCompilationContext.getRegionSource(context);
  final ConventionBundleSource conventions = OpenGammaCompilationContext.getConventionBundleSource(context);
  _converter = new EquityIndexOptionConverter(holidays, conventions, regions);
}
/**
 * Builds the analytic derivative, assembles the market data, delegates the calculation to
 * {@link #computeValues} and wraps the result in a specification matching the promised properties.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
    final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
  // 1. Build the analytic derivative to be priced
  final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
  final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
  final ExternalId underlyingId = security.getUnderlyingId();
  final EquityIndexOptionDefinition defn = getConverter().visitEquityIndexOptionSecurity(security);
  final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
  if (derivative.getTimeToSettlement() < 0.0) {
    throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
  }
  // 2. Build up the market data bundle
  final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
  // 3. The Calculation - what we came here to do
  final Object results = computeValues(derivative, market);
  // 4. Create a result specification that matches the properties promised, and return
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
  return Collections.singleton(new ComputedValue(spec, results));
}
/**
 * Assembles the market data bundle: spot, funding curve, volatility surface and forward curve.
 * Re-used by EquityIndexVanillaBarrierOptionFunction, hence available to call.
 */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
    final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  final Security security = target.getSecurity();
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  // a. The Spot Index
  final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
  if (spotObject == null) {
    throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
  }
  final double spot = (Double) spotObject;
  // b. The Funding Curve
  final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
  final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
  if (fundingObject == null) {
    throw new OpenGammaRuntimeException("Could not get Funding Curve");
  }
  if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
    throw new IllegalArgumentException("Can only handle YieldCurve");
  }
  final YieldCurve fundingCurve = (YieldCurve) fundingObject;
  // c. The Vol Surface. instanceof is false for null, so no separate null check is needed.
  final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
      smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
  if (!(volSurfaceObject instanceof BlackVolatilitySurface)) {
    throw new OpenGammaRuntimeException("Could not get Volatility Surface");
  }
  final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
  // d. Forward Curve
  final ForwardCurve forwardCurve;
  if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
    forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
  } else {
    forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
  }
  return new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
}
/**
 * Computes the required value(s) for the given derivative and market data. Implemented by subclasses.
 *
 * @param derivative the analytic representation of the option
 * @param market the bundle of funding curve, forward curve and volatility surface
 * @return the computed result
 */
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
/**
 * This function targets individual securities.
 */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.SECURITY;
}
/**
 * Applies only to equity index option securities.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
return target.getSecurity() instanceof EquityIndexOptionSecurity;
}
/**
 * Casts the target's security; only safe after {@link #canApplyTo} has accepted the target.
 */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
/**
 * Advertises a single result: the configured value requirement with the open ("any") properties.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
  final ValueProperties resultProperties = createValueProperties(target).get();
  final ValueSpecification resultSpec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), resultProperties);
  return Collections.singleton(resultSpec);
}
/**
 * Advertised result properties: calculation method is Black; surface, curve, curve config
 * and smile interpolator stay open ("any") until constraints are resolved.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
  final String currencyCode = FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode();
  final ValueProperties.Builder builder = createValueProperties();
  builder.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD);
  builder.withAny(ValuePropertyNames.SURFACE);
  builder.withAny(ValuePropertyNames.CURVE);
  builder.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  builder.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  builder.with(ValuePropertyNames.CURRENCY, currencyCode);
  return builder;
}
/**
 * Builds concrete result properties from the constraints of the resolved desired value.
 * NOTE(review): executionContext is currently unused here - confirm whether subclasses rely on it.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
  final String curveName = getFundingCurveName(desiredValue);
  final String configName = getCurveConfigName(desiredValue);
  final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  final String interpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  return createValueProperties()
      .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, configName)
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, interpolatorName)
      .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Extracts the single funding curve name from the requirement's constraints.
 *
 * @param desiredValue the requirement whose constraints are inspected
 * @return the curve name, or null (logged at info) when the constraint is
 *         absent or ambiguous so the engine can retry with a default
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (fundingCurves != null && fundingCurves.size() == 1) {
    return fundingCurves.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single curve calculation configuration name from the requirement's constraints.
 *
 * @param desiredValue the requirement whose constraints are inspected
 * @return the configuration name, or null (logged at info) when the constraint is
 *         absent or ambiguous so the engine can retry with a default
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (curveConfigNames != null && curveConfigNames.size() == 1) {
    return curveConfigNames.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Requirement for the discount (funding) yield curve on the security's currency.
 *
 * @param fundingCurveName the curve name constraint
 * @param curveCalculationConfigName the curve calculation configuration constraint
 * @param security the security whose currency keys the curve target
 * @return the yield curve requirement
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
  final ValueProperties curveProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
      .get();
  final UniqueId currencyId = FinancialSecurityUtils.getCurrency(security).getUniqueId();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currencyId, curveProperties);
}
/**
 * Gets the set of ValueRequirements: the underlying's spot value, the funding/discount
 * curve, and the Black volatility surface.
 * <p>
 * If any required constraint is absent or ambiguous this returns null, and the engine
 * will attempt to find a default and call the function again.
 * (Doc comment moved before {@code @Override}: the javadoc tool only associates a doc
 * comment that precedes all annotations/modifiers of the declaration.)
 *
 * @param context the compilation context
 * @param target the computation target, whose security must be an equity index or barrier option
 * @param desiredValue the requirement being satisfied
 * @return the input requirements, or null to defer to defaults
 * @throws OpenGammaRuntimeException if the security type is not handled
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  // Get security and its underlying's ExternalId.
  final Security security = target.getSecurity();
  final ExternalId underlyingId;
  if (security instanceof EquityIndexOptionSecurity) {
    underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
  } else if (security instanceof EquityBarrierOptionSecurity) {
    underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
  } else {
    throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
  }
  // 1. Spot Index Requirement
  final ValueRequirement spotReq = getSpotRequirement(underlyingId);
  // 2. Funding Curve Requirement
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
  // 3. Volatility Surface Requirement
  final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
  if (surfaceNames == null || surfaceNames.size() != 1) {
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
    return null;
  }
  final String volSurfaceName = surfaceNames.iterator().next();
  final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  if (interpolators == null || interpolators.size() != 1) {
    // Log for parity with the other missing-constraint branches before deferring to defaults.
    s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    return null;
  }
  final String smileInterpolator = interpolators.iterator().next();
  // TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
  final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
      volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
  // Return the set
  return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
/**
 * Requirement for the Black volatility surface of the option's underlying.
 * The target is a UniqueId built from the weak Bloomberg ticker scheme name and the
 * underlying's ticker, which is looked up via the time-series source.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
    final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
  // Targets for equity vol surfaces are the underlying tickers
  String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
  final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
  // Set Forward Curve Currency Property
  // NOTE(review): Currency.toString() is used here, while other property builders in this
  // file use getCode() -- confirm both produce the same ISO code string.
  final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
  final ValueProperties properties = ValueProperties.builder()
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
      .with(ValuePropertyNames.CURVE, fundingCurveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
      .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
      .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
      .get();
  return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Gets the Bloomberg ticker from the BUID via the HistoricalTimeSeriesSource.
 * <p>
 * This might seem like a strange way to do it. It is. The reason is that only the
 * tsSource appeared to contain the ExternalIdBundle!
 * TODO: Find a more appropriate way.
 *
 * @param tsSource the time-series source used to resolve the id bundle, not null
 * @param underlyingBuid the underlying's BUID, not null
 * @return the Bloomberg ticker value
 * @throws OpenGammaRuntimeException if an input is null, no time series exists,
 *         or no Bloomberg ticker can be resolved from the id bundle
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
  if (tsSource == null || underlyingBuid == null) {
    throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
  }
  final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
  if (historicalTimeSeries == null) {
    throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
  }
  final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
  final ExternalId tickerId = idBundle == null ? null : idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
  if (tickerId == null) {
    // Previously this dereferenced the lookup result unchecked and could fail with an uninformative NPE.
    throw new OpenGammaRuntimeException("No Bloomberg ticker found for " + underlyingBuid);
  }
  return tickerId.getValue();
}
/**
 * Requirement for the live market value (spot) of the option's underlying.
 * The target UniqueId is rebuilt from the ExternalId's scheme name and value.
 */
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
  return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
  // Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
}
/**
 * @return the name of the value this function produces, fixed at construction; not null
 */
protected final String getValueRequirementName() {
  return _valueRequirementName;
}
}
Right
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
/**
 * Base function for pricing equity index options against a Black volatility surface and a
 * static-replication market data bundle. Subclasses supply the actual calculation via
 * {@link #computeValues}. The funding curve is resolved through the
 * YieldCurveFunction.PROPERTY_FUNDING_CURVE constraint.
 */
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
  // Name of the single value this function produces, fixed at construction.
  private final String _valueRequirementName;
  private EquityIndexOptionConverter _converter; // set in init(), not constructor
  // Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
  /** @return the security-to-definition converter built in init(); null before init() runs */
  protected final EquityIndexOptionConverter getConverter() {
    return _converter;
  }
  /**
   * @param valueRequirementName the name of the value this function produces, not null
   */
  public EquityIndexOptionFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }
  @Override
  public void init(final FunctionCompilationContext context) {
    // The shared sources are only available once the engine calls init(), hence the late construction.
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    _converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
  }
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
      final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
    // 1. Build the analytic derivative to be priced
    final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
    final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
    final ExternalId underlyingId = security.getUnderlyingId();
    final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
    final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
    if (derivative.getTimeToSettlement() < 0.0) {
      throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
    }
    // 2. Build up the market data bundle
    final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
    // 3. The Calculation - what we came here to do
    final Object results = computeValues(derivative, market);
    // 4. Create Result's Specification that matches the properties promised and Return
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    //final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
    final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
    return Collections.singleton(new ComputedValue(spec, results));
  }
  // This is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
  /**
   * Assembles the market data bundle (vol surface, funding curve, forward curve) from
   * the function's resolved inputs; throws OpenGammaRuntimeException when a required
   * input is missing.
   */
  protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
      final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Security security = target.getSecurity();
    // assumes the first desired value's constraints are representative of the set -- TODO confirm
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    // a. The Spot Index
    final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    // b. The Funding Curve
    final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(security, fundingCurveName));
    if (fundingObject == null) {
      throw new OpenGammaRuntimeException("Could not get Funding Curve");
    }
    if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve fundingCurve = (YieldCurve) fundingObject;
    // c. The Vol Surface
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
        smileInterpolator, fundingCurveName, underlyingId));
    if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
    // d. Forward Curve
    final ForwardCurve forwardCurve;
    if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
      forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
    } else {
      forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
    }
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
    return market;
  }
  /** Performs the pricing calculation; implemented by subclasses. */
  protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
  @Override
  public ComputationTargetType getTargetType() {
    return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
  }
  // The cast is safe because getTargetType() restricts targets to equity index option securities.
  protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
    return (EquityIndexOptionSecurity) target.getSecurity();
  }
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
  }
  /** Compile-time result properties; unresolved inputs are advertised as wildcards (withAny). */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    return createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .withAny(YieldCurveFunction.PROPERTY_FUNDING_CURVE)
        .withAny(ValuePropertyNames.SURFACE)
        .withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
  }
  /** Result properties resolved against the satisfied requirement's constraints; executionContext is unused here. */
  protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
    final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    ValueProperties.Builder builder = createValueProperties()
        .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
        .with(ValuePropertyNames.SURFACE, volSurfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
        .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
    return builder;
  }
  @Override
  /**
   * Get the set of ValueRequirements
   * If null, engine will attempt to find a default, and call function again
   */
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final ValueProperties constraints = desiredValue.getConstraints();
    // Get security and its underlying's ExternalId.
    final Security security = target.getSecurity();
    final ExternalId underlyingId;
    if (security instanceof EquityIndexOptionSecurity) {
      underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
    } else if (security instanceof EquityBarrierOptionSecurity) {
      underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
    } else {
      throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
    }
    // Spot Index Requirement
    final ValueRequirement spotReq = getSpotRequirement(underlyingId);
    // Funding Curve Requirement
    final Set<String> fundingCurves = desiredValue.getConstraints().getValues(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
    if (fundingCurves == null || fundingCurves.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", YieldCurveFunction.PROPERTY_FUNDING_CURVE);
      return null;
    }
    final String fundingCurveName = fundingCurves.iterator().next();
    final ValueRequirement fundingReq = getDiscountCurveRequirement(security, fundingCurveName);
    // Volatility Surface Requirement
    // Surface Name
    final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
    if (surfaceNames == null || surfaceNames.size() != 1) {
      s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
      return null;
    }
    final String volSurfaceName = surfaceNames.iterator().next();
    // Interpolator Name
    final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    if (interpolators == null || interpolators.size() != 1) {
      // NOTE(review): this branch returns null without logging, unlike the branches above -- intentional?
      return null;
    }
    final String smileInterpolator = interpolators.iterator().next();
    // TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
    final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security, volSurfaceName, smileInterpolator, fundingCurveName,
        underlyingId);
    // Return the set
    return Sets.newHashSet(volReq, fundingReq, spotReq);
  }
  /**
   * Requirement for the Black volatility surface, targeted at the underlying's weak
   * Bloomberg ticker (resolved via the time-series source).
   */
  protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
      final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
    // Targets for equity vol surfaces are the underlying tickers
    final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
    final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
    // Set Forward Curve Currency Property
    final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }
  /*
   * Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
   * This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
   * TODO: Find a more appropriate way.
   */
  protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
    if (tsSource == null || underlyingBuid == null) {
      throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
    } else {
      final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
      if (historicalTimeSeries == null) {
        throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
      }
      final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
      // NOTE(review): getExternalId may return null when the bundle has no Bloomberg ticker,
      // which would NPE on .getValue() -- confirm the bundle always carries one.
      String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
      return bbgTicker;
    }
  }
  /** Requirement for the underlying's live market value (spot), targeted at the ExternalId directly. */
  protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
  }
  /** Requirement for the named funding curve on the security's currency. */
  protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
  }
  /** @return the name of the value this function produces, fixed at construction; not null */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.ircurve.YieldCurveFunction;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter; // set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
/** @return the security-to-definition converter built in init(); null before init() runs */
protected final EquityIndexOptionConverter getConverter() {
  return _converter;
}
/**
 * @param valueRequirementName the name of the single value this function produces, not null
 */
public EquityIndexOptionFunction(final String valueRequirementName) {
  ArgumentChecker.notNull(valueRequirementName, "value requirement name");
  _valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
  // The shared sources are only available once the engine calls init(), hence the late construction.
  final HolidaySource holidays = OpenGammaCompilationContext.getHolidaySource(context);
  final RegionSource regions = OpenGammaCompilationContext.getRegionSource(context);
  final ConventionBundleSource conventions = OpenGammaCompilationContext.getConventionBundleSource(context);
  _converter = new EquityIndexOptionConverter(holidays, conventions, regions);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
    final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
  // 1. Build the analytic derivative to be priced
  final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
  final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
  final ExternalId underlyingId = security.getUnderlyingId();
  final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
  final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
  // Refuse to price an instrument that has already settled at the valuation instant.
  if (derivative.getTimeToSettlement() < 0.0) {
    throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
  }
  // 2. Build up the market data bundle
  final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
  // 3. The Calculation - what we came here to do
  final Object results = computeValues(derivative, market);
  // 4. Create Result's Specification that matches the properties promised and Return
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  //final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
  final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
  return Collections.singleton(new ComputedValue(spec, results));
}
// This is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
/**
 * Assembles the market data bundle (vol surface, funding curve, forward curve) from the
 * function's resolved inputs; throws OpenGammaRuntimeException when a required input is missing.
 */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
    final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  final Security security = target.getSecurity();
  // assumes the first desired value's constraints are representative of the set -- TODO confirm
  final ValueRequirement desiredValue = desiredValues.iterator().next();
  // a. The Spot Index
  final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
  if (spotObject == null) {
    throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
  }
  final double spot = (Double) spotObject;
  // b. The Funding Curve
  final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
  final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(security, fundingCurveName));
  if (fundingObject == null) {
    throw new OpenGammaRuntimeException("Could not get Funding Curve");
  }
  if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
    throw new IllegalArgumentException("Can only handle YieldCurve");
  }
  final YieldCurve fundingCurve = (YieldCurve) fundingObject;
  // c. The Vol Surface
  final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
      smileInterpolator, fundingCurveName, underlyingId));
  if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
    throw new OpenGammaRuntimeException("Could not get Volatility Surface");
  }
  final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
  // d. Forward Curve
  final ForwardCurve forwardCurve;
  if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
    forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
  } else {
    forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
  }
  final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
  return market;
}
/** Performs the pricing calculation against the supplied market data; implemented by subclasses. */
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
  // Restricts this function to equity index option security targets.
  return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
/**
 * Casts the computation target's security to an {@link EquityIndexOptionSecurity};
 * safe because {@link #getTargetType()} restricts the target type.
 */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
  final Security raw = target.getSecurity();
  return (EquityIndexOptionSecurity) raw;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
  // Advertise a single result; its properties carry wildcard constraints until resolved.
  final ValueProperties resultProperties = createValueProperties(target).get();
  final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), resultProperties);
  return Collections.singleton(spec);
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(YieldCurveFunction.PROPERTY_FUNDING_CURVE)
.withAny(ValuePropertyNames.SURFACE)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = desiredValue.getConstraint(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
@Override
/**
* Get the set of ValueRequirements
* If null, engine will attempt to find a default, and call function again
*/
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// Funding Curve Requirement
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", YieldCurveFunction.PROPERTY_FUNDING_CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
final ValueRequirement fundingReq = getDiscountCurveRequirement(security, fundingCurveName);
// Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security, volSurfaceName, smileInterpolator, fundingCurveName,
underlyingId);
// Return the set
return Sets.newHashSet(volReq, fundingReq, spotReq);
}
  /**
   * Builds the requirement for the Black volatility surface of the underlying.
   * The computation target is the underlying's weak Bloomberg ticker, resolved from its
   * BUID via the time-series source.
   *
   * @param tsSource time-series source used to resolve the Bloomberg ticker
   * @param security the option security, used only to derive the curve currency
   * @param surfaceName the volatility surface name
   * @param smileInterpolator the smile interpolator name
   * @param fundingCurveName the funding curve name attached as a property
   * @param underlyingBuid the underlying's Bloomberg BUID
   * @return the BLACK_VOLATILITY_SURFACE requirement on the underlying's weak ticker
   */
  protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
      final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
    // Targets for equity vol surfaces are the underlying tickers
    final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
    final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
    // Set Forward Curve Currency Property
    final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.SURFACE, surfaceName)
        .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
        .with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
        .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
        .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
        .get();
    return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
  }
/*
* Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
* This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
* TODO: Find a more appropriate way.
*/
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
} else {
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
return bbgTicker;
}
}
  /**
   * Builds the market-value requirement for the underlying index.
   *
   * @param underlyingId the underlying's ExternalId
   * @return a MARKET_VALUE requirement targeting the underlying
   */
  protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
    return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
  }
  /**
   * Builds the yield-curve requirement used for discounting, targeted at the security's currency.
   *
   * @param security the security, used only to derive the currency target
   * @param fundingCurveName the curve name attached as the CURVE property
   * @return the YIELD_CURVE requirement
   */
  protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
  }
  /** @return the name of the value this function produces, as set in the constructor */
  protected final String getValueRequirementName() {
    return _valueRequirementName;
  }
}
// MergeMethods (leftover merge-tool marker between file revisions; not Java code — commented out so the file parses)
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// buildMarketBundle is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = getFundingCurveName(desiredValue);
final String curveConfigName = getCurveConfigName(desiredValue);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
@Override
/**
* Get the set of ValueRequirements
* If null, engine will attempt to find a default, and call function again
*/
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
* Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
* This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
* TODO: Find a more appropriate way.
*/
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
} else {
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
return bbgTicker;
}
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
  /** @return the security-to-analytics converter; null until {@code init} has been called */
  protected final EquityIndexOptionConverter getConverter() {
    return _converter;
  }
  /**
   * @param valueRequirementName the name of the value this function produces, not null
   */
  public EquityIndexOptionFunction(final String valueRequirementName) {
    ArgumentChecker.notNull(valueRequirementName, "value requirement name");
    _valueRequirementName = valueRequirementName;
  }
  /** Builds the security converter from the compilation context's sources; runs before execution. */
  @Override
  public void init(final FunctionCompilationContext context) {
    final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
    final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
    final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
    _converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
  }
  /**
   * Builds the analytic derivative, assembles the market data bundle, delegates the
   * calculation to {@link #computeValues}, and wraps the result in a specification that
   * matches the promised properties.
   *
   * @throws OpenGammaRuntimeException if the option has already settled
   */
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
      final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
    // 1. Build the analytic derivative to be priced
    final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
    final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
    final ExternalId underlyingId = security.getUnderlyingId();
    final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
    final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
    if (derivative.getTimeToSettlement() < 0.0) {
      throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
    }
    // 2. Build up the market data bundle
    final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
    // 3. The Calculation - what we came here to do
    final Object results = computeValues(derivative, market);
    // 4. Create Result's Specification that matches the properties promised and Return
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
    return Collections.singleton(new ComputedValue(spec, results));
  }
  /**
   * Assembles the market data bundle: spot, funding (discount) curve, Black volatility
   * surface and a forward curve (taken from the surface when it is moneyness-parameterised,
   * otherwise built from spot and the funding curve).
   * Re-used by EquityIndexVanillaBarrierOptionFunction, hence kept accessible.
   *
   * @throws OpenGammaRuntimeException if any market data input is missing
   */
  protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
      final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Security security = target.getSecurity();
    final ValueRequirement desiredValue = desiredValues.iterator().next();
    // a. The Spot Index
    final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
    if (spotObject == null) {
      throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
    }
    final double spot = (Double) spotObject;
    // b. The Funding Curve
    final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
    final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
    if (fundingObject == null) {
      throw new OpenGammaRuntimeException("Could not get Funding Curve");
    }
    if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
      throw new IllegalArgumentException("Can only handle YieldCurve");
    }
    final YieldCurve fundingCurve = (YieldCurve) fundingObject;
    // c. The Vol Surface
    final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
    final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
        smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
    if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
      throw new OpenGammaRuntimeException("Could not get Volatility Surface");
    }
    final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
    // d. Forward Curve
    final ForwardCurve forwardCurve;
    if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
      forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
    } else {
      forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
    }
    final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
    return market;
  }
/** Prices the derivative against the supplied market data; the measure and result type are defined by the concrete subclass. */
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
/** This function applies only to equity index option securities. */
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
/**
 * Casts the target's security to {@link EquityIndexOptionSecurity}.
 * The cast is safe for targets admitted by getTargetType().
 */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
/**
 * Advertises the single result this function can produce. The properties are mostly
 * wildcards at this stage; the engine narrows them once constraints are resolved.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the advertised (pre-resolution) properties: calculation method and currency are fixed,
 * everything else is a wildcard. Note the Black method constant is borrowed from FXOptionBlackFunction.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Builds the fully-resolved properties of the computed result, taking each value
 * from the constraints of the satisfied requirement.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
  // Resolve each property in turn from the requirement's constraints
  final String curveName = getFundingCurveName(desiredValue);
  final String configName = getCurveConfigName(desiredValue);
  final String surfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  final String interpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  return createValueProperties()
      .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, configName)
      .with(ValuePropertyNames.SURFACE, surfaceName)
      .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, interpolatorName)
      .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Extracts the single funding curve name from the requirement's CURVE constraint.
 * Returns null (after logging) when the constraint is absent or ambiguous, so the
 * engine can fall back to a default.
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (curves != null && curves.size() == 1) {
    return curves.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single curve calculation config name from the requirement's constraints.
 * Returns null (after logging) when absent or ambiguous, so the engine can apply a default.
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> configs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (configs != null && configs.size() == 1) {
    return configs.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Gets the set of ValueRequirements: the underlying's spot, the funding curve and the
 * Black volatility surface.
 * If any constraint is unresolved this returns null, and the engine will attempt to
 * find a default and call the function again.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  // Get security and its underlying's ExternalId.
  final Security security = target.getSecurity();
  final ExternalId underlyingId;
  if (security instanceof EquityIndexOptionSecurity) {
    underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
  } else if (security instanceof EquityBarrierOptionSecurity) {
    underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
  } else {
    throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
  }
  // 1. Spot Index Requirement
  final ValueRequirement spotReq = getSpotRequirement(underlyingId);
  // 2. Funding Curve Requirement
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
  // 3. Volatility Surface Requirement
  final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
  if (surfaceNames == null || surfaceNames.size() != 1) {
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
    return null;
  }
  final String volSurfaceName = surfaceNames.iterator().next();
  final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  if (interpolators == null || interpolators.size() != 1) {
    // Log here too, for consistency with the other unresolved-constraint branches above
    s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    return null;
  }
  final String smileInterpolator = interpolators.iterator().next();
  // TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
  final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
      volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
  // Return the set
  return Sets.newHashSet(spotReq, fundingReq, volReq);
}
/**
 * Builds the requirement for the underlying's Black volatility surface. The target of equity
 * vol surfaces is the underlying's weak Bloomberg ticker, looked up via the time-series source.
 * <p>
 * NOTE(review): the {@code curveConfig} parameter is accepted but never used in the properties;
 * confirm whether CURVE_CALCULATION_CONFIG should be included, as the discount-curve requirement takes it.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Gets the Ticker from the BUID via the HistoricalTimeSeriesSource.
 * <p>
 * This might seem like a strange way to do it. It is. The reason is that only the tsSource
 * appeared to contain the ExternalIdBundle! TODO: Find a more appropriate way.
 *
 * @param tsSource the time-series source used to resolve the id bundle, not null
 * @param underlyingBuid the underlying's Bloomberg unique id, not null
 * @return the Bloomberg ticker string
 * @throws OpenGammaRuntimeException if the inputs are null or no ticker can be resolved
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
  if (tsSource == null || underlyingBuid == null) {
    throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
  }
  final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
  if (historicalTimeSeries == null) {
    throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
  }
  final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
  final ExternalId tickerId = idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
  if (tickerId == null) {
    // Previously an uninformative NullPointerException when the bundle had no Bloomberg ticker
    throw new OpenGammaRuntimeException("No Bloomberg ticker found in id bundle for " + underlyingBuid);
  }
  return tickerId.getValue();
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
/**
 * Builds the funding (discount) curve requirement on the security's currency.
 * <p>
 * NOTE(review): {@code curveCalculationConfigName} is accepted but not used — only the curve
 * name constrains the requirement; confirm whether CURVE_CALCULATION_CONFIG should be added.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
/** @return the name of the value requirement this function produces, as passed to the constructor */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
// KeepBothMethods -- merge-tool marker (not Java): both overload variants below were retained during the merge
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// buildMarketBundle is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = getFundingCurveName(desiredValue);
final String curveConfigName = getCurveConfigName(desiredValue);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
@Override
/**
* Get the set of ValueRequirements
* If null, engine will attempt to find a default, and call function again
*/
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
// Set Forward Curve Currency Property
final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
* Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
* This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
* TODO: Find a more appropriate way.
*/
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
} else {
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
return bbgTicker;
}
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
/**
 * The security-to-analytics converter built in {@link #init}; null until init() has run.
 * TODO(review): remove this accessor — refactor EquityIndexOptionFundingCurveSensitivitiesFunction instead.
 */
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/**
 * Builds the security converter from the compilation context's holiday, region and
 * convention sources. Called by the engine before execution, which is why the
 * converter cannot be created in the constructor.
 */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
/**
 * Converts the target security into an analytic derivative, assembles the market data
 * bundle from the resolved inputs, delegates valuation to {@link #computeValues}, and
 * wraps the result in a specification matching the promised properties.
 * Throws if the option has already settled at the valuation instant.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
/**
 * Assembles the pricing data bundle (vol surface, funding curve, forward curve) from the
 * resolved function inputs. Re-used by EquityIndexVanillaBarrierOptionFunction, hence
 * protected rather than private.
 * <p>
 * If the vol surface is moneyness-parameterised its embedded forward curve is used;
 * otherwise a forward curve is built from the spot value and the funding curve.
 */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
/** This function applies only to equity index option securities. */
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
/** Casts the target's security; safe given {@link #getTargetType} restricts the target type. */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
/** Advertises a single result with wildcard curve/surface/interpolator properties; they are narrowed at execution time. */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Properties advertised at compile time: Black calculation method and the security's
 * currency are fixed; surface, curve, curve config and smile interpolator are left open.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Builds the result properties for execution, pinning the concrete curve, curve
 * configuration, surface and smile-interpolator names taken from the satisfied
 * requirement's constraints, plus the Black method and the security's currency.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String curve = getFundingCurveName(desiredValue);
final String config = getCurveConfigName(desiredValue);
final String surface = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String interpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final String currencyCode = FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode();
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, curve)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, config)
.with(ValuePropertyNames.SURFACE, surface)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, interpolator)
.with(ValuePropertyNames.CURRENCY, currencyCode);
}
/**
 * Extracts the single funding curve name from the requirement's {@code CURVE} constraint.
 * Returns null (so the engine can retry with a default) unless exactly one name is present.
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> curveNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (curveNames != null && curveNames.size() == 1) {
return curveNames.iterator().next();
}
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
/**
 * Extracts the single curve calculation configuration name from the requirement's
 * constraints; null (engine will retry with a default) unless exactly one is present.
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> configNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (configNames != null && configNames.size() == 1) {
return configNames.iterator().next();
}
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
/**
 * Requirement for the discounting (funding) yield curve of the security's currency,
 * constrained by curve name and curve calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
/**
 * Get the set of ValueRequirements: the underlying's spot value, the funding curve
 * and the Black volatility surface.
 * If any required constraint (curve, curve config, surface, smile interpolator) is
 * absent or ambiguous this returns null, and the engine will attempt to find a
 * default and call the function again.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
// Log at info like the other missing-constraint branches; previously this path failed silently.
s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
/**
 * Requirement for the Black volatility surface of the underlying, targeted at a weak
 * Bloomberg-ticker UniqueId resolved via the time-series source.
 * NOTE(review): {@code UniqueId} is referenced here but not visible in this chunk's imports — confirm it is imported above.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
// Set Forward Curve Currency Property
final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Overload without a curve calculation configuration; uses the funding-curve property
 * key from YieldCurveFunction instead of the generic CURVE constraint.
 * NOTE(review): {@code YieldCurveFunction} is referenced here but not visible in this chunk's imports — confirm it is imported above.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Get the Ticker from the BUID via the HistoricalTimeSeriesSource.
 * <p>
 * This might seem like a strange way to do it. It is. The reason is that only the
 * tsSource appeared to contain the ExternalIdBundle!
 * TODO: Find a more appropriate way.
 *
 * @throws OpenGammaRuntimeException if either argument is null, no time series exists,
 *         or the resolved id bundle carries no Bloomberg ticker
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
}
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
final ExternalId ticker = idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
if (ticker == null) {
// getExternalId returns null when the scheme is absent; the original code would NPE on .getValue() here.
throw new OpenGammaRuntimeException("No Bloomberg ticker in id bundle for " + underlyingBuid);
}
return ticker.getValue();
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
/** Requirement for the funding yield curve of the security's currency, constrained by curve name only. */
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
/** @return the value requirement name this function was constructed to produce */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
Safe
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
/** The converter built in {@link #init}; null until init() has run. TODO(review): remove this accessor. */
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
/** @param valueRequirementName the name of the value this function produces, not null */
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
/** Builds the security converter from the compilation context's sources; runs before execution. */
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
/**
 * Converts the target security into an analytic derivative, assembles the market data
 * bundle, delegates to {@link #computeValues}, and wraps the result in a specification
 * matching the promised properties. Throws if the option has already settled.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
/**
 * Assembles the pricing data bundle (vol surface, funding curve, forward curve) from
 * the resolved inputs. Re-used by EquityIndexVanillaBarrierOptionFunction, hence
 * protected. A moneyness-parameterised surface supplies its own forward curve;
 * otherwise one is built from spot and the funding curve.
 */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
/** This function applies only to equity index option securities. */
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
/** Casts the target's security; safe given {@link #getTargetType} restricts the target type. */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
/** Advertises a single result with wildcard properties; they are narrowed at execution time. */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
/** Compile-time properties: Black method and currency fixed; surface, curve, config and interpolator left open. */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/** Execution-time properties pinning the concrete curve/config/surface/interpolator names from the satisfied requirement. */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = getFundingCurveName(desiredValue);
final String curveConfigName = getCurveConfigName(desiredValue);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
/** Single funding curve name from the CURVE constraint; null (engine retries with a default) unless exactly one is present. */
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
/** Single curve calculation config name from the constraints; null (engine retries with a default) unless exactly one is present. */
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
@Override
/**
 * Get the set of ValueRequirements: spot of the underlying, funding curve, vol surface.
 * If null, engine will attempt to find a default, and call function again
 */
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
// NOTE(review): unlike the branches above, a missing interpolator constraint is not logged here.
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
/**
 * Requirement for the Black volatility surface of the underlying, targeted at a weak
 * Bloomberg-ticker UniqueId resolved via the time-series source.
 * NOTE(review): {@code UniqueId} is referenced here but not visible in this chunk's imports — confirm it is imported above.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
// Set Forward Curve Currency Property
final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/**
 * Overload which keys the funding curve by {@code YieldCurveFunction.PROPERTY_FUNDING_CURVE}
 * and targets the underlying's weak Bloomberg ticker as an ExternalId.
 * NOTE(review): no caller of this overload is visible in this chunk - possibly a legacy variant; verify before removing.
 */
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
// NOTE(review): YieldCurveFunction is not among this file's visible imports - confirm it is imported
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
 * Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
 * This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
 * TODO: Find a more appropriate way.
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
  if (tsSource == null || underlyingBuid == null) {
    throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
  }
  // Resolve the series by BUID; only its id bundle is needed, so fetch at most one data point
  final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
  if (historicalTimeSeries == null) {
    throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
  }
  final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
  // Guard against a bundle without a Bloomberg ticker: the original dereferenced the lookup result
  // unconditionally and threw a bare NullPointerException when the ticker was absent
  final ExternalId tickerId = idBundle == null ? null : idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
  if (tickerId == null) {
    throw new OpenGammaRuntimeException("No Bloomberg ticker found in id bundle for " + underlyingBuid);
  }
  return tickerId.getValue();
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
<<<<<<< MINE
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
=======
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
>>>>>>> YOURS
/** @return the name of the value this function produces; set in the constructor, never null */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter;
// set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// buildMarketBundle is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = getFundingCurveName(desiredValue);
final String curveConfigName = getCurveConfigName(desiredValue);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
@Override
/**
* Get the set of ValueRequirements
* If null, engine will attempt to find a default, and call function again
*/
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final UniqueId newId = UniqueId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName(), bbgTicker);
// Set Forward Curve Currency Property
final String curveCurrency = FinancialSecurityUtils.getCurrency(security).toString();
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
* Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
* This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
* TODO: Find a more appropriate way.
*/
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
} else {
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
return bbgTicker;
}
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
>>>>>>> YOURS
}
<<<<<<< MINE
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
}
=======
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
>>>>>>> YOURS
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
// Unstructured
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
// Name of the value this function produces; validated non-null in the constructor.
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter; // set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
/** @return the security-to-analytics converter created in init(); null before init() has run */
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
/**
 * @param valueRequirementName the name of the value this function produces, not null
 */
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
  // Build the security-to-analytics converter once, directly from the sources
  // held in the compilation context (holiday, convention, region - in that order).
  _converter = new EquityIndexOptionConverter(
      OpenGammaCompilationContext.getHolidaySource(context),
      OpenGammaCompilationContext.getConventionBundleSource(context),
      OpenGammaCompilationContext.getRegionSource(context));
}
/**
 * Builds the analytic option, assembles the market data bundle, delegates the calculation to
 * computeValues, and wraps the result in a ComputedValue whose properties match those
 * promised by createValueProperties.
 *
 * @throws OpenGammaRuntimeException if the option has already settled
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
// Refuse to price options that have already settled
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// buildMarketBundle is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
/**
 * Assembles the market data needed for pricing: underlying spot, funding (discount) curve and
 * Black volatility surface. The forward curve is taken from the surface when it is a moneyness
 * surface, otherwise it is built from spot and the funding curve.
 *
 * @throws OpenGammaRuntimeException if spot, funding curve or volatility surface is missing
 * @throws IllegalArgumentException if the curve input is not a YieldCurve
 */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
/** Performs the actual pricing calculation; implemented by concrete subclasses. */
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
/** This function applies only to equity index option securities. */
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
/**
 * Casts the target's security to {@link EquityIndexOptionSecurity}.
 * Safe given {@code getTargetType()} restricts targets to that type; will throw
 * {@link ClassCastException} if called with any other target.
 */
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
/**
 * Advertises the single result this function can produce, with unresolved
 * (withAny) curve/surface properties; the engine narrows them via constraints.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
/**
 * Builds the unresolved result properties: calculation method and currency are fixed,
 * while surface, curve, curve config and smile interpolator are left open (withAny)
 * for the engine to resolve against the desired value's constraints.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Builds the fully-resolved result properties for an execution, taking the concrete
 * curve, curve config, surface and smile interpolator names from the desired value's
 * constraints. Counterpart of the unresolved (withAny) overload used at compile time.
 */
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
  final String curve = getFundingCurveName(desiredValue);
  final String config = getCurveConfigName(desiredValue);
  final String surface = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
  final String interpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  return createValueProperties()
      .with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
      .with(ValuePropertyNames.CURVE, curve)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, config)
      .with(ValuePropertyNames.SURFACE, surface)
      .with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, interpolator)
      .with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
/**
 * Extracts the single funding curve name from the requirement's constraints.
 * Returns null (after logging) when the constraint is absent or ambiguous so the
 * engine can fall back to a default and re-invoke the function.
 */
protected String getFundingCurveName(ValueRequirement desiredValue) {
  final Set<String> names = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
  if (names != null && names.size() == 1) {
    return names.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
  return null;
}
/**
 * Extracts the single curve calculation config name from the requirement's constraints.
 * Returns null (after logging) when the constraint is absent or ambiguous so the
 * engine can fall back to a default and re-invoke the function.
 */
protected String getCurveConfigName(ValueRequirement desiredValue) {
  final Set<String> names = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  if (names != null && names.size() == 1) {
    return names.iterator().next();
  }
  s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
  return null;
}
/**
 * Builds the yield curve requirement for the security's currency, constrained by the
 * named funding curve and curve calculation configuration.
 */
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
// Target is the currency primitive, not the security itself
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
/**
 * Gets the set of input ValueRequirements: spot of the underlying, the funding/discount
 * curve, and the Black volatility surface.
 * Returns null when any required constraint (curve, curve config, surface, smile
 * interpolator) is absent or ambiguous; the engine will then attempt to find a default
 * and call this function again.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
  final ValueProperties constraints = desiredValue.getConstraints();
  // Get security and its underlying's ExternalId.
  final Security security = target.getSecurity();
  final ExternalId underlyingId;
  if (security instanceof EquityIndexOptionSecurity) {
    underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
  } else if (security instanceof EquityBarrierOptionSecurity) {
    underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
  } else {
    throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
  }
  // 1. Spot Index Requirement
  final ValueRequirement spotReq = getSpotRequirement(underlyingId);
  // 2. Funding Curve Requirement
  final String fundingCurveName = getFundingCurveName(desiredValue);
  if (fundingCurveName == null) {
    return null;
  }
  final String curveConfigName = getCurveConfigName(desiredValue);
  if (curveConfigName == null) {
    return null;
  }
  final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
  // 3. Volatility Surface Requirement
  final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
  if (surfaceNames == null || surfaceNames.size() != 1) {
    s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
    return null;
  }
  final String volSurfaceName = surfaceNames.iterator().next();
  final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
  if (interpolators == null || interpolators.size() != 1) {
    // Log for consistency with the other missing-constraint branches before deferring to defaults
    s_logger.info("Could not find {} requirement. Looking for a default..", BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
    return null;
  }
  final String smileInterpolator = interpolators.iterator().next();
  // TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
  final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
      volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
  // Return the set
  return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
<<<<<<< MINE
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
=======
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
>>>>>>> YOURS
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
 * Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
 * This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
 * TODO: Find a more appropriate way.
 */
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
  if (tsSource == null || underlyingBuid == null) {
    throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
  }
  final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
  if (historicalTimeSeries == null) {
    throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
  }
  final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
  final ExternalId tickerId = idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER);
  // Fail with a descriptive message instead of an NPE when the bundle has no Bloomberg ticker
  if (tickerId == null) {
    throw new OpenGammaRuntimeException("No Bloomberg ticker in id bundle for " + underlyingBuid);
  }
  return tickerId.getValue();
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
}
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
>>>>>>> YOURS
}
/** Returns the value requirement name this function instance was constructed to produce. */
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.equity.indexoption;
import java.util.Collections;
import java.util.Set;
import javax.time.calendar.ZonedDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.equity.StaticReplicationDataBundle;
import com.opengamma.analytics.financial.equity.option.EquityIndexOption;
import com.opengamma.analytics.financial.equity.option.EquityIndexOptionDefinition;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldCurve;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurface;
import com.opengamma.analytics.financial.model.volatility.surface.BlackVolatilitySurfaceMoneyness;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeries;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.holiday.HolidaySource;
import com.opengamma.core.id.ExternalSchemes;
import com.opengamma.core.region.RegionSource;
import com.opengamma.core.security.Security;
import com.opengamma.core.value.MarketDataRequirementNames;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.OpenGammaExecutionContext;
import com.opengamma.financial.analytics.conversion.EquityIndexOptionConverter;
import com.opengamma.financial.analytics.model.InstrumentTypeProperties;
import com.opengamma.financial.analytics.model.forex.option.black.FXOptionBlackFunction;
import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues;
import com.opengamma.financial.convention.ConventionBundleSource;
import com.opengamma.financial.security.FinancialSecurityTypes;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.security.option.EquityBarrierOptionSecurity;
import com.opengamma.financial.security.option.EquityIndexOptionSecurity;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.async.AsynchronousExecution;
import com.opengamma.util.money.Currency;
/**
*
*/
public abstract class EquityIndexOptionFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(EquityIndexOptionFunction.class);
private final String _valueRequirementName;
private EquityIndexOptionConverter _converter; // set in init(), not constructor
// Get rid of this, refactor EquityIndexOptionFundingCurveSensitivitiesFunction
protected final EquityIndexOptionConverter getConverter() {
return _converter;
}
public EquityIndexOptionFunction(final String valueRequirementName) {
ArgumentChecker.notNull(valueRequirementName, "value requirement name");
_valueRequirementName = valueRequirementName;
}
@Override
public void init(final FunctionCompilationContext context) {
final HolidaySource holidaySource = OpenGammaCompilationContext.getHolidaySource(context);
final RegionSource regionSource = OpenGammaCompilationContext.getRegionSource(context);
final ConventionBundleSource conventionSource = OpenGammaCompilationContext.getConventionBundleSource(context);
_converter = new EquityIndexOptionConverter(holidaySource, conventionSource, regionSource);
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target,
final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
// 1. Build the analytic derivative to be priced
final ZonedDateTime now = executionContext.getValuationClock().zonedDateTime();
final EquityIndexOptionSecurity security = getEquityIndexOptionSecurity(target);
final ExternalId underlyingId = security.getUnderlyingId();
final EquityIndexOptionDefinition defn = _converter.visitEquityIndexOptionSecurity(security);
final EquityIndexOption derivative = (EquityIndexOption) defn.toDerivative(now);
if (derivative.getTimeToSettlement() < 0.0) {
throw new OpenGammaRuntimeException("EquityIndexOption with expiry, " + security.getExpiry().getExpiry().toString() + ", has already settled.");
}
// 2. Build up the market data bundle
final StaticReplicationDataBundle market = buildMarketBundle(underlyingId, executionContext, inputs, target, desiredValues);
// 3. The Calculation - what we came here to do
final Object results = computeValues(derivative, market);
// 4. Create Result's Specification that matches the properties promised and Return
final ValueRequirement desiredValue = desiredValues.iterator().next();
//final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), desiredValue.getConstraints());
final ValueSpecification spec = new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target, desiredValue, executionContext).get());
return Collections.singleton(new ComputedValue(spec, results));
}
// buildMarketBundle is re-used by EquityIndexVanillaBarrierOptionFunction, hence is available to call */
protected StaticReplicationDataBundle buildMarketBundle(final ExternalId underlyingId, final FunctionExecutionContext executionContext,
final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
final Security security = target.getSecurity();
final ValueRequirement desiredValue = desiredValues.iterator().next();
// a. The Spot Index
final Object spotObject = inputs.getValue(getSpotRequirement(underlyingId));
if (spotObject == null) {
throw new OpenGammaRuntimeException("Could not get Underlying's Spot value");
}
final double spot = (Double) spotObject;
// b. The Funding Curve
final String fundingCurveName = desiredValue.getConstraint(ValuePropertyNames.CURVE);
final String curveConfigName = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Object fundingObject = inputs.getValue(getDiscountCurveRequirement(fundingCurveName, curveConfigName, security));
if (fundingObject == null) {
throw new OpenGammaRuntimeException("Could not get Funding Curve");
}
if (!(fundingObject instanceof YieldCurve)) { //TODO: make it more generic
throw new IllegalArgumentException("Can only handle YieldCurve");
}
final YieldCurve fundingCurve = (YieldCurve) fundingObject;
// c. The Vol Surface
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolator = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
final Object volSurfaceObject = inputs.getValue(getVolatilitySurfaceRequirement(OpenGammaExecutionContext.getHistoricalTimeSeriesSource(executionContext), security, volSurfaceName,
smileInterpolator, curveConfigName, fundingCurveName, underlyingId));
if (volSurfaceObject == null || !(volSurfaceObject instanceof BlackVolatilitySurface)) {
throw new OpenGammaRuntimeException("Could not get Volatility Surface");
}
final BlackVolatilitySurface<?> blackVolSurf = (BlackVolatilitySurface<?>) volSurfaceObject;
// d. Forward Curve
final ForwardCurve forwardCurve;
if (blackVolSurf instanceof BlackVolatilitySurfaceMoneyness) { // Use forwards tied to vols if available
forwardCurve = ((BlackVolatilitySurfaceMoneyness) blackVolSurf).getForwardCurve();
} else {
forwardCurve = new ForwardCurve(spot, fundingCurve.getCurve()); // else build from spot and funding curve
}
final StaticReplicationDataBundle market = new StaticReplicationDataBundle(blackVolSurf, fundingCurve, forwardCurve);
return market;
}
protected abstract Object computeValues(final EquityIndexOption derivative, final StaticReplicationDataBundle market);
@Override
public ComputationTargetType getTargetType() {
return FinancialSecurityTypes.EQUITY_INDEX_OPTION_SECURITY;
}
protected EquityIndexOptionSecurity getEquityIndexOptionSecurity(final ComputationTarget target) {
return (EquityIndexOptionSecurity) target.getSecurity();
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
return Collections.singleton(new ValueSpecification(getValueRequirementName(), target.toSpecification(), createValueProperties(target).get()));
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.withAny(ValuePropertyNames.SURFACE)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.withAny(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
}
protected ValueProperties.Builder createValueProperties(final ComputationTarget target, ValueRequirement desiredValue, FunctionExecutionContext executionContext) {
final String fundingCurveName = getFundingCurveName(desiredValue);
final String curveConfigName = getCurveConfigName(desiredValue);
final String volSurfaceName = desiredValue.getConstraint(ValuePropertyNames.SURFACE);
final String smileInterpolatorName = desiredValue.getConstraint(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
ValueProperties.Builder builder = createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, FXOptionBlackFunction.BLACK_METHOD)
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfigName)
.with(ValuePropertyNames.SURFACE, volSurfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolatorName)
.with(ValuePropertyNames.CURRENCY, FinancialSecurityUtils.getCurrency(target.getSecurity()).getCode());
return builder;
}
protected String getFundingCurveName(ValueRequirement desiredValue) {
final Set<String> fundingCurves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
if (fundingCurves == null || fundingCurves.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE);
return null;
}
final String fundingCurveName = fundingCurves.iterator().next();
return fundingCurveName;
}
protected String getCurveConfigName(ValueRequirement desiredValue) {
final Set<String> curveConfigNames = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigNames == null || curveConfigNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.CURVE_CALCULATION_CONFIG);
return null;
}
final String curveConfigName = curveConfigNames.iterator().next();
return curveConfigName;
}
protected ValueRequirement getDiscountCurveRequirement(String fundingCurveName, String curveCalculationConfigName, Security security) {
ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfigName)
.get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, FinancialSecurityUtils.getCurrency(security).getUniqueId(), properties);
}
@Override
/**
* Get the set of ValueRequirements
* If null, engine will attempt to find a default, and call function again
*/
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
// Get security and its underlying's ExternalId.
final Security security = target.getSecurity();
final ExternalId underlyingId;
if (security instanceof EquityIndexOptionSecurity) {
underlyingId = ((EquityIndexOptionSecurity) security).getUnderlyingId();
} else if (security instanceof EquityBarrierOptionSecurity) {
underlyingId = ((EquityBarrierOptionSecurity) security).getUnderlyingId();
} else {
throw new OpenGammaRuntimeException("EquityIndexOptionFunction does not handle this security type: " + security.getSecurityType());
}
// 1. Spot Index Requirement
final ValueRequirement spotReq = getSpotRequirement(underlyingId);
// 2. Funding Curve Requirement
// Funding curve
final String fundingCurveName = getFundingCurveName(desiredValue);
if (fundingCurveName == null) {
return null;
}
// Curve configuration
final String curveConfigName = getCurveConfigName(desiredValue);
if (curveConfigName == null) {
return null;
}
final ValueRequirement fundingReq = getDiscountCurveRequirement(fundingCurveName, curveConfigName, security);
// 3. Volatility Surface Requirement
// Surface Name
final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
if (surfaceNames == null || surfaceNames.size() != 1) {
s_logger.info("Could not find {} requirement. Looking for a default..", ValuePropertyNames.SURFACE);
return null;
}
final String volSurfaceName = surfaceNames.iterator().next();
// Interpolator Name
final Set<String> interpolators = constraints.getValues(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR);
if (interpolators == null || interpolators.size() != 1) {
return null;
}
final String smileInterpolator = interpolators.iterator().next();
// TODO: REVIEW THIS - TimeSeriesSource, used to get Ticker, the Vol ComputationTarget, from ExternalIdBundle
final ValueRequirement volReq = getVolatilitySurfaceRequirement(OpenGammaCompilationContext.getHistoricalTimeSeriesSource(context), security,
volSurfaceName, smileInterpolator, curveConfigName, fundingCurveName, underlyingId);
// Return the set
return Sets.newHashSet(spotReq, fundingReq, volReq);
}
// TODO: One should not be required to pass the FundingCurve and CurveConfig names, so that the VolatilitySurface can build an EquityForwardCurve
protected ValueRequirement getVolatilitySurfaceRequirement(final HistoricalTimeSeriesSource tsSource, final Security security,
final String surfaceName, final String smileInterpolator, final String curveConfig, final String fundingCurveName, final ExternalId underlyingBuid) {
// Targets for equity vol surfaces are the underlying tickers
final String bbgTicker = getBloombergTicker(tsSource, underlyingBuid);
final ExternalId newId = ExternalId.of(ExternalSchemes.BLOOMBERG_TICKER_WEAK, bbgTicker);
// Set Forward Curve Currency Property
final Currency curveCurrency = FinancialSecurityUtils.getCurrency(security);
final ValueProperties properties = ValueProperties.builder()
.with(ValuePropertyNames.SURFACE, surfaceName)
.with(BlackVolatilitySurfacePropertyNamesAndValues.PROPERTY_SMILE_INTERPOLATOR, smileInterpolator)
<<<<<<< MINE
.with(ValuePropertyNames.CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveConfig)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency)
=======
.with(YieldCurveFunction.PROPERTY_FUNDING_CURVE, fundingCurveName)
.with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency.getCode())
>>>>>>> YOURS
.with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
.get();
return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, ComputationTargetType.PRIMITIVE, newId, properties);
}
/*
* Get the Ticker from the BUID via the HistoricalTimeSeriesSource.<p>
* This might seem like a strange way to do it. It is. The reason is that only the tsSource appeared to contain the ExternalIdBundle!
* TODO: Find a more appropriate way.
*/
protected String getBloombergTicker(final HistoricalTimeSeriesSource tsSource, final ExternalId underlyingBuid) {
if (tsSource == null || underlyingBuid == null) {
throw new OpenGammaRuntimeException("Unable to find option underlyer's ticker from the ExternalIdBundle");
} else {
final HistoricalTimeSeries historicalTimeSeries = tsSource.getHistoricalTimeSeries("PX_LAST", ExternalIdBundle.of(underlyingBuid), null, null, true, null, true, 1);
if (historicalTimeSeries == null) {
throw new OpenGammaRuntimeException("We require a time series for " + underlyingBuid);
}
final ExternalIdBundle idBundle = tsSource.getExternalIdBundle(historicalTimeSeries.getUniqueId());
String bbgTicker = (idBundle.getExternalId(ExternalSchemes.BLOOMBERG_TICKER)).getValue();
return bbgTicker;
}
}
protected ValueRequirement getSpotRequirement(final ExternalId underlyingId) {
<<<<<<< MINE
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, UniqueId.of(underlyingId.getScheme().getName(), underlyingId.getValue()));
// Alternatively, as in EquityFuturesFunction: ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, security.getUnderlyingId());
=======
return new ValueRequirement(MarketDataRequirementNames.MARKET_VALUE, ComputationTargetType.PRIMITIVE, underlyingId);
}
protected ValueRequirement getDiscountCurveRequirement(final Security security, final String fundingCurveName) {
final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, fundingCurveName).get();
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(FinancialSecurityUtils.getCurrency(security)), properties);
>>>>>>> YOURS
}
protected final String getValueRequirementName() {
return _valueRequirementName;
}
}
Diff Result
No diff
Case 65 - java_ogplatform.rev_7b3fb_6494c..ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
/** Caches the time-series resolver from the compilation context for later use in getRequirements. */
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
/** This function operates on positions (whose security is a raw sensitivities security — see canApplyTo). */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
/**
 * Applies only to positions whose security is a RawSecurity of the
 * externally-provided-sensitivities security type.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  final Security security = target.getPosition().getSecurity();
  return security instanceof RawSecurity
      && ((RawSecurity) security).getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve);
final ValueProperties.Builder properties = createValueProperties(target)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
for (final FactorExposureData exposure : exposures) {
if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
if (exposure.getNode() != null && exposure.getNode().length() > 0) {
final Period nodePeriod = Period.parse("P" + exposure.getNode());
if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
return exposure;
}
}
}
}
return null;
}
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
return htsRequirement;
//return new ValueRequirement();
//return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
}
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties.get());
}
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties.get());
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
 * Computes yield curve node sensitivities ({@link ValueRequirementNames#YIELD_CURVE_NODE_SENSITIVITIES})
 * for positions in externally provided sensitivities securities. The factor exposures are decoded
 * from the raw security, matched by tenor against the strips of the requested yield curve, and the
 * exposure values are read from externally supplied historical time-series.
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  /** Factor names containing this text are treated as swap factors when matching curve tenors. */
  private static final CharSequence SWAP_TEXT = "SWAP";
  /** Resolver for the external exposure time-series; populated in {@link #init}. */
  private HistoricalTimeSeriesResolver _htsResolver;

  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Only positions in raw securities carrying external sensitivity data are supported.
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }

  /**
   * Creates the base result properties (result currency and curve currency) for the target.
   *
   * @param target the position target, not null
   * @return a builder seeded with the currency properties
   */
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    return createValueProperties()
        .with(ValuePropertyNames.CURRENCY, currency)
        .with(ValuePropertyNames.CURVE_CURRENCY, currency);
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    // Curve and calculation configuration are left unconstrained at this stage.
    final ValueProperties.Builder properties = createValueProperties(target)
        .withAny(ValuePropertyNames.CURVE)
        .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    results.add(new ValueSpecification(YCNS_REQUIREMENT, target.toSpecification(), properties.get()));
    s_logger.debug("getResults(1) = {}", results);
    return results;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    // Need the curve itself, its specification, and every external exposure time-series.
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        // Exactly one yield curve input is expected; its properties pin down the result spec.
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target)
            .with(ValuePropertyNames.CURVE, curveName)
            .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning {}", results);
    return results;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
      final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    // Explicit checks rather than asserts: these must hold in production too, where -ea is typically off.
    if (curveName == null) {
      throw new OpenGammaRuntimeException("Curve name not found in desired values " + desiredValues);
    }
    if (curveCalculationConfig == null) {
      throw new OpenGammaRuntimeException("Curve calculation configuration not found in desired values " + desiredValues);
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve);
    final ValueProperties.Builder properties = createValueProperties(target)
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, target.toSpecification(), properties.get());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
  }

  /**
   * Builds the sensitivity vector for the curve, one entry per strip. Strips with no matching
   * external exposure, or whose exposure data is missing, contribute zero.
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
      final YieldAndDiscountCurve curve) {
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      double entry = 0d;
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = (Double) mhts.getTimeSeries().getLatestValue();
          // Guard the unboxing: an empty/null latest value would otherwise NPE.
          if (value != null) {
            entry = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
          } else {
            s_logger.warn("Time series had no latest value for {}", externalSensitivitiesData.getExposureExternalId());
          }
        } else {
          s_logger.warn("Value was null when getting required input data {}", externalSensitivitiesData.getExposureExternalId());
        }
      }
      entries[i++] = entry;
    }
    return new DoubleMatrix1D(entries);
  }

  /**
   * Finds the exposure entry whose swap yield factor tenor matches the strip's tenor (by total
   * months), or returns null if none matches.
   */
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          // Node is stored without the ISO-8601 "P" prefix, e.g. "6M" or "10Y".
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }

  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }

  /**
   * Builds one time-series requirement per decoded factor exposure on the security.
   */
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }

  /**
   * Builds the historical time-series requirement for a single exposure identifier, resolving
   * the "EXPOSURE" data field at the valuation time.
   */
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Requirement for the named yield curve in the security's currency.
   */
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties properties = ValueProperties.builder()
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
        .get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
  }

  /**
   * Requirement for the named curve's specification (strip definitions) in the security's currency.
   */
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get();
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
  }
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
/** Caches the historical time-series resolver from the compilation context for later requirement building. */
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
/** This function operates on position targets. */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
/**
 * Applicable only when the position's security is a {@link RawSecurity} of the
 * external-sensitivities security type.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
final Security positionSecurity = target.getPosition().getSecurity();
if (positionSecurity instanceof RawSecurity) {
final RawSecurity rawSecurity = (RawSecurity) positionSecurity;
return rawSecurity.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
return false;
}
/**
 * Seeds a properties builder with the result currency and curve currency taken from the
 * target position's security.
 */
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final String currencyCode = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity()).getCode();
return createValueProperties().with(ValuePropertyNames.CURRENCY, currencyCode).with(ValuePropertyNames.CURVE_CURRENCY, currencyCode);
}
/**
 * Declares the single yield-curve-node-sensitivities result with unconstrained curve and
 * calculation-configuration properties.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties resultProperties = createValueProperties(target)
.withAny(ValuePropertyNames.CURVE)
.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
.get();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
results.add(new ValueSpecification(YCNS_REQUIREMENT, target.toSpecification(), resultProperties));
s_logger.debug("getResults(1) = " + results);
return results;
}
/**
 * Requires the named curve, its specification, and a time-series per decoded factor exposure.
 * Returns null when the curve or calculation configuration is not bound to exactly one value.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final ValueProperties constraints = desiredValue.getConstraints();
final Set<String> curveNames = constraints.getValues(ValuePropertyNames.CURVE);
if (curveNames == null || curveNames.size() != 1) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
final Set<String> configNames = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (configNames == null || configNames.size() != 1) {
s_logger.warn("no curve config specified");
return null;
}
final String curveName = curveNames.iterator().next();
final String configName = configNames.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curveName, configName));
requirements.add(getCurveSpecRequirement(target, curveName));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
/**
 * Refines the declared results once inputs are resolved: the curve name and calculation
 * configuration are copied from the resolved yield curve input's properties.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
// Sanity checks (enabled with -ea only): exactly one yield curve input is expected,
// and it must carry both the curve name and the calculation configuration.
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
/**
 * Computes the labelled yield curve node sensitivities for the position from the
 * externally supplied sensitivity data, scaled by position quantity.
 *
 * @param executionContext  the execution context, supplies the security source
 * @param inputs  resolved inputs: the curve, its specification and the exposure time-series
 * @param target  the position target
 * @param desiredValues  requested values carrying the CURVE and CURVE_CALCULATION_CONFIG constraints
 * @return the labelled sensitivities for the curve
 * @throws OpenGammaRuntimeException if the curve or curve specification input is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
    final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  // Extract the curve name and calculation configuration from the desired-value
  // constraints; if several desired values are present the last one seen wins.
  String curveName = null;
  String curveCalculationConfig = null;
  for (final ValueRequirement requirement : desiredValues) {
    final ValueProperties constraints = requirement.getConstraints();
    final Set<String> curveValues = constraints.getValues(ValuePropertyNames.CURVE);
    if (curveValues != null) {
      curveName = curveValues.iterator().next();
    }
    final Set<String> configValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (configValues != null) {
      curveCalculationConfig = configValues.iterator().next();
    }
  }
  assert curveName != null;
  assert curveCalculationConfig != null;
  final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
  final BigDecimal qty = target.getPosition().getQuantity();
  final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
  final Object curveObject = inputs.getValue(curveRequirement);
  if (curveObject == null) {
    throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
  }
  // Assign directly rather than initialising to null and reassigning.
  final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
  final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
  if (curveSpecObject == null) {
    throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
  }
  final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
  final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
  final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
  interpolatedCurves.put(curveName, curve);
  final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
  final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
  final ValueProperties.Builder properties = createValueProperties(target)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
  final ComputationTargetSpecification targetSpec = target.toSpecification();
  final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
  return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
}
/**
 * Builds the sensitivity vector: one entry per strip in the curve specification.
 *
 * For each strip, the decoded factor exposures are searched for a swap factor with the
 * same tenor; when found, the latest value of the matching "EXPOSURE" time-series is
 * used, negated and scaled by quantity / 10000. Strips with no tenor match, or whose
 * input series is missing, contribute 0.
 *
 * NOTE(review): assumes getLatestValue() is non-null whenever the computed value is
 * present -- confirm; otherwise the unboxing in the multiplication would throw a
 * NullPointerException.
 *
 * @param secSource  source used to decode the factor exposure data
 * @param inputs  resolved inputs holding the exposure time-series
 * @param rawSecurity  the security carrying the encoded sensitivities
 * @param curveSpec  the curve specification whose strips define the vector layout
 * @param curve  the yield curve (not read in this method body)
 * @param qty  position quantity used to scale each entry
 * @return the sensitivity vector aligned with curveSpec.getStrips()
 */
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve, BigDecimal qty) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
/**
 * Finds the factor exposure whose tenor matches the given curve strip, or null if none.
 *
 * Only YIELD-type factors whose name mentions "SWAP" and that carry a non-empty node
 * string are considered; the comparison is on total months of the two periods.
 */
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
  for (final FactorExposureData candidate : exposures) {
    final boolean swapYieldFactor = candidate.getFactorType().equals(FactorType.YIELD)
        && candidate.getFactorName().contains(SWAP_TEXT);
    if (!swapYieldFactor) {
      continue;
    }
    final String node = candidate.getNode();
    if (node == null || node.length() == 0) {
      continue;
    }
    // Node strings are ISO-8601 period bodies without the leading 'P'.
    final Period nodePeriod = Period.parse("P" + node);
    if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
      return candidate;
    }
  }
  return null;
}
/**
 * Returns the short display name of this function.
 */
@Override
public String getShortName() {
  final String shortName = "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  return shortName;
}
/**
 * Builds one historical time-series requirement per decoded factor exposure on the
 * raw security.
 */
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
  final Set<ValueRequirement> requirements = Sets.newHashSet();
  for (final FactorExposureData exposure : RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity)) {
    requirements.add(getSensitivityRequirement(exposure.getExposureExternalId()));
  }
  return requirements;
}
/**
 * Builds a historical time-series requirement for the "EXPOSURE" data field of the
 * given external identifier, pinned to the valuation time at both ends of the window.
 *
 * @param externalId  the identifier of the exposure series
 * @return the time-series value requirement
 */
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
  final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
  // Dead commented-out alternatives removed; return the requirement directly.
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds the requirement for the yield curve itself, keyed on the currency of the
 * position's security.
 */
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  final ValueProperties properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
/**
 * Builds the requirement for the interpolated curve specification, keyed on the
 * currency of the position's security.
 */
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
/**
 * Produces yield curve node sensitivities for positions in securities whose sensitivities are
 * supplied externally as factor exposure data on a {@link RawSecurity}. Each matching factor's
 * latest "EXPOSURE" time-series value is negated, scaled by position quantity / 10000, and
 * placed in the entry of the curve strip with the same tenor.
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  /** Substring a factor name must contain for the factor to be treated as a swap yield factor. */
  private static final CharSequence SWAP_TEXT = "SWAP";
  /** Resolver for the externally supplied sensitivity time-series; initialised in {@link #init}. */
  private HistoricalTimeSeriesResolver _htsResolver;

  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }

  /**
   * Applies only to positions in {@link RawSecurity} instances whose security type marks
   * them as carrying externally provided sensitivities.
   */
  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }

  /**
   * Builds the base result properties: the position's currency as both CURRENCY and
   * CURVE_CURRENCY.
   */
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    final ValueProperties.Builder properties = createValueProperties();
    properties.with(ValuePropertyNames.CURRENCY, currency);
    properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
    return properties;
  }

  /**
   * Declares the potential result with wildcard curve and curve calculation configuration;
   * the concrete properties are filled in once the inputs are resolved.
   */
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = {}", results);
    return results;
  }

  /**
   * Requires the named curve, its specification and every externally supplied sensitivity
   * time-series for the security. Returns {@code null} (meaning the desired value cannot be
   * satisfied) unless exactly one curve name and exactly one curve calculation configuration
   * are constrained.
   */
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }

  /**
   * Refines the declared result once inputs are resolved: copies CURVE and
   * CURVE_CALCULATION_CONFIG from the resolved yield curve input onto the result
   * specification. The asserts document the expectation of exactly one yield curve
   * input (checked only with -ea).
   */
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target);
        properties.with(ValuePropertyNames.CURVE, curveName);
        properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning {}", results);
    return results;
  }

  /**
   * Computes the labelled node sensitivities for the named curve.
   *
   * @throws OpenGammaRuntimeException if the curve or curve specification input is missing
   */
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
      final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // Extract curve name and calculation configuration from the desired-value constraints.
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    assert curveName != null;
    assert curveCalculationConfig != null;
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final BigDecimal qty = target.getPosition().getQuantity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    // Assign directly rather than initialising to null and reassigning.
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
    final ValueProperties.Builder properties = createValueProperties(target)
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
  }

  /**
   * Builds the sensitivity vector, one entry per strip of the curve specification. A strip
   * with a matching swap-tenor factor takes the latest "EXPOSURE" time-series value, negated
   * and scaled by quantity / 10000; unmatched strips or missing input data contribute 0.
   * NOTE(review): assumes the time-series latest value is non-null when present -- confirm.
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
      final YieldAndDiscountCurve curve, final BigDecimal qty) {
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = (Double) mhts.getTimeSeries().getLatestValue();
          entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
        } else {
          s_logger.warn("Value was null when getting required input data {}", externalSensitivitiesData.getExposureExternalId());
          entries[i] = 0d;
        }
      } else {
        entries[i] = 0d;
      }
      i++;
    }
    return new DoubleMatrix1D(entries);
  }

  /**
   * Finds the factor exposure whose swap tenor (node string, compared in total months)
   * matches the strip's tenor, or {@code null} when there is no match.
   */
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }

  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }

  /**
   * Builds one historical time-series requirement per decoded factor exposure on the
   * raw security.
   */
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }

  /**
   * Builds the historical time-series requirement for the "EXPOSURE" field of the given
   * identifier, pinned to the valuation time at both ends of the window.
   */
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Requirement for the yield curve itself, keyed on the security's currency.
   */
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
    properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties.get());
  }

  /**
   * Requirement for the interpolated curve specification, keyed on the security's currency.
   */
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetType.PRIMITIVE, currency.getUniqueId(), properties.get());
  }
}
// ---- "Right" (revised) version of the file follows ----
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
/**
 * Caches the historical time-series resolver from the compilation context; it is used by
 * getSensitivityRequirement to resolve the "EXPOSURE" series.
 */
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
/**
 * This function operates on position targets.
 */
@Override
public ComputationTargetType getTargetType() {
  return ComputationTargetType.POSITION;
}
/**
 * Applies only to positions in {@link RawSecurity} instances whose security type marks
 * them as carrying externally provided sensitivities.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  final Security security = target.getPosition().getSecurity();
  return security instanceof RawSecurity
      && ((RawSecurity) security).getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
/**
 * Builds the base result properties: the position's currency as both CURRENCY and
 * CURVE_CURRENCY.
 */
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
  final String currencyCode = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity()).getCode();
  return createValueProperties()
      .with(ValuePropertyNames.CURRENCY, currencyCode)
      .with(ValuePropertyNames.CURVE_CURRENCY, currencyCode);
}
/**
 * Declares the potential result with wildcard curve and curve calculation configuration;
 * the concrete property values are filled in once the inputs are resolved.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
  final ValueProperties properties = createValueProperties(target)
      .withAny(ValuePropertyNames.CURVE)
      .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG)
      .get();
  final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
  results.add(new ValueSpecification(YCNS_REQUIREMENT, target.toSpecification(), properties));
  s_logger.debug("getResults(1) = " + results);
  return results;
}
/**
 * Requires the named curve, its specification, and every externally supplied sensitivity
 * time-series for the security in the position.
 *
 * Returns null -- meaning the desired value cannot be satisfied -- unless the constraints
 * name exactly one curve and exactly one curve calculation configuration.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
// One time-series requirement per decoded factor exposure on the raw security.
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
/**
 * Refines the declared result once the resolved inputs are known.
 *
 * The curve name and curve calculation configuration are read from the resolved
 * YIELD_CURVE input's properties and attached to the YIELD_CURVE_NODE_SENSITIVITIES
 * result specification. The asserts document the expectation of exactly one yield
 * curve input carrying both properties (checked only when -ea is enabled).
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
// Only the yield curve input carries the curve name / calculation configuration.
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
/**
 * Computes the labelled yield curve node sensitivities for the position from the
 * externally supplied sensitivity data.
 *
 * @param executionContext  the execution context, supplies the security source
 * @param inputs  resolved inputs: the curve, its specification and the exposure time-series
 * @param target  the position target
 * @param desiredValues  requested values carrying the CURVE and CURVE_CALCULATION_CONFIG constraints
 * @return the labelled sensitivities for the curve
 * @throws OpenGammaRuntimeException if the curve or curve specification input is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
    final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
  // Extract the curve name and calculation configuration from the desired-value
  // constraints; if several desired values are present the last one seen wins.
  String curveName = null;
  String curveCalculationConfig = null;
  for (final ValueRequirement requirement : desiredValues) {
    final ValueProperties constraints = requirement.getConstraints();
    final Set<String> curveValues = constraints.getValues(ValuePropertyNames.CURVE);
    if (curveValues != null) {
      curveName = curveValues.iterator().next();
    }
    final Set<String> configValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if (configValues != null) {
      curveCalculationConfig = configValues.iterator().next();
    }
  }
  assert curveName != null;
  assert curveCalculationConfig != null;
  final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
  final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
  final Object curveObject = inputs.getValue(curveRequirement);
  if (curveObject == null) {
    throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
  }
  // Assign directly rather than initialising to null and reassigning.
  final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
  final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
  if (curveSpecObject == null) {
    throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
  }
  final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
  final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
  final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
  interpolatedCurves.put(curveName, curve);
  final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
  final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve);
  final ValueProperties.Builder properties = createValueProperties(target)
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
  final ComputationTargetSpecification targetSpec = target.toSpecification();
  final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
  return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
}
/**
 * Builds the sensitivity vector: one entry per strip in the curve specification.
 *
 * For each strip, the decoded factor exposures are searched for a swap factor with the
 * same tenor; when found, the latest value of the matching "EXPOSURE" time-series is
 * used, negated (no quantity scaling in this revision). Strips with no tenor match, or
 * whose input series is missing, contribute 0.
 *
 * NOTE(review): assumes getLatestValue() is non-null whenever the computed value is
 * present -- confirm; otherwise the unary minus unboxing throws NullPointerException.
 *
 * @param secSource  source used to decode the factor exposure data
 * @param inputs  resolved inputs holding the exposure time-series
 * @param rawSecurity  the security carrying the encoded sensitivities
 * @param curveSpec  the curve specification whose strips define the vector layout
 * @param curve  the yield curve (not read in this method body)
 * @return the sensitivity vector aligned with curveSpec.getStrips()
 */
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
/**
 * Finds the first YIELD-type exposure whose factor name mentions "SWAP" and whose node tenor
 * (parsed as an ISO period, e.g. "6M" -> "P6M") has the same total months as the strip's tenor.
 * Returns {@code null} when no exposure matches.
 */
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
  final long stripMonths = strip.getTenor().getPeriod().totalMonths();
  for (final FactorExposureData candidate : exposures) {
    if (!candidate.getFactorType().equals(FactorType.YIELD) || !candidate.getFactorName().contains(SWAP_TEXT)) {
      continue;
    }
    final String node = candidate.getNode();
    if (node == null || node.length() == 0) {
      continue;
    }
    if (Period.parse("P" + node).totalMonths() == stripMonths) {
      return candidate;
    }
  }
  return null;
}
@Override
public String getShortName() {
  // Fixed identifier used by the engine to label this function.
  return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
/**
 * Declares one historical time-series requirement per decoded factor exposure of the security.
 */
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
  final Collection<FactorExposureData> exposures = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
  final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(exposures.size());
  for (final FactorExposureData exposure : exposures) {
    requirements.add(getSensitivityRequirement(exposure.getExposureExternalId()));
  }
  return requirements;
}
/**
 * Resolves the "EXPOSURE" time-series for the given id and requests its value fixed at the
 * valuation instant (inclusive at both ends).
 */
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
  final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE",
      DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Requirement for the named yield curve in the position's currency, constrained to the given
 * curve calculation configuration.
 * Uses {@code ValueProperties.builder()} for consistency with {@link #getCurveSpecRequirement}.
 */
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  final ValueProperties properties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, curveName)
      .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties);
}
/**
 * Requirement for the interpolated curve specification of the named curve in the position's currency.
 */
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  final ValueProperties properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
/**
 * Produces yield curve node sensitivities for positions in {@code RawSecurity} instruments of the
 * external-sensitivities security type. The per-node values are not computed analytically; they are
 * looked up from externally provided "EXPOSURE" historical time-series, matched to curve strips by
 * swap tenor, and negated (OpenGamma convention uses a -1bp shift).
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  // Substring looked for in factor names when matching exposures to curve strips.
  private static final CharSequence SWAP_TEXT = "SWAP";
  // Resolver for the externally supplied exposure time-series; set in init().
  private HistoricalTimeSeriesResolver _htsResolver;
  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }
  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }
  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Applies only to RawSecurity positions flagged as external-sensitivities securities.
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }
  // Base result properties: currency and curve currency both set to the security's currency.
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    final ValueProperties.Builder properties = createValueProperties();
    properties.with(ValuePropertyNames.CURRENCY, currency);
    properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
    return properties;
  }
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    // Initial (unbound) advertisement: curve and calculation config left open for the resolver.
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = " + results);
    return results;
  }
  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    // Requires exactly one curve name and one calculation config on the desired value.
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    // Inputs: the curve itself, its specification, and one time-series per factor exposure.
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }
  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    // Late-binding pass: pick the curve name/config off the resolved YIELD_CURVE input.
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        // Asserts assume at most one YIELD_CURVE input per resolution.
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target);
        properties.with(ValuePropertyNames.CURVE, curveName);
        properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning " + results);
    return results;
  }
  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
      final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // Recover the (single) curve name and calculation config from the desired values' constraints.
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    assert curveName != null;
    assert curveCalculationConfig != null;
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    Object curveSpecObject = null;
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    // Single-curve bundle keyed by the curve name, as expected by the labelling helper.
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve);
    final ValueProperties.Builder properties = createValueProperties(target)
        .with(ValuePropertyNames.CURVE, curveName)
        .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
    final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
    //s_logger.debug("execute, returning " + results);
    return results;
  }
  // Builds the sensitivity vector, one entry per curve strip; unmatched strips contribute zero.
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
      final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
      final YieldAndDiscountCurve curve) {
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = mhts.getTimeSeries().getLatestValue();
          entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
        } else {
          s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
          entries[i] = 0d;
        }
      } else {
        entries[i] = 0d;
      }
      i++;
    }
    return new DoubleMatrix1D(entries);
  }
  // First YIELD exposure whose name contains "SWAP" and whose node period ("P" + node) matches
  // the strip tenor by total months; null when nothing matches.
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }
  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }
  // One time-series requirement per decoded factor exposure of the security.
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }
  // Resolves the "EXPOSURE" series for the id and pins the request to the valuation instant.
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    final ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME,
        true);
    return htsRequirement;
    //return new ValueRequirement();
    //return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
  }
  // Requirement for the named yield curve in the position's currency, bound to the calc config.
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
    properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
  }
  // Requirement for the named curve's interpolated specification in the position's currency.
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
  }
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
return false;
}
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final BigDecimal qty = target.getPosition().getQuantity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
final ValueProperties.Builder properties = createValueProperties(target).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final YieldAndDiscountCurve curve, BigDecimal qty) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
// we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
entries[i] = -value;
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
for (final FactorExposureData exposure : exposures) {
if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
if (exposure.getNode() != null && exposure.getNode().length() > 0) {
final Period nodePeriod = Period.parse("P" + exposure.getNode());
if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
return exposure;
}
}
}
}
return null;
}
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
final ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
return htsRequirement;
//return new ValueRequirement();
//return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
}
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
}
/**
 * Builds the yield curve specification requirement for the currency of the target's security.
 *
 * @param target the position target whose security determines the currency
 * @param curveName the name of the curve
 * @return the yield curve specification value requirement
 */
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
  final Currency specCurrency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  final ValueProperties specProperties = ValueProperties.builder()
      .with(ValuePropertyNames.CURVE, curveName)
      .get();
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(specCurrency), specProperties);
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
/**
 * Computes {@link ValueRequirementNames#YIELD_CURVE_NODE_SENSITIVITIES} for positions in
 * externally provided sensitivities ("raw") securities. The factor exposures decoded from the
 * security are matched by tenor against the strips of the requested yield curve, and the latest
 * externally supplied exposure value is used for each matching node.
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  /** Factor names containing this text are treated as swap-rate factors when matching tenors. */
  private static final CharSequence SWAP_TEXT = "SWAP";
  private HistoricalTimeSeriesResolver _htsResolver;

  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Only applies to positions in raw securities of the external-sensitivities type.
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }

  /** Creates the base result properties (currency and curve currency) for the target's position. */
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    final ValueProperties.Builder properties = createValueProperties();
    properties.with(ValuePropertyNames.CURRENCY, currency);
    properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
    return properties;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    // Curve name and calculation config are left unbound here; they are fixed in the
    // inputs-driven getResults overload once the curve input is known.
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = {}", results);
    return results;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        // Exactly one yield-curve input is expected; the asserts guard that invariant.
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target);
        properties.with(ValuePropertyNames.CURVE, curveName);
        properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning {}", results);
    return results;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // Recover the curve name and calculation config from the desired values' constraints.
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    assert curveName != null;
    assert curveCalculationConfig != null;
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final BigDecimal qty = target.getPosition().getQuantity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
    final ValueProperties.Builder properties = createValueProperties(target).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
  }

  /**
   * Builds the sensitivity vector, one entry per curve strip; strips with no matching external
   * exposure (or no available data) get a zero entry.
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final YieldAndDiscountCurve curve, BigDecimal qty) {
    // NOTE(review): qty is accepted but never applied in this variant; position scaling
    // presumably happens downstream -- confirm against callers before relying on units.
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = mhts.getTimeSeries().getLatestValue();
          // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
          entries[i] = -value;
        } else {
          s_logger.warn("Value was null when getting required input data {}", externalSensitivitiesData.getExposureExternalId());
          entries[i] = 0d;
        }
      } else {
        entries[i] = 0d;
      }
      i++;
    }
    return new DoubleMatrix1D(entries);
  }

  /**
   * Finds the exposure whose node tenor matches the strip's tenor (by total months), restricted
   * to yield-type factors whose name mentions swaps. Returns null when there is no match.
   */
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }

  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }

  /**
   * Builds one time-series requirement per factor exposure encoded in the raw security.
   */
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }

  /**
   * Builds a historical time-series requirement for the latest "EXPOSURE" value of the given
   * external identifier, resolved through the injected time-series resolver.
   */
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Builds the yield curve requirement for the currency of the target's security.
   */
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
    properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
  }

  /**
   * Builds the yield curve specification requirement for the currency of the target's security.
   */
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
  }
}
KeepBothMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
/**
 * Computes {@link ValueRequirementNames#YIELD_CURVE_NODE_SENSITIVITIES} for positions in
 * externally provided sensitivities ("raw") securities. The factor exposures decoded from the
 * security are matched by tenor against the strips of the requested yield curve; the latest
 * externally supplied exposure value, scaled by the position quantity, is used per node.
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  /** Factor names containing this text are treated as swap-rate factors when matching tenors. */
  private static final CharSequence SWAP_TEXT = "SWAP";
  private HistoricalTimeSeriesResolver _htsResolver;

  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Only applies to positions in raw securities of the external-sensitivities type.
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }

  /** Creates the base result properties (currency and curve currency) for the target's position. */
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    final ValueProperties.Builder properties = createValueProperties();
    properties.with(ValuePropertyNames.CURRENCY, currency);
    properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
    return properties;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    // Curve name and calculation config are left unbound here; they are fixed in the
    // inputs-driven getResults overload once the curve input is known.
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = {}", results);
    return results;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        // Exactly one yield-curve input is expected; the asserts guard that invariant.
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target);
        properties.with(ValuePropertyNames.CURVE, curveName);
        properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning {}", results);
    return results;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    // Recover the curve name and calculation config from the desired values' constraints.
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    assert curveName != null;
    assert curveCalculationConfig != null;
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final BigDecimal qty = target.getPosition().getQuantity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
    final ValueProperties.Builder properties = createValueProperties(target).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
  }

  /**
   * Builds the sensitivity vector, one entry per curve strip, scaled by the position quantity
   * per 10,000 units; strips with no matching external exposure (or no data) get a zero entry.
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final YieldAndDiscountCurve curve, BigDecimal qty) {
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = (Double) mhts.getTimeSeries().getLatestValue();
          // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
          entries[i] = -value * (qty.doubleValue() / 10000d);
        } else {
          s_logger.warn("Value was null when getting required input data {}", externalSensitivitiesData.getExposureExternalId());
          entries[i] = 0d;
        }
      } else {
        entries[i] = 0d;
      }
      i++;
    }
    return new DoubleMatrix1D(entries);
  }

  /**
   * Finds the exposure whose node tenor matches the strip's tenor (by total months), restricted
   * to yield-type factors whose name mentions swaps. Returns null when there is no match.
   */
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }

  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }

  /**
   * Builds one time-series requirement per factor exposure encoded in the raw security.
   */
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }

  /**
   * Builds a historical time-series requirement for the latest "EXPOSURE" value of the given
   * external identifier, resolved through the injected time-series resolver.
   */
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Builds the yield curve requirement for the currency of the target's security.
   */
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
    properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
  }

  /**
   * Builds the yield curve specification requirement for the currency of the target's security.
   */
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
 * Calculates yield curve node sensitivities for securities whose factor sensitivities are
 * supplied externally (resolved from an "EXPOSURE" historical time-series per factor)
 * rather than computed analytically from an instrument definition.
 */
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
  private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
  /**
   * The value name calculated by this function.
   */
  public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
  /** Factor names containing this text are treated as swap-based yield factors. */
  private static final CharSequence SWAP_TEXT = "SWAP";
  private HistoricalTimeSeriesResolver _htsResolver;

  @Override
  public void init(final FunctionCompilationContext context) {
    // REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
    _htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
  }

  @Override
  public ComputationTargetType getTargetType() {
    return ComputationTargetType.POSITION;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // Only positions in raw securities flagged as carrying externally-provided sensitivities.
    if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
      return false;
    }
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
  }

  /**
   * Builds the base result properties (currency and curve currency) for the target's security.
   */
  private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
    final Security security = target.getPosition().getSecurity();
    final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
    final ValueProperties.Builder properties = createValueProperties();
    properties.with(ValuePropertyNames.CURRENCY, currency);
    properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
    return properties;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = {}", results);
    return results;
  }

  @Override
  public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
    final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
    final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    if ((curves == null) || (curves.size() != 1)) {
      s_logger.warn("no curve specified");
      // Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
      return null;
    }
    if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
      s_logger.warn("no curve config specified");
      return null;
    }
    final String curve = curves.iterator().next();
    final String curveCalcConfig = curveCalcConfigs.iterator().next();
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
    requirements.add(getCurveSpecRequirement(target, curve));
    requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
    return requirements;
  }

  @Override
  public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
    String curveName = null;
    String curveCalculationConfig = null;
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
      if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
        // Exactly one yield curve input is expected; take its curve name and calculation config.
        assert curveName == null;
        assert curveCalculationConfig == null;
        curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
        curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
        assert curveName != null;
        assert curveCalculationConfig != null;
        final ValueProperties.Builder properties = createValueProperties(target);
        properties.with(ValuePropertyNames.CURVE, curveName);
        properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
        results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
      }
    }
    s_logger.debug("getResults(2) returning {}", results);
    return results;
  }

  @Override
  public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs, final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    String curveName = null;
    String curveCalculationConfig = null;
    for (final ValueRequirement requirement : desiredValues) {
      final ValueProperties constraints = requirement.getConstraints();
      final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
      if (values != null) {
        curveName = values.iterator().next();
      }
      final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
      if (curveConfigValues != null) {
        curveCalculationConfig = curveConfigValues.iterator().next();
      }
    }
    assert curveName != null;
    assert curveCalculationConfig != null;
    final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
    final BigDecimal qty = target.getPosition().getQuantity();
    final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
    final Object curveObject = inputs.getValue(curveRequirement);
    if (curveObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
    }
    final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
    final Object curveSpecObject = inputs.getValue(curveSpecRequirement);
    if (curveSpecObject == null) {
      throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
    }
    final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
    final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
    final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
    interpolatedCurves.put(curveName, curve);
    final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
    final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
    final ValueProperties.Builder properties = createValueProperties(target).with(ValuePropertyNames.CURVE, curveName).with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
    return YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
  }

  /**
   * Node sensitivities scaled by position quantity; the external figure is per 10000 units.
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
      final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final YieldAndDiscountCurve curve, final BigDecimal qty) {
    return computeSensitivities(secSource, inputs, rawSecurity, curveSpec, qty.doubleValue() / 10000d);
  }

  /**
   * Unscaled node sensitivities (one external sensitivity value per matched strip).
   */
  private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
      final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final YieldAndDiscountCurve curve) {
    return computeSensitivities(secSource, inputs, rawSecurity, curveSpec, 1d);
  }

  /**
   * Shared implementation of the two getSensitivities overloads: for each curve strip, look up a
   * tenor-matched factor exposure, take the latest time-series value, negate it and apply the scale.
   * Unmatched strips or missing input data contribute zero.
   */
  private DoubleMatrix1D computeSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
      final InterpolatedYieldCurveSpecificationWithSecurities curveSpec, final double scale) {
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    final double[] entries = new double[curveSpec.getStrips().size()];
    int i = 0;
    for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
      final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
      double entry = 0d;
      if (externalSensitivitiesData != null) {
        final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
        if (computedValue != null) {
          final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
          final Double value = (Double) mhts.getTimeSeries().getLatestValue();
          // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
          entry = -value * scale;
        } else {
          s_logger.warn("Value was null when getting required input data {}", externalSensitivitiesData.getExposureExternalId());
        }
      }
      entries[i++] = entry;
    }
    return new DoubleMatrix1D(entries);
  }

  /**
   * Finds the factor exposure whose node tenor (in total months) matches the strip's tenor,
   * considering only swap-based yield factors; returns null when nothing matches.
   */
  private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
    for (final FactorExposureData exposure : exposures) {
      if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
        if (exposure.getNode() != null && exposure.getNode().length() > 0) {
          final Period nodePeriod = Period.parse("P" + exposure.getNode());
          if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
            return exposure;
          }
        }
      }
    }
    return null;
  }

  @Override
  public String getShortName() {
    return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
  }

  /**
   * Builds one time-series requirement per factor exposure decoded from the raw security.
   */
  protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
    final Set<ValueRequirement> requirements = Sets.newHashSet();
    final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
    for (final FactorExposureData exposureEntry : decodedSensitivities) {
      requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
    }
    return requirements;
  }

  /**
   * Resolves the "EXPOSURE" historical time-series for the given id and wraps it as a
   * requirement bounded to the valuation time.
   */
  protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
    final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
    return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
  }

  /**
   * Requirement for the named yield curve in the security's currency under the given calculation config.
   */
  protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
    properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
  }

  /**
   * Requirement for the named curve's specification in the security's currency.
   */
  protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
    final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
    final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
    return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
  }
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
return false;
}
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final BigDecimal qty = target.getPosition().getQuantity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
final ValueProperties.Builder properties = createValueProperties(target)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
<<<<<<< MINE
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
=======
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve, BigDecimal qty) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
>>>>>>> YOURS
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
for (final FactorExposureData exposure : exposures) {
if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
if (exposure.getNode() != null && exposure.getNode().length() > 0) {
final Period nodePeriod = Period.parse("P" + exposure.getNode());
if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
return exposure;
}
}
}
}
return null;
}
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
final ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME,
true);
return htsRequirement;
//return new ValueRequirement();
//return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
}
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
}
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
/**
 * Caches the historical time-series resolver from the compilation context; it is used by
 * {@link #getSensitivityRequirement} to resolve the "EXPOSURE" series for each factor.
 */
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
/** This function operates on position targets. */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
/**
 * Applicable only when the position's security is a {@link RawSecurity} whose security type
 * marks it as externally provided sensitivity data.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
  final Security underlying = target.getPosition().getSecurity();
  return underlying instanceof RawSecurity
      && ((RawSecurity) underlying).getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
/**
 * Creates the base result properties for the target: the standard function properties plus
 * CURRENCY and CURVE_CURRENCY, both set to the currency of the position's security.
 */
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
/**
 * Declares the single result this function can produce: yield curve node sensitivities for
 * the position, with CURVE and CURVE_CALCULATION_CONFIG left as wildcards until the inputs
 * are resolved (see the inputs-aware getResults overload).
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
/**
 * Requires the named yield curve, its specification, and one "EXPOSURE" time series per
 * factor exposure decoded from the raw security. Returns null (not applicable) unless the
 * desired value constrains exactly one curve and one curve calculation configuration.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
/**
 * Refines the declared result once inputs are resolved: the CURVE and
 * CURVE_CALCULATION_CONFIG properties are copied from the resolved YIELD_CURVE input onto
 * the sensitivities result. The asserts document the expectation of exactly one yield
 * curve input carrying both properties.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
/**
 * Computes the yield curve node sensitivities for the position. Extracts the curve name and
 * calculation configuration from the desired values, pulls the resolved curve and its
 * specification from the inputs, builds a single-curve bundle, and delegates to
 * getSensitivities to assemble the per-node sensitivity vector (scaled by position
 * quantity), which is then labelled per curve node by the helper.
 *
 * @throws OpenGammaRuntimeException if the curve or curve specification input is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
// Take the curve name / config from whichever desired value constrains them.
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final BigDecimal qty = target.getPosition().getQuantity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
// Bundle holds only the single curve being perturbed.
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
final ValueProperties.Builder properties = createValueProperties(target)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
<<<<<<< MINE
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
=======
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve, BigDecimal qty) {
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
>>>>>>> YOURS
/**
 * Finds the factor exposure whose node tenor matches the given curve strip. Only YIELD-type
 * factors whose name mentions "SWAP" and that carry a non-empty node are considered; tenors
 * are compared by total number of months.
 *
 * @param exposures the decoded factor exposures to search
 * @param strip the curve strip to match against
 * @return the first matching exposure, or null if none matches
 */
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
  final long stripMonths = strip.getTenor().getPeriod().totalMonths();
  for (final FactorExposureData exposure : exposures) {
    if (!exposure.getFactorType().equals(FactorType.YIELD) || !exposure.getFactorName().contains(SWAP_TEXT)) {
      continue;
    }
    final String node = exposure.getNode();
    if (node == null || node.isEmpty()) {
      continue;
    }
    // Node is stored without the ISO-8601 "P" prefix, e.g. "6M" -> "P6M".
    if (Period.parse("P" + node).totalMonths() == stripMonths) {
      return exposure;
    }
  }
  return null;
}
/** @return a fixed short name identifying this function in diagnostics and views */
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
/**
 * Builds one "EXPOSURE" time-series requirement per factor exposure decoded from the raw
 * security's externally supplied sensitivity data.
 */
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
/**
 * Builds the historical time-series requirement that supplies the externally provided
 * sensitivity ("EXPOSURE" data field) for the given factor identifier. The series is
 * constrained to the valuation time at both ends (inclusive), i.e. a single point.
 *
 * @param externalId identifier of the factor exposure to resolve
 * @return the time-series value requirement for the exposure data
 */
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
  // NOTE(review): _htsResolver.resolve may return null for an unknown identifier — TODO confirm
  // whether createHTSRequirement tolerates that or whether a null check is needed here.
  final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
  return HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE",
      DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME, true);
}
/**
 * Builds the requirement for the named yield curve, under the given calculation
 * configuration, in the currency of the position's security.
 */
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
}
/**
 * Builds the requirement for the interpolated specification of the named yield curve in the
 * currency of the position's security.
 */
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
  final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
  return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC,
      ComputationTargetSpecification.of(currency),
      ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName).get());
}
}
/* Unstructured variant: a second copy of the same source file follows below (review artifact). */
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
/**
 * Caches the historical time-series resolver from the compilation context; it is used to
 * resolve the "EXPOSURE" series referenced by this function's requirements.
 */
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
/** This function operates on position targets. */
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
/**
 * Applicable only to RawSecurity positions whose security type marks them as externally
 * provided sensitivity data.
 */
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
return false;
}
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
/**
 * Creates the base result properties for the target: the standard function properties plus
 * CURRENCY and CURVE_CURRENCY, both set to the currency of the position's security.
 */
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
/**
 * Declares the single result this function can produce: yield curve node sensitivities for
 * the position, with CURVE and CURVE_CALCULATION_CONFIG left as wildcards until the inputs
 * are resolved.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
/**
 * Requires the named yield curve, its specification, and one "EXPOSURE" time series per
 * factor exposure decoded from the raw security. Returns null (not applicable) unless the
 * desired value constrains exactly one curve and one curve calculation configuration.
 */
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
/**
 * Refines the declared result once inputs are resolved: copies CURVE and
 * CURVE_CALCULATION_CONFIG from the resolved YIELD_CURVE input onto the sensitivities
 * result. The asserts document the expectation of exactly one yield curve input.
 */
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
/**
 * Computes the yield curve node sensitivities for the position. Extracts the curve name and
 * calculation configuration from the desired values, pulls the resolved curve and its
 * specification from the inputs, builds a single-curve bundle, and delegates to
 * getSensitivities to assemble the per-node sensitivity vector (scaled by position
 * quantity), which is then labelled per curve node by the helper.
 *
 * @throws OpenGammaRuntimeException if the curve or curve specification input is missing
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
// Take the curve name / config from whichever desired value constrains them.
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final BigDecimal qty = target.getPosition().getQuantity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
// Bundle holds only the single curve being perturbed.
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
final ValueProperties.Builder properties = createValueProperties(target)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
<<<<<<< MINE
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve, BigDecimal qty) {
=======
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
>>>>>>> YOURS
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
<<<<<<< MINE
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
=======
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
>>>>>>> YOURS
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
/**
 * Finds the factor exposure whose node tenor matches the given curve strip. Only YIELD-type
 * factors whose name mentions "SWAP" and that carry a non-empty node are considered; tenors
 * are compared by total number of months. Returns null when no exposure matches.
 */
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
for (final FactorExposureData exposure : exposures) {
if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
if (exposure.getNode() != null && exposure.getNode().length() > 0) {
// Node is stored without the ISO-8601 "P" prefix, e.g. "6M" -> "P6M".
final Period nodePeriod = Period.parse("P" + exposure.getNode());
if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
return exposure;
}
}
}
}
return null;
}
/** @return a fixed short name identifying this function in diagnostics and views */
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
/**
 * Builds one "EXPOSURE" time-series requirement per factor exposure decoded from the raw
 * security's externally supplied sensitivity data.
 */
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
/**
 * Builds the historical time-series requirement that supplies the externally provided
 * sensitivity ("EXPOSURE" data field) for the given factor identifier. The series is
 * constrained to the valuation time at both ends (inclusive), i.e. a single point.
 */
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
final ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME,
true);
return htsRequirement;
//return new ValueRequirement();
//return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
}
/**
 * Builds the requirement for the named yield curve, under the given calculation
 * configuration, in the currency of the position's security.
 */
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
}
/**
 * Builds the requirement for the interpolated specification of the named yield curve in the
 * currency of the position's security.
 */
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.sensitivities;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import javax.time.calendar.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.analytics.math.matrix.DoubleMatrix1D;
import com.opengamma.core.security.Security;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.function.AbstractFunction;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.OpenGammaCompilationContext;
import com.opengamma.financial.analytics.ircurve.FixedIncomeStripWithSecurity;
import com.opengamma.financial.analytics.ircurve.InterpolatedYieldCurveSpecificationWithSecurities;
import com.opengamma.financial.analytics.model.YieldCurveNodeSensitivitiesHelper;
import com.opengamma.financial.analytics.timeseries.DateConstraint;
import com.opengamma.financial.analytics.timeseries.HistoricalTimeSeriesFunctionUtils;
import com.opengamma.financial.security.FinancialSecurityUtils;
import com.opengamma.financial.sensitivities.FactorExposureData;
import com.opengamma.financial.sensitivities.FactorType;
import com.opengamma.financial.sensitivities.RawSecurityUtils;
import com.opengamma.financial.sensitivities.SecurityEntryData;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolutionResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.security.RawSecurity;
import com.opengamma.util.money.Currency;
/**
*
*/
public class ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction extends AbstractFunction.NonCompiledInvoker {
private static final Logger s_logger = LoggerFactory.getLogger(ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.class);
/**
* The value name calculated by this function.
*/
public static final String YCNS_REQUIREMENT = ValueRequirementNames.YIELD_CURVE_NODE_SENSITIVITIES;
private static final CharSequence SWAP_TEXT = "SWAP";
private HistoricalTimeSeriesResolver _htsResolver;
@Override
public void init(final FunctionCompilationContext context) {
// REVIEW: jim 24-Oct-2012 -- this is a terrible, terrible hack. Blame Andrew Griffin - he told me to do it.
_htsResolver = OpenGammaCompilationContext.getHistoricalTimeSeriesResolver(context);
}
@Override
public ComputationTargetType getTargetType() {
return ComputationTargetType.POSITION;
}
@Override
public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
if (!(target.getPosition().getSecurity() instanceof RawSecurity)) {
return false;
}
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
return security.getSecurityType().equals(SecurityEntryData.EXTERNAL_SENSITIVITIES_SECURITY_TYPE);
}
private ValueProperties.Builder createValueProperties(final ComputationTarget target) {
final Security security = target.getPosition().getSecurity();
final String currency = FinancialSecurityUtils.getCurrency(security).getCode();
final ValueProperties.Builder properties = createValueProperties();
properties.with(ValuePropertyNames.CURRENCY, currency);
properties.with(ValuePropertyNames.CURVE_CURRENCY, currency);
return properties;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target) {
final ValueProperties.Builder properties = createValueProperties(target);
properties.withAny(ValuePropertyNames.CURVE);
properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
final ComputationTargetSpecification targetSpec = target.toSpecification();
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
s_logger.debug("getResults(1) = " + results);
return results;
}
@Override
public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue) {
final Set<String> curves = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE);
final Set<String> curveCalcConfigs = desiredValue.getConstraints().getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if ((curves == null) || (curves.size() != 1)) {
s_logger.warn("no curve specified");
// Can't support an unbound request; an injection function must be used (or declare all as optional and use [PLAT-1771])
return null;
}
if ((curveCalcConfigs == null) || (curveCalcConfigs.size() != 1)) {
s_logger.warn("no curve config specified");
return null;
}
final String curve = curves.iterator().next();
final String curveCalcConfig = curveCalcConfigs.iterator().next();
final Set<ValueRequirement> requirements = Sets.newHashSet();
requirements.add(getCurveRequirement(target, curve, curveCalcConfig));
requirements.add(getCurveSpecRequirement(target, curve));
requirements.addAll(getSensitivityRequirements(context.getSecuritySource(), (RawSecurity) target.getPosition().getSecurity()));
return requirements;
}
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target, final Map<ValueSpecification, ValueRequirement> inputs) {
String curveName = null;
String curveCalculationConfig = null;
final ComputationTargetSpecification targetSpec = target.toSpecification();
final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
assert curveName == null;
assert curveCalculationConfig == null;
curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
curveCalculationConfig = input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
assert curveName != null;
assert curveCalculationConfig != null;
final ValueProperties.Builder properties = createValueProperties(target);
properties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
}
}
s_logger.debug("getResults(2) returning " + results);
return results;
}
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
String curveName = null;
String curveCalculationConfig = null;
for (final ValueRequirement requirement : desiredValues) {
final ValueProperties constraints = requirement.getConstraints();
final Set<String> values = constraints.getValues(ValuePropertyNames.CURVE);
if (values != null) {
curveName = values.iterator().next();
}
final Set<String> curveConfigValues = constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
if (curveConfigValues != null) {
curveCalculationConfig = curveConfigValues.iterator().next();
}
}
assert curveName != null;
assert curveCalculationConfig != null;
final RawSecurity security = (RawSecurity) target.getPosition().getSecurity();
final BigDecimal qty = target.getPosition().getQuantity();
final ValueRequirement curveRequirement = getCurveRequirement(target, curveName, curveCalculationConfig);
final Object curveObject = inputs.getValue(curveRequirement);
if (curveObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveRequirement);
}
Object curveSpecObject = null;
final ValueRequirement curveSpecRequirement = getCurveSpecRequirement(target, curveName);
curveSpecObject = inputs.getValue(curveSpecRequirement);
if (curveSpecObject == null) {
throw new OpenGammaRuntimeException("Could not get " + curveSpecRequirement);
}
final YieldAndDiscountCurve curve = (YieldAndDiscountCurve) curveObject;
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec = (InterpolatedYieldCurveSpecificationWithSecurities) curveSpecObject;
final LinkedHashMap<String, YieldAndDiscountCurve> interpolatedCurves = new LinkedHashMap<String, YieldAndDiscountCurve>();
interpolatedCurves.put(curveName, curve);
final YieldCurveBundle bundle = new YieldCurveBundle(interpolatedCurves);
final DoubleMatrix1D sensitivitiesForCurves = getSensitivities(executionContext.getSecuritySource(), inputs, security, curveSpec, curve, qty);
final ValueProperties.Builder properties = createValueProperties(target)
.with(ValuePropertyNames.CURVE, curveName)
.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
final ComputationTargetSpecification targetSpec = target.toSpecification();
final ValueSpecification resultSpec = new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get());
final Set<ComputedValue> results = YieldCurveNodeSensitivitiesHelper.getInstrumentLabelledSensitivitiesForCurve(curveName, bundle, sensitivitiesForCurves, curveSpec, resultSpec);
//s_logger.debug("execute, returning " + results);
return results;
}
<<<<<<< MINE
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity, final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve, BigDecimal qty) {
=======
private DoubleMatrix1D getSensitivities(final SecuritySource secSource, final FunctionInputs inputs, final RawSecurity rawSecurity,
final InterpolatedYieldCurveSpecificationWithSecurities curveSpec,
final YieldAndDiscountCurve curve) {
>>>>>>> YOURS
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
final double[] entries = new double[curveSpec.getStrips().size()];
int i = 0;
for (final FixedIncomeStripWithSecurity strip : curveSpec.getStrips()) {
final FactorExposureData externalSensitivitiesData = searchForTenorMatch(decodedSensitivities, strip);
if (externalSensitivitiesData != null) {
final ComputedValue computedValue = inputs.getComputedValue(getSensitivityRequirement(externalSensitivitiesData.getExposureExternalId()));
if (computedValue != null) {
<<<<<<< MINE
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = (Double) mhts.getTimeSeries().getLatestValue();
entries[i] = -value * (qty.doubleValue() / 10000d); // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
=======
final ManageableHistoricalTimeSeries mhts = (ManageableHistoricalTimeSeries) computedValue.getValue();
final Double value = mhts.getTimeSeries().getLatestValue();
entries[i] = -value; // we invert here because OpenGamma uses -1bp shift rather than +1. DV01 function will invert back.
>>>>>>> YOURS
} else {
s_logger.warn("Value was null when getting required input data " + externalSensitivitiesData.getExposureExternalId());
entries[i] = 0d;
}
} else {
entries[i] = 0d;
}
i++;
}
return new DoubleMatrix1D(entries);
}
private FactorExposureData searchForTenorMatch(final Collection<FactorExposureData> exposures, final FixedIncomeStripWithSecurity strip) {
for (final FactorExposureData exposure : exposures) {
if (exposure.getFactorType().equals(FactorType.YIELD) && exposure.getFactorName().contains(SWAP_TEXT)) {
if (exposure.getNode() != null && exposure.getNode().length() > 0) {
final Period nodePeriod = Period.parse("P" + exposure.getNode());
if (strip.getTenor().getPeriod().totalMonths() == nodePeriod.totalMonths()) {
return exposure;
}
}
}
}
return null;
}
@Override
public String getShortName() {
return "ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction";
}
protected Set<ValueRequirement> getSensitivityRequirements(final SecuritySource secSource, final RawSecurity rawSecurity) {
final Set<ValueRequirement> requirements = Sets.newHashSet();
final Collection<FactorExposureData> decodedSensitivities = RawSecurityUtils.decodeFactorExposureData(secSource, rawSecurity);
for (final FactorExposureData exposureEntry : decodedSensitivities) {
requirements.add(getSensitivityRequirement(exposureEntry.getExposureExternalId()));
}
return requirements;
}
protected ValueRequirement getSensitivityRequirement(final ExternalId externalId) {
final HistoricalTimeSeriesResolutionResult resolutionResult = _htsResolver.resolve(ExternalIdBundle.of(externalId), null, null, null, "EXPOSURE", null);
final ValueRequirement htsRequirement = HistoricalTimeSeriesFunctionUtils.createHTSRequirement(resolutionResult, "EXPOSURE", DateConstraint.VALUATION_TIME, true, DateConstraint.VALUATION_TIME,
true);
return htsRequirement;
//return new ValueRequirement();
//return new ValueRequirement(/*ExternalDataRequirementNames.SENSITIVITY*/"EXPOSURE", ComputationTargetType.PRIMITIVE, UniqueId.of(externalId.getScheme().getName(), externalId.getValue()));
}
protected ValueRequirement getCurveRequirement(final ComputationTarget target, final String curveName, final String curveCalculationConfig) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.with(ValuePropertyNames.CURVE, curveName);
properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE, ComputationTargetSpecification.of(currency), properties.get());
}
protected ValueRequirement getCurveSpecRequirement(final ComputationTarget target, final String curveName) {
final Currency currency = FinancialSecurityUtils.getCurrency(target.getPosition().getSecurity());
final ValueProperties.Builder properties = ValueProperties.builder().with(ValuePropertyNames.CURVE, curveName);
return new ValueRequirement(ValueRequirementNames.YIELD_CURVE_SPEC, ComputationTargetSpecification.of(currency), properties.get());
}
}
Diff Result
No diff
Case 66 - java_ogplatform.rev_7d0f7_a0710..FloatingInterestRateLeg.java
Base
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
/**
 * A floating interest-rate leg of a swap.
 * <p>
 * NOTE(review): auto-generated Fudge-serializable bean (see "do not modify" header);
 * the (de)serialization code below is field-order sensitive, so behavioural changes
 * belong in the generator definition, not here. Comments are review annotations only.
 * Adds a reference-rate identifier, initial floating rate, spread and IBOR flag to
 * the fields inherited from {@code InterestRateLeg}.
 */
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
/** Double-dispatch entry point for {@code SwapLegVisitor} implementations. */
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -1687373124272353047l;
// Identifier of the floating reference rate; never null (enforced in all constructors).
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
/** Fudge field name for the reference-rate identifier. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Presumably the first fixing of the floating rate -- named 'initialFloatingRate'; confirm with generator spec.
private final double _initialFloatingRate;
/** Fudge field name for the initial floating rate. */
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
// Spread applied on top of the reference rate.
private final double _spread;
/** Fudge field name for the spread. */
public static final String SPREAD_KEY = "spread";
// Whether the reference rate is an IBOR-style rate.
private final boolean _isIBOR;
/** Fudge field name for the IBOR flag. */
public static final String IS_IBOR_KEY = "isIBOR";
/**
 * Full constructor. The first five parameters are forwarded unchanged to the
 * {@code InterestRateLeg} superclass.
 *
 * @param floatingReferenceRateIdentifier identifier of the reference rate, not null
 * @param initialFloatingRate value stored as the initial floating rate
 * @param spread spread over the reference rate
 * @param isIBOR true if the reference rate is IBOR-style
 * @throws NullPointerException if {@code floatingReferenceRateIdentifier} is null
 */
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
/**
 * Deserialization constructor: reads each field from the Fudge message by name,
 * throwing {@code IllegalArgumentException} when a required field is missing or
 * has the wrong type.
 */
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.UniqueIdentifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
}
/**
 * Copy constructor. Fields are shared, not deep-copied ({@code UniqueIdentifier}
 * is treated as immutable by the generated code).
 */
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
/** Serializes this leg to a new Fudge message. */
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
/** Appends this leg's fields (after the superclass's) to the given message. */
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
// Sub-message carries a class header so polymorphic identifier types round-trip.
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.UniqueIdentifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
/**
 * Polymorphic factory: scans the ordinal-0 class headers and reflectively
 * dispatches to a subclass's {@code fromFudgeMsg} when one is named; any
 * reflective failure is swallowed and the scan continues. Falls back to
 * constructing this class directly.
 */
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action: deliberate best-effort dispatch; fall through to the direct constructor below
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
/** @return the reference-rate identifier, never null */
public com.opengamma.id.UniqueIdentifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
/** @return the stored initial floating rate */
public double getInitialFloatingRate () {
return _initialFloatingRate;
}
/** @return the spread over the reference rate */
public double getSpread () {
return _spread;
}
/** @return true if the reference rate is IBOR-style */
public boolean getIsIBOR () {
return _isIBOR;
}
/** Field-by-field equality, including the superclass's fields via {@code super.equals}. */
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != msg._initialFloatingRate) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
/**
 * NOTE(review): double fields are folded in via an int cast, so fractional parts
 * do not contribute to the hash -- legal (equal objects still hash equal) but weak.
 */
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc = (hc * 31) + (int)_initialFloatingRate;
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
/** Reflective short-prefix representation of all fields. */
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
/**
 * A floating interest-rate leg of a swap.
 * <p>
 * NOTE(review): auto-generated Fudge-serializable bean (see "do not modify" header);
 * the (de)serialization code below is field-order sensitive, so behavioural changes
 * belong in the generator definition, not here. Comments are review annotations only.
 * Adds a reference-rate identifier, initial floating rate, spread and IBOR flag to
 * the fields inherited from {@code InterestRateLeg}.
 */
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
/** Double-dispatch entry point for {@code SwapLegVisitor} implementations. */
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -1687373124272353047l;
// Identifier of the floating reference rate; never null (enforced in all constructors).
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
/** Fudge field name for the reference-rate identifier. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Presumably the first fixing of the floating rate -- named 'initialFloatingRate'; confirm with generator spec.
private final double _initialFloatingRate;
/** Fudge field name for the initial floating rate. */
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
// Spread applied on top of the reference rate.
private final double _spread;
/** Fudge field name for the spread. */
public static final String SPREAD_KEY = "spread";
// Whether the reference rate is an IBOR-style rate.
private final boolean _isIBOR;
/** Fudge field name for the IBOR flag. */
public static final String IS_IBOR_KEY = "isIBOR";
/**
 * Full constructor. The first five parameters are forwarded unchanged to the
 * {@code InterestRateLeg} superclass.
 *
 * @param floatingReferenceRateIdentifier identifier of the reference rate, not null
 * @param initialFloatingRate value stored as the initial floating rate
 * @param spread spread over the reference rate
 * @param isIBOR true if the reference rate is IBOR-style
 * @throws NullPointerException if {@code floatingReferenceRateIdentifier} is null
 */
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
/**
 * Deserialization constructor: reads each field from the Fudge message by name,
 * throwing {@code IllegalArgumentException} when a required field is missing or
 * has the wrong type.
 */
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.UniqueIdentifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
}
/**
 * Copy constructor. Fields are shared, not deep-copied ({@code UniqueIdentifier}
 * is treated as immutable by the generated code).
 */
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
/** Serializes this leg to a new Fudge message. */
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
/** Appends this leg's fields (after the superclass's) to the given message. */
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
// Sub-message carries a class header so polymorphic identifier types round-trip.
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.UniqueIdentifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
/**
 * Polymorphic factory: scans the ordinal-0 class headers and reflectively
 * dispatches to a subclass's {@code fromFudgeMsg} when one is named; any
 * reflective failure is swallowed and the scan continues. Falls back to
 * constructing this class directly.
 */
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action: deliberate best-effort dispatch; fall through to the direct constructor below
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
/** @return the reference-rate identifier, never null */
public com.opengamma.id.UniqueIdentifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
/** @return the stored initial floating rate */
public double getInitialFloatingRate () {
return _initialFloatingRate;
}
/** @return the spread over the reference rate */
public double getSpread () {
return _spread;
}
/** @return true if the reference rate is IBOR-style */
public boolean getIsIBOR () {
return _isIBOR;
}
/** Field-by-field equality, including the superclass's fields via {@code super.equals}. */
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != msg._initialFloatingRate) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
/**
 * NOTE(review): double fields are folded in via an int cast, so fractional parts
 * do not contribute to the hash -- legal (equal objects still hash equal) but weak.
 */
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc = (hc * 31) + (int)_initialFloatingRate;
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
/** Reflective short-prefix representation of all fields. */
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
Left
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -1684252699689325303l;
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.UniqueIdentifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
/**
 * Copy constructor used by {@code clone()}.
 * @param source the object to copy, not null
 * @throws NullPointerException if source is null
 */
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
  super (source);
  // NOTE(review): this check runs after super(source); if the superclass copy
  // constructor dereferences a null source it throws first. Preserved as-is.
  if (source == null) {
    throw new NullPointerException ("'source' must not be null");
  }
  // The original branched on null only to assign null explicitly; copying the
  // (possibly null) reference directly is behaviorally identical.
  _floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
  _initialFloatingRate = source._initialFloatingRate;
  _spread = source._spread;
  _isIBOR = source._isIBOR;
}
/**
 * Returns an independent copy of this leg via the protected copy constructor.
 * @return a new FloatingInterestRateLeg equal to this one
 */
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
/**
 * Serializes this leg into a newly created Fudge message.
 * @param fudgeContext the serialization context, not null
 * @return the populated message
 * @throws NullPointerException if fudgeContext is null
 */
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
/**
 * Appends this leg's fields to an existing Fudge message, after the
 * superclass fields. Field order matters for the generated wire format.
 * @param fudgeContext the serialization context, not null
 * @param msg the message to populate, not null
 */
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
// Sub-message carries a class header so the concrete identifier type can be rebuilt.
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.UniqueIdentifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
// Optional field: only written when present.
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
/**
 * Deserializes a leg from a Fudge message, honouring class headers
 * (ordinal 0) so that subclasses are rebuilt via their own fromFudgeMsg.
 * @param fudgeContext the deserialization context, not null
 * @param fudgeMsg the message to read, not null
 * @return the decoded leg, never null
 */
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
// Exact match means this class: stop scanning and build directly below.
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
// Deliberate best-effort: a failed reflective dispatch falls through to
// the next class header, and finally to direct construction.
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
/**
 * @return the reference-rate identifier, not null after construction
 */
public com.opengamma.id.UniqueIdentifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
/**
 * @return the initial floating rate, or null when not set
 */
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
/**
 * Sets the initial floating rate.
 * @param initialFloatingRate the rate, may be null to clear it
 */
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
/**
 * @return the spread over the reference rate
 */
public double getSpread () {
return _spread;
}
/**
 * @return true when the reference rate is an IBOR-style rate
 */
public boolean getIsIBOR () {
return _isIBOR;
}
/**
 * Field-by-field equality, delegating to the superclass last.
 * Nullable fields use symmetric null-safe comparison; primitives use ==/!=
 * (note: a NaN spread would never compare equal, per Java double semantics).
 * @param o the object to compare, may be null
 * @return true if o is a FloatingInterestRateLeg with equal state
 */
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
/**
 * 31-based rolling hash over the same fields used by equals.
 * The spread is folded in via an (int) cast, i.e. truncation - coarse but
 * consistent with equals, which is all the contract requires.
 * @return the hash code
 */
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
/**
 * Reflective short-prefix string form, for debugging only.
 * @return a string describing all fields
 */
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
/**
 * Generated Fudge-serializable bean for a floating interest-rate swap leg.
 * Variant with a UniqueIdentifier reference rate and an optional (nullable,
 * mutable) initial floating rate. Generated code - the exact serialization
 * field order defines the wire format; do not reorder.
 */
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -1684252699689325303l;
// Mandatory: identifier of the floating reference rate (never null after construction).
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Optional: boxed and non-final so it can be absent (null) and set later.
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
// Constructor without the optional initial floating rate (left null).
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
// Deserialization constructor: mandatory fields throw IllegalArgumentException
// when missing or mistyped; initialFloatingRate is optional.
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.UniqueIdentifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
// Full constructor including the optional initial floating rate.
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
// Copy constructor backing clone(). Note the null check runs after super(source).
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
// Serializes into a fresh message.
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
// Appends this class's fields after the superclass fields; order defines the wire format.
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.UniqueIdentifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
// Class-header-aware factory: tries subclass fromFudgeMsg reflectively (failures
// deliberately swallowed), else constructs this class directly.
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.UniqueIdentifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
// Nullable: absent until set.
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
// Field-by-field equality; superclass compared last.
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
// 31-based rolling hash over the equals fields; spread folded in by int truncation.
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
Right
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
/**
 * Generated Fudge-serializable bean for a floating interest-rate swap leg.
 * Variant with an Identifier reference rate and a mandatory primitive
 * initial floating rate (all fields final, fully immutable). Generated code -
 * the exact serialization field order defines the wire format; do not reorder.
 */
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -847370103773043016l;
// Mandatory: identifier of the floating reference rate (never null after construction).
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Mandatory in this variant: primitive, so always present.
private final double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
// Deserialization constructor: every field is mandatory here and throws
// IllegalArgumentException when missing or mistyped.
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
}
// Copy constructor. Note the null check runs after super(source).
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
// Serializes into a fresh message.
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
// Appends this class's fields after the superclass fields; order defines the wire format.
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
// Class-header-aware factory: tries subclass fromFudgeMsg reflectively (failures
// deliberately swallowed), else constructs this class directly.
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public double getInitialFloatingRate () {
return _initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
// Field-by-field equality; superclass compared last.
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != msg._initialFloatingRate) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
// 31-based rolling hash over the equals fields; doubles folded in by int truncation.
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc = (hc * 31) + (int)_initialFloatingRate;
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
/**
 * Generated Fudge-serializable bean for a floating interest-rate swap leg.
 * Identifier-based, fully immutable variant with a mandatory primitive
 * initial floating rate (textually identical to the preceding "Right"
 * section). Generated code - the exact serialization field order defines the
 * wire format; do not reorder.
 */
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
private static final long serialVersionUID = -847370103773043016l;
// Mandatory: identifier of the floating reference rate (never null after construction).
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Mandatory in this variant: primitive, so always present.
private final double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
// Deserialization constructor: every field is mandatory here and throws
// IllegalArgumentException when missing or mistyped.
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
}
// Copy constructor. Note the null check runs after super(source).
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
// Serializes into a fresh message.
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
// Appends this class's fields after the superclass fields; order defines the wire format.
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
// Class-header-aware factory: tries subclass fromFudgeMsg reflectively (failures
// deliberately swallowed), else constructs this class directly.
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public double getInitialFloatingRate () {
return _initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
// Field-by-field equality; superclass compared last.
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != msg._initialFloatingRate) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
// 31-based rolling hash over the equals fields; doubles folded in by int truncation.
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc = (hc * 31) + (int)_initialFloatingRate;
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
MergeMethods
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
/**
 * Constructor without the optional initial floating rate; the non-final
 * Double field declared above stays null until setInitialFloatingRate is
 * called.
 * NOTE(review): the parameter is typed UniqueIdentifier while this merged
 * class declares the field as com.opengamma.id.Identifier - confirm the
 * intended identifier type; as written the assignment looks inconsistent
 * with the field declaration.
 * @throws NullPointerException if floatingReferenceRateIdentifier is null
 */
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
/**
 * Deserialization constructor. The reference-rate identifier, spread and
 * isIBOR fields are mandatory (IllegalArgumentException when missing or
 * mistyped); initialFloatingRate is optional and applied via the setter.
 * @param fudgeContext the deserialization context, not null
 * @param fudgeMsg the message to decode, not null
 */
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
// Optional field: silently skipped when absent.
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
// Merge conflict resolved by KEEPING BOTH constructor overloads ("keep both methods"):
// the branches differ in the reference-rate identifier type (UniqueIdentifier vs Identifier)
// and in initialFloatingRate (Double vs double), so the two signatures do not clash.
// NOTE(review): assigning a UniqueIdentifier argument to the Identifier-typed field mirrors
// the class's pre-existing UniqueIdentifier constructor — confirm the assignability holds
// in this codebase's id model.
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
// Copy constructor (used by clone()). Shallow copy: the identifier reference is shared,
// not duplicated, as are the boxed/primitive field values.
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
// NOTE(review): super(source) necessarily runs first, so a null source may already have
// failed inside the superclass; this check only sharpens the error message.
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
// Creates a shallow copy via the copy constructor (covariant return type).
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
// Serializes this leg into a freshly created Fudge message; delegates the field
// writing to the two-argument overload below.
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
// Appends this object's fields to an existing message, after the superclass fields.
// The identifier is written as a sub-message carrying a class header so subtypes can
// round-trip; initialFloatingRate is written only when set (it is the optional field).
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
// Factory: walks the message's class-header fields (ordinal 0) from most- to least-derived
// and reflectively dispatches to that class's fromFudgeMsg. Stops at this class's own name
// and falls back to the deserialization constructor. Reflection failures are deliberately
// swallowed so an unknown subclass degrades to a plain FloatingInterestRateLeg rather than
// failing deserialization.
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
// Reference-rate identifier (required, set at construction).
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
// Initial floating rate; may be null — it is the one optional, mutable field.
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
// Spread over the reference rate (required).
public double getSpread () {
return _spread;
}
// Whether the reference rate is an IBOR-style rate (required).
public boolean getIsIBOR () {
return _isIBOR;
}
// Value equality over all four fields plus the superclass state. The two nullable
// references are compared null-safely; the primitives directly. Uses instanceof, so
// comparison against subclass instances is delegated to super.equals at the end.
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
// Hash combines the superclass hash with each field using the 31-multiplier convention;
// kept consistent with equals() above.
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
// (int)_spread truncates the fraction — spreads differing only fractionally collide,
// which is legal (hashCode need not be injective) but worth knowing.
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
// Reflective field dump (commons-lang), short-prefix style; for debugging/logging only.
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
// Merge conflict resolved by KEEPING BOTH constructor overloads ("keep both methods"):
// the branches differ in the reference-rate identifier type (UniqueIdentifier vs Identifier)
// and in initialFloatingRate (Double vs double), so the two signatures do not clash.
// NOTE(review): assigning a UniqueIdentifier argument to the Identifier-typed field mirrors
// the existing UniqueIdentifier constructor above — confirm the assignability holds in this
// codebase's id model.
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
// KeepBothMethods — merge-scenario marker: the conflicting constructor overloads are both retained.
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
/**
 * Appends this leg's fields to an existing Fudge message, after the superclass fields.
 *
 * @param fudgeContext the serialisation context, not null
 * @param msg the mutable message to append to, not null
 */
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
// The identifier is written as a sub-message carrying a class header so the concrete
// runtime subtype of Identifier can be reconstructed on deserialisation.
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
// Optional field: omitted entirely when the initial rate is not yet known.
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
/**
 * Deserialises a leg from a Fudge message, dispatching to a subclass factory when the
 * message carries a class header naming a more-derived type.
 *
 * @param fudgeContext the deserialisation context, not null
 * @param fudgeMsg the message to read, not null
 * @return the reconstructed leg, never null
 */
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
// Class headers are carried as fields with ordinal 0, most-derived type first.
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
// Once we reach our own class name there is no more-derived type to delegate to.
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
// Reflectively invoke the static fromFudgeMsg of the named subclass.
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// Deliberate best-effort dispatch (generated code): if the subclass is missing or its
// factory fails, fall through and construct this class directly below.
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
/**
 * Returns the identifier of the floating reference rate; never null once constructed
 * via the public constructor.
 */
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
/**
 * Returns the initial floating rate, or null if not yet fixed.
 */
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
/**
 * Sets the initial floating rate; null clears the fixing. This is the only mutable
 * field of the class.
 */
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
/**
 * Returns the spread over the floating reference rate.
 */
public double getSpread () {
return _spread;
}
/**
 * Returns true if the reference rate is an IBOR-style rate.
 */
public boolean getIsIBOR () {
return _isIBOR;
}
/**
 * Compares this leg with another object for equality. Two legs are equal when the
 * reference rate identifier, initial floating rate (null-safe), spread, IBOR flag and
 * all superclass fields match.
 */
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
final FloatingInterestRateLeg other = (FloatingInterestRateLeg)o;
// Null-safe comparison: null matches only null, otherwise delegate to equals.
if (_floatingReferenceRateIdentifier == null ? other._floatingReferenceRateIdentifier != null
    : !_floatingReferenceRateIdentifier.equals (other._floatingReferenceRateIdentifier)) return false;
if (_initialFloatingRate == null ? other._initialFloatingRate != null
    : !_initialFloatingRate.equals (other._initialFloatingRate)) return false;
if (_spread != other._spread) return false;
if (_isIBOR != other._isIBOR) return false;
return super.equals (other);
}
/**
 * Computes a hash code consistent with {@link #equals}, folding each field into a
 * 31-multiplier accumulator seeded by the superclass hash.
 */
public int hashCode () {
int hc = super.hashCode ();
hc = 31 * hc + (_floatingReferenceRateIdentifier == null ? 0 : _floatingReferenceRateIdentifier.hashCode ());
hc = 31 * hc + (_initialFloatingRate == null ? 0 : _initialFloatingRate.hashCode ());
// Note: the spread is truncated to int, so spreads differing only fractionally hash alike.
hc = 31 * hc + (int)_spread;
hc = 31 * hc + (_isIBOR ? 1 : 0);
return hc;
}
/**
 * Returns a reflective short-prefix-style string of all fields
 * (Apache Commons Lang {@code ToStringBuilder}).
 */
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
Safe
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
/**
 * Deserialisation constructor: rebuilds a leg from a Fudge message.
 * Mandatory fields (reference rate identifier, spread, IBOR flag) must be present;
 * the initial floating rate is optional and left null when absent.
 *
 * @param fudgeContext the deserialisation context, not null
 * @param fudgeMsg the message to read, not null
 * @throws IllegalArgumentException if a mandatory field is missing or any field has
 *         an unexpected type
 */
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
// Mandatory: the reference rate identifier, carried as a sub-message.
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
// Mandatory: the spread over the reference rate.
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
// Mandatory: the IBOR flag.
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
// Optional: the initial floating rate; only set when the field is present.
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
<<<<<<< MINE
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
=======
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
>>>>>>> YOURS
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
=======
private static final long serialVersionUID = -847370103773043016l;
>>>>>>> YOURS
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
<<<<<<< MINE
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
=======
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
>>>>>>> YOURS
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
public int hashCode () {
int hc = super.hashCode ();
hc *= 31;
if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
hc *= 31;
if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
hc = (hc * 31) + (int)_spread;
hc *= 31;
if (_isIBOR) hc++;
return hc;
}
public String toString () {
return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
Unstructured
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
=======
private static final long serialVersionUID = -847370103773043016l;
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
>>>>>>> YOURS
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
private Double _initialFloatingRate;
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
private final double _spread;
public static final String SPREAD_KEY = "spread";
private final boolean _isIBOR;
public static final String IS_IBOR_KEY = "isIBOR";
<<<<<<< MINE
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
=======
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
>>>>>>> YOURS
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
<<<<<<< MINE
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
=======
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
>>>>>>> YOURS
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_initialFloatingRate = initialFloatingRate;
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
super (source);
if (source == null) throw new NullPointerException ("'source' must not be null");
if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
else {
_floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
}
_initialFloatingRate = source._initialFloatingRate;
_spread = source._spread;
_isIBOR = source._isIBOR;
}
public FloatingInterestRateLeg clone () {
return new FloatingInterestRateLeg (this);
}
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
if (fudgeContext == null) throw new NullPointerException ("fudgeContext must not be null");
final org.fudgemsg.MutableFudgeMsg msg = fudgeContext.newMessage ();
toFudgeMsg (fudgeContext, msg);
return msg;
}
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
super.toFudgeMsg (fudgeContext, msg);
if (_floatingReferenceRateIdentifier != null) {
final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
_floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
}
if (_initialFloatingRate != null) {
msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
}
msg.add (SPREAD_KEY, null, _spread);
msg.add (IS_IBOR_KEY, null, _isIBOR);
}
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
final java.util.List<org.fudgemsg.FudgeField> types = fudgeMsg.getAllByOrdinal (0);
for (org.fudgemsg.FudgeField field : types) {
final String className = (String)field.getValue ();
if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (className)) break;
try {
return (com.opengamma.financial.security.swap.FloatingInterestRateLeg)Class.forName (className).getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class).invoke (null, fudgeContext, fudgeMsg);
}
catch (Throwable t) {
// no-action
}
}
return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
return _floatingReferenceRateIdentifier;
}
public Double getInitialFloatingRate () {
return _initialFloatingRate;
}
public void setInitialFloatingRate (Double initialFloatingRate) {
_initialFloatingRate = initialFloatingRate;
}
public double getSpread () {
return _spread;
}
public boolean getIsIBOR () {
return _isIBOR;
}
public boolean equals (final Object o) {
if (o == this) return true;
if (!(o instanceof FloatingInterestRateLeg)) return false;
FloatingInterestRateLeg msg = (FloatingInterestRateLeg)o;
if (_floatingReferenceRateIdentifier != null) {
if (msg._floatingReferenceRateIdentifier != null) {
if (!_floatingReferenceRateIdentifier.equals (msg._floatingReferenceRateIdentifier)) return false;
}
else return false;
}
else if (msg._floatingReferenceRateIdentifier != null) return false;
if (_initialFloatingRate != null) {
if (msg._initialFloatingRate != null) {
if (!_initialFloatingRate.equals (msg._initialFloatingRate)) return false;
}
else return false;
}
else if (msg._initialFloatingRate != null) return false;
if (_spread != msg._spread) return false;
if (_isIBOR != msg._isIBOR) return false;
return super.equals (msg);
}
/**
 * Combines the hash codes of the fields used by {@code equals} over the superclass hash
 * with the usual 31-multiplier accumulation.
 */
public int hashCode () {
  int hc = super.hashCode ();
  hc *= 31;
  if (_floatingReferenceRateIdentifier != null) hc += _floatingReferenceRateIdentifier.hashCode ();
  hc *= 31;
  if (_initialFloatingRate != null) hc += _initialFloatingRate.hashCode ();
  // NOTE(review): the (int) cast truncates the fractional part of the spread.
  hc = (hc * 31) + (int)_spread;
  hc *= 31;
  if (_isIBOR) hc++;
  return hc;
}
/** @return a reflective short-prefix-style string of all fields (commons-lang) */
public String toString () {
  return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
// Automatically created - do not modify
///CLOVER:OFF
// CSOFF: Generated File
package com.opengamma.financial.security.swap;
public class FloatingInterestRateLeg extends com.opengamma.financial.security.swap.InterestRateLeg implements java.io.Serializable {
/** Accepts a {@link SwapLegVisitor}, double-dispatching to its floating-leg case. */
public <T> T accept (SwapLegVisitor<T> visitor) { return visitor.visitFloatingInterestRateLeg (this); }
<<<<<<< MINE
private static final long serialVersionUID = -1684252699689325303l;
private final com.opengamma.id.UniqueIdentifier _floatingReferenceRateIdentifier;
=======
private static final long serialVersionUID = -847370103773043016l;
private final com.opengamma.id.Identifier _floatingReferenceRateIdentifier;
>>>>>>> YOURS
/** Fudge field name for the floating reference rate identifier. */
public static final String FLOATING_REFERENCE_RATE_IDENTIFIER_KEY = "floatingReferenceRateIdentifier";
// Optional; mutable via setInitialFloatingRate, omitted from the message when null.
private Double _initialFloatingRate;
/** Fudge field name for the initial floating rate. */
public static final String INITIAL_FLOATING_RATE_KEY = "initialFloatingRate";
// Immutable once constructed; serialized under SPREAD_KEY.
private final double _spread;
/** Fudge field name for the spread. */
public static final String SPREAD_KEY = "spread";
// Immutable once constructed; serialized under IS_IBOR_KEY.
private final boolean _isIBOR;
/** Fudge field name for the isIBOR flag. */
public static final String IS_IBOR_KEY = "isIBOR";
<<<<<<< MINE
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, double spread, boolean isIBOR) {
=======
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.Identifier floatingReferenceRateIdentifier, double initialFloatingRate, double spread, boolean isIBOR) {
>>>>>>> YOURS
super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
else {
_floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
}
_spread = spread;
_isIBOR = isIBOR;
}
protected FloatingInterestRateLeg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
super (fudgeContext, fudgeMsg);
org.fudgemsg.FudgeField fudgeField;
fudgeField = fudgeMsg.getByName (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not present");
try {
_floatingReferenceRateIdentifier = com.opengamma.id.Identifier.fromFudgeMsg (fudgeContext, fudgeMsg.getFieldValue (org.fudgemsg.FudgeMsg.class, fudgeField));
}
catch (IllegalArgumentException e) {
<<<<<<< MINE
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not UniqueIdentifier message", e);
=======
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'floatingReferenceRateIdentifier' is not Identifier message", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not present");
try {
_initialFloatingRate = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
>>>>>>> YOURS
}
fudgeField = fudgeMsg.getByName (SPREAD_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not present");
try {
_spread = fudgeMsg.getFieldValue (Double.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'spread' is not double", e);
}
fudgeField = fudgeMsg.getByName (IS_IBOR_KEY);
if (fudgeField == null) throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not present");
try {
_isIBOR = fudgeMsg.getFieldValue (Boolean.class, fudgeField);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'isIBOR' is not boolean", e);
}
fudgeField = fudgeMsg.getByName (INITIAL_FLOATING_RATE_KEY);
if (fudgeField != null) {
try {
setInitialFloatingRate (fudgeMsg.getFieldValue (Double.class, fudgeField));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException ("Fudge message is not a FloatingInterestRateLeg - field 'initialFloatingRate' is not double", e);
}
}
}
/**
 * Creates a new floating interest rate leg.
 *
 * @param dayCount  the day-count convention, passed to the superclass
 * @param frequency  the payment frequency, passed to the superclass
 * @param regionIdentifier  the region identifier, passed to the superclass
 * @param businessDayConvention  the business-day convention, passed to the superclass
 * @param notional  the notional, passed to the superclass
 * @param floatingReferenceRateIdentifier  the reference-rate identifier, not null
 * @param initialFloatingRate  the initial floating rate, may be null
 * @param spread  the spread
 * @param isIBOR  the isIBOR flag
 * @throws NullPointerException if {@code floatingReferenceRateIdentifier} is null
 */
// NOTE(review): this parameter is UniqueIdentifier while the getter exposes Identifier -
// verify against the resolved field type once the merge conflict above is settled.
public FloatingInterestRateLeg (com.opengamma.financial.convention.daycount.DayCount dayCount, com.opengamma.financial.convention.frequency.Frequency frequency, com.opengamma.id.Identifier regionIdentifier, com.opengamma.financial.convention.businessday.BusinessDayConvention businessDayConvention, com.opengamma.financial.security.swap.Notional notional, com.opengamma.id.UniqueIdentifier floatingReferenceRateIdentifier, Double initialFloatingRate, double spread, boolean isIBOR) {
  super (dayCount, frequency, regionIdentifier, businessDayConvention, notional);
  if (floatingReferenceRateIdentifier == null) throw new NullPointerException ("'floatingReferenceRateIdentifier' cannot be null");
  else {
    _floatingReferenceRateIdentifier = floatingReferenceRateIdentifier;
  }
  _initialFloatingRate = initialFloatingRate;
  _spread = spread;
  _isIBOR = isIBOR;
}
/**
 * Copy constructor.
 *
 * @param source  the leg to copy, not null
 * @throws NullPointerException if {@code source} is null
 */
protected FloatingInterestRateLeg (final FloatingInterestRateLeg source) {
  super (source);
  // NOTE(review): a null source has already been passed to super(source) above, so this
  // check only fires if the superclass tolerates null - confirm the intended contract.
  if (source == null) throw new NullPointerException ("'source' must not be null");
  if (source._floatingReferenceRateIdentifier == null) _floatingReferenceRateIdentifier = null;
  else {
    _floatingReferenceRateIdentifier = source._floatingReferenceRateIdentifier;
  }
  _initialFloatingRate = source._initialFloatingRate;
  _spread = source._spread;
  _isIBOR = source._isIBOR;
}
/** @return a copy of this leg created via the copy constructor */
public FloatingInterestRateLeg clone () {
  return new FloatingInterestRateLeg (this);
}
/**
 * Serializes this leg into a newly allocated Fudge message.
 *
 * @param fudgeContext  the serialization context, not null
 * @return the populated message, not null
 * @throws NullPointerException if {@code fudgeContext} is null
 */
public org.fudgemsg.FudgeMsg toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext) {
  if (fudgeContext == null) {
    throw new NullPointerException ("fudgeContext must not be null");
  }
  final org.fudgemsg.MutableFudgeMsg message = fudgeContext.newMessage ();
  toFudgeMsg (fudgeContext, message);
  return message;
}
/**
 * Serializes this leg's fields into the given mutable message, after the superclass fields.
 *
 * @param fudgeContext  the serialization context, not null
 * @param msg  the message to populate, not null
 */
public void toFudgeMsg (final org.fudgemsg.mapping.FudgeSerializationContext fudgeContext, final org.fudgemsg.MutableFudgeMsg msg) {
  super.toFudgeMsg (fudgeContext, msg);
  if (_floatingReferenceRateIdentifier != null) {
    // Embedded as a sub-message with class headers so the concrete type survives round-tripping.
    final org.fudgemsg.MutableFudgeMsg fudge1 = org.fudgemsg.mapping.FudgeSerializationContext.addClassHeader (fudgeContext.newMessage (), _floatingReferenceRateIdentifier.getClass (), com.opengamma.id.Identifier.class);
    _floatingReferenceRateIdentifier.toFudgeMsg (fudgeContext, fudge1);
    msg.add (FLOATING_REFERENCE_RATE_IDENTIFIER_KEY, null, fudge1);
  }
  // Optional field - omitted entirely when null.
  if (_initialFloatingRate != null) {
    msg.add (INITIAL_FLOATING_RATE_KEY, null, _initialFloatingRate);
  }
  msg.add (SPREAD_KEY, null, _spread);
  msg.add (IS_IBOR_KEY, null, _isIBOR);
}
/**
 * Deserializes a {@code FloatingInterestRateLeg} from a Fudge message, delegating to the
 * most-derived subclass named in the message's ordinal-0 type headers when one is present.
 *
 * @param fudgeContext  the deserialization context, not null
 * @param fudgeMsg  the message to decode, not null
 * @return the deserialized leg, not null
 */
public static FloatingInterestRateLeg fromFudgeMsg (final org.fudgemsg.mapping.FudgeDeserializationContext fudgeContext, final org.fudgemsg.FudgeMsg fudgeMsg) {
  final java.util.List<org.fudgemsg.FudgeField> typeHints = fudgeMsg.getAllByOrdinal (0);
  for (org.fudgemsg.FudgeField typeField : typeHints) {
    final String candidateClass = (String) typeField.getValue ();
    if ("com.opengamma.financial.security.swap.FloatingInterestRateLeg".equals (candidateClass)) {
      // Reached this class itself - stop delegating and build locally.
      break;
    }
    try {
      return (com.opengamma.financial.security.swap.FloatingInterestRateLeg) Class.forName (candidateClass)
          .getDeclaredMethod ("fromFudgeMsg", org.fudgemsg.mapping.FudgeDeserializationContext.class, org.fudgemsg.FudgeMsg.class)
          .invoke (null, fudgeContext, fudgeMsg);
    } catch (Throwable t) {
      // Best-effort subclass dispatch; try the next type hint.
    }
  }
  return new FloatingInterestRateLeg (fudgeContext, fudgeMsg);
}
/** @return the identifier of the floating reference rate */
public com.opengamma.id.Identifier getFloatingReferenceRateIdentifier () {
  return _floatingReferenceRateIdentifier;
}

/** @return the initial floating rate, may be null when not set */
public Double getInitialFloatingRate () {
  return _initialFloatingRate;
}

/**
 * Sets the initial floating rate.
 *
 * @param initialFloatingRate  the new value, may be null
 */
public void setInitialFloatingRate (Double initialFloatingRate) {
  _initialFloatingRate = initialFloatingRate;
}

/** @return the spread */
public double getSpread () {
  return _spread;
}

/** @return the isIBOR flag */
public boolean getIsIBOR () {
  return _isIBOR;
}
/**
 * Compares this leg to another field by field (null-safe), then delegates to the
 * superclass for the inherited state.
 */
public boolean equals (final Object o) {
  if (o == this) {
    return true;
  }
  if (!(o instanceof FloatingInterestRateLeg)) {
    return false;
  }
  final FloatingInterestRateLeg other = (FloatingInterestRateLeg) o;
  // Null-safe comparison of the reference-rate identifier.
  if (_floatingReferenceRateIdentifier == null ? other._floatingReferenceRateIdentifier != null
      : !_floatingReferenceRateIdentifier.equals (other._floatingReferenceRateIdentifier)) {
    return false;
  }
  // Null-safe comparison of the optional initial floating rate.
  if (_initialFloatingRate == null ? other._initialFloatingRate != null
      : !_initialFloatingRate.equals (other._initialFloatingRate)) {
    return false;
  }
  return _spread == other._spread && _isIBOR == other._isIBOR && super.equals (other);
}
/**
 * Combines the hash codes of the fields used by {@code equals} over the superclass hash
 * with the usual 31-multiplier accumulation. Produces exactly the same values as the
 * generated form (int overflow wraps identically).
 */
public int hashCode () {
  int hc = super.hashCode ();
  hc = hc * 31 + (_floatingReferenceRateIdentifier == null ? 0 : _floatingReferenceRateIdentifier.hashCode ());
  hc = hc * 31 + (_initialFloatingRate == null ? 0 : _initialFloatingRate.hashCode ());
  // The (int) cast truncates the fractional part of the spread.
  hc = hc * 31 + (int) _spread;
  hc = hc * 31 + (_isIBOR ? 1 : 0);
  return hc;
}
/** @return a reflective short-prefix-style string of all fields (commons-lang) */
public String toString () {
  return org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this, org.apache.commons.lang.builder.ToStringStyle.SHORT_PREFIX_STYLE);
}
}
///CLOVER:ON
// CSON: Generated File
Diff Result
No diff
Case 67 - java_ogplatform.rev_96129_d2fd7..RequirementBasedWebViewGrid.java
Base
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.cometd.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {

  private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);

  private static final String GRID_STRUCTURE_ROOT_CHANNEL = "/gridStructure";

  private final String _columnStructureChannel;
  private final RequirementBasedGridStructure _gridStructure;
  private final String _nullCellValue;

  // Column-based state: few entries expected so using an array set
  private final LongSet _historyOutputs = new LongArraySet();

  // Cell-based state
  private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();

  /**
   * Creates the grid, deriving its column structure from the view definition's requirements
   * for the given target types.
   *
   * @param name  the grid name, used to build the column-structure channel
   * @param viewClient  the view client, passed to the superclass
   * @param compiledViewDefinition  the compiled view definition the structure is derived from
   * @param targets  the targets forming the rows
   * @param targetTypes  the computation target types this grid displays
   * @param resultConverterCache  converter cache, passed to the superclass
   * @param local  the local cometd client
   * @param remote  the remote cometd client
   * @param nullCellValue  the value sent to represent an empty cell
   */
  protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<UniqueId> targets,
      EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
    super(name, viewClient, resultConverterCache, local, remote);
    _columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
    List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
    _gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
    _nullCellValue = nullCellValue;
  }

  //-------------------------------------------------------------------------
  /**
   * Processes the latest results for a single target, converting each computed value and
   * delivering one row update (cell data keyed by column id) to the remote client.
   *
   * @param target  the computation target the results belong to
   * @param resultModel  the per-target results across all calculation configurations
   * @param resultTimestamp  the timestamp of the cycle that produced the results
   */
  public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
    Integer rowId = getGridStructure().getRowId(target.getUniqueId());
    if (rowId == null) {
      // Result not in the grid
      return;
    }
    Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
    // Whether or not the row is in the viewport, we may have to store history
    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
      for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
        ValueSpecification specification = value.getSpecification();
        Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
        if (columns == null) {
          // Expect a column for every value
          s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
          continue;
        }
        Object originalValue = value.getValue();
        for (WebViewGridColumn column : columns) {
          int colId = column.getId();
          WebGridCell cell = WebGridCell.of(rowId, colId);
          ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
          Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
          Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
          if (depGraph != null) {
            if (cellData == null) {
              cellData = new HashMap<String, Object>();
            }
            cellData.put("dg", depGraph);
          }
          if (cellData != null) {
            valuesToSend.put(Integer.toString(colId), cellData);
          }
        }
      }
    }
    // valuesToSend is always non-null here (createDefaultTargetResult allocates it);
    // the guard is kept for safety should that helper ever change.
    if (valuesToSend != null) {
      getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
    }
  }

  /**
   * Builds the skeleton row update: the row id plus an explicit null for every cell known
   * to be unsatisfiable, so the client clears them.
   */
  private Map<String, Object> createDefaultTargetResult(Integer rowId) {
    Map<String, Object> valuesToSend;
    valuesToSend = new HashMap<String, Object>();
    valuesToSend.put("rowId", rowId);
    for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
      valuesToSend.put(Integer.toString(unsatisfiedColId), null);
    }
    return valuesToSend;
  }

  /**
   * Looks up (and caches) the converter for a value, pushing the column details to the
   * client the first time the column's type becomes known.
   */
  @SuppressWarnings("unchecked")
  private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
    // Ensure the converter is cached against the value name before sending the column details
    ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
    if (!column.isTypeKnown()) {
      sendColumnDetails(Collections.singleton(column));
    }
    return converter;
  }

  /** Delivers the given columns' JSON structures on this grid's column-structure channel. */
  private void sendColumnDetails(Collection<WebViewGridColumn> columnDetails) {
    getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(columnDetails), null);
  }

  /** {@inheritDoc} Adds the column structures to the superclass's grid structure. */
  @Override
  public Map<String, Object> getInitialJsonGridStructure() {
    Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
    gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
    return gridStructure;
  }

  /** Builds one row-detail map per target, delegating extra fields to {@link #addRowDetails}. */
  @Override
  protected List<Object> getInitialJsonRowStructures() {
    List<Object> rowStructures = new ArrayList<Object>();
    for (Map.Entry<UniqueId, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
      Map<String, Object> rowDetails = new HashMap<String, Object>();
      UniqueId target = targetEntry.getKey();
      int rowId = targetEntry.getValue();
      rowDetails.put("rowId", rowId);
      addRowDetails(target, rowId, rowDetails);
      rowStructures.add(rowDetails);
    }
    return rowStructures;
  }

  /** Maps each column id (as a string) to its JSON structure. */
  private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
    Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
    for (WebViewGridColumn columnDetails : columns) {
      columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
    }
    return columnStructures;
  }

  /**
   * Builds the JSON description of one column. Marks the column's type as known (and
   * enables history for DOUBLE columns) once the converter cache can name the result type.
   */
  private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
    Map<String, Object> detailsToSend = new HashMap<String, Object>();
    long colId = column.getId();
    detailsToSend.put("colId", colId);
    detailsToSend.put("header", column.getHeader());
    detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
    detailsToSend.put("nullValue", _nullCellValue);
    String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
    if (resultType != null) {
      column.setTypeKnown(true);
      detailsToSend.put("dataType", resultType);
      // Hack - the client should decide which columns it requires history for, taking into account the capabilities of
      // the renderer.
      if (resultType.equals("DOUBLE")) {
        addHistoryOutput(column.getId());
      }
    }
    return detailsToSend;
  }

  /** Adds subclass-specific fields to a row's JSON description. */
  protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);

  //-------------------------------------------------------------------------
  /** @return the derived grid structure, not null */
  protected RequirementBasedGridStructure getGridStructure() {
    return _gridStructure;
  }

  //-------------------------------------------------------------------------
  private void addHistoryOutput(long colId) {
    _historyOutputs.add(colId);
  }

  /** A cell carries history whenever its column was flagged as a history output. */
  @Override
  protected boolean isHistoryOutput(WebGridCell cell) {
    return _historyOutputs.contains(cell.getColumnId());
  }

  //-------------------------------------------------------------------------
  /**
   * Enables or disables dependency-graph introspection for a cell.
   *
   * @param cell  the cell to change
   * @param includeDepGraph  true to attach a dep-graph grid, false to remove any existing one
   * @return the dep-graph grid now registered for the cell when enabling, or the grid that
   *  was removed (possibly null) when disabling
   */
  public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
    if (includeDepGraph) {
      String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
      WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(), getLocalClient(), getRemoteClient());
      // FIX: previously the putIfAbsent result was discarded, so a concurrent caller could
      // receive a grid that was never stored in _depGraphGrids. Return the instance that won.
      WebViewDepGraphGrid existing = _depGraphGrids.putIfAbsent(cell, grid);
      return existing != null ? existing : grid;
    } else {
      WebViewDepGraphGrid grid = _depGraphGrids.remove(cell);
      return grid;
    }
  }

  /**
   * Produces the dep-graph payload for a cell when introspection is enabled for it,
   * initialising the dep-graph grid from the latest cycle on first use.
   *
   * @return the dep-graph update (wrapped with the grid structure on first use), or null when
   *  introspection is off or no cycle/subgraph is available
   */
  private Object getDepGraphIfRequested(WebGridCell cell, String calcConfigName, ValueSpecification valueSpecification, Long resultTimestamp) {
    WebViewDepGraphGrid depGraphGrid = _depGraphGrids.get(cell);
    if (depGraphGrid == null) {
      return null;
    }
    // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
    EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
    if (cycleReference == null) {
      // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
      return null;
    }
    try {
      Object gridStructure = null;
      if (!depGraphGrid.isInit()) {
        DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
        DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
        if (subgraph == null) {
          s_logger.warn("No subgraph producing value specification {}", valueSpecification);
          return null;
        }
        if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
          gridStructure = depGraphGrid.getInitialJsonGridStructure();
        }
      }
      Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
      if (gridStructure != null) {
        Map<String, Object> structureMessage = new HashMap<String, Object>();
        structureMessage.put("grid", gridStructure);
        structureMessage.put("update", depGraph);
        return structureMessage;
      } else {
        return depGraph;
      }
    } finally {
      // Always release the cycle reference to avoid pinning engine resources.
      cycleReference.release();
    }
  }

  //-------------------------------------------------------------------------
  /** Builds the two CSV header rows (header and description), offset past any extra columns. */
  @Override
  protected String[][] getCsvColumnHeaders() {
    Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
    int additionalColumns = getAdditionalCsvColumnCount();
    int columnCount = columns.size() + additionalColumns;
    String[] header1 = new String[columnCount];
    String[] header2 = new String[columnCount];
    supplementCsvColumnHeaders(header1);
    int offset = getCsvDataColumnOffset();
    for (WebViewGridColumn column : columns) {
      header1[offset + column.getId()] = column.getHeader();
      header2[offset + column.getId()] = column.getDescription();
    }
    return new String[][] {header1, header2};
  }

  /** Renders every target row of the latest result as text for CSV export. */
  @Override
  protected String[][] getCsvRows(ViewComputationResultModel result) {
    String[][] rows = new String[getGridStructure().getTargets().size()][];
    int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
    int offset = getCsvDataColumnOffset();
    for (ComputationTargetSpecification target : result.getAllTargets()) {
      Integer rowId = getGridStructure().getRowId(target.getUniqueId());
      if (rowId == null) {
        continue;
      }
      ViewTargetResultModel resultModel = result.getTargetResult(target);
      String[] values = new String[columnCount];
      supplementCsvRowData(rowId, target, values);
      rows[rowId] = values;
      for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
        for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
          Object originalValue = value.getValue();
          if (originalValue == null) {
            continue;
          }
          ValueSpecification specification = value.getSpecification();
          Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
          if (columns == null) {
            // Expect a column for every value
            s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
            continue;
          }
          for (WebViewGridColumn column : columns) {
            int colId = column.getId();
            // originalValue is non-null here (nulls were skipped above), so the converter lookup always runs.
            // NOTE(review): getConverter may still return null for an unknown type, which would NPE below - confirm.
            ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
            values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
          }
        }
      }
    }
    return rows;
  }

  /** @return the number of extra (non-data) CSV columns a subclass prepends; default 0 */
  protected int getAdditionalCsvColumnCount() {
    return 0;
  }

  /** @return the column offset at which data columns start in CSV output; default 0 */
  protected int getCsvDataColumnOffset() {
    return 0;
  }

  /** Hook for subclasses to fill in their extra CSV header cells. */
  protected void supplementCsvColumnHeaders(String[] headers) {
  }

  /** Hook for subclasses to fill in their extra CSV cells for one row. */
  protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
  }

  //-------------------------------------------------------------------------
  /**
   * Collects the column keys implied by a view definition: all portfolio requirements (when
   * position/node targets are shown) plus specific requirements matching the target types.
   */
  private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
    List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
    for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
      String calcConfigName = calcConfig.getName();
      if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
        for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
          String valueName = portfolioOutput.getFirst();
          ValueProperties constraints = portfolioOutput.getSecond();
          RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
          result.add(columnKey);
        }
      }
      for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
        if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
          continue;
        }
        String valueName = specificRequirement.getValueName();
        ValueProperties constraints = specificRequirement.getConstraints();
        RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
        result.add(columnKey);
      }
    }
    return result;
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.cometd.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private static final String GRID_STRUCTURE_ROOT_CHANNEL = "/gridStructure";
private final String _columnStructureChannel;
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
if (valuesToSend != null) {
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
}
private Map<String, Object> createDefaultTargetResult(Integer rowId) {
Map<String, Object> valuesToSend;
valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
return valuesToSend;
}
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
sendColumnDetails(Collections.singleton(column));
}
return converter;
}
private void sendColumnDetails(Collection<WebViewGridColumn> columnDetails) {
getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(columnDetails), null);
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<UniqueId, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
  // Key each column's JSON structure by its string column identifier.
  final Map<String, Object> structures = new HashMap<String, Object>(columns.size());
  for (final WebViewGridColumn column : columns) {
    structures.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
  }
  return structures;
}
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
  // Build the JSON description of a single column for the client.
  final Map<String, Object> details = new HashMap<String, Object>();
  final long colId = column.getId();
  details.put("colId", colId);
  details.put("header", column.getHeader());
  details.put("description", column.getValueName() + ":\n" + column.getDescription());
  details.put("nullValue", _nullCellValue);
  final String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
  if (resultType == null) {
    // Type not yet known - the structure is re-sent once a value arrives (see getConverter).
    return details;
  }
  column.setTypeKnown(true);
  details.put("dataType", resultType);
  // Hack - the client should decide which columns it requires history for, taking into account the capabilities of
  // the renderer.
  if (resultType.equals("DOUBLE")) {
    addHistoryOutput(column.getId());
  }
  return details;
}
/**
 * Adds subclass-specific entries to the JSON structure of a row.
 *
 * @param target the unique identifier of the row's computation target
 * @param rowId the row's numeric identifier in this grid
 * @param details the row's JSON structure, to be populated by the implementation
 */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
/**
 * @return the requirement-based structure describing this grid's rows and columns
 */
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Marks a column as one whose cell values should have history recorded (see isHistoryOutput).
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, so only the cell's column identifier matters here.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
  if (!includeDepGraph) {
    // Dependency graph no longer wanted - discard any grid registered for the cell
    // (may return null if none was registered).
    return _depGraphGrids.remove(cell);
  }
  // Register a dependency graph grid for this cell; an existing registration wins the race,
  // but the freshly constructed grid is still returned, matching the original behaviour.
  final String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
  final WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(), getLocalClient(), getRemoteClient());
  _depGraphGrids.putIfAbsent(cell, grid);
  return grid;
}
/**
 * Produces a dependency graph message for a cell, if a dependency graph grid has been
 * registered for it via setIncludeDepGraph.
 *
 * @return null if no grid is registered, no view cycle is available, or no subgraph
 *         produces the value; otherwise the update map, wrapped together with the
 *         initial grid structure on the first call after registration
 */
private Object getDepGraphIfRequested(WebGridCell cell, String calcConfigName, ValueSpecification valueSpecification, Long resultTimestamp) {
WebViewDepGraphGrid depGraphGrid = _depGraphGrids.get(cell);
if (depGraphGrid == null) {
return null;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
return null;
}
try {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
// First call since registration - resolve the subgraph producing the cell's value
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
return null;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
if (gridStructure != null) {
// Newly initialised: bundle the grid structure together with its first update
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
return structureMessage;
} else {
return depGraph;
}
} finally {
// Always release the cycle reference so engine resources can be freed
cycleReference.release();
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
  // Two header rows: column headers then column descriptions, with room at the
  // start of each row for any subclass-supplied columns.
  final Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
  final int columnCount = columns.size() + getAdditionalCsvColumnCount();
  final String[] headerRow = new String[columnCount];
  final String[] descriptionRow = new String[columnCount];
  supplementCsvColumnHeaders(headerRow);
  final int offset = getCsvDataColumnOffset();
  for (final WebViewGridColumn column : columns) {
    final int idx = offset + column.getId();
    headerRow[idx] = column.getHeader();
    descriptionRow[idx] = column.getDescription();
  }
  return new String[][] {headerRow, descriptionRow};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
  // One row of text per known target; entries for targets absent from the result remain null.
  final String[][] rows = new String[getGridStructure().getTargets().size()][];
  final int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
  final int offset = getCsvDataColumnOffset();
  for (ComputationTargetSpecification target : result.getAllTargets()) {
    final Integer rowId = getGridStructure().getRowId(target.getUniqueId());
    if (rowId == null) {
      // Target not represented in this grid
      continue;
    }
    final ViewTargetResultModel resultModel = result.getTargetResult(target);
    final String[] values = new String[columnCount];
    supplementCsvRowData(rowId, target, values);
    rows[rowId] = values;
    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
      for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
        final Object originalValue = value.getValue();
        if (originalValue == null) {
          continue;
        }
        final ValueSpecification specification = value.getSpecification();
        final Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
        if (columns == null) {
          // Expect a column for every value
          s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
          continue;
        }
        for (WebViewGridColumn column : columns) {
          // originalValue is known non-null here (null values are skipped above), so the
          // converter lookup is unconditional. The previous 'originalValue != null ? ... : null'
          // ternary was a dead branch that implied a possible null dereference below.
          final ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
          values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
        }
      }
    }
  }
  return rows;
}
// Number of extra CSV columns contributed by a subclass; 0 by default.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Index of the first grid-data column in a CSV row, after any subclass columns; 0 by default.
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill in headers for their additional CSV columns; no-op by default.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill in values for their additional CSV columns; no-op by default.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
  // Collect a column key for every requirement, across all calculation configurations,
  // that is relevant to the given target types.
  final List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
  // Portfolio outputs apply whenever positions or portfolio nodes are among the target types;
  // the predicate is loop-invariant so it is evaluated once up front.
  final boolean includePortfolioOutputs = targetTypes.contains(ComputationTargetType.POSITION)
      || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
  for (final ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
    final String calcConfigName = calcConfig.getName();
    if (includePortfolioOutputs) {
      for (final Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
        result.add(new RequirementBasedColumnKey(calcConfigName, portfolioOutput.getFirst(), portfolioOutput.getSecond()));
      }
    }
    for (final ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
      if (targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
        result.add(new RequirementBasedColumnKey(calcConfigName, specificRequirement.getValueName(), specificRequirement.getConstraints()));
      }
    }
  }
  return result;
}
}
Left
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.cometd.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
// Root of the CometD channel namespace used for grid structure updates
private static final String GRID_STRUCTURE_ROOT_CHANNEL = "/gridStructure";
// Channel on which this grid's column structure updates are published
private final String _columnStructureChannel;
// Maps the view's requirements onto this grid's rows and columns
private final RequirementBasedGridStructure _gridStructure;
// Client-side representation used for null cell values
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
/**
 * Creates the grid, deriving its column structure from those requirements of the compiled
 * view definition that match the given target types.
 */
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
/**
 * Converts the latest results for a target into a row update message and delivers it to
 * the remote client. Unsatisfied cells are sent as explicit nulls.
 */
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
// converter is null for null values; processCellValue decides whether the cell is sent
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
// Seeds a row update message with its row identifier.
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
return valuesToSend;
}
/**
 * Pushes an update for every open dependency graph grid, lazily initialising each
 * grid's structure from the latest view cycle on first use.
 */
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
// First update for this grid - resolve the subgraph producing the parent cell's value
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
Object depGraphMessage = null;
if (gridStructure != null) {
// Newly initialised: bundle the grid structure together with its first update
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
// NOTE(review): looks like leftover debug output logged at WARN with string
// concatenation - consider demoting to debug with a parameterized message
s_logger.warn("Dep graph message: " + depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
// Always release the cycle reference so engine resources can be freed
cycleReference.release();
}
}
// Resolves (and caches) the converter for a value name; on the first typed value for a
// column, also re-sends the column's structure so the client learns its data type.
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
sendColumnDetails(Collections.singleton(column));
}
return converter;
}
// Publishes the JSON structure of the given columns on the column-structure channel.
private void sendColumnDetails(Collection<WebViewGridColumn> columnDetails) {
getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(columnDetails), null);
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
// Extend the base grid structure with this grid's column definitions
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
// One JSON map per target row: the row identifier plus subclass-supplied details
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey().getUniqueId();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
// Keys each column's JSON structure by its string column identifier.
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn columnDetails : columns) {
columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
}
return columnStructures;
}
// Builds the JSON description of a single column for the client.
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
/**
 * Adds subclass-specific entries to the JSON structure of a row.
 */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
/**
 * @return the requirement-based structure describing this grid's rows and columns
 */
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Marks a column as one whose cell values should have history recorded.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, so only the cell's column identifier matters
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
/**
 * Enables or disables dependency graph tracking for a cell. Returns the grid created
 * (or, when disabling, the one removed - possibly null).
 */
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
// NOTE(review): timing logged at WARN - looks like leftover profiling; consider debug
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
WebViewDepGraphGrid grid = _depGraphGrids.remove(cell);
return grid;
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
// Two header rows: column headers then descriptions, offset by any subclass columns
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
// One row of text per known target; rows for targets absent from the result stay null
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
int colId = column.getId();
// originalValue is non-null here (nulls skipped above), so converter cannot be null
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
}
}
}
}
return rows;
}
// Number of extra CSV columns contributed by a subclass; 0 by default.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Index of the first grid-data column in a CSV row; 0 by default.
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill in headers for their additional CSV columns.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill in values for their additional CSV columns.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
// Collects a column key for every requirement, across all calculation configurations,
// that is relevant to the given target types.
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.cometd.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private static final String GRID_STRUCTURE_ROOT_CHANNEL = "/gridStructure";
private final String _columnStructureChannel;
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
/**
 * Creates the grid, deriving its column structure from those requirements of the compiled
 * view definition that match the given target types.
 *
 * @param name the grid name, also used to build its column-structure channel
 * @param nullCellValue the client-side representation used for null cell values
 */
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
/**
 * Converts the latest results for a target into a row update message and delivers it to
 * the remote client. Unsatisfied cells are sent as explicit nulls; history-enabled cells
 * are processed even when the row is outside the client's viewport.
 */
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
// converter is null for null values; processCellValue decides whether the cell is sent
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
private Map<String, Object> createTargetResult(Integer rowId) {
  // Every outgoing row message starts with its row identifier.
  final Map<String, Object> message = new HashMap<String, Object>();
  message.put("rowId", rowId);
  return message;
}
/**
 * Pushes an update for every open dependency graph grid, lazily initialising each
 * grid's structure from the latest view cycle on first use.
 *
 * @param resultTimestamp the timestamp associated with the results being processed
 */
public void processDepGraphs(long resultTimestamp) {
  if (_depGraphGrids.isEmpty()) {
    return;
  }
  // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
  EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
  if (cycleReference == null) {
    // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
    s_logger.warn("Unable to get a cycle reference");
    return;
  }
  try {
    for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
      Object gridStructure = null;
      if (!depGraphGrid.isInit()) {
        // First update for this grid - resolve the subgraph producing the parent cell's value
        String calcConfigName = depGraphGrid.getParentCalcConfigName();
        ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
        DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
        DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
        if (subgraph == null) {
          s_logger.warn("No subgraph producing value specification {}", valueSpecification);
          continue;
        }
        if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
          gridStructure = depGraphGrid.getInitialJsonGridStructure();
        }
      }
      Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
      Object depGraphMessage;
      if (gridStructure != null) {
        // Newly initialised: bundle the grid structure together with its first update
        Map<String, Object> structureMessage = new HashMap<String, Object>();
        structureMessage.put("grid", gridStructure);
        structureMessage.put("update", depGraph);
        depGraphMessage = structureMessage;
      } else {
        depGraphMessage = depGraph;
      }
      Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
      Map<String, Object> columnMessage = new HashMap<String, Object>();
      columnMessage.put("dg", depGraphMessage);
      // Leftover debug output was logged at WARN via string concatenation; demoted to a
      // parameterized debug message so the formatting cost is only paid when enabled.
      s_logger.debug("Dep graph message: {}", depGraphMessage);
      valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
      getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
    }
  } finally {
    // Always release the cycle reference so engine resources can be freed
    cycleReference.release();
  }
}
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
  // Ensure the converter is cached against the value name before sending the column details
  final ResultConverter<Object> resolved = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
  if (column.isTypeKnown()) {
    return resolved;
  }
  // First value seen for this column - push its (now typed) structure to the client.
  sendColumnDetails(Collections.singleton(column));
  return resolved;
}
private void sendColumnDetails(Collection<WebViewGridColumn> columnDetails) {
  // Publish the JSON structure of the given columns on this grid's column-structure channel.
  final Object structures = getJsonColumnStructures(columnDetails);
  getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, structures, null);
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
  // Extend the base grid structure with this grid's column definitions.
  final Map<String, Object> structure = super.getInitialJsonGridStructure();
  structure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
  return structure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
  // One JSON map per target row: the row identifier plus subclass-supplied details.
  final List<Object> rows = new ArrayList<Object>();
  for (final Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
    final int rowId = targetEntry.getValue();
    final Map<String, Object> rowDetails = new HashMap<String, Object>();
    rowDetails.put("rowId", rowId);
    addRowDetails(targetEntry.getKey().getUniqueId(), rowId, rowDetails);
    rows.add(rowDetails);
  }
  return rows;
}
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
  // Key each column's JSON structure by its string column identifier.
  final Map<String, Object> structures = new HashMap<String, Object>(columns.size());
  for (final WebViewGridColumn column : columns) {
    structures.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
  }
  return structures;
}
/**
 * Builds the JSON description of a single column: ID, header, description and
 * the null-cell placeholder, plus the data type when the converter cache
 * already knows it. Side effects: marks the column's type as known, and
 * registers DOUBLE columns for history collection.
 */
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
// Only known when some value of this name has already passed through the converter cache
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
/** Adds grid-specific details (beyond "rowId") to the given row structure for the target. */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
// The requirement-based column/row structure backing this grid.
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Records that history should be kept for the given column.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, not per individual cell.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
/**
 * Enables or disables dependency graph tracking for a cell.
 *
 * @return the dep graph grid now registered for the cell when enabling, or the
 *         grid that was removed (possibly null) when disabling
 */
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
  if (!includeDepGraph) {
    return _depGraphGrids.remove(cell);
  }
  String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
  OperationTimer timer = new OperationTimer(s_logger, "depgraph");
  Pair<String, ValueSpecification> columnMappingPair =
      getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
  s_logger.warn("includeDepGraph took {}", timer.finished());
  WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
      getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
  // FIX: putIfAbsent can lose a race with a concurrent registration; return the
  // grid that actually ended up in the map rather than an orphaned instance.
  WebViewDepGraphGrid existing = _depGraphGrids.putIfAbsent(cell, grid);
  return existing != null ? existing : grid;
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
  // Two header rows: column headers and column descriptions, with room at the
  // start of each row for any subclass-supplied columns.
  Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
  int columnCount = columns.size() + getAdditionalCsvColumnCount();
  String[] headerRow = new String[columnCount];
  String[] descriptionRow = new String[columnCount];
  supplementCsvColumnHeaders(headerRow);
  int offset = getCsvDataColumnOffset();
  for (WebViewGridColumn column : columns) {
    int idx = offset + column.getId();
    headerRow[idx] = column.getHeader();
    descriptionRow[idx] = column.getDescription();
  }
  return new String[][] {headerRow, descriptionRow};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
  // One row of text cells per known target; rows for targets absent from the
  // result (or the grid) are left null.
  String[][] rows = new String[getGridStructure().getTargets().size()][];
  int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
  int offset = getCsvDataColumnOffset();
  for (ComputationTargetSpecification target : result.getAllTargets()) {
    Integer rowId = getGridStructure().getRowId(target);
    if (rowId == null) {
      // Target not represented in this grid
      continue;
    }
    ViewTargetResultModel resultModel = result.getTargetResult(target);
    String[] values = new String[columnCount];
    supplementCsvRowData(rowId, target, values);
    rows[rowId] = values;
    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
      for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
        Object originalValue = value.getValue();
        if (originalValue == null) {
          continue;
        }
        ValueSpecification specification = value.getSpecification();
        Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
        if (columns == null) {
          // Expect a column for every value
          s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
          continue;
        }
        for (WebViewGridColumn column : columns) {
          // originalValue is known non-null here (guarded above), so the old
          // "originalValue != null ? ... : null" ternary was dead code that
          // would have NPE'd on convertToText anyway; skip cells with no converter instead.
          ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
          if (converter == null) {
            continue;
          }
          values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
        }
      }
    }
  }
  return rows;
}
// Number of extra (non-data) CSV columns a subclass adds; default none.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Index at which the data columns start in each CSV row; default no offset.
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill headers for their additional CSV columns; default no-op.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill their additional CSV cells for a row; default no-op.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Collects one column key per (calc config, value name, constraints) triple
 * from the view definition, restricted to the requested target types.
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
  List<RequirementBasedColumnKey> columnKeys = new ArrayList<RequirementBasedColumnKey>();
  for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
    String configName = calcConfig.getName();
    boolean includePortfolio = targetTypes.contains(ComputationTargetType.POSITION)
        || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
    if (includePortfolio) {
      for (Pair<String, ValueProperties> output : calcConfig.getAllPortfolioRequirements()) {
        columnKeys.add(new RequirementBasedColumnKey(configName, output.getFirst(), output.getSecond()));
      }
    }
    for (ValueRequirement requirement : calcConfig.getSpecificRequirements()) {
      if (targetTypes.contains(requirement.getTargetSpecification().getType())) {
        columnKeys.add(new RequirementBasedColumnKey(configName, requirement.getValueName(), requirement.getConstraints()));
      }
    }
  }
  return columnKeys;
}
}
// ===== "Right" (revised) version of RequirementBasedWebViewGrid follows =====
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {

  private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);

  // Column/row layout derived from the view definition's requirements.
  private final RequirementBasedGridStructure _gridStructure;
  // Published to the client as each column's "nullValue" placeholder.
  private final String _nullCellValue;
  // Column-based state: few entries expected so using an array set
  private final LongSet _historyOutputs = new LongArraySet();
  // Cell-based state
  private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();

  /**
   * Creates the grid, deriving its column structure from the requirements of
   * the compiled view definition restricted to the given target types.
   */
  protected RequirementBasedWebViewGrid(String name,
      ViewClient viewClient,
      CompiledViewDefinition compiledViewDefinition,
      List<UniqueId> targets,
      EnumSet<ComputationTargetType> targetTypes,
      ResultConverterCache resultConverterCache,
      String nullCellValue) {
    super(name, viewClient, resultConverterCache);
    List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
    _gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
    _nullCellValue = nullCellValue;
  }

  //-------------------------------------------------------------------------
  // publishes results to the client TODO would it be better if it returned the value?
  /**
   * Builds the JSON row update for one target's results, or null if the target
   * has no row in this grid.
   *
   * @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
   * cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
   */
  public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
      ViewTargetResultModel resultModel,
      Long resultTimestamp) {
    Integer rowId = getGridStructure().getRowId(target.getUniqueId());
    if (rowId == null) {
      // Result not in the grid
      return null; // TODO empty map?
    }
    Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
    // Whether or not the row is in the viewport, we may have to store history
    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
      for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
        ValueSpecification specification = value.getSpecification();
        Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
        if (columns == null) {
          // Expect a column for every value
          s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
          continue;
        }
        Object originalValue = value.getValue();
        for (WebViewGridColumn column : columns) {
          int colId = column.getId();
          WebGridCell cell = WebGridCell.of(rowId, colId);
          // A null value has no converter; getCellValue is expected to handle that.
          ResultConverter<Object> converter = originalValue == null
              ? null
              : getConverter(column, specification.getValueName(), originalValue.getClass());
          Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
          Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
          if (depGraph != null) {
            if (cellData == null) {
              cellData = new HashMap<String, Object>();
            }
            cellData.put("dg", depGraph);
          }
          if (cellData != null) {
            valuesToSend.put(Integer.toString(colId), cellData);
          }
        }
      }
    }
    return valuesToSend; // TODO empty map if null?
  }

  /**
   * Base result for a row: the row ID plus explicit nulls for every column
   * reported as unsatisfiable for that row.
   */
  private Map<String, Object> createDefaultTargetResult(Integer rowId) {
    Map<String, Object> valuesToSend = new HashMap<String, Object>();
    valuesToSend.put("rowId", rowId);
    for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
      valuesToSend.put(Integer.toString(unsatisfiedColId), null);
    }
    return valuesToSend;
  }

  // TODO this publishes to the client. not nice for a method named get*
  /**
   * Looks up (and caches) the converter for the given value name and runtime type.
   * NOTE(review): the isTypeKnown branch is currently a no-op — the client
   * notification was disabled; confirm whether it should be restored.
   */
  @SuppressWarnings("unchecked")
  private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
    // Ensure the converter is cached against the value name before sending the column details
    ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
    if (!column.isTypeKnown()) {
      // TODO what's this all about?
      //getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
    }
    return converter;
  }

  @Override
  public Map<String, Object> getInitialJsonGridStructure() {
    Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
    gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
    return gridStructure;
  }

  @Override
  protected List<Object> getInitialJsonRowStructures() {
    // One map per target row: {"rowId": n, ...subclass-specific details}
    List<Object> rowStructures = new ArrayList<Object>();
    for (Map.Entry<UniqueId, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
      Map<String, Object> rowDetails = new HashMap<String, Object>();
      UniqueId target = targetEntry.getKey();
      int rowId = targetEntry.getValue();
      rowDetails.put("rowId", rowId);
      addRowDetails(target, rowId, rowDetails);
      rowStructures.add(rowDetails);
    }
    return rowStructures;
  }

  /** Maps column ID (as a string) to the JSON structure of each column. */
  private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
    Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
    for (WebViewGridColumn columnDetails : columns) {
      columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
    }
    return columnStructures;
  }

  /**
   * Builds the JSON description of one column. Side effects: marks the column's
   * type as known when the converter cache can name it, and registers DOUBLE
   * columns for history collection.
   */
  private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
    Map<String, Object> detailsToSend = new HashMap<String, Object>();
    long colId = column.getId();
    detailsToSend.put("colId", colId);
    detailsToSend.put("header", column.getHeader());
    detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
    detailsToSend.put("nullValue", _nullCellValue);
    String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
    if (resultType != null) {
      column.setTypeKnown(true);
      detailsToSend.put("dataType", resultType);
      // Hack - the client should decide which columns it requires history for, taking into account the capabilities of
      // the renderer.
      if (resultType.equals("DOUBLE")) {
        addHistoryOutput(column.getId());
      }
    }
    return detailsToSend;
  }

  /** Adds grid-specific details (beyond "rowId") to the given row structure for the target. */
  protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);

  //-------------------------------------------------------------------------
  protected RequirementBasedGridStructure getGridStructure() {
    return _gridStructure;
  }

  //-------------------------------------------------------------------------
  // Records that history should be kept for the given column.
  private void addHistoryOutput(long colId) {
    _historyOutputs.add(colId);
  }

  @Override
  protected boolean isHistoryOutput(WebGridCell cell) {
    // History is tracked per column, not per individual cell.
    return _historyOutputs.contains(cell.getColumnId());
  }

  //-------------------------------------------------------------------------
  // TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
  public WebViewGrid getDepGraphGrid(String name) {
    // TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
    throw new UnsupportedOperationException("getDepGraphGrid not implemented");
  }

  /* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
    // TODO implement
  }

  // TODO move this logic to updateDepGraphCells
  /**
   * Enables or disables dependency graph tracking for a cell.
   *
   * @return the dep graph grid now registered for the cell when enabling, or
   *         the grid that was removed (possibly null) when disabling
   */
  public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
    if (includeDepGraph) {
      String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
      WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
      // FIX: putIfAbsent can lose a race with a concurrent registration; return
      // the grid actually in the map rather than an orphaned instance.
      WebViewDepGraphGrid existing = _depGraphGrids.putIfAbsent(cell, grid);
      return existing != null ? existing : grid;
    } else {
      return _depGraphGrids.remove(cell);
    }
  }

  /**
   * Returns dep graph data for the cell if a dep graph grid is registered for
   * it, otherwise null. On first use the grid is initialised from the subgraph
   * producing the cell's value specification; the returned map then also
   * carries the grid structure under "grid".
   */
  private Object getDepGraphIfRequested(WebGridCell cell, String calcConfigName, ValueSpecification valueSpecification, Long resultTimestamp) {
    WebViewDepGraphGrid depGraphGrid = _depGraphGrids.get(cell);
    if (depGraphGrid == null) {
      return null;
    }
    // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
    EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
    if (cycleReference == null) {
      // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
      return null;
    }
    try {
      Object gridStructure = null;
      if (!depGraphGrid.isInit()) {
        DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
        DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
        if (subgraph == null) {
          s_logger.warn("No subgraph producing value specification {}", valueSpecification);
          return null;
        }
        if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
          gridStructure = depGraphGrid.getInitialJsonGridStructure();
        }
      }
      Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
      if (gridStructure != null) {
        Map<String, Object> structureMessage = new HashMap<String, Object>();
        structureMessage.put("grid", gridStructure);
        structureMessage.put("update", depGraph);
        return structureMessage;
      } else {
        return depGraph;
      }
    } finally {
      // Always release the engine cycle reference so resources can be reclaimed.
      cycleReference.release();
    }
  }

  //-------------------------------------------------------------------------
  @Override
  protected String[][] getCsvColumnHeaders() {
    // Two header rows: column headers and column descriptions.
    Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
    int additionalColumns = getAdditionalCsvColumnCount();
    int columnCount = columns.size() + additionalColumns;
    String[] header1 = new String[columnCount];
    String[] header2 = new String[columnCount];
    supplementCsvColumnHeaders(header1);
    int offset = getCsvDataColumnOffset();
    for (WebViewGridColumn column : columns) {
      header1[offset + column.getId()] = column.getHeader();
      header2[offset + column.getId()] = column.getDescription();
    }
    return new String[][] {header1, header2};
  }

  @Override
  protected String[][] getCsvRows(ViewComputationResultModel result) {
    String[][] rows = new String[getGridStructure().getTargets().size()][];
    int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
    int offset = getCsvDataColumnOffset();
    for (ComputationTargetSpecification target : result.getAllTargets()) {
      Integer rowId = getGridStructure().getRowId(target.getUniqueId());
      if (rowId == null) {
        // Target not represented in this grid
        continue;
      }
      ViewTargetResultModel resultModel = result.getTargetResult(target);
      String[] values = new String[columnCount];
      supplementCsvRowData(rowId, target, values);
      rows[rowId] = values;
      for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
        for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
          Object originalValue = value.getValue();
          if (originalValue == null) {
            continue;
          }
          ValueSpecification specification = value.getSpecification();
          Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
          if (columns == null) {
            // Expect a column for every value
            s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
            continue;
          }
          for (WebViewGridColumn column : columns) {
            // originalValue is non-null here (guarded above), so the old
            // "originalValue != null ? ... : null" ternary was dead code that
            // would have NPE'd on convertToText anyway; skip cells with no converter.
            ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
            if (converter == null) {
              continue;
            }
            values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
          }
        }
      }
    }
    return rows;
  }

  // Number of extra (non-data) CSV columns a subclass adds; default none.
  protected int getAdditionalCsvColumnCount() {
    return 0;
  }

  // Index at which the data columns start in each CSV row; default no offset.
  protected int getCsvDataColumnOffset() {
    return 0;
  }

  // Hook for subclasses to fill headers for their additional CSV columns; default no-op.
  protected void supplementCsvColumnHeaders(String[] headers) {
  }

  // Hook for subclasses to fill their additional CSV cells for a row; default no-op.
  protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
  }

  //-------------------------------------------------------------------------
  /**
   * Collects one column key per (calc config, value name, constraints) triple
   * from the view definition, restricted to the requested target types.
   */
  private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
    List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
    for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
      String calcConfigName = calcConfig.getName();
      if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
        for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
          String valueName = portfolioOutput.getFirst();
          ValueProperties constraints = portfolioOutput.getSecond();
          result.add(new RequirementBasedColumnKey(calcConfigName, valueName, constraints));
        }
      }
      for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
        if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
          continue;
        }
        String valueName = specificRequirement.getValueName();
        ValueProperties constraints = specificRequirement.getConstraints();
        result.add(new RequirementBasedColumnKey(calcConfigName, valueName, constraints));
      }
    }
    return result;
  }
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
// Column/row layout derived from the view definition's requirements.
private final RequirementBasedGridStructure _gridStructure;
// Published to the client as each column's "nullValue" placeholder.
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
/**
 * Creates the grid, deriving its column structure from the requirements of the
 * compiled view definition restricted to the given target types.
 */
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
// publishes results to the client TODO would it be better if it returned the value?
/**
* @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
* cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
*/
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
// Start from the row ID plus explicit nulls for unsatisfiable columns.
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
// A null value has no converter; getCellValue must handle the null converter.
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
// Attach dependency graph data when tracking was requested for this cell.
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
return valuesToSend; // TODO empty map if null?
}
/**
 * Base result for a row: the row ID plus an explicit null entry for every
 * column reported as unsatisfied for that row.
 */
private Map<String, Object> createDefaultTargetResult(Integer rowId) {
  Map<String, Object> result = new HashMap<String, Object>();
  result.put("rowId", rowId);
  for (Integer colId : getGridStructure().getUnsatisfiedCells(rowId)) {
    result.put(Integer.toString(colId), null);
  }
  return result;
}
// TODO this publishes to the client. not nice for a method named get*
/**
 * Looks up (and caches) the converter for the given value name and runtime type.
 * NOTE(review): the isTypeKnown branch is currently a no-op — the client
 * notification below was disabled; confirm whether it should be restored.
 */
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
  // Extend the base grid structure with this grid's column definitions.
  Map<String, Object> structure = super.getInitialJsonGridStructure();
  Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
  structure.put("columns", getJsonColumnStructures(columns));
  return structure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
  // One map per target row: {"rowId": n, ...subclass-specific details}
  List<Object> rows = new ArrayList<Object>();
  for (Map.Entry<UniqueId, Integer> entry : getGridStructure().getTargets().entrySet()) {
    int rowId = entry.getValue();
    Map<String, Object> row = new HashMap<String, Object>();
    row.put("rowId", rowId);
    addRowDetails(entry.getKey(), rowId, row);
    rows.add(row);
  }
  return rows;
}
// Keyed by column ID (as a string) for direct lookup on the client side.
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
  Map<String, Object> result = new HashMap<String, Object>(columns.size());
  for (WebViewGridColumn column : columns) {
    result.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
  }
  return result;
}
/**
 * Builds the JSON description of one column. Side effects: marks the column's
 * type as known when the converter cache can name it, and registers DOUBLE
 * columns for history collection.
 */
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
// Only known when some value of this name has already passed through the converter cache
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
/** Adds grid-specific details (beyond "rowId") to the given row structure for the target. */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
// The requirement-based column/row structure backing this grid.
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Records that history should be kept for the given column.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, not per individual cell.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
// Not yet supported; always throws. Callers must not rely on this yet.
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
// Intentionally a no-op until dependency graph cell updates are implemented.
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
// TODO move this logic to updateDepGraphCells
/**
 * Enables or disables dependency graph tracking for a cell.
 *
 * @return the dep graph grid now registered for the cell when enabling, or the
 *         grid that was removed (possibly null) when disabling
 */
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
  if (includeDepGraph) {
    String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
    WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
    // FIX: putIfAbsent can lose a race with a concurrent registration; return
    // the grid actually in the map rather than an orphaned instance.
    WebViewDepGraphGrid existing = _depGraphGrids.putIfAbsent(cell, grid);
    return existing != null ? existing : grid;
  } else {
    return _depGraphGrids.remove(cell);
  }
}
/**
 * Returns dep graph data for the cell if a dep graph grid is registered for it,
 * otherwise null. On first use the grid is initialised from the subgraph
 * producing the cell's value specification; the returned map then also carries
 * the grid structure under "grid".
 */
private Object getDepGraphIfRequested(WebGridCell cell, String calcConfigName, ValueSpecification valueSpecification, Long resultTimestamp) {
WebViewDepGraphGrid depGraphGrid = _depGraphGrids.get(cell);
if (depGraphGrid == null) {
return null;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
return null;
}
try {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
// First request for this cell: locate and bind the producing subgraph.
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
return null;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
if (gridStructure != null) {
// Bundle the structure with the first update so the client can build the grid.
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
return structureMessage;
} else {
return depGraph;
}
} finally {
// Always release the engine cycle reference so resources can be reclaimed.
cycleReference.release();
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
  // Two header rows: column headers and column descriptions, with room at the
  // start of each row for any subclass-supplied columns.
  Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
  int columnCount = columns.size() + getAdditionalCsvColumnCount();
  String[] headerRow = new String[columnCount];
  String[] descriptionRow = new String[columnCount];
  supplementCsvColumnHeaders(headerRow);
  int offset = getCsvDataColumnOffset();
  for (WebViewGridColumn column : columns) {
    int idx = offset + column.getId();
    headerRow[idx] = column.getHeader();
    descriptionRow[idx] = column.getDescription();
  }
  return new String[][] {headerRow, descriptionRow};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
    // One row per known target; entries for targets absent from the result remain null.
    String[][] rows = new String[getGridStructure().getTargets().size()][];
    int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
    int offset = getCsvDataColumnOffset();
    for (ComputationTargetSpecification target : result.getAllTargets()) {
        Integer rowId = getGridStructure().getRowId(target.getUniqueId());
        if (rowId == null) {
            // Target not represented in the grid structure
            continue;
        }
        ViewTargetResultModel resultModel = result.getTargetResult(target);
        String[] values = new String[columnCount];
        supplementCsvRowData(rowId, target, values);
        rows[rowId] = values;
        for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
            for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
                Object originalValue = value.getValue();
                if (originalValue == null) {
                    continue;
                }
                ValueSpecification specification = value.getSpecification();
                Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
                if (columns == null) {
                    // Expect a column for every value
                    s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
                    continue;
                }
                for (WebViewGridColumn column : columns) {
                    int colId = column.getId();
                    // originalValue is guaranteed non-null here (checked above), so resolve the
                    // converter unconditionally; the previous redundant null-check ternary could
                    // only ever have yielded a null converter that would NPE on the next line.
                    ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
                    values[offset + colId] = converter.convertToText(getConverterCache(), specification, originalValue);
                }
            }
        }
    }
    return rows;
}
// Number of extra (non-grid) CSV columns contributed by a subclass; default none.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Index of the first grid data column within a CSV row; default 0 (no leading columns).
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill in headers for their additional CSV columns.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill in per-row data for their additional CSV columns.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Collects the column keys implied by a view definition: all portfolio requirements (when
 * position/portfolio-node targets are shown) plus any specific requirements whose target
 * type is included.
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
    List<RequirementBasedColumnKey> keys = new ArrayList<RequirementBasedColumnKey>();
    // Loop-invariant: whether portfolio outputs are relevant at all
    boolean includePortfolioOutputs = targetTypes.contains(ComputationTargetType.POSITION)
        || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
    for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
        String configName = calcConfig.getName();
        if (includePortfolioOutputs) {
            for (Pair<String, ValueProperties> output : calcConfig.getAllPortfolioRequirements()) {
                keys.add(new RequirementBasedColumnKey(configName, output.getFirst(), output.getSecond()));
            }
        }
        for (ValueRequirement requirement : calcConfig.getSpecificRequirements()) {
            if (targetTypes.contains(requirement.getTargetSpecification().getType())) {
                keys.add(new RequirementBasedColumnKey(configName, requirement.getValueName(), requirement.getConstraints()));
            }
        }
    }
    return keys;
}
}
MergeMethods
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
/**
 * Creates a requirement-based grid whose columns are derived from the view definition's
 * requirements for the given target types, delivering updates via the supplied clients.
 */
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
// NOTE(review): _columnStructureChannel and GRID_STRUCTURE_ROOT_CHANNEL are not declared in
// this class - presumably inherited from WebViewGrid; confirm they exist there.
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
/**
 * Builds and delivers the update message for a single target's results. The message always
 * carries the row ID plus explicit nulls for unsatisfied cells; when a result model is
 * supplied, each computed value is converted and added under its column ID.
 *
 * @param target the computation target whose results are being delivered
 * @param resultModel the per-target results, or null to report only unsatisfied cells
 * @param resultTimestamp the timestamp of the result cycle, used for history tracking
 * @return the message that was delivered, or {@code null} if the target is not in the grid
 */
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
    ViewTargetResultModel resultModel,
    Long resultTimestamp) {
    Integer rowId = getGridStructure().getRowId(target);
    if (rowId == null) {
        // Result not in the grid. Was a bare 'return;', which does not compile in a
        // Map-returning method; return null to signal "nothing delivered".
        return null;
    }
    Map<String, Object> valuesToSend = createTargetResult(rowId);
    for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
        valuesToSend.put(Integer.toString(unsatisfiedColId), null);
    }
    // Whether or not the row is in the viewport, we may have to store history
    if (resultModel != null) {
        for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
            for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
                ValueSpecification specification = value.getSpecification();
                Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
                if (columns == null) {
                    // Expect a column for every value
                    s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
                    continue;
                }
                Object originalValue = value.getValue();
                for (WebViewGridColumn column : columns) {
                    int colId = column.getId();
                    WebGridCell cell = WebGridCell.of(rowId, colId);
                    // Value may legitimately be null here; converter is resolved only when not
                    ResultConverter<Object> converter = originalValue != null ? getConverter(column, specification.getValueName(), originalValue.getClass()) : null;
                    Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
                    if (cellData != null) {
                        valuesToSend.put(Integer.toString(colId), cellData);
                    }
                }
            }
        }
    }
    getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
    return valuesToSend;
}
// Seeds a per-row update message with its row identifier.
private Map<String, Object> createTargetResult(Integer rowId) {
    Map<String, Object> message = new HashMap<String, Object>();
    message.put("rowId", rowId);
    return message;
}
/**
 * Computes and delivers a dependency graph update for every cell with dep graph
 * introspection enabled. No-op when no dep graph grids exist or no cycle is available.
 *
 * @param resultTimestamp the timestamp of the result cycle being reported
 */
public void processDepGraphs(long resultTimestamp) {
    if (_depGraphGrids.isEmpty()) {
        return;
    }
    // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
    EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
    if (cycleReference == null) {
        // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
        s_logger.warn("Unable to get a cycle reference");
        return;
    }
    try {
        for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
            Object gridStructure = null;
            if (!depGraphGrid.isInit()) {
                // Lazily initialise the grid from the subgraph producing its parent cell's value
                String calcConfigName = depGraphGrid.getParentCalcConfigName();
                ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
                DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
                DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
                if (subgraph == null) {
                    s_logger.warn("No subgraph producing value specification {}", valueSpecification);
                    continue;
                }
                if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
                    gridStructure = depGraphGrid.getInitialJsonGridStructure();
                }
            }
            Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
            Object depGraphMessage;
            if (gridStructure != null) {
                // First update after initialisation: bundle the structure with the data
                Map<String, Object> structureMessage = new HashMap<String, Object>();
                structureMessage.put("grid", gridStructure);
                structureMessage.put("update", depGraph);
                depGraphMessage = structureMessage;
            } else {
                depGraphMessage = depGraph;
            }
            Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
            Map<String, Object> columnMessage = new HashMap<String, Object>();
            columnMessage.put("dg", depGraphMessage);
            // Diagnostic tracing was previously logged at WARN via string concatenation;
            // parameterized DEBUG builds the message only when that level is enabled.
            s_logger.debug("Dep graph message: {}", depGraphMessage);
            valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
            getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
        }
    } finally {
        // Always release the cycle reference so engine resources are not leaked
        cycleReference.release();
    }
}
/**
 * Creates a requirement-based grid from target unique IDs, without explicit messaging
 * clients (the simpler superclass constructor is used). Columns are derived from the view
 * definition's requirements for the given target types.
 */
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
// Resolves (and caches) the result converter for a value name/type pair.
// The unchecked cast is confined to this method; the cache returns a raw-ish converter.
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
// NOTE(review): branch is intentionally empty - the delivery below is disabled; it
// presumably once pushed updated column metadata to the client. Confirm before removing.
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
    // Extend the base structure with this grid's column definitions.
    Map<String, Object> structure = super.getInitialJsonGridStructure();
    structure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
    return structure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
    // One row-details map per target, keyed entries supplied by the subclass hook.
    List<Object> result = new ArrayList<Object>();
    for (Map.Entry<ComputationTargetSpecification, Integer> entry : getGridStructure().getTargets().entrySet()) {
        int row = entry.getValue();
        Map<String, Object> details = new HashMap<String, Object>();
        details.put("rowId", row);
        addRowDetails(entry.getKey().getUniqueId(), row, details);
        result.add(details);
    }
    return result;
}
// Maps each column's ID (as a string key) to its JSON structure description.
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
    Map<String, Object> structures = new HashMap<String, Object>(columns.size());
    for (WebViewGridColumn column : columns) {
        structures.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
    }
    return structures;
}
// Describes a single column for the client: id, header, description, null placeholder and,
// when the result type is already known, its data type (with history enabled for doubles).
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
    Map<String, Object> details = new HashMap<String, Object>();
    long colId = column.getId();
    details.put("colId", colId);
    details.put("header", column.getHeader());
    details.put("description", column.getValueName() + ":\n" + column.getDescription());
    details.put("nullValue", _nullCellValue);
    String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
    if (resultType == null) {
        return details;
    }
    column.setTypeKnown(true);
    details.put("dataType", resultType);
    // Hack - the client should decide which columns it requires history for, taking into
    // account the capabilities of the renderer.
    if ("DOUBLE".equals(resultType)) {
        addHistoryOutput(column.getId());
    }
    return details;
}
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
// Accessor for the grid structure built from the view definition's requirements.
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Records that the given column should have value history tracked for it.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, so only the cell's column matters here.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
// Unimplemented: looking up a dep graph grid by name is not yet supported.
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
// Unimplemented hook: intended to reconcile the set of cells with dep graphs enabled.
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
    // Produces two header rows: the column names and the column descriptions.
    Collection<WebViewGridColumn> gridColumns = getGridStructure().getColumns();
    int totalColumns = getAdditionalCsvColumnCount() + gridColumns.size();
    String[] names = new String[totalColumns];
    String[] descriptions = new String[totalColumns];
    // Subclasses fill in headers for any leading columns they contribute (names row only)
    supplementCsvColumnHeaders(names);
    int dataOffset = getCsvDataColumnOffset();
    for (WebViewGridColumn gridColumn : gridColumns) {
        int idx = dataOffset + gridColumn.getId();
        names[idx] = gridColumn.getHeader();
        descriptions[idx] = gridColumn.getDescription();
    }
    return new String[][] {names, descriptions};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
    // One row per known target; entries for targets absent from the result remain null.
    String[][] rows = new String[getGridStructure().getTargets().size()][];
    int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
    int offset = getCsvDataColumnOffset();
    for (ComputationTargetSpecification target : result.getAllTargets()) {
        Integer rowId = getGridStructure().getRowId(target);
        if (rowId == null) {
            // Target not represented in the grid structure
            continue;
        }
        ViewTargetResultModel resultModel = result.getTargetResult(target);
        String[] values = new String[columnCount];
        supplementCsvRowData(rowId, target, values);
        rows[rowId] = values;
        for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
            for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
                Object originalValue = value.getValue();
                if (originalValue == null) {
                    continue;
                }
                ValueSpecification specification = value.getSpecification();
                Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
                if (columns == null) {
                    // Expect a column for every value
                    s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
                    continue;
                }
                for (WebViewGridColumn column : columns) {
                    int colId = column.getId();
                    // originalValue is guaranteed non-null here (checked above), so resolve the
                    // converter unconditionally; the previous redundant null-check ternary could
                    // only ever have yielded a null converter that would NPE on the next line.
                    ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
                    values[offset + colId] = converter.convertToText(getConverterCache(), specification, originalValue);
                }
            }
        }
    }
    return rows;
}
// Number of extra (non-grid) CSV columns contributed by a subclass; default none.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Index of the first grid data column within a CSV row; default 0 (no leading columns).
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill in headers for their additional CSV columns.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill in per-row data for their additional CSV columns.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Collects the column keys implied by a view definition: all portfolio requirements (when
 * position/portfolio-node targets are shown) plus any specific requirements whose target
 * type is included.
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
    List<RequirementBasedColumnKey> keys = new ArrayList<RequirementBasedColumnKey>();
    // Loop-invariant: whether portfolio outputs are relevant at all
    boolean includePortfolioOutputs = targetTypes.contains(ComputationTargetType.POSITION)
        || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
    for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
        String configName = calcConfig.getName();
        if (includePortfolioOutputs) {
            for (Pair<String, ValueProperties> output : calcConfig.getAllPortfolioRequirements()) {
                keys.add(new RequirementBasedColumnKey(configName, output.getFirst(), output.getSecond()));
            }
        }
        for (ValueRequirement requirement : calcConfig.getSpecificRequirements()) {
            if (targetTypes.contains(requirement.getTargetSpecification().getType())) {
                keys.add(new RequirementBasedColumnKey(configName, requirement.getValueName(), requirement.getConstraints()));
            }
        }
    }
    return keys;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
/**
 * Creates a requirement-based grid whose columns are derived from the view definition's
 * requirements for the given target types, delivering updates via the supplied clients.
 */
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
// NOTE(review): _columnStructureChannel and GRID_STRUCTURE_ROOT_CHANNEL are not declared in
// this class - presumably inherited from WebViewGrid; confirm they exist there.
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
/**
 * Builds and delivers the update message for a single target's results. The message always
 * carries the row ID plus explicit nulls for unsatisfied cells; when a result model is
 * supplied, each computed value is converted and added under its column ID.
 *
 * @param target the computation target whose results are being delivered
 * @param resultModel the per-target results, or null to report only unsatisfied cells
 * @param resultTimestamp the timestamp of the result cycle, used for history tracking
 * @return the message that was delivered, or {@code null} if the target is not in the grid
 */
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
    ViewTargetResultModel resultModel,
    Long resultTimestamp) {
    Integer rowId = getGridStructure().getRowId(target);
    if (rowId == null) {
        // Result not in the grid. Was a bare 'return;', which does not compile in a
        // Map-returning method; return null to signal "nothing delivered".
        return null;
    }
    Map<String, Object> valuesToSend = createTargetResult(rowId);
    for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
        valuesToSend.put(Integer.toString(unsatisfiedColId), null);
    }
    // Whether or not the row is in the viewport, we may have to store history
    if (resultModel != null) {
        for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
            for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
                ValueSpecification specification = value.getSpecification();
                Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
                if (columns == null) {
                    // Expect a column for every value
                    s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
                    continue;
                }
                Object originalValue = value.getValue();
                for (WebViewGridColumn column : columns) {
                    int colId = column.getId();
                    WebGridCell cell = WebGridCell.of(rowId, colId);
                    // Value may legitimately be null here; converter is resolved only when not
                    ResultConverter<Object> converter = originalValue != null ? getConverter(column, specification.getValueName(), originalValue.getClass()) : null;
                    Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
                    if (cellData != null) {
                        valuesToSend.put(Integer.toString(colId), cellData);
                    }
                }
            }
        }
    }
    getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
    return valuesToSend;
}
// Seeds a per-row update message with its row identifier.
private Map<String, Object> createTargetResult(Integer rowId) {
    Map<String, Object> message = new HashMap<String, Object>();
    message.put("rowId", rowId);
    return message;
}
/**
 * Computes and delivers a dependency graph update for every cell with dep graph
 * introspection enabled. No-op when no dep graph grids exist or no cycle is available.
 *
 * @param resultTimestamp the timestamp of the result cycle being reported
 */
public void processDepGraphs(long resultTimestamp) {
    if (_depGraphGrids.isEmpty()) {
        return;
    }
    // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
    EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
    if (cycleReference == null) {
        // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
        s_logger.warn("Unable to get a cycle reference");
        return;
    }
    try {
        for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
            Object gridStructure = null;
            if (!depGraphGrid.isInit()) {
                // Lazily initialise the grid from the subgraph producing its parent cell's value
                String calcConfigName = depGraphGrid.getParentCalcConfigName();
                ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
                DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
                DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
                if (subgraph == null) {
                    s_logger.warn("No subgraph producing value specification {}", valueSpecification);
                    continue;
                }
                if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
                    gridStructure = depGraphGrid.getInitialJsonGridStructure();
                }
            }
            Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
            Object depGraphMessage;
            if (gridStructure != null) {
                // First update after initialisation: bundle the structure with the data
                Map<String, Object> structureMessage = new HashMap<String, Object>();
                structureMessage.put("grid", gridStructure);
                structureMessage.put("update", depGraph);
                depGraphMessage = structureMessage;
            } else {
                depGraphMessage = depGraph;
            }
            Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
            Map<String, Object> columnMessage = new HashMap<String, Object>();
            columnMessage.put("dg", depGraphMessage);
            // Diagnostic tracing was previously logged at WARN via string concatenation;
            // parameterized DEBUG builds the message only when that level is enabled.
            s_logger.debug("Dep graph message: {}", depGraphMessage);
            valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
            getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
        }
    } finally {
        // Always release the cycle reference so engine resources are not leaked
        cycleReference.release();
    }
}
/**
 * Creates a requirement-based grid from target unique IDs, without explicit messaging
 * clients (the simpler superclass constructor is used). Columns are derived from the view
 * definition's requirements for the given target types.
 */
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
// Resolves (and caches) the result converter for a value name/type pair.
// The unchecked cast is confined to this method; the cache returns a raw-ish converter.
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
// NOTE(review): branch is intentionally empty - the delivery below is disabled; it
// presumably once pushed updated column metadata to the client. Confirm before removing.
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
    // Extend the base structure with this grid's column definitions.
    Map<String, Object> structure = super.getInitialJsonGridStructure();
    structure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
    return structure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
    // One row-details map per target, keyed entries supplied by the subclass hook.
    List<Object> result = new ArrayList<Object>();
    for (Map.Entry<ComputationTargetSpecification, Integer> entry : getGridStructure().getTargets().entrySet()) {
        int row = entry.getValue();
        Map<String, Object> details = new HashMap<String, Object>();
        details.put("rowId", row);
        addRowDetails(entry.getKey().getUniqueId(), row, details);
        result.add(details);
    }
    return result;
}
// Maps each column's ID (as a string key) to its JSON structure description.
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
    Map<String, Object> structures = new HashMap<String, Object>(columns.size());
    for (WebViewGridColumn column : columns) {
        structures.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
    }
    return structures;
}
// Describes a single column for the client: id, header, description, null placeholder and,
// when the result type is already known, its data type (with history enabled for doubles).
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
    Map<String, Object> details = new HashMap<String, Object>();
    long colId = column.getId();
    details.put("colId", colId);
    details.put("header", column.getHeader());
    details.put("description", column.getValueName() + ":\n" + column.getDescription());
    details.put("nullValue", _nullCellValue);
    String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
    if (resultType == null) {
        return details;
    }
    column.setTypeKnown(true);
    details.put("dataType", resultType);
    // Hack - the client should decide which columns it requires history for, taking into
    // account the capabilities of the renderer.
    if ("DOUBLE".equals(resultType)) {
        addHistoryOutput(column.getId());
    }
    return details;
}
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
// Accessor for the grid structure built from the view definition's requirements.
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Records that the given column should have value history tracked for it.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, so only the cell's column matters here.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
// Unimplemented: looking up a dep graph grid by name is not yet supported.
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
// Unimplemented hook: intended to reconcile the set of cells with dep graphs enabled.
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
// Subclasses may prepend extra columns; offset shifts the data columns to the right of them.
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
// NOTE(review): assumes column ids are dense in [0, columns.size()) -- confirm, otherwise
// offset + column.getId() could exceed columnCount.
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
/**
 * Builds the CSV row data for every target present in the result model.
 * <p>
 * Fixes: the original tested {@code originalValue != null} in a ternary even though null values
 * were already skipped by the {@code continue} above, and then dereferenced the possibly-null
 * converter unconditionally -- a latent NPE whenever no converter is known for a value type.
 * Unconvertible values are now skipped instead.
 *
 * @param result  the computation cycle result to render, not null
 * @return one String[] of cell text per grid row, indexed by row id; rows with no result are null
 */
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
  String[][] rows = new String[getGridStructure().getTargets().size()][];
  int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
  int offset = getCsvDataColumnOffset();
  for (ComputationTargetSpecification target : result.getAllTargets()) {
    Integer rowId = getGridStructure().getRowId(target);
    if (rowId == null) {
      // Target is not represented in this grid
      continue;
    }
    ViewTargetResultModel resultModel = result.getTargetResult(target);
    String[] values = new String[columnCount];
    supplementCsvRowData(rowId, target, values);
    rows[rowId] = values;
    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
      for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
        Object originalValue = value.getValue();
        if (originalValue == null) {
          continue;
        }
        ValueSpecification specification = value.getSpecification();
        Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
        if (columns == null) {
          // Expect a column for every value
          s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
          continue;
        }
        for (WebViewGridColumn column : columns) {
          int colId = column.getId();
          // originalValue is known non-null here, so look the converter up directly.
          ResultConverter<Object> converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
          if (converter == null) {
            // No converter registered for this value type - cannot render as text
            continue;
          }
          values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
        }
      }
    }
  }
  return rows;
}
// Hook: number of extra CSV columns a subclass prepends; default none.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Hook: index of the first data column in a CSV row; default 0 (no prepended columns).
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook: subclasses may fill in headers for their prepended CSV columns; default no-op.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook: subclasses may fill in values for their prepended CSV columns; default no-op.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Collects the column keys (calc config, value name, constraints) implied by a view definition,
 * restricted to the given target types.
 *
 * @param viewDefinition  the view definition to scan, not null
 * @param targetTypes  the target types this grid displays, not null
 * @return one key per portfolio or specific requirement relevant to the target types
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
// Portfolio requirements only apply to position/node grids
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
// Specific requirements are filtered by the concrete target type they reference
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}
// NOTE(review): stray merge-resolution marker ("KeepBothMethods") left between two concatenated
// copies of this file - it is not valid Java and should be deleted when the copies are split apart.
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
return valuesToSend;
}
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
Object depGraphMessage = null;
if (gridStructure != null) {
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
s_logger.warn("Dep graph message: " + depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
cycleReference.release();
}
}
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
// publishes results to the client TODO would it be better if it returned the value?
/**
* @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
* cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
*/
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
return valuesToSend; // TODO empty map if null?
}
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey().getUniqueId();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn columnDetails : columns) {
columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
}
return columnStructures;
}
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
int colId = column.getId();
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
}
}
}
}
return rows;
}
protected int getAdditionalCsvColumnCount() {
return 0;
}
protected int getCsvDataColumnOffset() {
return 0;
}
protected void supplementCsvColumnHeaders(String[] headers) {
}
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
// Immutable row/column mapping computed at construction
private final RequirementBasedGridStructure _gridStructure;
// Text the client should render for null cells
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
// NOTE(review): assigns _columnStructureChannel and takes Client parameters, neither of which is
// declared/imported in the visible portion of this file - presumably inherited or lost in the
// merge; confirm before building.
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
/**
 * Pushes the latest results for a single computation target to the client as one row update.
 *
 * @param target  the target whose results arrived
 * @param resultModel  the per-target results, may be null (only unsatisfied cells are sent)
 * @param resultTimestamp  the timestamp of the computation cycle
 */
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
// Cells that can never be satisfied are sent as explicit nulls
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
// Seeds the outgoing row message with its row id.
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
return valuesToSend;
}
/**
 * Publishes dependency graph updates for every cell with a registered dep-graph grid,
 * lazily initialising each grid from the latest view cycle's compiled definition.
 * <p>
 * Fix: the per-grid "Dep graph message" diagnostic was logged at WARN via string concatenation;
 * it is routine output and is now parameterized DEBUG.
 *
 * @param resultTimestamp  the timestamp of the computation cycle
 */
public void processDepGraphs(long resultTimestamp) {
  if (_depGraphGrids.isEmpty()) {
    return;
  }
  // TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
  EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
  if (cycleReference == null) {
    // Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
    s_logger.warn("Unable to get a cycle reference");
    return;
  }
  try {
    for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
      Object gridStructure = null;
      if (!depGraphGrid.isInit()) {
        String calcConfigName = depGraphGrid.getParentCalcConfigName();
        ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
        DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
        DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
        if (subgraph == null) {
          s_logger.warn("No subgraph producing value specification {}", valueSpecification);
          continue;
        }
        if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
          gridStructure = depGraphGrid.getInitialJsonGridStructure();
        }
      }
      Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
      Object depGraphMessage = null;
      if (gridStructure != null) {
        // First update after (re)initialisation carries the structure as well
        Map<String, Object> structureMessage = new HashMap<String, Object>();
        structureMessage.put("grid", gridStructure);
        structureMessage.put("update", depGraph);
        depGraphMessage = structureMessage;
      } else {
        depGraphMessage = depGraph;
      }
      Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
      Map<String, Object> columnMessage = new HashMap<String, Object>();
      columnMessage.put("dg", depGraphMessage);
      s_logger.debug("Dep graph message: {}", depGraphMessage);
      valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
      getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
    }
  } finally {
    // Always release the engine cycle reference, even if an update fails part-way
    cycleReference.release();
  }
}
// Constructor variant taking row targets by UniqueId rather than full target specifications.
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
// publishes results to the client TODO would it be better if it returned the value?
/**
* @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
* cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
*/
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
// NOTE(review): createDefaultTargetResult is not defined in this class (createTargetResult is)
// - presumably inherited from WebViewGrid; confirm it was not lost in the merge.
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
// Attach the dependency graph payload when one has been requested for this cell
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
return valuesToSend; // TODO empty map if null?
}
// Looks up (and caches) a converter for the given value name/type. May return null if the
// cache has no converter for the type.
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// NOTE(review): empty branch - the column-structure delivery below was disabled; either
// re-enable it or remove the dead check.
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
// Extend the base grid structure with this grid's column definitions.
Map<String, Object> structure = super.getInitialJsonGridStructure();
Map<String, Object> columnJson = getJsonColumnStructures(getGridStructure().getColumns());
structure.put("columns", columnJson);
return structure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
// One JSON object per target row; subclasses enrich each row via addRowDetails.
List<Object> rows = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> entry : getGridStructure().getTargets().entrySet()) {
UniqueId targetId = entry.getKey().getUniqueId();
int row = entry.getValue();
Map<String, Object> details = new HashMap<String, Object>();
details.put("rowId", row);
addRowDetails(targetId, row, details);
rows.add(details);
}
return rows;
}
// Keys each column's JSON structure by its string column ID.
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> result = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn column : columns) {
result.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
}
return result;
}
// Builds the JSON description of one column: id, header, tooltip description,
// the grid's null placeholder and, when known, the data type.
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
// Column IDs are ints everywhere else in this class; avoid widening to long
// (which boxed the value as a Long while other messages send Integers).
int colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
// Accessor for the requirement-based grid structure built in the constructor.
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Marks a column as one whose cell values should have history retained.
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
// History is tracked per column, so only the cell's column ID matters.
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
// Deliberately unimplemented: dep graph grids are currently created per-cell via
// setIncludeDepGraph rather than looked up by name.
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
// Intended to reconcile the set of cells with active dep graph grids; no-op for now.
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
// Two header rows: column headers then descriptions, with room at the start of
// each row for any subclass-supplied columns.
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int columnCount = getAdditionalCsvColumnCount() + columns.size();
int offset = getCsvDataColumnOffset();
String[] headerRow = new String[columnCount];
String[] descriptionRow = new String[columnCount];
supplementCsvColumnHeaders(headerRow);
for (WebViewGridColumn column : columns) {
int idx = offset + column.getId();
headerRow[idx] = column.getHeader();
descriptionRow[idx] = column.getDescription();
}
return new String[][] {headerRow, descriptionRow};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
// One CSV row per grid target; rows for targets absent from the result stay null.
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Target not present in this grid
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
// originalValue is known non-null here, so the old "originalValue != null"
// ternary was dead code - and had it ever yielded null, the unguarded
// convertToText call below would have thrown an NPE. Look up the converter
// unconditionally and skip the cell if none is available.
ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
if (converter == null) {
continue;
}
values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
}
}
}
}
return rows;
}
// Number of extra CSV columns the subclass prepends; none by default.
protected int getAdditionalCsvColumnCount() {
return 0;
}
// Offset of the first data column within a CSV row; zero by default.
protected int getCsvDataColumnOffset() {
return 0;
}
// Hook for subclasses to fill in their extra CSV header cells; no-op by default.
protected void supplementCsvColumnHeaders(String[] headers) {
}
// Hook for subclasses to fill in their extra CSV row cells; no-op by default.
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
// Collects one column key per (calc config, value name, constraints) triple:
// portfolio outputs when position/node targets are in scope, plus any specific
// requirements whose target type was requested.
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> keys = new ArrayList<RequirementBasedColumnKey>();
boolean includePortfolioOutputs = targetTypes.contains(ComputationTargetType.POSITION)
|| targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String configName = calcConfig.getName();
if (includePortfolioOutputs) {
for (Pair<String, ValueProperties> output : calcConfig.getAllPortfolioRequirements()) {
keys.add(new RequirementBasedColumnKey(configName, output.getFirst(), output.getSecond()));
}
}
for (ValueRequirement requirement : calcConfig.getSpecificRequirements()) {
if (targetTypes.contains(requirement.getTargetSpecification().getType())) {
keys.add(new RequirementBasedColumnKey(configName, requirement.getValueName(), requirement.getConstraints()));
}
}
}
return keys;
}
}
Safe
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
return valuesToSend; // TODO empty map if null?
}
=======
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
>>>>>>> YOURS
// Builds the skeleton of a row update message containing just the row ID.
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> message = new HashMap<String, Object>();
message.put("rowId", rowId);
return message;
}
/**
 * Pushes a dependency graph update for every cell with an active dep graph grid,
 * initialising any grid that has not yet been bound to its producing subgraph.
 *
 * @param resultTimestamp  timestamp of the result being processed
 */
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
// First sight of this grid: locate the subgraph producing the parent cell's
// value and initialise the grid from it.
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
// On first initialisation, bundle the structure with the update so the client
// can build the grid before applying values.
Object depGraphMessage;
if (gridStructure != null) {
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
// Diagnostic only: was logged at WARN with string concatenation, which spammed
// the logs on every cycle; use parameterized debug logging instead.
s_logger.debug("Dep graph message: {}", depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
// Always release the cycle reference so engine resources can be freed.
cycleReference.release();
}
}
// Constructs the grid, deriving its column structure from the view definition's
// requirements restricted to the requested target types.
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
// Placeholder the client renders for cells with no value
_nullCellValue = nullCellValue;
}
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey().getUniqueId();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn columnDetails : columns) {
columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
}
return columnStructures;
}
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
// One CSV row per grid target; rows for targets absent from the result stay null.
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Target not present in this grid
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
// originalValue is known non-null here, so the old "originalValue != null"
// ternary was dead code - and had it ever yielded null, the unguarded
// convertToText call below would have thrown an NPE. Look up the converter
// unconditionally and skip the cell if none is available.
ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
if (converter == null) {
continue;
}
values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
}
}
}
}
return rows;
}
protected int getAdditionalCsvColumnCount() {
return 0;
}
protected int getCsvDataColumnOffset() {
return 0;
}
protected void supplementCsvColumnHeaders(String[] headers) {
}
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
_columnStructureChannel = GRID_STRUCTURE_ROOT_CHANNEL + "/" + name + "/columns";
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
Map<String, Object> valuesToSend = createDefaultTargetResult(rowId);
// Whether or not the row is in the viewport, we may have to store history
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
}
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
return valuesToSend; // TODO empty map if null?
}
=======
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Result not in the grid
return;
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
>>>>>>> YOURS
/**
 * Creates the skeleton update message for a single row: a map containing only the
 * "rowId" entry. Callers add the per-column values afterwards.
 *
 * @param rowId the grid row identifier to embed in the message
 * @return a fresh mutable map seeded with the row id
 */
private Map<String, Object> createTargetResult(Integer rowId) {
  Map<String, Object> message = new HashMap<String, Object>();
  message.put("rowId", rowId);
  return message;
}
/**
 * Initialises (where necessary) and processes every registered dependency graph grid against
 * the latest view cycle, delivering one update message per grid to the remote client.
 * <p>
 * NOTE(review): uses {@code createLatestCycleReference()}, which may not be the cycle that
 * produced the current results — see the TODO below.
 *
 * @param resultTimestamp timestamp attached to the outgoing cell values
 */
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
// Non-null only when the grid is initialised during this pass; the structure is then
// bundled with the first update message.
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
Object depGraphMessage = null;
if (gridStructure != null) {
// First update for this grid: send structure and update together.
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
// Address the message to the dependency graph grid's parent cell (row + column).
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
s_logger.warn("Dep graph message: " + depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
// Always release the engine cycle reference so engine-side resources are freed.
cycleReference.release();
}
}
/**
 * Creates a requirement-based grid whose column set is derived from the view definition's
 * requirements, filtered by the accepted target types.
 *
 * @param name grid name
 * @param viewClient view client supplying results
 * @param compiledViewDefinition compiled view definition used to build the grid structure
 * @param targets unique identifiers of the targets forming the grid rows
 * @param targetTypes computation target types this grid accepts
 * @param resultConverterCache shared cache of result converters
 * @param nullCellValue value the client should render for a null cell
 */
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
/**
 * Looks up (and caches against the value name) the result converter for the given runtime type.
 * <p>
 * NOTE(review): the {@code isTypeKnown()} branch is currently empty — the column structure
 * delivery is commented out; confirm whether it should be restored or the branch removed.
 *
 * @param column the column the value belongs to
 * @param valueName the value name used as the cache key
 * @param valueType runtime class of the value to convert
 * @return the cached converter (may be null if the cache has none for this type)
 */
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
/**
 * Returns the base grid structure extended with this grid's column definitions under the
 * "columns" key.
 */
@Override
public Map<String, Object> getInitialJsonGridStructure() {
  Map<String, Object> structure = super.getInitialJsonGridStructure();
  structure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
  return structure;
}
/**
 * Builds one row-details map per target. Each map carries the "rowId" entry and is then
 * supplemented by the subclass via {@link #addRowDetails}.
 */
@Override
protected List<Object> getInitialJsonRowStructures() {
  List<Object> rows = new ArrayList<Object>();
  for (Map.Entry<ComputationTargetSpecification, Integer> entry : getGridStructure().getTargets().entrySet()) {
    int row = entry.getValue();
    Map<String, Object> details = new HashMap<String, Object>();
    details.put("rowId", row);
    addRowDetails(entry.getKey().getUniqueId(), row, details);
    rows.add(details);
  }
  return rows;
}
/**
 * Maps each column id (rendered as a string) to that column's JSON structure.
 *
 * @param columns the columns to describe
 * @return map of column-id string to column structure
 */
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
  Map<String, Object> structures = new HashMap<String, Object>(columns.size());
  for (WebViewGridColumn column : columns) {
    structures.put(Integer.toString(column.getId()), getJsonColumnStructure(column));
  }
  return structures;
}
/**
 * Describes a single column for the client: id, header, description and the null-cell value,
 * plus the data type when the converter cache already knows one. DOUBLE-typed columns are
 * additionally registered for history output (see hack note below).
 *
 * @param column the column to describe
 * @return the column's JSON structure map
 */
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
// Only advertise a data type once the converter cache has resolved one for this value name.
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
/**
 * Adds subclass-specific entries (beyond "rowId") to the details map for a row.
 *
 * @param target unique identifier of the row's target
 * @param rowId the row identifier
 * @param details mutable map to populate
 */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
/** Returns the row/column structure of this grid, assigned once at construction. */
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
/** Registers a column id as producing history output (consumed by isHistoryOutput). */
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
/** A cell produces history output when its column was registered via addHistoryOutput. */
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
/**
 * Not yet implemented; always throws.
 *
 * @throws UnsupportedOperationException always
 */
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
/** Placeholder: intended to update which cells have dependency graphs; currently a no-op. */
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
//-------------------------------------------------------------------------
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
<<<<<<< MINE
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
/**
 * Builds the two CSV header rows (column headers and column descriptions). Grid columns are
 * placed at their id plus the subclass-provided data column offset; additional columns are
 * filled in by supplementCsvColumnHeaders.
 */
@Override
protected String[][] getCsvColumnHeaders() {
  Collection<WebViewGridColumn> gridColumns = getGridStructure().getColumns();
  int totalColumns = gridColumns.size() + getAdditionalCsvColumnCount();
  String[] headerRow = new String[totalColumns];
  String[] descriptionRow = new String[totalColumns];
  supplementCsvColumnHeaders(headerRow);
  int dataOffset = getCsvDataColumnOffset();
  for (WebViewGridColumn gridColumn : gridColumns) {
    int idx = dataOffset + gridColumn.getId();
    headerRow[idx] = gridColumn.getHeader();
    descriptionRow[idx] = gridColumn.getDescription();
  }
  return new String[][] {headerRow, descriptionRow};
}
/**
 * Converts a full computation result into CSV row data, one row per target known to the grid.
 * Rows for targets absent from the result remain null in the returned array.
 *
 * @param result the computation result to flatten
 * @return array indexed by row id; each element is the row's cell text or null
 */
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
// Target has no row in this grid
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
// originalValue is known non-null here (checked above), so the previous
// 'originalValue != null ? ... : null' ternary was dead code.
ResultConverter<Object> converter = getConverter(column, specification.getValueName(), originalValue.getClass());
if (converter == null) {
// No converter available for this type: leave the cell empty instead of NPE-ing.
continue;
}
values[offset + column.getId()] = converter.convertToText(getConverterCache(), specification, originalValue);
}
}
}
}
return rows;
}
/** Number of extra (non-grid) CSV columns; subclasses may override. */
protected int getAdditionalCsvColumnCount() {
return 0;
}
/** Index of the first grid data column within a CSV row; subclasses may override. */
protected int getCsvDataColumnOffset() {
return 0;
}
/** Hook for subclasses to fill in headers for their additional CSV columns; default no-op. */
protected void supplementCsvColumnHeaders(String[] headers) {
}
/** Hook for subclasses to fill in row data for their additional CSV columns; default no-op. */
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Derives the grid's column keys from a view definition: every portfolio requirement (when
 * position or portfolio-node targets are accepted) plus each specific requirement whose target
 * type is accepted.
 *
 * @param viewDefinition the view definition to scan
 * @param targetTypes the accepted computation target types
 * @return the column keys, in calculation-configuration iteration order
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
  List<RequirementBasedColumnKey> keys = new ArrayList<RequirementBasedColumnKey>();
  // Loop-invariant: whether portfolio-level requirements contribute columns at all.
  boolean includePortfolio = targetTypes.contains(ComputationTargetType.POSITION)
      || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE);
  for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
    String configName = calcConfig.getName();
    if (includePortfolio) {
      for (Pair<String, ValueProperties> output : calcConfig.getAllPortfolioRequirements()) {
        keys.add(new RequirementBasedColumnKey(configName, output.getFirst(), output.getSecond()));
      }
    }
    for (ValueRequirement requirement : calcConfig.getSpecificRequirements()) {
      if (targetTypes.contains(requirement.getTargetSpecification().getType())) {
        keys.add(new RequirementBasedColumnKey(configName, requirement.getValueName(), requirement.getConstraints()));
      }
    }
  }
  return keys;
}
}
Unstructured
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* An abstract base class for dynamically-structured, requirement-based grids.
*/
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {
private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);
private final RequirementBasedGridStructure _gridStructure;
private final String _nullCellValue;
// Column-based state: few entries expected so using an array set
private final LongSet _historyOutputs = new LongArraySet();
// Cell-based state
private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
<<<<<<< MINE
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
=======
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
>>>>>>> YOURS
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
=======
// publishes results to the client TODO would it be better if it returned the value?
/**
* @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
* cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
*/
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
>>>>>>> YOURS
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
<<<<<<< MINE
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
=======
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
>>>>>>> YOURS
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
<<<<<<< MINE
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
=======
return valuesToSend; // TODO empty map if null?
>>>>>>> YOURS
}
/**
 * Creates the skeleton update message for a row: a map containing only the "rowId" entry;
 * callers add per-column values afterwards.
 */
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
return valuesToSend;
}
/**
 * Initialises (where necessary) and processes every registered dependency graph grid against
 * the latest view cycle, delivering one update message per grid to the remote client.
 *
 * @param resultTimestamp timestamp attached to the outgoing cell values
 */
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
// Non-null only when the grid is initialised during this pass; the structure is then
// bundled with the first update message.
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
Object depGraphMessage = null;
if (gridStructure != null) {
// First update for this grid: send structure and update together.
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
// Address the message to the dependency graph grid's parent cell (row + column).
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
s_logger.warn("Dep graph message: " + depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
// Always release the engine cycle reference so engine-side resources are freed.
cycleReference.release();
}
}
// TODO this publishes to the client. not nice for a method named get*
/**
 * Looks up (and caches against the value name) the result converter for the given runtime type.
 * NOTE(review): the isTypeKnown() branch is currently empty — the delivery is commented out;
 * confirm whether it should be restored or the branch removed.
 */
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
/** Returns the base grid structure extended with this grid's column definitions. */
@Override
public Map<String, Object> getInitialJsonGridStructure() {
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
/**
 * Builds one row-details map per target, each keyed by "rowId" and supplemented by the
 * subclass via {@link #addRowDetails}.
 */
@Override
protected List<Object> getInitialJsonRowStructures() {
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey().getUniqueId();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
/** Maps each column id (rendered as a string) to that column's JSON structure. */
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn columnDetails : columns) {
columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
}
return columnStructures;
}
/**
 * Describes a single column for the client: id, header, description and the null-cell value,
 * plus the data type when the converter cache already knows one. DOUBLE-typed columns are
 * additionally registered for history output (see hack note below).
 */
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
// Only advertise a data type once the converter cache has resolved one for this value name.
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
/** Adds subclass-specific entries (beyond "rowId") to the details map for a row. */
protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
/** Returns the row/column structure of this grid, assigned once at construction. */
protected RequirementBasedGridStructure getGridStructure() {
return _gridStructure;
}
//-------------------------------------------------------------------------
// Registers a column id as producing history output (consumed by isHistoryOutput).
private void addHistoryOutput(long colId) {
_historyOutputs.add(colId);
}
/** A cell produces history output when its column was registered via addHistoryOutput. */
@Override
protected boolean isHistoryOutput(WebGridCell cell) {
return _historyOutputs.contains(cell.getColumnId());
}
//-------------------------------------------------------------------------
// TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
public WebViewGrid getDepGraphGrid(String name) {
// TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
throw new UnsupportedOperationException("getDepGraphGrid not implemented");
}
/** Placeholder: intended to update which cells have dependency graphs; currently a no-op. */
/* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
// TODO implement
}
// TODO move this logic to updateDepGraphCells
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
<<<<<<< MINE
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
/**
 * Builds the two CSV header rows (column headers and column descriptions). Grid columns are
 * placed at their id plus the subclass-provided data column offset.
 */
@Override
protected String[][] getCsvColumnHeaders() {
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
/**
 * Converts a full computation result into CSV row data, one row per target known to the grid.
 * Rows for targets absent from the result remain null in the returned array.
 */
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
int colId = column.getId();
// NOTE(review): the null branch of this ternary is dead (originalValue == null is handled
// by the continue above), and a null converter would NPE on the next line — verify that
// getConverter cannot return null here.
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
}
}
}
}
return rows;
}
/** Number of extra (non-grid) CSV columns; subclasses may override. */
protected int getAdditionalCsvColumnCount() {
return 0;
}
/** Index of the first grid data column within a CSV row; subclasses may override. */
protected int getCsvDataColumnOffset() {
return 0;
}
/** Hook for subclasses to fill in headers for their additional CSV columns; default no-op. */
protected void supplementCsvColumnHeaders(String[] headers) {
}
/** Hook for subclasses to fill in row data for their additional CSV columns; default no-op. */
protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
}
//-------------------------------------------------------------------------
/**
 * Derives the grid's column keys from a view definition: every portfolio requirement (when
 * position or portfolio-node targets are accepted) plus each specific requirement whose target
 * type is accepted.
 */
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.ViewTargetResultModel;
import com.opengamma.engine.view.calc.EngineResourceReference;
import com.opengamma.engine.view.calc.ViewCycle;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverter;
import com.opengamma.web.server.conversion.ResultConverterCache;
import it.unimi.dsi.fastutil.longs.LongArraySet;
import it.unimi.dsi.fastutil.longs.LongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * An abstract base class for dynamically-structured, requirement-based grids.
 */
public abstract class RequirementBasedWebViewGrid extends WebViewGrid {

  private static final Logger s_logger = LoggerFactory.getLogger(RequirementBasedWebViewGrid.class);

  // Maps grid rows/columns onto the view's targets and requirements
  private final RequirementBasedGridStructure _gridStructure;
  // Representation of a null cell, sent to the client with each column structure
  private final String _nullCellValue;
  // Column-based state: few entries expected so using an array set
  private final LongSet _historyOutputs = new LongArraySet();
  // Cell-based state
  private final ConcurrentMap<WebGridCell, WebViewDepGraphGrid> _depGraphGrids = new ConcurrentHashMap<WebGridCell, WebViewDepGraphGrid>();
<<<<<<< MINE
protected RequirementBasedWebViewGrid(String name, ViewClient viewClient, CompiledViewDefinition compiledViewDefinition, List<ComputationTargetSpecification> targets,
EnumSet<ComputationTargetType> targetTypes, ResultConverterCache resultConverterCache, Client local, Client remote, String nullCellValue) {
super(name, viewClient, resultConverterCache, local, remote);
=======
protected RequirementBasedWebViewGrid(String name,
ViewClient viewClient,
CompiledViewDefinition compiledViewDefinition,
List<UniqueId> targets,
EnumSet<ComputationTargetType> targetTypes,
ResultConverterCache resultConverterCache,
String nullCellValue) {
super(name, viewClient, resultConverterCache);
>>>>>>> YOURS
List<RequirementBasedColumnKey> requirements = getRequirements(compiledViewDefinition.getViewDefinition(), targetTypes);
_gridStructure = new RequirementBasedGridStructure(compiledViewDefinition, targetTypes, requirements, targets);
_nullCellValue = nullCellValue;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
public void processTargetResult(ComputationTargetSpecification target, ViewTargetResultModel resultModel, Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target);
=======
// publishes results to the client TODO would it be better if it returned the value?
/**
* @return {@code {"rowId": rowId, "0": col0Val, "1": col1Val, ...}}
* cell values: {"v": value, "h": [historyVal1, historyVal2, ...], "dg", depGraph}
*/
public Map<String, Object> getTargetResult(ComputationTargetSpecification target,
ViewTargetResultModel resultModel,
Long resultTimestamp) {
Integer rowId = getGridStructure().getRowId(target.getUniqueId());
>>>>>>> YOURS
if (rowId == null) {
// Result not in the grid
return null; // TODO empty map?
}
Map<String, Object> valuesToSend = createTargetResult(rowId);
for (Integer unsatisfiedColId : getGridStructure().getUnsatisfiedCells(rowId)) {
valuesToSend.put(Integer.toString(unsatisfiedColId), null);
}
// Whether or not the row is in the viewport, we may have to store history
<<<<<<< MINE
if (resultModel != null) {
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
=======
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter;
if (originalValue == null) {
converter = null;
} else {
converter = getConverter(column, value.getSpecification().getValueName(), originalValue.getClass());
}
Map<String, Object> cellData = getCellValue(cell, specification, originalValue, resultTimestamp, converter);
Object depGraph = getDepGraphIfRequested(cell, calcConfigName, specification, resultTimestamp);
if (depGraph != null) {
if (cellData == null) {
cellData = new HashMap<String, Object>();
}
cellData.put("dg", depGraph);
>>>>>>> YOURS
}
Object originalValue = value.getValue();
for (WebViewGridColumn column : columns) {
int colId = column.getId();
WebGridCell cell = WebGridCell.of(rowId, colId);
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
Map<String, Object> cellData = processCellValue(cell, specification, originalValue, resultTimestamp, converter);
if (cellData != null) {
valuesToSend.put(Integer.toString(colId), cellData);
}
}
}
}
}
<<<<<<< MINE
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
=======
return valuesToSend; // TODO empty map if null?
>>>>>>> YOURS
}
private Map<String, Object> createTargetResult(Integer rowId) {
Map<String, Object> valuesToSend = new HashMap<String, Object>();
valuesToSend.put("rowId", rowId);
return valuesToSend;
}
public void processDepGraphs(long resultTimestamp) {
if (_depGraphGrids.isEmpty()) {
return;
}
// TODO: this may not be the cycle corresponding to the result - some tracking of cycle IDs required
EngineResourceReference<? extends ViewCycle> cycleReference = getViewClient().createLatestCycleReference();
if (cycleReference == null) {
// Unable to get a cycle reference - perhaps no cycle has completed since enabling introspection
s_logger.warn("Unable to get a cycle reference");
return;
}
try {
for (WebViewDepGraphGrid depGraphGrid : _depGraphGrids.values()) {
Object gridStructure = null;
if (!depGraphGrid.isInit()) {
String calcConfigName = depGraphGrid.getParentCalcConfigName();
ValueSpecification valueSpecification = depGraphGrid.getParentValueSpecification();
DependencyGraphExplorer explorer = cycleReference.get().getCompiledViewDefinition().getDependencyGraphExplorer(calcConfigName);
DependencyGraph subgraph = explorer.getSubgraphProducing(valueSpecification);
if (subgraph == null) {
s_logger.warn("No subgraph producing value specification {}", valueSpecification);
continue;
}
if (depGraphGrid.init(subgraph, calcConfigName, valueSpecification)) {
gridStructure = depGraphGrid.getInitialJsonGridStructure();
}
}
Map<String, Object> depGraph = depGraphGrid.processViewCycle(cycleReference.get(), resultTimestamp);
Object depGraphMessage = null;
if (gridStructure != null) {
Map<String, Object> structureMessage = new HashMap<String, Object>();
structureMessage.put("grid", gridStructure);
structureMessage.put("update", depGraph);
depGraphMessage = structureMessage;
} else {
depGraphMessage = depGraph;
}
Map<String, Object> valuesToSend = createTargetResult(depGraphGrid.getParentGridCell().getRowId());
Map<String, Object> columnMessage = new HashMap<String, Object>();
columnMessage.put("dg", depGraphMessage);
s_logger.warn("Dep graph message: " + depGraphMessage);
valuesToSend.put(Integer.toString(depGraphGrid.getParentGridCell().getColumnId()), columnMessage);
getRemoteClient().deliver(getLocalClient(), getUpdateChannel(), valuesToSend, null);
}
} finally {
cycleReference.release();
}
}
// TODO this publishes to the client. not nice for a method named get*
@SuppressWarnings("unchecked")
private ResultConverter<Object> getConverter(WebViewGridColumn column, String valueName, Class<?> valueType) {
// Ensure the converter is cached against the value name before sending the column details
ResultConverter<Object> converter = (ResultConverter<Object>) getConverterCache().getAndCacheConverter(valueName, valueType);
if (!column.isTypeKnown()) {
// TODO what's this all about?
//getRemoteClient().deliver(getLocalClient(), _columnStructureChannel, getJsonColumnStructures(Collections.singleton(column)), null);
}
return converter;
}
@Override
public Map<String, Object> getInitialJsonGridStructure() {
Map<String, Object> gridStructure = super.getInitialJsonGridStructure();
gridStructure.put("columns", getJsonColumnStructures(getGridStructure().getColumns()));
return gridStructure;
}
@Override
protected List<Object> getInitialJsonRowStructures() {
List<Object> rowStructures = new ArrayList<Object>();
for (Map.Entry<ComputationTargetSpecification, Integer> targetEntry : getGridStructure().getTargets().entrySet()) {
Map<String, Object> rowDetails = new HashMap<String, Object>();
UniqueId target = targetEntry.getKey().getUniqueId();
int rowId = targetEntry.getValue();
rowDetails.put("rowId", rowId);
addRowDetails(target, rowId, rowDetails);
rowStructures.add(rowDetails);
}
return rowStructures;
}
private Map<String, Object> getJsonColumnStructures(Collection<WebViewGridColumn> columns) {
Map<String, Object> columnStructures = new HashMap<String, Object>(columns.size());
for (WebViewGridColumn columnDetails : columns) {
columnStructures.put(Integer.toString(columnDetails.getId()), getJsonColumnStructure(columnDetails));
}
return columnStructures;
}
private Map<String, Object> getJsonColumnStructure(WebViewGridColumn column) {
Map<String, Object> detailsToSend = new HashMap<String, Object>();
long colId = column.getId();
detailsToSend.put("colId", colId);
detailsToSend.put("header", column.getHeader());
detailsToSend.put("description", column.getValueName() + ":\n" + column.getDescription());
detailsToSend.put("nullValue", _nullCellValue);
String resultType = getConverterCache().getKnownResultTypeName(column.getValueName());
if (resultType != null) {
column.setTypeKnown(true);
detailsToSend.put("dataType", resultType);
// Hack - the client should decide which columns it requires history for, taking into account the capabilities of
// the renderer.
if (resultType.equals("DOUBLE")) {
addHistoryOutput(column.getId());
}
}
return detailsToSend;
}
  /**
   * Adds subclass-specific details to a row structure message.
   *
   * @param target  the unique identifier of the row's computation target
   * @param rowId  the grid row index
   * @param details  the mutable row message to populate
   */
  protected abstract void addRowDetails(UniqueId target, int rowId, Map<String, Object> details);
//-------------------------------------------------------------------------
  /**
   * @return the requirement-based structure backing this grid
   */
  protected RequirementBasedGridStructure getGridStructure() {
    return _gridStructure;
  }
//-------------------------------------------------------------------------
  // Registers a column whose cells should report history (see isHistoryOutput below)
  private void addHistoryOutput(long colId) {
    _historyOutputs.add(colId);
  }
  // A cell is a history output when its column was registered via addHistoryOutput
  @Override
  protected boolean isHistoryOutput(WebGridCell cell) {
    return _historyOutputs.contains(cell.getColumnId());
  }
//-------------------------------------------------------------------------
  // TODO does this belong in the portfolio-specific subclass? or can / will you be able to get dep graphs for primitives?
  /**
   * Not yet implemented.
   *
   * @param name  the name of the dependency graph grid to look up
   * @throws UnsupportedOperationException always
   */
  public WebViewGrid getDepGraphGrid(String name) {
    // TODO implement RequirementBasedWebViewGrid.getDepGraphGrid()
    throw new UnsupportedOperationException("getDepGraphGrid not implemented");
  }
  // Intentionally a no-op for now; see the TODO on setIncludeDepGraph, whose logic is to move here
  /* package */ void updateDepGraphCells(List<WebGridCell> dependencyGraphCells) {
    // TODO implement
  }
// TODO move this logic to updateDepGraphCells
public WebViewGrid setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
if (includeDepGraph) {
<<<<<<< MINE
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
OperationTimer timer = new OperationTimer(s_logger, "depgraph");
Pair<String, ValueSpecification> columnMappingPair = getGridStructure().findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
s_logger.warn("includeDepGraph took {}", timer.finished());
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache(),
getLocalClient(), getRemoteClient(), cell, columnMappingPair.getFirst(), columnMappingPair.getSecond());
=======
String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
WebViewDepGraphGrid grid = new WebViewDepGraphGrid(gridName, getViewClient(), getConverterCache());
>>>>>>> YOURS
_depGraphGrids.putIfAbsent(cell, grid);
return grid;
} else {
return _depGraphGrids.remove(cell);
}
}
//-------------------------------------------------------------------------
@Override
protected String[][] getCsvColumnHeaders() {
Collection<WebViewGridColumn> columns = getGridStructure().getColumns();
int additionalColumns = getAdditionalCsvColumnCount();
int columnCount = columns.size() + additionalColumns;
String[] header1 = new String[columnCount];
String[] header2 = new String[columnCount];
supplementCsvColumnHeaders(header1);
int offset = getCsvDataColumnOffset();
for (WebViewGridColumn column : columns) {
header1[offset + column.getId()] = column.getHeader();
header2[offset + column.getId()] = column.getDescription();
}
return new String[][] {header1, header2};
}
@Override
protected String[][] getCsvRows(ViewComputationResultModel result) {
String[][] rows = new String[getGridStructure().getTargets().size()][];
int columnCount = getGridStructure().getColumns().size() + getAdditionalCsvColumnCount();
int offset = getCsvDataColumnOffset();
for (ComputationTargetSpecification target : result.getAllTargets()) {
Integer rowId = getGridStructure().getRowId(target);
if (rowId == null) {
continue;
}
ViewTargetResultModel resultModel = result.getTargetResult(target);
String[] values = new String[columnCount];
supplementCsvRowData(rowId, target, values);
rows[rowId] = values;
for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
for (ComputedValue value : resultModel.getAllValues(calcConfigName)) {
Object originalValue = value.getValue();
if (originalValue == null) {
continue;
}
ValueSpecification specification = value.getSpecification();
Collection<WebViewGridColumn> columns = getGridStructure().getColumns(calcConfigName, specification);
if (columns == null) {
// Expect a column for every value
s_logger.warn("Could not find column for calculation configuration {} with value specification {}", calcConfigName, specification);
continue;
}
for (WebViewGridColumn column : columns) {
int colId = column.getId();
ResultConverter<Object> converter = originalValue != null ? getConverter(column, value.getSpecification().getValueName(), originalValue.getClass()) : null;
values[offset + colId] = converter.convertToText(getConverterCache(), value.getSpecification(), originalValue);
}
}
}
}
return rows;
}
  /**
   * @return the number of extra CSV columns contributed by a subclass; none by default
   */
  protected int getAdditionalCsvColumnCount() {
    return 0;
  }
  /**
   * @return the column index at which grid data starts in a CSV row; 0 by default
   */
  protected int getCsvDataColumnOffset() {
    return 0;
  }
  /**
   * Hook for subclasses to fill in their additional CSV header cells; no-op by default.
   *
   * @param headers  the mutable header row to populate
   */
  protected void supplementCsvColumnHeaders(String[] headers) {
  }
  /**
   * Hook for subclasses to fill in their additional CSV cells for a row; no-op by default.
   *
   * @param rowId  the grid row index
   * @param target  the computation target for the row
   * @param row  the mutable row to populate
   */
  protected void supplementCsvRowData(int rowId, ComputationTargetSpecification target, String[] row) {
  }
//-------------------------------------------------------------------------
private static List<RequirementBasedColumnKey> getRequirements(ViewDefinition viewDefinition, EnumSet<ComputationTargetType> targetTypes) {
List<RequirementBasedColumnKey> result = new ArrayList<RequirementBasedColumnKey>();
for (ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
String calcConfigName = calcConfig.getName();
if (targetTypes.contains(ComputationTargetType.POSITION) || targetTypes.contains(ComputationTargetType.PORTFOLIO_NODE)) {
for (Pair<String, ValueProperties> portfolioOutput : calcConfig.getAllPortfolioRequirements()) {
String valueName = portfolioOutput.getFirst();
ValueProperties constraints = portfolioOutput.getSecond();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
for (ValueRequirement specificRequirement : calcConfig.getSpecificRequirements()) {
if (!targetTypes.contains(specificRequirement.getTargetSpecification().getType())) {
continue;
}
String valueName = specificRequirement.getValueName();
ValueProperties constraints = specificRequirement.getConstraints();
RequirementBasedColumnKey columnKey = new RequirementBasedColumnKey(calcConfigName, valueName, constraints);
result.add(columnKey);
}
}
return result;
}
}
Diff Result
No diff
Case 68 - java_ogplatform.rev_96129_d2fd7..WebView.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.marketdata.spec.MarketDataSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionFlags;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
*
*/
public class WebView {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private static final String STARTED_DISPLAY_NAME = "Live";
private static final String PAUSED_DISPLAY_NAME = "Paused";
private final Client _local;
private final Client _remote;
private final ViewClient _client;
private final String _viewDefinitionName;
private final ViewExecutionOptions _executionOptions;
private final ExecutorService _executorService;
private final ResultConverterCache _resultConverterCache;
private final ReentrantLock _updateLock = new ReentrantLock();
private boolean _awaitingNextUpdate;
private boolean _continueUpdateThread;
private boolean _updateThreadRunning;
private AtomicBoolean _isInit = new AtomicBoolean(false);
private final Map<String, WebViewGrid> _gridsByName;
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final String viewDefinitionName,
final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService,
final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_viewDefinitionName = viewDefinitionName;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
MarketDataSpecification marketDataSpec;
EnumSet<ViewExecutionFlags> flags;
client.attachToViewProcess(viewDefinitionName, executionOptions);
}
//-------------------------------------------------------------------------
// Initialisation
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
_isInit.set(true);
RequirementBasedWebViewGrid portfolioGrid = new WebViewPortfolioGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridsByName.put(_portfolioGrid.getName(), _portfolioGrid);
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridsByName.put(_primitivesGrid.getName(), _primitivesGrid);
}
notifyInitialized();
}
private void notifyInitialized() {
getRemote().deliver(getLocal(), "/changeView", getInitialJsonGridStructures(), null);
}
/*package*/ void reconnected() {
if (_isInit.get()) {
notifyInitialized();
}
}
//-------------------------------------------------------------------------
// Update control
public void pause() {
getViewClient().pause();
sendViewStatus(false, PAUSED_DISPLAY_NAME);
}
public void resume() {
getViewClient().resume();
sendViewStatus(true, STARTED_DISPLAY_NAME);
}
public void shutdown() {
// Removes all listeners
getViewClient().shutdown();
}
public String getViewDefinitionName() {
return _viewDefinitionName;
}
public ViewExecutionOptions getExecutionOptions() {
return _executionOptions;
}
public boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
return getViewDefinitionName().equals(viewDefinitionName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
public WebViewGrid getGridByName(String name) {
return _gridsByName.get(name);
}
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
Map<String, Object> dataMap = (Map<String, Object>) message.getData();
boolean immediateResponse = (Boolean) dataMap.get("immediateResponse");
if (getPortfolioGrid() != null) {
Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
}
if (getPrimitivesGrid() != null) {
Map<String, Object> primitiveViewport = (Map<String, Object>) dataMap.get("primitiveViewport");
getPrimitivesGrid().setViewport(processViewportData(primitiveViewport));
}
// Can only provide an immediate response if there is a result available
immediateResponse &= getViewClient().isResultAvailable();
_updateLock.lock();
try {
if (immediateResponse) {
sendImmediateUpdate();
} else {
_awaitingNextUpdate = true;
}
} finally {
_updateLock.unlock();
}
}
private SortedMap<Integer, Long> processViewportData(Map<String, Object> viewportData) {
SortedMap<Integer, Long> result = new TreeMap<Integer, Long>();
if (viewportData.isEmpty()) {
return result;
}
Object[] ids = (Object[]) viewportData.get("rowIds");
Object[] lastTimes = (Object[]) viewportData.get("lastTimestamps");
for (int i = 0; i < ids.length; i++) {
if (ids[i] instanceof Number) {
long jsRowId = (Long) ids[i];
int rowId = (int) jsRowId;
if (lastTimes[i] != null) {
Long lastTime = (Long) lastTimes[i];
result.put(rowId, lastTime);
} else {
result.put(rowId, null);
}
} else {
throw new OpenGammaRuntimeException("Unexpected type of webId: " + ids[i]);
}
}
return result;
}
private void sendImmediateUpdate() {
_updateLock.lock();
try {
if (!_updateThreadRunning) {
_updateThreadRunning = true;
runUpdateThread();
} else {
_continueUpdateThread = true;
}
} finally {
_updateLock.unlock();
}
}
private void runUpdateThread() {
getExecutorService().submit(new Runnable() {
@Override
public void run() {
do {
ViewComputationResultModel update = getViewClient().getLatestResult();
getRemote().startBatch();
long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
sendStartMessage(valuationTimeMillis, calculationDurationMillis);
try {
processResult(update);
} catch (Exception e) {
s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
}
sendEndMessage();
getRemote().endBatch();
} while (continueUpdateThread());
}
});
}
private boolean continueUpdateThread() {
_updateLock.lock();
try {
if (_continueUpdateThread) {
_continueUpdateThread = false;
return true;
} else {
_updateThreadRunning = false;
return false;
}
} finally {
_updateLock.unlock();
}
}
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch (target.getType()) {
case PRIMITIVE:
if (getPrimitivesGrid() != null) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (getPortfolioGrid() != null) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
break;
default:
// Something that the client does not display
continue;
}
}
}
/**
* Tells the remote client that updates are starting.
*/
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
Map<String, Object> startMessage = new HashMap<String, Object>();
startMessage.put("valuationTime", valuationTimeEpochMillis);
startMessage.put("calculationDuration", calculationDurationMillis);
getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}
/**
* Tells the remote client that updates have finished.
*/
private void sendEndMessage() {
getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}
private void sendViewStatus(boolean isRunning, String status) {
Map<String, Object> output = new HashMap<String, Object>();
output.put("isRunning", isRunning);
output.put("status", status);
getRemote().deliver(getLocal(), "/status", output, null);
}
//-------------------------------------------------------------------------
public Map<String, Object> getInitialJsonGridStructures() {
Map<String, Object> gridStructures = new HashMap<String, Object>();
if (getPrimitivesGrid() != null) {
gridStructures.put("primitives", getPrimitivesGrid().getInitialJsonGridStructure());
}
if (getPortfolioGrid() != null) {
gridStructures.put("portfolio", getPortfolioGrid().getInitialJsonGridStructure());
}
return gridStructures;
}
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
if (!getPortfolioGrid().getName().equals(parentGridName)) {
throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
}
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
getViewClient().setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
getViewClient().setViewCycleAccessSupported(false);
}
}
WebViewGrid grid = getPortfolioGrid().setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
registerGrid(grid);
} else {
unregisterGrid(grid.getName());
}
}
}
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new OpenGammaRuntimeException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = getViewClient().getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
private void registerGrid(WebViewGrid grid) {
_gridsByName.put(grid.getName(), grid);
}
private void unregisterGrid(String gridName) {
_gridsByName.remove(gridName);
}
//-------------------------------------------------------------------------
private ExecutorService getExecutorService() {
return _executorService;
}
private RequirementBasedWebViewGrid getPortfolioGrid() {
return _portfolioGrid;
}
private RequirementBasedWebViewGrid getPrimitivesGrid() {
return _primitivesGrid;
}
private ViewClient getViewClient() {
return _client;
}
private Client getLocal() {
return _local;
}
private Client getRemote() {
return _remote;
}
private ResultConverterCache getResultConverterCache() {
return _resultConverterCache;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.marketdata.spec.MarketDataSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionFlags;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
 * A server-side representation of a client's view, publishing results over CometD.
 */
public class WebView {
  private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
  // Client-facing display names for the running/paused states
  private static final String STARTED_DISPLAY_NAME = "Live";
  private static final String PAUSED_DISPLAY_NAME = "Paused";
  // CometD endpoints: the server-local client and the remote browser client
  private final Client _local;
  private final Client _remote;
  private final ViewClient _client;
  private final String _viewDefinitionName;
  private final ViewExecutionOptions _executionOptions;
  private final ExecutorService _executorService;
  private final ResultConverterCache _resultConverterCache;
  private final ReentrantLock _updateLock = new ReentrantLock();
  // The following three flags are guarded by _updateLock
  private boolean _awaitingNextUpdate;
  private boolean _continueUpdateThread;
  private boolean _updateThreadRunning;
  private AtomicBoolean _isInit = new AtomicBoolean(false);
  private final Map<String, WebViewGrid> _gridsByName;
  private RequirementBasedWebViewGrid _portfolioGrid;
  private RequirementBasedWebViewGrid _primitivesGrid;
  // Number of cells currently showing dep graphs; view cycle access is enabled while > 0
  private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
/**
 * Creates a web view bound to a view client. Wires up a result listener that
 * (re)initialises the grids whenever the view definition compiles and pushes
 * updates as computation cycles complete, then attaches the client to the view
 * process so results start flowing.
 */
public WebView(final Client local, final Client remote, final ViewClient client, final String viewDefinitionName,
    final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService,
    final ResultConverterCache resultConverterCache) {
  ArgumentChecker.notNull(executionOptions, "executionOptions");
  _local = local;
  _remote = remote;
  _client = client;
  _viewDefinitionName = viewDefinitionName;
  _executionOptions = executionOptions;
  _executorService = executorService;
  _resultConverterCache = resultConverterCache;
  _gridsByName = new HashMap<String, WebViewGrid>();
  _client.setResultListener(new AbstractViewResultListener() {
    @Override
    public UserPrincipal getUser() {
      // Authentication needed
      return UserPrincipal.getLocalUser();
    }
    @Override
    public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
      s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
      initGrids(compiledViewDefinition);
    }
    @Override
    public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
      s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
      _updateLock.lock();
      try {
        // Only push if a client explicitly asked for the next update.
        if (_awaitingNextUpdate) {
          _awaitingNextUpdate = false;
          sendImmediateUpdate();
        }
      } finally {
        _updateLock.unlock();
      }
    }
  });
  // NOTE(review): removed dead local declarations (marketDataSpec, flags) that were
  // never assigned or read.
  client.attachToViewProcess(viewDefinitionName, executionOptions);
}
//-------------------------------------------------------------------------
// Initialisation

/**
 * Builds the portfolio and primitives grids from a newly-compiled view definition.
 * A grid whose structure is empty is discarded (its field left null) so callers can
 * test for its presence. Invoked from the result listener on each compilation.
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
  _isInit.set(true);
  RequirementBasedWebViewGrid portfolioGrid = new WebViewPortfolioGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (portfolioGrid.getGridStructure().isEmpty()) {
    _portfolioGrid = null;
  } else {
    _portfolioGrid = portfolioGrid;
    _gridsByName.put(_portfolioGrid.getName(), _portfolioGrid);
  }
  RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (primitivesGrid.getGridStructure().isEmpty()) {
    _primitivesGrid = null;
  } else {
    _primitivesGrid = primitivesGrid;
    _gridsByName.put(_primitivesGrid.getName(), _primitivesGrid);
  }
  notifyInitialized();
}

// Pushes the initial grid structures to the remote client on the /changeView channel.
private void notifyInitialized() {
  getRemote().deliver(getLocal(), "/changeView", getInitialJsonGridStructures(), null);
}

// Re-sends the grid structures after a client reconnect, but only once the view
// has been initialised at least once.
/*package*/ void reconnected() {
  if (_isInit.get()) {
    notifyInitialized();
  }
}
//-------------------------------------------------------------------------
// Update control

/** Pauses the underlying view client and publishes the new status to the client. */
public void pause() {
  getViewClient().pause();
  sendViewStatus(false, PAUSED_DISPLAY_NAME);
}

/** Resumes the underlying view client and publishes the new status to the client. */
public void resume() {
  getViewClient().resume();
  sendViewStatus(true, STARTED_DISPLAY_NAME);
}

/** Shuts down the view client. */
public void shutdown() {
  // Removes all listeners
  getViewClient().shutdown();
}

/** @return the name of the view definition backing this web view */
public String getViewDefinitionName() {
  return _viewDefinitionName;
}

/** @return the execution options this view was attached with */
public ViewExecutionOptions getExecutionOptions() {
  return _executionOptions;
}

/**
 * Tests whether this web view serves the given view definition and execution
 * options, allowing an existing instance to be reused rather than recreated.
 */
public boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
  return getViewDefinitionName().equals(viewDefinitionName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}

/** @return the grid registered under the given name, or null if none */
public WebViewGrid getGridByName(String name) {
  return _gridsByName.get(name);
}
/**
 * Handles a client update request: applies any viewport changes carried in the
 * message, then either sends results immediately (if a result exists and the client
 * asked for an immediate response) or defers until the next cycle completes.
 */
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
  Map<String, Object> dataMap = (Map<String, Object>) message.getData();
  // Defensive: treat a missing or non-TRUE flag as false instead of NPE-ing while
  // unboxing a null Boolean from the message payload.
  boolean immediateResponse = Boolean.TRUE.equals(dataMap.get("immediateResponse"));
  if (getPortfolioGrid() != null) {
    Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
    getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
  }
  if (getPrimitivesGrid() != null) {
    Map<String, Object> primitiveViewport = (Map<String, Object>) dataMap.get("primitiveViewport");
    getPrimitivesGrid().setViewport(processViewportData(primitiveViewport));
  }
  // Can only provide an immediate response if there is a result available
  immediateResponse &= getViewClient().isResultAvailable();
  _updateLock.lock();
  try {
    if (immediateResponse) {
      sendImmediateUpdate();
    } else {
      _awaitingNextUpdate = true;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Converts raw viewport data from the client into a map of row ID to the timestamp
 * of the last value the client holds for that row (null when it holds none).
 *
 * @param viewportData  expected to contain parallel "rowIds" and "lastTimestamps" arrays
 * @return sorted map of row ID to last-known timestamp
 * @throws OpenGammaRuntimeException if a row ID is not numeric
 */
private SortedMap<Integer, Long> processViewportData(Map<String, Object> viewportData) {
  SortedMap<Integer, Long> result = new TreeMap<Integer, Long>();
  if (viewportData.isEmpty()) {
    return result;
  }
  Object[] ids = (Object[]) viewportData.get("rowIds");
  Object[] lastTimes = (Object[]) viewportData.get("lastTimestamps");
  for (int i = 0; i < ids.length; i++) {
    if (ids[i] instanceof Number) {
      // Use Number.longValue() rather than a (Long) cast: the guard admits any
      // Number (the JSON layer may deliver Integer), and a blind cast to Long
      // would throw ClassCastException.
      int rowId = (int) ((Number) ids[i]).longValue();
      if (lastTimes[i] != null) {
        Long lastTime = (Long) lastTimes[i];
        result.put(rowId, lastTime);
      } else {
        result.put(rowId, null);
      }
    } else {
      throw new OpenGammaRuntimeException("Unexpected type of webId: " + ids[i]);
    }
  }
  return result;
}
/**
 * Kicks off an asynchronous push of the latest results to the client. If an update
 * task is already running, flags it to run one more pass instead of starting a
 * second task, so at most one update task is ever active.
 */
private void sendImmediateUpdate() {
  _updateLock.lock();
  try {
    if (!_updateThreadRunning) {
      _updateThreadRunning = true;
      runUpdateThread();
    } else {
      _continueUpdateThread = true;
    }
  } finally {
    _updateLock.unlock();
  }
}

/**
 * Submits the update task: sends the latest result to the remote client as a batch,
 * framed by start/end control messages, repeating while further updates were
 * requested during the run (see continueUpdateThread()).
 */
private void runUpdateThread() {
  getExecutorService().submit(new Runnable() {
    @Override
    public void run() {
      do {
        ViewComputationResultModel update = getViewClient().getLatestResult();
        getRemote().startBatch();
        long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
        long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
        sendStartMessage(valuationTimeMillis, calculationDurationMillis);
        try {
          processResult(update);
        } catch (Exception e) {
          // Log and carry on so the end-of-batch framing is still sent.
          s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
        }
        sendEndMessage();
        getRemote().endBatch();
      } while (continueUpdateThread());
    }
  });
}

/**
 * Atomically decides whether the update task should run another pass. Consumes the
 * continue flag if set; otherwise marks the task as no longer running so a later
 * sendImmediateUpdate() starts a fresh one.
 */
private boolean continueUpdateThread() {
  _updateLock.lock();
  try {
    if (_continueUpdateThread) {
      _continueUpdateThread = false;
      return true;
    } else {
      _updateThreadRunning = false;
      return false;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Routes each target's results from a completed cycle to the grid that displays it,
 * stamping every update with the cycle's calculation time (epoch millis).
 */
private void processResult(ViewComputationResultModel resultModel) {
  final long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
  final RequirementBasedWebViewGrid primitivesGrid = getPrimitivesGrid();
  final RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
    switch (target.getType()) {
      case PRIMITIVE:
        if (primitivesGrid != null) {
          primitivesGrid.processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
        }
        break;
      case PORTFOLIO_NODE:
      case POSITION:
        if (portfolioGrid != null) {
          portfolioGrid.processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
        }
        break;
      default:
        // Target kinds the client does not display.
        break;
    }
  }
}
/**
 * Tells the remote client that updates are starting.
 */
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
  Map<String, Object> startMessage = new HashMap<String, Object>();
  startMessage.put("valuationTime", valuationTimeEpochMillis);
  startMessage.put("calculationDuration", calculationDurationMillis);
  getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}

/**
 * Tells the remote client that updates have finished.
 */
private void sendEndMessage() {
  getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}

/**
 * Publishes the running/paused state of the view to the client's /status channel.
 */
private void sendViewStatus(boolean isRunning, String status) {
  Map<String, Object> output = new HashMap<String, Object>();
  output.put("isRunning", isRunning);
  output.put("status", status);
  getRemote().deliver(getLocal(), "/status", output, null);
}
//-------------------------------------------------------------------------
/**
 * Builds the payload delivered on /changeView: one entry per active grid, keyed
 * by grid kind, each holding that grid's initial JSON structure.
 */
public Map<String, Object> getInitialJsonGridStructures() {
  final Map<String, Object> gridStructures = new HashMap<String, Object>();
  final RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  if (portfolioGrid != null) {
    gridStructures.put("portfolio", portfolioGrid.getInitialJsonGridStructure());
  }
  final RequirementBasedWebViewGrid primitivesGrid = getPrimitivesGrid();
  if (primitivesGrid != null) {
    gridStructures.put("primitives", primitivesGrid.getInitialJsonGridStructure());
  }
  return gridStructures;
}
/**
 * Enables or disables dependency-graph viewing for a single cell of the portfolio
 * grid. Maintains a count of open dependency graphs so view-cycle access is only
 * enabled on the view client while at least one graph is open, then registers or
 * unregisters the per-cell dep-graph grid as appropriate.
 *
 * @throws OpenGammaRuntimeException if the named grid is not the portfolio grid
 */
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
  RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  // Guard against a null portfolio grid (it is legitimately null when the compiled
  // structure was empty — see initGrids) as well as a name mismatch, rather than
  // letting getName() throw an NPE.
  if (portfolioGrid == null || !portfolioGrid.getName().equals(parentGridName)) {
    throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
  }
  if (includeDepGraph) {
    // First open dep graph: the view client must start retaining cycle access.
    if (_activeDepGraphCount.getAndIncrement() == 0) {
      getViewClient().setViewCycleAccessSupported(true);
    }
  } else {
    if (_activeDepGraphCount.decrementAndGet() == 0) {
      getViewClient().setViewCycleAccessSupported(false);
    }
  }
  WebViewGrid grid = portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
  if (grid != null) {
    if (includeDepGraph) {
      registerGrid(grid);
    } else {
      unregisterGrid(grid.getName());
    }
  }
}
/**
 * Dumps the named grid's contents as CSV against the latest result.
 *
 * @return pair of (valuation time, CSV text), or null if no result is available yet
 * @throws OpenGammaRuntimeException if the grid name is unknown
 */
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
  WebViewGrid grid = getGridByName(gridName);
  if (grid == null) {
    throw new OpenGammaRuntimeException("Unknown grid '" + gridName + "'");
  }
  ViewComputationResultModel latestResult = getViewClient().getLatestResult();
  if (latestResult == null) {
    return null;
  }
  String csv = grid.dumpContentsToCsv(latestResult);
  return Pair.of(latestResult.getValuationTime(), csv);
}

//-------------------------------------------------------------------------
// Makes a grid addressable by name (used for dynamically-created dep-graph grids).
private void registerGrid(WebViewGrid grid) {
  _gridsByName.put(grid.getName(), grid);
}

// Removes a grid from the name lookup.
private void unregisterGrid(String gridName) {
  _gridsByName.remove(gridName);
}
//-------------------------------------------------------------------------
// Simple private accessors, confining direct field access.

private ExecutorService getExecutorService() {
  return _executorService;
}

private RequirementBasedWebViewGrid getPortfolioGrid() {
  return _portfolioGrid;
}

private RequirementBasedWebViewGrid getPrimitivesGrid() {
  return _primitivesGrid;
}

private ViewClient getViewClient() {
  return _client;
}

private Client getLocal() {
  return _local;
}

private Client getRemote() {
  return _remote;
}

private ResultConverterCache getResultConverterCache() {
  return _resultConverterCache;
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
 * A server-side representation of a single web client's view of an OpenGamma view
 * process, pushing grid structures and result updates over CometD channels.
 */
public class WebView {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private static final String STARTED_DISPLAY_NAME = "Live";
private static final String PAUSED_DISPLAY_NAME = "Paused";
private final Client _local;
private final Client _remote;
private final ViewClient _client;
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewExecutionOptions _executionOptions;
private final ExecutorService _executorService;
private final ResultConverterCache _resultConverterCache;
private final ReentrantLock _updateLock = new ReentrantLock();
private boolean _awaitingNextUpdate;
private boolean _continueUpdateThread;
private boolean _updateThreadRunning;
private AtomicBoolean _isInit = new AtomicBoolean(false);
private final Map<String, WebViewGrid> _gridsByName;
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
/**
 * Creates a web view bound to a view client. Wires up a result listener that
 * (re)initialises the grids whenever the view definition compiles and pushes
 * updates as computation cycles complete, then attaches the client to the view
 * process so results start flowing.
 */
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
    final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
    final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
  ArgumentChecker.notNull(executionOptions, "executionOptions");
  _local = local;
  _remote = remote;
  _client = client;
  _baseViewDefinitionId = baseViewDefinitionId;
  _aggregatorName = aggregatorName;
  _viewDefinitionId = viewDefinitionId;
  _executionOptions = executionOptions;
  _executorService = executorService;
  _resultConverterCache = resultConverterCache;
  _gridsByName = new HashMap<String, WebViewGrid>();
  _client.setResultListener(new AbstractViewResultListener() {
    @Override
    public UserPrincipal getUser() {
      // Authentication needed
      return UserPrincipal.getLocalUser();
    }
    @Override
    public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
      s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
      initGrids(compiledViewDefinition);
    }
    @Override
    public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
      s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
      _updateLock.lock();
      try {
        // Only push if a client explicitly asked for the next update.
        if (_awaitingNextUpdate) {
          _awaitingNextUpdate = false;
          sendImmediateUpdate();
        }
      } finally {
        _updateLock.unlock();
      }
    }
  });
  client.attachToViewProcess(viewDefinitionId, executionOptions);
}
//-------------------------------------------------------------------------
// Initialisation

/**
 * Builds the portfolio and primitives grids from a newly-compiled view definition.
 * A grid whose structure is empty is discarded (its field left null) so callers can
 * test for its presence. Invoked from the result listener on each compilation.
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
  _isInit.set(true);
  RequirementBasedWebViewGrid portfolioGrid = new WebViewPortfolioGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (portfolioGrid.getGridStructure().isEmpty()) {
    _portfolioGrid = null;
  } else {
    _portfolioGrid = portfolioGrid;
    _gridsByName.put(_portfolioGrid.getName(), _portfolioGrid);
  }
  RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (primitivesGrid.getGridStructure().isEmpty()) {
    _primitivesGrid = null;
  } else {
    _primitivesGrid = primitivesGrid;
    _gridsByName.put(_primitivesGrid.getName(), _primitivesGrid);
  }
  notifyInitialized();
}

// Pushes the initial grid structures to the remote client on the /changeView channel.
private void notifyInitialized() {
  getRemote().deliver(getLocal(), "/changeView", getInitialJsonGridStructures(), null);
}

// Re-sends the grid structures after a client reconnect, but only once the view
// has been initialised at least once.
/*package*/ void reconnected() {
  if (_isInit.get()) {
    notifyInitialized();
  }
}
//-------------------------------------------------------------------------
// Update control

/** Pauses the underlying view client and publishes the new status to the client. */
public void pause() {
  getViewClient().pause();
  sendViewStatus(false, PAUSED_DISPLAY_NAME);
}

/** Resumes the underlying view client and publishes the new status to the client. */
public void resume() {
  getViewClient().resume();
  sendViewStatus(true, STARTED_DISPLAY_NAME);
}

/** Shuts down the view client. */
public void shutdown() {
  // Removes all listeners
  getViewClient().shutdown();
}

/** @return the identifier of the base (unaggregated) view definition */
public UniqueId getBaseViewDefinitionId() {
  return _baseViewDefinitionId;
}

/** @return the aggregator name, possibly null when no aggregation applies */
public String getAggregatorName() {
  return _aggregatorName;
}

/** @return the identifier of the (possibly aggregated) view definition in use */
public UniqueId getViewDefinitionId() {
  return _viewDefinitionId;
}

/** @return the execution options this view was attached with */
public ViewExecutionOptions getExecutionOptions() {
  return _executionOptions;
}

/**
 * Tests whether this web view serves the given base view definition, aggregator and
 * execution options, allowing an existing instance to be reused rather than recreated.
 */
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
  return getBaseViewDefinitionId().equals(baseViewDefinitionId)
      && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}

/** @return the grid registered under the given name, or null if none */
public WebViewGrid getGridByName(String name) {
  return _gridsByName.get(name);
}
/**
 * Handles a client update request: applies any viewport changes carried in the
 * message, then either sends results immediately (if a result exists and the client
 * asked for an immediate response) or defers until the next cycle completes.
 */
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
  Map<String, Object> dataMap = (Map<String, Object>) message.getData();
  // Defensive: treat a missing or non-TRUE flag as false instead of NPE-ing while
  // unboxing a null Boolean from the message payload.
  boolean immediateResponse = Boolean.TRUE.equals(dataMap.get("immediateResponse"));
  if (getPortfolioGrid() != null) {
    Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
    getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
  }
  if (getPrimitivesGrid() != null) {
    Map<String, Object> primitiveViewport = (Map<String, Object>) dataMap.get("primitiveViewport");
    getPrimitivesGrid().setViewport(processViewportData(primitiveViewport));
  }
  // Can only provide an immediate response if there is a result available
  immediateResponse &= getViewClient().isResultAvailable();
  _updateLock.lock();
  try {
    if (immediateResponse) {
      sendImmediateUpdate();
    } else {
      _awaitingNextUpdate = true;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Converts raw viewport data from the client into a map of row ID to the timestamp
 * of the last value the client holds for that row (null when it holds none).
 *
 * @param viewportData  expected to contain parallel "rowIds" and "lastTimestamps" arrays
 * @return sorted map of row ID to last-known timestamp
 * @throws OpenGammaRuntimeException if a row ID is not numeric
 */
private SortedMap<Integer, Long> processViewportData(Map<String, Object> viewportData) {
  SortedMap<Integer, Long> result = new TreeMap<Integer, Long>();
  if (viewportData.isEmpty()) {
    return result;
  }
  Object[] ids = (Object[]) viewportData.get("rowIds");
  Object[] lastTimes = (Object[]) viewportData.get("lastTimestamps");
  for (int i = 0; i < ids.length; i++) {
    if (ids[i] instanceof Number) {
      // Use Number.longValue() rather than a (Long) cast: the guard admits any
      // Number (the JSON layer may deliver Integer), and a blind cast to Long
      // would throw ClassCastException.
      int rowId = (int) ((Number) ids[i]).longValue();
      if (lastTimes[i] != null) {
        Long lastTime = (Long) lastTimes[i];
        result.put(rowId, lastTime);
      } else {
        result.put(rowId, null);
      }
    } else {
      throw new OpenGammaRuntimeException("Unexpected type of webId: " + ids[i]);
    }
  }
  return result;
}
/**
 * Kicks off an asynchronous push of the latest results to the client. If an update
 * task is already running, flags it to run one more pass instead of starting a
 * second task, so at most one update task is ever active.
 */
private void sendImmediateUpdate() {
  _updateLock.lock();
  try {
    if (!_updateThreadRunning) {
      _updateThreadRunning = true;
      runUpdateThread();
    } else {
      _continueUpdateThread = true;
    }
  } finally {
    _updateLock.unlock();
  }
}

/**
 * Submits the update task: sends the latest result to the remote client as a batch,
 * framed by start/end control messages, repeating while further updates were
 * requested during the run (see continueUpdateThread()).
 */
private void runUpdateThread() {
  getExecutorService().submit(new Runnable() {
    @Override
    public void run() {
      do {
        ViewComputationResultModel update = getViewClient().getLatestResult();
        getRemote().startBatch();
        long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
        long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
        sendStartMessage(valuationTimeMillis, calculationDurationMillis);
        try {
          processResult(update);
        } catch (Exception e) {
          // Log and carry on so the end-of-batch framing is still sent.
          s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
        }
        sendEndMessage();
        getRemote().endBatch();
      } while (continueUpdateThread());
    }
  });
}

/**
 * Atomically decides whether the update task should run another pass. Consumes the
 * continue flag if set; otherwise marks the task as no longer running so a later
 * sendImmediateUpdate() starts a fresh one.
 */
private boolean continueUpdateThread() {
  _updateLock.lock();
  try {
    if (_continueUpdateThread) {
      _continueUpdateThread = false;
      return true;
    } else {
      _updateThreadRunning = false;
      return false;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Pushes results for a completed cycle into the grids. Iterates the targets known
 * to each grid's structure (rather than every target in the result) and finishes
 * by refreshing any open dependency-graph grids.
 */
private void processResult(ViewComputationResultModel resultModel) {
  // Timestamp (epoch millis) stamped onto every cell update from this cycle.
  long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
  if (getPrimitivesGrid() != null) {
    for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
      getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
    }
  }
  if (getPortfolioGrid() != null) {
    for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
      getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
    }
    getPortfolioGrid().processDepGraphs(resultTimestamp);
  }
}
/**
 * Tells the remote client that updates are starting.
 */
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
  Map<String, Object> startMessage = new HashMap<String, Object>();
  startMessage.put("valuationTime", valuationTimeEpochMillis);
  startMessage.put("calculationDuration", calculationDurationMillis);
  getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}

/**
 * Tells the remote client that updates have finished.
 */
private void sendEndMessage() {
  getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}

/**
 * Publishes the running/paused state of the view to the client's /status channel.
 */
private void sendViewStatus(boolean isRunning, String status) {
  Map<String, Object> output = new HashMap<String, Object>();
  output.put("isRunning", isRunning);
  output.put("status", status);
  getRemote().deliver(getLocal(), "/status", output, null);
}
//-------------------------------------------------------------------------
/**
 * Builds the payload delivered on /changeView: one entry per active grid, keyed
 * by grid kind, each holding that grid's initial JSON structure.
 */
public Map<String, Object> getInitialJsonGridStructures() {
  final Map<String, Object> gridStructures = new HashMap<String, Object>();
  final RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  if (portfolioGrid != null) {
    gridStructures.put("portfolio", portfolioGrid.getInitialJsonGridStructure());
  }
  final RequirementBasedWebViewGrid primitivesGrid = getPrimitivesGrid();
  if (primitivesGrid != null) {
    gridStructures.put("primitives", primitivesGrid.getInitialJsonGridStructure());
  }
  return gridStructures;
}
/**
 * Enables or disables dependency-graph viewing for a single cell of the portfolio
 * grid. Maintains a count of open dependency graphs so view-cycle access is only
 * enabled on the view client while at least one graph is open, then registers or
 * unregisters the per-cell dep-graph grid as appropriate.
 *
 * @throws OpenGammaRuntimeException if the named grid is not the portfolio grid
 */
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
  RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  // Guard against a null portfolio grid (it is legitimately null when the compiled
  // structure was empty — see initGrids) as well as a name mismatch, rather than
  // letting getName() throw an NPE.
  if (portfolioGrid == null || !portfolioGrid.getName().equals(parentGridName)) {
    throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
  }
  if (includeDepGraph) {
    // First open dep graph: the view client must start retaining cycle access.
    if (_activeDepGraphCount.getAndIncrement() == 0) {
      getViewClient().setViewCycleAccessSupported(true);
    }
  } else {
    if (_activeDepGraphCount.decrementAndGet() == 0) {
      getViewClient().setViewCycleAccessSupported(false);
    }
  }
  WebViewGrid grid = portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
  if (grid != null) {
    if (includeDepGraph) {
      registerGrid(grid);
    } else {
      unregisterGrid(grid.getName());
    }
  }
}
/**
 * Dumps the named grid's contents as CSV against the latest result.
 *
 * @return pair of (valuation time, CSV text), or null if no result is available yet
 * @throws OpenGammaRuntimeException if the grid name is unknown
 */
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
  WebViewGrid grid = getGridByName(gridName);
  if (grid == null) {
    throw new OpenGammaRuntimeException("Unknown grid '" + gridName + "'");
  }
  ViewComputationResultModel latestResult = getViewClient().getLatestResult();
  if (latestResult == null) {
    return null;
  }
  String csv = grid.dumpContentsToCsv(latestResult);
  return Pair.of(latestResult.getValuationTime(), csv);
}

//-------------------------------------------------------------------------
// Makes a grid addressable by name (used for dynamically-created dep-graph grids).
private void registerGrid(WebViewGrid grid) {
  _gridsByName.put(grid.getName(), grid);
}

// Removes a grid from the name lookup.
private void unregisterGrid(String gridName) {
  _gridsByName.remove(gridName);
}
//-------------------------------------------------------------------------
// Simple private accessors, confining direct field access.

private ExecutorService getExecutorService() {
  return _executorService;
}

private RequirementBasedWebViewGrid getPortfolioGrid() {
  return _portfolioGrid;
}

private RequirementBasedWebViewGrid getPrimitivesGrid() {
  return _primitivesGrid;
}

private ViewClient getViewClient() {
  return _client;
}

private Client getLocal() {
  return _local;
}

private Client getRemote() {
  return _remote;
}

private ResultConverterCache getResultConverterCache() {
  return _resultConverterCache;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ResultConverterCache;
/**
 * A server-side representation of a single web client's view of an OpenGamma view
 * process, pushing grid structures and result updates over CometD channels.
 */
public class WebView {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private static final String STARTED_DISPLAY_NAME = "Live";
private static final String PAUSED_DISPLAY_NAME = "Paused";
private final Client _local;
private final Client _remote;
private final ViewClient _client;
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewExecutionOptions _executionOptions;
private final ExecutorService _executorService;
private final ResultConverterCache _resultConverterCache;
private final ReentrantLock _updateLock = new ReentrantLock();
private boolean _awaitingNextUpdate;
private boolean _continueUpdateThread;
private boolean _updateThreadRunning;
private AtomicBoolean _isInit = new AtomicBoolean(false);
private final Map<String, WebViewGrid> _gridsByName;
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
/**
 * Creates a web view bound to a view client. Wires up a result listener that
 * (re)initialises the grids whenever the view definition compiles and pushes
 * updates as computation cycles complete, then attaches the client to the view
 * process so results start flowing.
 */
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
    final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
    final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
  ArgumentChecker.notNull(executionOptions, "executionOptions");
  _local = local;
  _remote = remote;
  _client = client;
  _baseViewDefinitionId = baseViewDefinitionId;
  _aggregatorName = aggregatorName;
  _viewDefinitionId = viewDefinitionId;
  _executionOptions = executionOptions;
  _executorService = executorService;
  _resultConverterCache = resultConverterCache;
  _gridsByName = new HashMap<String, WebViewGrid>();
  _client.setResultListener(new AbstractViewResultListener() {
    @Override
    public UserPrincipal getUser() {
      // Authentication needed
      return UserPrincipal.getLocalUser();
    }
    @Override
    public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
      s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
      initGrids(compiledViewDefinition);
    }
    @Override
    public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
      s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
      _updateLock.lock();
      try {
        // Only push if a client explicitly asked for the next update.
        if (_awaitingNextUpdate) {
          _awaitingNextUpdate = false;
          sendImmediateUpdate();
        }
      } finally {
        _updateLock.unlock();
      }
    }
  });
  client.attachToViewProcess(viewDefinitionId, executionOptions);
}
//-------------------------------------------------------------------------
// Initialisation

/**
 * Builds the portfolio and primitives grids from a newly-compiled view definition.
 * A grid whose structure is empty is discarded (its field left null) so callers can
 * test for its presence. Invoked from the result listener on each compilation.
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
  _isInit.set(true);
  RequirementBasedWebViewGrid portfolioGrid = new WebViewPortfolioGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (portfolioGrid.getGridStructure().isEmpty()) {
    _portfolioGrid = null;
  } else {
    _portfolioGrid = portfolioGrid;
    _gridsByName.put(_portfolioGrid.getName(), _portfolioGrid);
  }
  RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(getViewClient(), compiledViewDefinition, getResultConverterCache(), getLocal(), getRemote());
  if (primitivesGrid.getGridStructure().isEmpty()) {
    _primitivesGrid = null;
  } else {
    _primitivesGrid = primitivesGrid;
    _gridsByName.put(_primitivesGrid.getName(), _primitivesGrid);
  }
  notifyInitialized();
}

// Pushes the initial grid structures to the remote client on the /changeView channel.
private void notifyInitialized() {
  getRemote().deliver(getLocal(), "/changeView", getInitialJsonGridStructures(), null);
}

// Re-sends the grid structures after a client reconnect, but only once the view
// has been initialised at least once.
/*package*/ void reconnected() {
  if (_isInit.get()) {
    notifyInitialized();
  }
}
//-------------------------------------------------------------------------
// Update control

/** Pauses the underlying view client and publishes the new status to the client. */
public void pause() {
  getViewClient().pause();
  sendViewStatus(false, PAUSED_DISPLAY_NAME);
}

/** Resumes the underlying view client and publishes the new status to the client. */
public void resume() {
  getViewClient().resume();
  sendViewStatus(true, STARTED_DISPLAY_NAME);
}

/** Shuts down the view client. */
public void shutdown() {
  // Removes all listeners
  getViewClient().shutdown();
}

/** @return the identifier of the base (unaggregated) view definition */
public UniqueId getBaseViewDefinitionId() {
  return _baseViewDefinitionId;
}

/** @return the aggregator name, possibly null when no aggregation applies */
public String getAggregatorName() {
  return _aggregatorName;
}

/** @return the identifier of the (possibly aggregated) view definition in use */
public UniqueId getViewDefinitionId() {
  return _viewDefinitionId;
}

/** @return the execution options this view was attached with */
public ViewExecutionOptions getExecutionOptions() {
  return _executionOptions;
}

/**
 * Tests whether this web view serves the given base view definition, aggregator and
 * execution options, allowing an existing instance to be reused rather than recreated.
 */
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
  return getBaseViewDefinitionId().equals(baseViewDefinitionId)
      && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}

/** @return the grid registered under the given name, or null if none */
public WebViewGrid getGridByName(String name) {
  return _gridsByName.get(name);
}
/**
 * Handles a client update request: applies any viewport changes carried in the
 * message, then either sends results immediately (if a result exists and the client
 * asked for an immediate response) or defers until the next cycle completes.
 */
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
  Map<String, Object> dataMap = (Map<String, Object>) message.getData();
  // Defensive: treat a missing or non-TRUE flag as false instead of NPE-ing while
  // unboxing a null Boolean from the message payload.
  boolean immediateResponse = Boolean.TRUE.equals(dataMap.get("immediateResponse"));
  if (getPortfolioGrid() != null) {
    Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
    getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
  }
  if (getPrimitivesGrid() != null) {
    Map<String, Object> primitiveViewport = (Map<String, Object>) dataMap.get("primitiveViewport");
    getPrimitivesGrid().setViewport(processViewportData(primitiveViewport));
  }
  // Can only provide an immediate response if there is a result available
  immediateResponse &= getViewClient().isResultAvailable();
  _updateLock.lock();
  try {
    if (immediateResponse) {
      sendImmediateUpdate();
    } else {
      _awaitingNextUpdate = true;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Converts raw viewport data from the client into a map of row ID to the timestamp
 * of the last value the client holds for that row (null when it holds none).
 *
 * @param viewportData  expected to contain parallel "rowIds" and "lastTimestamps" arrays
 * @return sorted map of row ID to last-known timestamp
 * @throws OpenGammaRuntimeException if a row ID is not numeric
 */
private SortedMap<Integer, Long> processViewportData(Map<String, Object> viewportData) {
  SortedMap<Integer, Long> result = new TreeMap<Integer, Long>();
  if (viewportData.isEmpty()) {
    return result;
  }
  Object[] ids = (Object[]) viewportData.get("rowIds");
  Object[] lastTimes = (Object[]) viewportData.get("lastTimestamps");
  for (int i = 0; i < ids.length; i++) {
    if (ids[i] instanceof Number) {
      // Use Number.longValue() rather than a (Long) cast: the guard admits any
      // Number (the JSON layer may deliver Integer), and a blind cast to Long
      // would throw ClassCastException.
      int rowId = (int) ((Number) ids[i]).longValue();
      if (lastTimes[i] != null) {
        Long lastTime = (Long) lastTimes[i];
        result.put(rowId, lastTime);
      } else {
        result.put(rowId, null);
      }
    } else {
      throw new OpenGammaRuntimeException("Unexpected type of webId: " + ids[i]);
    }
  }
  return result;
}
/**
 * Kicks off an asynchronous push of the latest results to the client. If an update
 * task is already running, flags it to run one more pass instead of starting a
 * second task, so at most one update task is ever active.
 */
private void sendImmediateUpdate() {
  _updateLock.lock();
  try {
    if (!_updateThreadRunning) {
      _updateThreadRunning = true;
      runUpdateThread();
    } else {
      _continueUpdateThread = true;
    }
  } finally {
    _updateLock.unlock();
  }
}

/**
 * Submits the update task: sends the latest result to the remote client as a batch,
 * framed by start/end control messages, repeating while further updates were
 * requested during the run (see continueUpdateThread()).
 */
private void runUpdateThread() {
  getExecutorService().submit(new Runnable() {
    @Override
    public void run() {
      do {
        ViewComputationResultModel update = getViewClient().getLatestResult();
        getRemote().startBatch();
        long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
        long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
        sendStartMessage(valuationTimeMillis, calculationDurationMillis);
        try {
          processResult(update);
        } catch (Exception e) {
          // Log and carry on so the end-of-batch framing is still sent.
          s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
        }
        sendEndMessage();
        getRemote().endBatch();
      } while (continueUpdateThread());
    }
  });
}

/**
 * Atomically decides whether the update task should run another pass. Consumes the
 * continue flag if set; otherwise marks the task as no longer running so a later
 * sendImmediateUpdate() starts a fresh one.
 */
private boolean continueUpdateThread() {
  _updateLock.lock();
  try {
    if (_continueUpdateThread) {
      _continueUpdateThread = false;
      return true;
    } else {
      _updateThreadRunning = false;
      return false;
    }
  } finally {
    _updateLock.unlock();
  }
}
/**
 * Pushes results for a completed cycle into the grids. Iterates the targets known
 * to each grid's structure (rather than every target in the result) and finishes
 * by refreshing any open dependency-graph grids.
 */
private void processResult(ViewComputationResultModel resultModel) {
  // Timestamp (epoch millis) stamped onto every cell update from this cycle.
  long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
  if (getPrimitivesGrid() != null) {
    for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
      getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
    }
  }
  if (getPortfolioGrid() != null) {
    for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
      getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
    }
    getPortfolioGrid().processDepGraphs(resultTimestamp);
  }
}
/**
* Tells the remote client that updates are starting.
*/
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
Map<String, Object> startMessage = new HashMap<String, Object>();
startMessage.put("valuationTime", valuationTimeEpochMillis);
startMessage.put("calculationDuration", calculationDurationMillis);
getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}
/**
* Tells the remote client that updates have finished.
*/
private void sendEndMessage() {
getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}
/**
 * Publishes the view's running state and a status string on the "/status" channel.
 *
 * @param isRunning  whether the view is currently running
 * @param status  human-readable status description
 */
private void sendViewStatus(boolean isRunning, String status) {
  Map<String, Object> statusMessage = new HashMap<String, Object>();
  statusMessage.put("isRunning", isRunning);
  statusMessage.put("status", status);
  getRemote().deliver(getLocal(), "/status", statusMessage, null);
}
//-------------------------------------------------------------------------
/**
 * Builds the initial JSON-friendly structure for each grid that exists.
 *
 * @return map of grid name ("primitives"/"portfolio") to its initial structure;
 *         absent grids are simply omitted
 */
public Map<String, Object> getInitialJsonGridStructures() {
  Map<String, Object> structuresByGrid = new HashMap<String, Object>();
  RequirementBasedWebViewGrid primitivesGrid = getPrimitivesGrid();
  if (primitivesGrid != null) {
    structuresByGrid.put("primitives", primitivesGrid.getInitialJsonGridStructure());
  }
  RequirementBasedWebViewGrid portfolioGrid = getPortfolioGrid();
  if (portfolioGrid != null) {
    structuresByGrid.put("portfolio", portfolioGrid.getInitialJsonGridStructure());
  }
  return structuresByGrid;
}
/**
 * Opens or closes a dependency-graph view for a cell of the portfolio grid.
 * View cycle access on the client is enabled while at least one dep graph is open,
 * tracked via {@code _activeDepGraphCount}. The resulting dep-graph grid (if any)
 * is registered or unregistered for name lookups.
 *
 * @throws OpenGammaRuntimeException if {@code parentGridName} is not the portfolio grid
 */
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
  if (!getPortfolioGrid().getName().equals(parentGridName)) {
    throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
  }
  if (includeDepGraph) {
    // First open dep graph turns cycle access on.
    if (_activeDepGraphCount.getAndIncrement() == 0) {
      getViewClient().setViewCycleAccessSupported(true);
    }
  } else if (_activeDepGraphCount.decrementAndGet() == 0) {
    // Last dep graph closed turns cycle access off again.
    getViewClient().setViewCycleAccessSupported(false);
  }
  WebViewGrid depGraphGrid = getPortfolioGrid().setIncludeDepGraph(cell, includeDepGraph);
  if (depGraphGrid == null) {
    return;
  }
  if (includeDepGraph) {
    registerGrid(depGraphGrid);
  } else {
    unregisterGrid(depGraphGrid.getName());
  }
}
/**
 * Dumps the named grid's contents as CSV against the latest available result.
 *
 * @return the result's valuation time paired with the CSV text, or null if no
 *         result is available yet
 * @throws OpenGammaRuntimeException if no grid exists with the given name
 */
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
  WebViewGrid grid = getGridByName(gridName);
  if (grid == null) {
    throw new OpenGammaRuntimeException("Unknown grid '" + gridName + "'");
  }
  ViewComputationResultModel latestResult = getViewClient().getLatestResult();
  if (latestResult == null) {
    return null;
  }
  return Pair.of(latestResult.getValuationTime(), grid.dumpContentsToCsv(latestResult));
}
//-------------------------------------------------------------------------
// Adds the grid to the name-to-grid lookup (used by getGridByName and friends).
private void registerGrid(WebViewGrid grid) {
_gridsByName.put(grid.getName(), grid);
}
// Removes the named grid from the lookup, e.g. when a dep-graph view is closed.
private void unregisterGrid(String gridName) {
_gridsByName.remove(gridName);
}
//-------------------------------------------------------------------------
// Simple accessors for collaborators held in fields.
private ExecutorService getExecutorService() {
return _executorService;
}
// NOTE(review): presumably null when the view has no portfolio output - confirm
// against the initialisation code (not visible in this chunk).
private RequirementBasedWebViewGrid getPortfolioGrid() {
return _portfolioGrid;
}
// NOTE(review): presumably null when the view has no primitives output - confirm.
private RequirementBasedWebViewGrid getPrimitivesGrid() {
return _primitivesGrid;
}
private ViewClient getViewClient() {
return _client;
}
// Local and remote cometd endpoints used for pushing updates to the browser.
private Client getLocal() {
return _local;
}
private Client getRemote() {
return _remote;
}
private ResultConverterCache getResultConverterCache() {
return _resultConverterCache;
}
}
// ===== Variant marker: "Right" (merge-dataset delimiter; not valid Java, kept as a comment) =====
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* TODO CONCURRENCY - I've scrapped all the locking, needs to be reviewed and replaced
* TODO return new viewport instance rather than implementing it?
*/
public class WebView implements Viewport {
  private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);

  private final ViewClient _viewClient;
  private final String _viewDefinitionName;
  private final ResultConverterCache _resultConverterCache;
  /** Latest converted results keyed by "portfolio"/"primitive"; guarded by {@link #_lock}. */
  private final Map<String, Object> _latestResults = new HashMap<String, Object>();
  private final Object _lock = new Object();
  /** Null when the compiled view produces an empty portfolio grid structure. */
  private RequirementBasedWebViewGrid _portfolioGrid;
  /** Null when the compiled view produces an empty primitives grid structure. */
  private RequirementBasedWebViewGrid _primitivesGrid;
  // TODO get the state from the grids
  /** Count of open dependency-graph views; cycle access is enabled while non-zero. */
  private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
  private ViewportDefinition _viewportDefinition;
  private AnalyticsListener _listener;
  /** JSON-friendly grid structures; rebuilt on every view compilation. */
  private Map<String, Object> _gridStructures;
  private boolean _initialized = false;
  /** Armed by {@link #getLatestResults()}; the listener fires once on the next new result. */
  private boolean _sendAnalyticsUpdates = false;

  /**
   * Creates the view, registers a result listener on the client and attaches the
   * client to the view process so compilation and result callbacks start arriving.
   *
   * @param viewClient  the engine client backing this view
   * @param viewDefinitionName  name of the view definition to attach to
   * @param resultConverterCache  converters used when building grid contents
   * @param viewportDefinition  the initial viewport (rows, dep-graph cells, execution options)
   * @param listener  notified of grid structure and data changes
   */
  public WebView(ViewClient viewClient,
      String viewDefinitionName,
      ResultConverterCache resultConverterCache,
      ViewportDefinition viewportDefinition,
      AnalyticsListener listener) {
    _viewClient = viewClient;
    _viewDefinitionName = viewDefinitionName;
    _resultConverterCache = resultConverterCache;
    _viewportDefinition = viewportDefinition;
    _listener = listener;
    _viewClient.setResultListener(new AbstractViewResultListener() {
      @Override
      public UserPrincipal getUser() {
        // Authentication needed
        return UserPrincipal.getLocalUser();
      }

      @Override
      public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
        s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
        initGrids(compiledViewDefinition);
      }

      @Override
      public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
        s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
        updateResults();
      }
    });
    _viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
  }

  // TODO make sure an update event is published when the view defs compile?
  /**
   * Rebuilds both grids from a newly compiled view definition. A grid whose
   * structure is empty is stored as {@code null} and omitted from the published
   * structures. Finishes by re-applying the current viewport and refreshing results.
   */
  private void initGrids(CompiledViewDefinition compiledViewDefinition) {
    synchronized (_lock) {
      WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
      _gridStructures = new HashMap<String, Object>();
      if (portfolioGrid.getGridStructure().isEmpty()) {
        _portfolioGrid = null;
      } else {
        _portfolioGrid = portfolioGrid;
        // FIX: the structure was previously put into the map twice; once is enough.
        _gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
      }
      RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
      if (primitivesGrid.getGridStructure().isEmpty()) {
        _primitivesGrid = null;
      } else {
        _primitivesGrid = primitivesGrid;
        _gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
      }
      _initialized = true;
      _listener.gridStructureChanged();
      configureGridViewports();
    }
  }

  /** Pauses the flow of results from the engine. */
  /* package */ void pause() {
    synchronized (_lock) {
      _viewClient.pause();
    }
  }

  /** Resumes the flow of results from the engine. */
  /* package */ void resume() {
    synchronized (_lock) {
      _viewClient.resume();
    }
  }

  /** Shuts down the view client. Removes all listeners. */
  /* package */ void shutdown() {
    synchronized (_lock) {
      _viewClient.shutdown();
    }
  }

  public String getViewDefinitionName() {
    synchronized (_lock) {
      return _viewDefinitionName;
    }
  }

  /**
   * @return true if this view is attached to the named view definition with equal
   *         execution options
   */
  /* package */ boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
    synchronized (_lock) {
      return _viewDefinitionName.equals(viewDefinitionName) &&
          ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
    }
  }

  /**
   * Looks up a top-level grid or one of its dependency-graph grids by name.
   *
   * @return the matching grid, or null if none is known by that name
   */
  private WebViewGrid getGridByName(String name) {
    if (_primitivesGrid != null) {
      if (_primitivesGrid.getName().equals(name)) {
        return _primitivesGrid;
      }
      WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
      if (depGraphGrid != null) {
        return depGraphGrid;
      }
    }
    if (_portfolioGrid != null) {
      if (_portfolioGrid.getName().equals(name)) {
        return _portfolioGrid;
      } else {
        return _portfolioGrid.getDepGraphGrid(name);
      }
    }
    return null;
  }

  /**
   * Replaces the current viewport definition and listener and re-applies them
   * to the grids.
   *
   * @return this view, acting as the viewport
   */
  /* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
      AnalyticsListener listener,
      String viewportKey) {
    synchronized (_lock) {
      _viewportDefinition = viewportDefinition;
      _listener = listener;
      configureGridViewports();
      return this;
    }
  }

  /**
   * Pushes the current viewport definition into the grids and refreshes the results.
   * No-op until the first compilation has initialised the grids.
   */
  private void configureGridViewports() {
    if (!_initialized) {
      return;
    }
    // FIX: initGrids() stores null for a grid whose structure is empty, so guard
    // each grid to avoid a NullPointerException.
    if (_portfolioGrid != null) {
      _portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
      _portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
    }
    if (_primitivesGrid != null) {
      _primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
      _primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
    }
    // TODO _client.setViewCycleAccessSupported()?
    updateResults();
  }

  /**
   * Converts the latest engine result into per-row maps for each grid, replaces the
   * cached results and notifies the listener if a client is waiting for an update.
   */
  private void updateResults() {
    synchronized (_lock) {
      if (!_viewClient.isResultAvailable()) {
        return;
      }
      ViewComputationResultModel resultModel = _viewClient.getLatestResult();
      long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
      HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
      HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
      for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
        switch (target.getType()) {
          case PRIMITIVE:
            if (_primitivesGrid != null) {
              Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
              if (targetResult != null) {
                Integer rowId = (Integer) targetResult.get("rowId");
                primitiveResult.put(rowId, targetResult);
              }
            }
            break;
          case PORTFOLIO_NODE:
          case POSITION:
            if (_portfolioGrid != null) {
              Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
              if (targetResult != null) {
                Integer rowId = (Integer) targetResult.get("rowId");
                portfolioResult.put(rowId, targetResult);
              }
            }
        }
      }
      _latestResults.clear();
      _latestResults.put("portfolio", portfolioResult);
      _latestResults.put("primitive", primitiveResult);
      if (_sendAnalyticsUpdates) {
        _sendAnalyticsUpdates = false;
        _listener.dataChanged();
      }
    }
  }

  // TODO this logic need to go in configureViewport
  /**
   * Tracks how many dependency graphs are open and enables view cycle access on
   * the client while at least one is open.
   */
  private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
    // TODO this is ugly, the dep graph count belongs in the portfolio grid
    if (includeDepGraph) {
      if (_activeDepGraphCount.getAndIncrement() == 0) {
        _viewClient.setViewCycleAccessSupported(true);
      }
    } else {
      if (_activeDepGraphCount.decrementAndGet() == 0) {
        _viewClient.setViewCycleAccessSupported(false);
      }
    }
    /*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
    if (grid != null) {
      if (includeDepGraph) {
        _gridsByName.put(grid.getName(), grid);
      } else {
        _gridsByName.remove(grid.getName());
      }
    }*/
  }

  // TODO refactor this?
  // TODO CONCURRENCY
  /**
   * Dumps a grid's contents as CSV against the latest available result.
   *
   * @return the result's valuation time paired with the CSV text, or null if no
   *         result is available yet
   * @throws DataNotFoundException if no grid exists with the given name
   */
  public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
    WebViewGrid grid = getGridByName(gridName);
    if (grid == null) {
      throw new DataNotFoundException("Unknown grid '" + gridName + "'");
    }
    ViewComputationResultModel latestResult = _viewClient.getLatestResult();
    if (latestResult == null) {
      return null;
    }
    String csv = grid.dumpContentsToCsv(latestResult);
    return Pair.of(latestResult.getValuationTime(), csv);
  }

  @Override
  public Map<String, Object> getGridStructure() {
    synchronized (_lock) {
      return _gridStructures;
    }
  }

  /** Also arms the change notification so the listener fires on the next new result. */
  @Override
  public Map<String, Object> getLatestResults() {
    synchronized (_lock) {
      _sendAnalyticsUpdates = true;
      return _latestResults;
    }
  }

  @Override
  public void setRunning(boolean run) {
    throw new UnsupportedOperationException("setRunning not implemented");
  }

  @Override
  public void setConversionMode(ConversionMode mode) {
    throw new UnsupportedOperationException("setConversionMode not implemented");
  }
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* TODO CONCURRENCY - I've scrapped all the locking, needs to be reviewed and replaced
* TODO return new viewport instance rather than implementing it?
*/
public class WebView implements Viewport {
  private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);

  private final ViewClient _viewClient;
  private final String _viewDefinitionName;
  private final ResultConverterCache _resultConverterCache;
  /** Latest converted results keyed by "portfolio"/"primitive"; guarded by {@link #_lock}. */
  private final Map<String, Object> _latestResults = new HashMap<String, Object>();
  private final Object _lock = new Object();
  /** Null when the compiled view produces an empty portfolio grid structure. */
  private RequirementBasedWebViewGrid _portfolioGrid;
  /** Null when the compiled view produces an empty primitives grid structure. */
  private RequirementBasedWebViewGrid _primitivesGrid;
  // TODO get the state from the grids
  /** Count of open dependency-graph views; cycle access is enabled while non-zero. */
  private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
  private ViewportDefinition _viewportDefinition;
  private AnalyticsListener _listener;
  /** JSON-friendly grid structures; rebuilt on every view compilation. */
  private Map<String, Object> _gridStructures;
  private boolean _initialized = false;
  /** Armed by {@link #getLatestResults()}; the listener fires once on the next new result. */
  private boolean _sendAnalyticsUpdates = false;

  /**
   * Creates the view, registers a result listener on the client and attaches the
   * client to the view process so compilation and result callbacks start arriving.
   *
   * @param viewClient  the engine client backing this view
   * @param viewDefinitionName  name of the view definition to attach to
   * @param resultConverterCache  converters used when building grid contents
   * @param viewportDefinition  the initial viewport (rows, dep-graph cells, execution options)
   * @param listener  notified of grid structure and data changes
   */
  public WebView(ViewClient viewClient,
      String viewDefinitionName,
      ResultConverterCache resultConverterCache,
      ViewportDefinition viewportDefinition,
      AnalyticsListener listener) {
    _viewClient = viewClient;
    _viewDefinitionName = viewDefinitionName;
    _resultConverterCache = resultConverterCache;
    _viewportDefinition = viewportDefinition;
    _listener = listener;
    _viewClient.setResultListener(new AbstractViewResultListener() {
      @Override
      public UserPrincipal getUser() {
        // Authentication needed
        return UserPrincipal.getLocalUser();
      }

      @Override
      public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
        s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
        initGrids(compiledViewDefinition);
      }

      @Override
      public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
        s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
        updateResults();
      }
    });
    _viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
  }

  // TODO make sure an update event is published when the view defs compile?
  /**
   * Rebuilds both grids from a newly compiled view definition. A grid whose
   * structure is empty is stored as {@code null} and omitted from the published
   * structures. Finishes by re-applying the current viewport and refreshing results.
   */
  private void initGrids(CompiledViewDefinition compiledViewDefinition) {
    synchronized (_lock) {
      WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
      _gridStructures = new HashMap<String, Object>();
      if (portfolioGrid.getGridStructure().isEmpty()) {
        _portfolioGrid = null;
      } else {
        _portfolioGrid = portfolioGrid;
        // FIX: the structure was previously put into the map twice; once is enough.
        _gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
      }
      RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
      if (primitivesGrid.getGridStructure().isEmpty()) {
        _primitivesGrid = null;
      } else {
        _primitivesGrid = primitivesGrid;
        _gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
      }
      _initialized = true;
      _listener.gridStructureChanged();
      configureGridViewports();
    }
  }

  /** Pauses the flow of results from the engine. */
  /* package */ void pause() {
    synchronized (_lock) {
      _viewClient.pause();
    }
  }

  /** Resumes the flow of results from the engine. */
  /* package */ void resume() {
    synchronized (_lock) {
      _viewClient.resume();
    }
  }

  /** Shuts down the view client. Removes all listeners. */
  /* package */ void shutdown() {
    synchronized (_lock) {
      _viewClient.shutdown();
    }
  }

  public String getViewDefinitionName() {
    synchronized (_lock) {
      return _viewDefinitionName;
    }
  }

  /**
   * @return true if this view is attached to the named view definition with equal
   *         execution options
   */
  /* package */ boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
    synchronized (_lock) {
      return _viewDefinitionName.equals(viewDefinitionName) &&
          ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
    }
  }

  /**
   * Looks up a top-level grid or one of its dependency-graph grids by name.
   *
   * @return the matching grid, or null if none is known by that name
   */
  private WebViewGrid getGridByName(String name) {
    if (_primitivesGrid != null) {
      if (_primitivesGrid.getName().equals(name)) {
        return _primitivesGrid;
      }
      WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
      if (depGraphGrid != null) {
        return depGraphGrid;
      }
    }
    if (_portfolioGrid != null) {
      if (_portfolioGrid.getName().equals(name)) {
        return _portfolioGrid;
      } else {
        return _portfolioGrid.getDepGraphGrid(name);
      }
    }
    return null;
  }

  /**
   * Replaces the current viewport definition and listener and re-applies them
   * to the grids.
   *
   * @return this view, acting as the viewport
   */
  /* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
      AnalyticsListener listener,
      String viewportKey) {
    synchronized (_lock) {
      _viewportDefinition = viewportDefinition;
      _listener = listener;
      configureGridViewports();
      return this;
    }
  }

  /**
   * Pushes the current viewport definition into the grids and refreshes the results.
   * No-op until the first compilation has initialised the grids.
   */
  private void configureGridViewports() {
    if (!_initialized) {
      return;
    }
    // FIX: initGrids() stores null for a grid whose structure is empty, so guard
    // each grid to avoid a NullPointerException.
    if (_portfolioGrid != null) {
      _portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
      _portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
    }
    if (_primitivesGrid != null) {
      _primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
      _primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
    }
    // TODO _client.setViewCycleAccessSupported()?
    updateResults();
  }

  /**
   * Converts the latest engine result into per-row maps for each grid, replaces the
   * cached results and notifies the listener if a client is waiting for an update.
   */
  private void updateResults() {
    synchronized (_lock) {
      if (!_viewClient.isResultAvailable()) {
        return;
      }
      ViewComputationResultModel resultModel = _viewClient.getLatestResult();
      long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
      HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
      HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
      for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
        switch (target.getType()) {
          case PRIMITIVE:
            if (_primitivesGrid != null) {
              Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
              if (targetResult != null) {
                Integer rowId = (Integer) targetResult.get("rowId");
                primitiveResult.put(rowId, targetResult);
              }
            }
            break;
          case PORTFOLIO_NODE:
          case POSITION:
            if (_portfolioGrid != null) {
              Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
              if (targetResult != null) {
                Integer rowId = (Integer) targetResult.get("rowId");
                portfolioResult.put(rowId, targetResult);
              }
            }
        }
      }
      _latestResults.clear();
      _latestResults.put("portfolio", portfolioResult);
      _latestResults.put("primitive", primitiveResult);
      if (_sendAnalyticsUpdates) {
        _sendAnalyticsUpdates = false;
        _listener.dataChanged();
      }
    }
  }

  // TODO this logic need to go in configureViewport
  /**
   * Tracks how many dependency graphs are open and enables view cycle access on
   * the client while at least one is open.
   */
  private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
    // TODO this is ugly, the dep graph count belongs in the portfolio grid
    if (includeDepGraph) {
      if (_activeDepGraphCount.getAndIncrement() == 0) {
        _viewClient.setViewCycleAccessSupported(true);
      }
    } else {
      if (_activeDepGraphCount.decrementAndGet() == 0) {
        _viewClient.setViewCycleAccessSupported(false);
      }
    }
    /*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
    if (grid != null) {
      if (includeDepGraph) {
        _gridsByName.put(grid.getName(), grid);
      } else {
        _gridsByName.remove(grid.getName());
      }
    }*/
  }

  // TODO refactor this?
  // TODO CONCURRENCY
  /**
   * Dumps a grid's contents as CSV against the latest available result.
   *
   * @return the result's valuation time paired with the CSV text, or null if no
   *         result is available yet
   * @throws DataNotFoundException if no grid exists with the given name
   */
  public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
    WebViewGrid grid = getGridByName(gridName);
    if (grid == null) {
      throw new DataNotFoundException("Unknown grid '" + gridName + "'");
    }
    ViewComputationResultModel latestResult = _viewClient.getLatestResult();
    if (latestResult == null) {
      return null;
    }
    String csv = grid.dumpContentsToCsv(latestResult);
    return Pair.of(latestResult.getValuationTime(), csv);
  }

  @Override
  public Map<String, Object> getGridStructure() {
    synchronized (_lock) {
      return _gridStructures;
    }
  }

  /** Also arms the change notification so the listener fires on the next new result. */
  @Override
  public Map<String, Object> getLatestResults() {
    synchronized (_lock) {
      _sendAnalyticsUpdates = true;
      return _latestResults;
    }
  }

  @Override
  public void setRunning(boolean run) {
    throw new UnsupportedOperationException("setRunning not implemented");
  }

  @Override
  public void setConversionMode(ConversionMode mode) {
    throw new UnsupportedOperationException("setConversionMode not implemented");
  }
}
// ===== Variant marker: "MergeMethods" (merge-dataset delimiter; not valid Java, kept as a comment) =====
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String, Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId, final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String, Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
public WebView(ViewClient viewClient, String viewDefinitionName, ResultConverterCache resultConverterCache, ViewportDefinition viewportDefinition, AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
public UniqueId getViewDefinitionId() {
synchronized (_lock) {
return _viewDefinitionName;
}
}
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId) && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) && ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */
Viewport configureViewport(ViewportDefinition viewportDefinition, AnalyticsListener listener, String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
private void configureGridViewports() {
if (!_initialized) {
return;
}
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch(target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
/**
 * Dumps the current contents of the named grid as CSV.
 *
 * @param gridName  name of a top-level grid or one of its dependency-graph grids
 * @return pair of (valuation time of the latest result, CSV text), or null if
 *         the view client has not yet produced a result
 * @throws DataNotFoundException if no grid with the given name exists
 */
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
/**
 * Returns the JSON-ready grid structures keyed by grid name.
 * Guarded by {@code _lock}; note the internal map itself is returned, not a copy.
 */
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
/**
 * Returns the most recently computed per-grid results. As a side effect this
 * re-arms {@code _sendAnalyticsUpdates} so the next completed cycle notifies the
 * listener. The internal map is returned directly, not a copy.
 */
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
// The caller has consumed this result set; notify it when the next arrives.
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
/**
 * Not supported by this implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
/**
 * Not supported by this implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String, Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
/**
 * Creates a web view bound to a CometD-style client pair and attaches it to the
 * view process identified by {@code viewDefinitionId}.
 *
 * NOTE(review): this constructor assigns fields (_local, _remote, _client,
 * _executionOptions, _executorService, _gridsByName) and the cycleCompleted
 * handler uses _updateLock/_awaitingNextUpdate/sendImmediateUpdate — none of
 * which are declared in this revision of the class. Looks like a partial merge;
 * confirm the missing declarations before compiling.
 */
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId, final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
// Only push an update if a client is actually waiting for one.
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String, Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
/**
 * Creates a push-based web view: registers a result listener that rebuilds the
 * grids on each compilation and refreshes results on each completed cycle, then
 * attaches the client to the named view process.
 *
 * NOTE(review): assigns _viewDefinitionName, which is not declared in the
 * visible field list of this class revision — confirm it exists.
 */
public WebView(ViewClient viewClient, String viewDefinitionName, ResultConverterCache resultConverterCache, ViewportDefinition viewportDefinition, AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
/**
 * Builds the portfolio and primitives grids from a newly compiled view
 * definition, publishes their initial JSON structures, notifies the listener,
 * and (re)applies the current viewport. A grid whose structure is empty is
 * discarded (the corresponding field is left null).
 *
 * @param compiledViewDefinition  the compiled view to build grids from
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
// Fix: the portfolio structure was previously put twice under the same key.
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
/** Pauses the flow of results from the view client. */
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
/** Resumes the flow of results from the view client. */
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
/** Shuts down the view client, detaching it from its view process. */
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
/** @return the unique identifier of the base (unaggregated) view definition */
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
/** @return the aggregator name; compared null-safely in {@code matches} */
public String getAggregatorName() {
return _aggregatorName;
}
/**
 * @return the unique identifier of the view definition this view is attached to
 */
public UniqueId getViewDefinitionId() {
// Fix: previously returned the String _viewDefinitionName from a method
// declared to return UniqueId (type error); the id field is the correct value.
return _viewDefinitionId;
}
/**
 * Returns the view definition name (read under {@code _lock}). Added because the
 * result listener's cycleCompleted handler calls getViewDefinitionName(), which
 * this revision of the class did not define.
 */
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
}
}
/**
 * Whether this view serves the same (base definition, aggregator, execution
 * options) combination; aggregator and options are compared null-safely.
 *
 * NOTE(review): getExecutionOptions() is not defined in this revision of the
 * class — confirm it exists before compiling.
 */
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId) && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
/**
 * Whether this view serves the same named view definition with the same
 * execution options (read under {@code _lock}).
 */
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) && ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
/**
 * Looks up a grid by name: first the primitives grid and its dependency-graph
 * grids, then the portfolio grid and its dependency-graph grids.
 *
 * @param name  the grid name to find
 * @return the matching grid, or null if no grid has that name
 */
private WebViewGrid getGridByName(String name) {
RequirementBasedWebViewGrid primitives = _primitivesGrid;
if (primitives != null) {
if (primitives.getName().equals(name)) {
return primitives;
}
WebViewGrid depGraph = primitives.getDepGraphGrid(name);
if (depGraph != null) {
return depGraph;
}
}
RequirementBasedWebViewGrid portfolio = _portfolioGrid;
if (portfolio == null) {
return null;
}
return portfolio.getName().equals(name) ? portfolio : portfolio.getDepGraphGrid(name);
}
/**
 * Pushes a full computation result into the grids: each grid processes the
 * result for every target in its structure, and the portfolio grid then
 * refreshes its dependency-graph grids for the same timestamp.
 *
 * NOTE(review): relies on getPrimitivesGrid()/getPortfolioGrid() accessors not
 * visible in this revision — confirm they exist before compiling.
 */
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */
/**
 * Replaces the current viewport definition and listener, reapplies the viewport
 * to the grids, and returns this view as the configured viewport.
 *
 * @param viewportDefinition  the new viewport definition
 * @param listener  receives grid-structure and data-change notifications
 * @param viewportKey  currently unused — TODO confirm whether it should be stored
 */
Viewport configureViewport(ViewportDefinition viewportDefinition, AnalyticsListener listener, String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
/**
 * Applies the current viewport definition to both grids and refreshes the
 * results. No-op until initGrids has run. Either grid may be null (initGrids
 * leaves a grid null when its structure is empty), so each is guarded to avoid
 * the NullPointerException the previous version could throw.
 */
private void configureGridViewports() {
if (!_initialized) {
return;
}
if (_portfolioGrid != null) {
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
}
if (_primitivesGrid != null) {
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
}
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
/**
 * Converts the latest full result into per-row maps for the portfolio and
 * primitives grids, replaces {@code _latestResults} wholesale, and notifies the
 * listener if an update has been requested since the last delivery.
 */
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
// No default case: target types other than the three below are ignored.
switch(target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
// Intentional fall-through: nodes and positions are both portfolio rows.
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
// Replace the published results atomically under the lock.
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
// Only notify once per client read; getLatestResults() re-arms this flag.
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
// MERGE ARTIFACT: "KeepBothMethods" — this file contains multiple concatenated
// copies of WebView.java (duplicate package statements and class bodies);
// resolve the merge down to a single copy before compiling.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String, Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId, final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String, Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
public WebView(ViewClient viewClient, String viewDefinitionName, ResultConverterCache resultConverterCache, ViewportDefinition viewportDefinition, AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
/**
 * Builds the portfolio and primitives grids from a newly compiled view
 * definition, publishes their initial JSON structures, notifies the listener,
 * and (re)applies the current viewport. A grid whose structure is empty is
 * discarded (the corresponding field is left null).
 *
 * @param compiledViewDefinition  the compiled view to build grids from
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
// Fix: the portfolio structure was previously put twice under the same key.
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
/** @return the unique identifier of the view definition this view is attached to */
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
/**
 * @return the view definition name, read under {@code _lock}
 */
// NOTE(review): _viewDefinitionName is not declared in the visible field list
// of this class revision — confirm it exists before compiling.
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
}
}
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId) && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) && ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */
Viewport configureViewport(ViewportDefinition viewportDefinition, AnalyticsListener listener, String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
/**
 * Applies the current viewport definition to both grids and refreshes the
 * results. No-op until initGrids has run. Either grid may be null (initGrids
 * leaves a grid null when its structure is empty), so each is guarded to avoid
 * the NullPointerException the previous version could throw.
 */
private void configureGridViewports() {
if (!_initialized) {
return;
}
if (_portfolioGrid != null) {
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
}
if (_primitivesGrid != null) {
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
}
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch(target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String, Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId, final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions, final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String, Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
public WebView(ViewClient viewClient, String viewDefinitionName, ResultConverterCache resultConverterCache, ViewportDefinition viewportDefinition, AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
/**
 * Builds the portfolio and primitives grids from a newly compiled view
 * definition, publishes their initial JSON structures, notifies the listener,
 * and (re)applies the current viewport. A grid whose structure is empty is
 * discarded (the corresponding field is left null).
 *
 * @param compiledViewDefinition  the compiled view to build grids from
 */
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
// Fix: the portfolio structure was previously put twice under the same key.
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
/**
 * Returns the view definition name. Read under {@code _lock} because the
 * field is not final.
 * NOTE(review): _viewDefinitionName is not declared in the visible portion
 * of this class — confirm it exists alongside _viewDefinitionId.
 */
public String getViewDefinitionName() {
    synchronized (_lock) {
        return _viewDefinitionName;
    }
}
/**
 * Returns true when this view was built from the same base view definition,
 * aggregator and execution options (null-safe comparisons via ObjectUtils).
 * NOTE(review): getExecutionOptions() is not defined in the visible portion
 * of this class — confirm it exists and what synchronization it relies on.
 */
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
    return getBaseViewDefinitionId().equals(baseViewDefinitionId) && ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
/**
 * Returns true when this view serves the given definition name with the
 * viewport's current execution options. Locked because both fields are mutable.
 */
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
    synchronized (_lock) {
        return _viewDefinitionName.equals(viewDefinitionName) && ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
    }
}
/**
 * Looks up a top-level grid, or one of its dependency-graph sub-grids, by
 * name. Returns null when no grid with that name is known.
 */
private WebViewGrid getGridByName(String name) {
    WebViewGrid found = null;
    if (_primitivesGrid != null) {
        found = _primitivesGrid.getName().equals(name)
            ? _primitivesGrid
            : _primitivesGrid.getDepGraphGrid(name);
    }
    if (found == null && _portfolioGrid != null) {
        found = _portfolioGrid.getName().equals(name)
            ? _portfolioGrid
            : _portfolioGrid.getDepGraphGrid(name);
    }
    return found;
}
/**
 * Pushes every per-target result of a completed computation cycle into the
 * primitives and portfolio grids, tagged with the cycle's calculation time
 * in epoch millis.
 * NOTE(review): getPrimitivesGrid()/getPortfolioGrid() are not defined in
 * the visible portion of this class — presumably accessors for
 * _primitivesGrid/_portfolioGrid; confirm.
 */
private void processResult(ViewComputationResultModel resultModel) {
    long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
    if (getPrimitivesGrid() != null) {
        for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
            getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
        }
    }
    if (getPortfolioGrid() != null) {
        for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
            getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
        }
        // Only the portfolio grid processes dependency graphs here.
        getPortfolioGrid().processDepGraphs(resultTimestamp);
    }
}
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */
/**
 * Replaces the viewport definition and listener, reconfigures the grids and
 * (via configureGridViewports -> updateResults) pushes any available data.
 * Returns this view, which itself implements Viewport.
 * NOTE(review): viewportKey is unused in this method — confirm whether it
 * should be stored or can be dropped from the signature.
 */
Viewport configureViewport(ViewportDefinition viewportDefinition, AnalyticsListener listener, String viewportKey) {
    synchronized (_lock) {
        _viewportDefinition = viewportDefinition;
        _listener = listener;
        configureGridViewports();
        return this;
    }
}
/**
 * Applies the current viewport definition (visible rows and dependency-graph
 * cells) to the grids, then refreshes the cached results. A no-op until
 * initGrids() has run.
 */
private void configureGridViewports() {
    if (!_initialized) {
        return;
    }
    // Fixed: either grid may be null when its structure was empty (initGrids
    // stores null in that case), so guard each dereference against NPE.
    if (_portfolioGrid != null) {
        _portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
        _portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
    }
    if (_primitivesGrid != null) {
        _primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
        _primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
    }
    // TODO _client.setViewCycleAccessSupported()?
    updateResults();
}
/**
 * Rebuilds the _latestResults cache ("portfolio" and "primitive" buckets,
 * each keyed by row id) from the view client's most recent computation
 * cycle, and notifies the listener only if a consumer has asked for updates
 * since the last delivery (see getLatestResults, which arms
 * _sendAnalyticsUpdates). A no-op when no result is available yet.
 */
private void updateResults() {
    synchronized (_lock) {
        if (!_viewClient.isResultAvailable()) {
            return;
        }
        ViewComputationResultModel resultModel = _viewClient.getLatestResult();
        // Cycle calculation time in epoch millis, attached to every cell update.
        long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
        HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
        HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
        for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
            // Targets of any other type are deliberately ignored (no default case).
            switch(target.getType()) {
                case PRIMITIVE:
                    if (_primitivesGrid != null) {
                        Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
                        if (targetResult != null) {
                            // The grid reports which viewport row the target maps to.
                            Integer rowId = (Integer) targetResult.get("rowId");
                            primitiveResult.put(rowId, targetResult);
                        }
                    }
                    break;
                case PORTFOLIO_NODE:
                case POSITION:
                    if (_portfolioGrid != null) {
                        Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
                        if (targetResult != null) {
                            Integer rowId = (Integer) targetResult.get("rowId");
                            portfolioResult.put(rowId, targetResult);
                        }
                    }
            }
        }
        _latestResults.clear();
        _latestResults.put("portfolio", portfolioResult);
        _latestResults.put("primitive", primitiveResult);
        if (_sendAnalyticsUpdates) {
            // Reset the flag so at most one dataChanged() fires per poll cycle.
            _sendAnalyticsUpdates = false;
            _listener.dataChanged();
        }
    }
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
/**
 * Renders the named grid's contents against the latest computation result
 * as CSV, paired with that result's valuation time. Returns null when no
 * result is available yet.
 *
 * @throws DataNotFoundException when no grid with the given name exists
 */
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
    final WebViewGrid grid = getGridByName(gridName);
    if (grid == null) {
        throw new DataNotFoundException("Unknown grid '" + gridName + "'");
    }
    final ViewComputationResultModel latestResult = _viewClient.getLatestResult();
    if (latestResult == null) {
        return null;
    }
    return Pair.of(latestResult.getValuationTime(), grid.dumpContentsToCsv(latestResult));
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String,Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String,Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
public WebView(ViewClient viewClient,
String viewDefinitionName,
ResultConverterCache resultConverterCache,
ViewportDefinition viewportDefinition,
AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
<<<<<<< MINE
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
}
}
=======
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
>>>>>>> YOURS
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId)
&& ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) &&
ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
<<<<<<< MINE
=======
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
>>>>>>> YOURS
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
AnalyticsListener listener,
String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
private void configureGridViewports() {
if (!_initialized) {
return;
}
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch (target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
import com.opengamma.DataNotFoundException;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
*
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewClient _viewClient;
private final ResultConverterCache _resultConverterCache;
private final Map<String,Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
_resultConverterCache = resultConverterCache;
_gridsByName = new HashMap<String, WebViewGrid>();
_client.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
}
});
client.attachToViewProcess(viewDefinitionId, executionOptions);
}
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String,Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
public WebView(ViewClient viewClient,
String viewDefinitionName,
ResultConverterCache resultConverterCache,
ViewportDefinition viewportDefinition,
AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
}
});
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
}
//-------------------------------------------------------------------------
// Initialisation
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/*package*/
//-------------------------------------------------------------------------
// Update control
void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
<<<<<<< MINE
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
}
}
=======
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
>>>>>>> YOURS
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId)
&& ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) &&
ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
<<<<<<< MINE
=======
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
>>>>>>> YOURS
/**
* Tells the remote client that updates are starting.
*/
/**
* Tells the remote client that updates have finished.
*/
//-------------------------------------------------------------------------
/**
*
*/
/* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
AnalyticsListener listener,
String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
private void configureGridViewports() {
if (!_initialized) {
return;
}
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch (target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
<<<<<<< MINE
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
=======
import com.opengamma.DataNotFoundException;
>>>>>>> YOURS
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* TODO CONCURRENCY - I've scrapped all the locking, needs to be reviewed and replaced
* TODO return new viewport instance rather than implementing it?
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
<<<<<<< MINE
private static final String STARTED_DISPLAY_NAME = "Live";
private static final String PAUSED_DISPLAY_NAME = "Paused";
private final Client _local;
private final Client _remote;
private final ViewClient _client;
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewExecutionOptions _executionOptions;
private final ExecutorService _executorService;
=======
private final ViewClient _viewClient;
private final String _viewDefinitionName;
>>>>>>> YOURS
private final ResultConverterCache _resultConverterCache;
private final Map<String,Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
// TODO get the state from the grids
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String,Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
<<<<<<< MINE
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
=======
public WebView(ViewClient viewClient,
String viewDefinitionName,
ResultConverterCache resultConverterCache,
ViewportDefinition viewportDefinition,
AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
>>>>>>> YOURS
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
<<<<<<< MINE
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
=======
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
>>>>>>> YOURS
}
});
<<<<<<< MINE
client.attachToViewProcess(viewDefinitionId, executionOptions);
=======
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
>>>>>>> YOURS
}
// TODO make sure an update event is published when the view defs compile?
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/* package */ void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
/* package */ void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
/* package */ void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
<<<<<<< MINE
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
public ViewExecutionOptions getExecutionOptions() {
return _executionOptions;
}
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId)
&& ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
public WebViewGrid getGridByName(String name) {
return _gridsByName.get(name);
}
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
Map<String, Object> dataMap = (Map<String, Object>) message.getData();
boolean immediateResponse = (Boolean) dataMap.get("immediateResponse");
if (getPortfolioGrid() != null) {
Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
=======
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
>>>>>>> YOURS
}
}
/* package */ boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) &&
ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
/**
*
*/
/* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
AnalyticsListener listener,
String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
private void configureGridViewports() {
if (!_initialized) {
return;
}
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch (target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
<<<<<<< MINE
private void runUpdateThread() {
getExecutorService().submit(new Runnable() {
@Override
public void run() {
do {
ViewComputationResultModel update = getViewClient().getLatestResult();
getRemote().startBatch();
long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
sendStartMessage(valuationTimeMillis, calculationDurationMillis);
try {
processResult(update);
} catch (Exception e) {
s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
}
sendEndMessage();
getRemote().endBatch();
} while (continueUpdateThread());
}
});
}
private boolean continueUpdateThread() {
_updateLock.lock();
try {
if (_continueUpdateThread) {
_continueUpdateThread = false;
return true;
} else {
_updateThreadRunning = false;
return false;
}
} finally {
_updateLock.unlock();
}
}
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
/**
* Tells the remote client that updates are starting.
*/
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
Map<String, Object> startMessage = new HashMap<String, Object>();
startMessage.put("valuationTime", valuationTimeEpochMillis);
startMessage.put("calculationDuration", calculationDurationMillis);
getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}
/**
* Tells the remote client that updates have finished.
*/
private void sendEndMessage() {
getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}
private void sendViewStatus(boolean isRunning, String status) {
Map<String, Object> output = new HashMap<String, Object>();
output.put("isRunning", isRunning);
output.put("status", status);
getRemote().deliver(getLocal(), "/status", output, null);
}
//-------------------------------------------------------------------------
public Map<String, Object> getInitialJsonGridStructures() {
Map<String, Object> gridStructures = new HashMap<String, Object>();
if (getPrimitivesGrid() != null) {
gridStructures.put("primitives", getPrimitivesGrid().getInitialJsonGridStructure());
}
if (getPortfolioGrid() != null) {
gridStructures.put("portfolio", getPortfolioGrid().getInitialJsonGridStructure());
}
return gridStructures;
}
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
if (!getPortfolioGrid().getName().equals(parentGridName)) {
throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
}
=======
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
>>>>>>> YOURS
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
// TODO refactor this?
// TODO CONCURRENCY
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server;
<<<<<<< MINE
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import javax.time.Instant;
import org.apache.commons.lang.ObjectUtils;
import org.cometd.Client;
import org.cometd.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.OpenGammaRuntimeException;
=======
import com.opengamma.DataNotFoundException;
>>>>>>> YOURS
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.client.ViewClient;
import com.opengamma.engine.view.compilation.CompiledViewDefinition;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.listener.AbstractViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.livedata.UserPrincipal;
import com.opengamma.util.tuple.Pair;
import com.opengamma.web.server.conversion.ConversionMode;
import com.opengamma.web.server.conversion.ResultConverterCache;
import com.opengamma.web.server.push.AnalyticsListener;
import com.opengamma.web.server.push.Viewport;
import com.opengamma.web.server.push.ViewportDefinition;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* TODO CONCURRENCY - I've scrapped all the locking, needs to be reviewed and replaced
* TODO return new viewport instance rather than implementing it?
*/
public class WebView implements Viewport {
private static final Logger s_logger = LoggerFactory.getLogger(WebView.class);
<<<<<<< MINE
private static final String STARTED_DISPLAY_NAME = "Live";
private static final String PAUSED_DISPLAY_NAME = "Paused";
private final Client _local;
private final Client _remote;
private final ViewClient _client;
private final UniqueId _baseViewDefinitionId;
private final String _aggregatorName;
private final UniqueId _viewDefinitionId;
private final ViewExecutionOptions _executionOptions;
private final ExecutorService _executorService;
=======
private final ViewClient _viewClient;
private final String _viewDefinitionName;
>>>>>>> YOURS
private final ResultConverterCache _resultConverterCache;
private final Map<String,Object> _latestResults = new HashMap<String, Object>();
private final Object _lock = new Object();
private RequirementBasedWebViewGrid _portfolioGrid;
private RequirementBasedWebViewGrid _primitivesGrid;
// TODO get the state from the grids
private final AtomicInteger _activeDepGraphCount = new AtomicInteger();
private ViewportDefinition _viewportDefinition;
private AnalyticsListener _listener;
private Map<String,Object> _gridStructures;
private boolean _initialized = false;
private boolean _sendAnalyticsUpdates = false;
<<<<<<< MINE
public WebView(final Client local, final Client remote, final ViewClient client, final UniqueId baseViewDefinitionId,
final String aggregatorName, final UniqueId viewDefinitionId, final ViewExecutionOptions executionOptions,
final UserPrincipal user, final ExecutorService executorService, final ResultConverterCache resultConverterCache) {
ArgumentChecker.notNull(executionOptions, "executionOptions");
_local = local;
_remote = remote;
_client = client;
_baseViewDefinitionId = baseViewDefinitionId;
_aggregatorName = aggregatorName;
_viewDefinitionId = viewDefinitionId;
_executionOptions = executionOptions;
_executorService = executorService;
=======
public WebView(ViewClient viewClient,
String viewDefinitionName,
ResultConverterCache resultConverterCache,
ViewportDefinition viewportDefinition,
AnalyticsListener listener) {
_viewClient = viewClient;
_viewDefinitionName = viewDefinitionName;
>>>>>>> YOURS
_resultConverterCache = resultConverterCache;
_viewportDefinition = viewportDefinition;
_listener = listener;
_viewClient.setResultListener(new AbstractViewResultListener() {
@Override
public UserPrincipal getUser() {
// Authentication needed
return UserPrincipal.getLocalUser();
}
@Override
public void viewDefinitionCompiled(CompiledViewDefinition compiledViewDefinition, boolean hasMarketDataPermissions) {
s_logger.info("View definition compiled: {}", compiledViewDefinition.getViewDefinition().getName());
initGrids(compiledViewDefinition);
}
@Override
public void cycleCompleted(ViewComputationResultModel fullResult, ViewDeltaResultModel deltaResult) {
<<<<<<< MINE
s_logger.info("New result arrived for view '{}'", getViewDefinitionId());
_updateLock.lock();
try {
if (_awaitingNextUpdate) {
_awaitingNextUpdate = false;
sendImmediateUpdate();
}
} finally {
_updateLock.unlock();
}
=======
s_logger.info("New result arrived for view '{}'", getViewDefinitionName());
updateResults();
>>>>>>> YOURS
}
});
<<<<<<< MINE
client.attachToViewProcess(viewDefinitionId, executionOptions);
=======
_viewClient.attachToViewProcess(viewDefinitionName, viewportDefinition.getExecutionOptions());
>>>>>>> YOURS
}
// TODO make sure an update event is published when the view defs compile?
private void initGrids(CompiledViewDefinition compiledViewDefinition) {
synchronized (_lock) {
WebViewPortfolioGrid portfolioGrid = new WebViewPortfolioGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
_gridStructures = new HashMap<String, Object>();
if (portfolioGrid.getGridStructure().isEmpty()) {
_portfolioGrid = null;
} else {
_portfolioGrid = portfolioGrid;
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
_gridStructures.put("portfolio", _portfolioGrid.getInitialJsonGridStructure());
}
RequirementBasedWebViewGrid primitivesGrid = new WebViewPrimitivesGrid(_viewClient, compiledViewDefinition, _resultConverterCache);
if (primitivesGrid.getGridStructure().isEmpty()) {
_primitivesGrid = null;
} else {
_primitivesGrid = primitivesGrid;
_gridStructures.put("primitives", _primitivesGrid.getInitialJsonGridStructure());
}
_initialized = true;
_listener.gridStructureChanged();
configureGridViewports();
}
}
/* package */ void pause() {
synchronized (_lock) {
_viewClient.pause();
}
}
/* package */ void resume() {
synchronized (_lock) {
_viewClient.resume();
}
}
/* package */ void shutdown() {
// Removes all listeners
synchronized (_lock) {
_viewClient.shutdown();
}
}
<<<<<<< MINE
public UniqueId getBaseViewDefinitionId() {
return _baseViewDefinitionId;
}
public String getAggregatorName() {
return _aggregatorName;
}
public UniqueId getViewDefinitionId() {
return _viewDefinitionId;
}
public ViewExecutionOptions getExecutionOptions() {
return _executionOptions;
}
public boolean matches(UniqueId baseViewDefinitionId, String aggregatorName, ViewExecutionOptions executionOptions) {
return getBaseViewDefinitionId().equals(baseViewDefinitionId)
&& ObjectUtils.equals(getAggregatorName(), aggregatorName) && ObjectUtils.equals(getExecutionOptions(), executionOptions);
}
public WebViewGrid getGridByName(String name) {
return _gridsByName.get(name);
}
@SuppressWarnings("unchecked")
public void triggerUpdate(Message message) {
Map<String, Object> dataMap = (Map<String, Object>) message.getData();
boolean immediateResponse = (Boolean) dataMap.get("immediateResponse");
if (getPortfolioGrid() != null) {
Map<String, Object> portfolioViewport = (Map<String, Object>) dataMap.get("portfolioViewport");
getPortfolioGrid().setViewport(processViewportData(portfolioViewport));
=======
public String getViewDefinitionName() {
synchronized (_lock) {
return _viewDefinitionName;
>>>>>>> YOURS
}
}
/* package */ boolean matches(String viewDefinitionName, ViewExecutionOptions executionOptions) {
synchronized (_lock) {
return _viewDefinitionName.equals(viewDefinitionName) &&
ObjectUtils.equals(_viewportDefinition.getExecutionOptions(), executionOptions);
}
}
private WebViewGrid getGridByName(String name) {
if (_primitivesGrid != null) {
if (_primitivesGrid.getName().equals(name)) {
return _primitivesGrid;
}
WebViewGrid depGraphGrid = _primitivesGrid.getDepGraphGrid(name);
if (depGraphGrid != null) {
return depGraphGrid;
}
}
if (_portfolioGrid != null) {
if (_portfolioGrid.getName().equals(name)) {
return _portfolioGrid;
} else {
return _portfolioGrid.getDepGraphGrid(name);
}
}
return null;
}
/**
*
*/
/* package */ Viewport configureViewport(ViewportDefinition viewportDefinition,
AnalyticsListener listener,
String viewportKey) {
synchronized (_lock) {
_viewportDefinition = viewportDefinition;
_listener = listener;
configureGridViewports();
return this;
}
}
private void configureGridViewports() {
if (!_initialized) {
return;
}
_portfolioGrid.setViewport(_viewportDefinition.getPortfolioRows());
_portfolioGrid.updateDepGraphCells(_viewportDefinition.getPortfolioDependencyGraphCells());
_primitivesGrid.setViewport(_viewportDefinition.getPrimitiveRows());
_primitivesGrid.updateDepGraphCells(_viewportDefinition.getPrimitiveDependencyGraphCells());
// TODO _client.setViewCycleAccessSupported()?
updateResults();
}
private void updateResults() {
synchronized (_lock) {
if (!_viewClient.isResultAvailable()) {
return;
}
ViewComputationResultModel resultModel = _viewClient.getLatestResult();
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
HashMap<Integer, Map<String, Object>> portfolioResult = new HashMap<Integer, Map<String, Object>>();
HashMap<Integer, Map<String, Object>> primitiveResult = new HashMap<Integer, Map<String, Object>>();
for (ComputationTargetSpecification target : resultModel.getAllTargets()) {
switch (target.getType()) {
case PRIMITIVE:
if (_primitivesGrid != null) {
Map<String, Object> targetResult = _primitivesGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
primitiveResult.put(rowId, targetResult);
}
}
break;
case PORTFOLIO_NODE:
case POSITION:
if (_portfolioGrid != null) {
Map<String, Object> targetResult = _portfolioGrid.getTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
if (targetResult != null) {
Integer rowId = (Integer) targetResult.get("rowId");
portfolioResult.put(rowId, targetResult);
}
}
}
}
_latestResults.clear();
_latestResults.put("portfolio", portfolioResult);
_latestResults.put("primitive", primitiveResult);
if (_sendAnalyticsUpdates) {
_sendAnalyticsUpdates = false;
_listener.dataChanged();
}
}
}
<<<<<<< MINE
private void runUpdateThread() {
getExecutorService().submit(new Runnable() {
@Override
public void run() {
do {
ViewComputationResultModel update = getViewClient().getLatestResult();
getRemote().startBatch();
long valuationTimeMillis = update.getValuationTime().toEpochMillisLong();
long calculationDurationMillis = update.getCalculationDuration().toMillisLong();
sendStartMessage(valuationTimeMillis, calculationDurationMillis);
try {
processResult(update);
} catch (Exception e) {
s_logger.error("Error processing result from view cycle " + update.getViewCycleId(), e);
}
sendEndMessage();
getRemote().endBatch();
} while (continueUpdateThread());
}
});
}
private boolean continueUpdateThread() {
_updateLock.lock();
try {
if (_continueUpdateThread) {
_continueUpdateThread = false;
return true;
} else {
_updateThreadRunning = false;
return false;
}
} finally {
_updateLock.unlock();
}
}
private void processResult(ViewComputationResultModel resultModel) {
long resultTimestamp = resultModel.getCalculationTime().toEpochMillisLong();
if (getPrimitivesGrid() != null) {
for (ComputationTargetSpecification target : getPrimitivesGrid().getGridStructure().getTargets().keySet()) {
getPrimitivesGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
}
if (getPortfolioGrid() != null) {
for (ComputationTargetSpecification target : getPortfolioGrid().getGridStructure().getTargets().keySet()) {
getPortfolioGrid().processTargetResult(target, resultModel.getTargetResult(target), resultTimestamp);
}
getPortfolioGrid().processDepGraphs(resultTimestamp);
}
}
/**
* Tells the remote client that updates are starting.
*/
private void sendStartMessage(long valuationTimeEpochMillis, long calculationDurationMillis) {
Map<String, Object> startMessage = new HashMap<String, Object>();
startMessage.put("valuationTime", valuationTimeEpochMillis);
startMessage.put("calculationDuration", calculationDurationMillis);
getRemote().deliver(getLocal(), "/updates/control/start", startMessage, null);
}
/**
* Tells the remote client that updates have finished.
*/
private void sendEndMessage() {
getRemote().deliver(getLocal(), "/updates/control/end", new HashMap<String, Object>(), null);
}
private void sendViewStatus(boolean isRunning, String status) {
Map<String, Object> output = new HashMap<String, Object>();
output.put("isRunning", isRunning);
output.put("status", status);
getRemote().deliver(getLocal(), "/status", output, null);
}
//-------------------------------------------------------------------------
public Map<String, Object> getInitialJsonGridStructures() {
Map<String, Object> gridStructures = new HashMap<String, Object>();
if (getPrimitivesGrid() != null) {
gridStructures.put("primitives", getPrimitivesGrid().getInitialJsonGridStructure());
}
if (getPortfolioGrid() != null) {
gridStructures.put("portfolio", getPortfolioGrid().getInitialJsonGridStructure());
}
return gridStructures;
}
public void setIncludeDepGraph(String parentGridName, WebGridCell cell, boolean includeDepGraph) {
if (!getPortfolioGrid().getName().equals(parentGridName)) {
throw new OpenGammaRuntimeException("Invalid or unknown grid for dependency graph viewing: " + parentGridName);
}
=======
// TODO this logic need to go in configureViewport
private void setIncludeDepGraph(WebGridCell cell, boolean includeDepGraph) {
// TODO this is ugly, the dep graph count belongs in the portfolio grid
>>>>>>> YOURS
if (includeDepGraph) {
if (_activeDepGraphCount.getAndIncrement() == 0) {
_viewClient.setViewCycleAccessSupported(true);
}
} else {
if (_activeDepGraphCount.decrementAndGet() == 0) {
_viewClient.setViewCycleAccessSupported(false);
}
}
/*WebViewGrid grid = _portfolioGrid.setIncludeDepGraph(cell, includeDepGraph);
if (grid != null) {
if (includeDepGraph) {
_gridsByName.put(grid.getName(), grid);
} else {
_gridsByName.remove(grid.getName());
}
}*/
}
// TODO refactor this?
// TODO CONCURRENCY
public Pair<Instant, String> getGridContentsAsCsv(String gridName) {
WebViewGrid grid = getGridByName(gridName);
if (grid == null) {
throw new DataNotFoundException("Unknown grid '" + gridName + "'");
}
ViewComputationResultModel latestResult = _viewClient.getLatestResult();
if (latestResult == null) {
return null;
}
String csv = grid.dumpContentsToCsv(latestResult);
return Pair.of(latestResult.getValuationTime(), csv);
}
@Override
public Map<String, Object> getGridStructure() {
synchronized (_lock) {
return _gridStructures;
}
}
@Override
public Map<String, Object> getLatestResults() {
synchronized (_lock) {
_sendAnalyticsUpdates = true;
return _latestResults;
}
}
@Override
public void setRunning(boolean run) {
throw new UnsupportedOperationException("setRunning not implemented");
}
@Override
public void setConversionMode(ConversionMode mode) {
throw new UnsupportedOperationException("setConversionMode not implemented");
}
}
Diff Result
No diff
Case 69 - java_ogplatform.rev_a1a90_46fa2..InterpolatedDiscountCurve.java
Base
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
 * A discount curve backed by discrete (time-to-maturity, rate) nodes and an
 * {@link Interpolator1D} used to obtain values between nodes.
 * <p>
 * Both the node rates and the equivalent discount factors
 * (df = exp(-rate * t)) are stored; interpolation is performed on the
 * discount factors (see {@link #getDiscountFactor}).
 * <p>
 * Note: {@link #hashCode()} and {@link #equals(Object)} deliberately ignore
 * the discount-factor map, which is fully derived from the rate map.
 *
 * @author emcleod
 */
public class InterpolatedDiscountCurve extends DiscountCurve {
  // Field names used in the Fudge message encoding (toFudgeMsg / fromFudgeMsg).
  private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
  private static final String RATE_DATA_FIELD_NAME = "rateData";
  private static final String DF_DATA_FIELD_NAME = "dfData";
  private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
  // Unmodifiable: time to maturity (years) -> rate (decimal, e.g. 0.03).
  private final SortedMap<Double, Double> _rateData;
  // Unmodifiable: time to maturity (years) -> discount factor exp(-rate * t).
  private final SortedMap<Double, Double> _dfData;
  private final Interpolator1D _interpolator;

  /**
   * @param data
   *          A map containing pairs of times to maturity (in years) and
   *          interest rates expressed as decimals (e.g. 3% = 0.03)
   * @param interpolator
   *          An interpolator to get interest rates / discount factors for
   *          maturities that fall in between nodes. Must not be null.
   * @throws IllegalArgumentException
   *           Thrown if the data map is null, contains fewer than two points,
   *           contains a negative time to maturity, or if the interpolator is
   *           null.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
    if (data == null)
      throw new IllegalArgumentException("Data map was null");
    if (interpolator == null)
      throw new IllegalArgumentException("Interpolator was null");
    if (data.size() < 2)
      throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
    final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
    final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Cannot have negative time in a discount curve");
      sortedRates.put(entry.getKey(), entry.getValue());
      // Precompute the discount-factor equivalent of the node: df = exp(-r * t).
      sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
    }
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * Trusting constructor used by {@link #fromFudgeMsg}: the two maps are
   * assumed consistent (sortedDF derived from sortedRates) and no validation
   * is performed.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * @return The rate data sorted by maturity. Note that these are the node
   *         rates, not discount factors.
   */
  public SortedMap<Double, Double> getData() {
    return _rateData;
  }

  /**
   * @return The interpolator for this curve.
   */
  public Interpolator1D getInterpolator() {
    return _interpolator;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The interest rate for time to maturity <i>t</i>, recovered from
   *         the interpolated discount factor as -ln(df) / t.
   *         NOTE(review): t == 0 is not rejected and divides by zero here.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getInterestRate(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return -Math.log(getDiscountFactor(t)) / t;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The discount factor for time to maturity <i>t</i>, interpolated
   *         from the stored discount-factor nodes.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getDiscountFactor(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return _interpolator.interpolate(_dfData, t).getResult();
  }

  @Override
  public Set<Double> getMaturities() {
    return getData().keySet();
  }

  /**
   * @return A new curve with every node rate shifted by the same amount.
   * @throws IllegalArgumentException If the shift is null.
   */
  @Override
  public DiscountCurve withParallelShift(final Double shift) {
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> map = new HashMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
      map.put(entry.getKey(), entry.getValue() + shift);
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with the rate at maturity <i>t</i> shifted. If
   *         <i>t</i> is not an existing node, a new node is added at the
   *         interpolated rate plus the shift.
   * @throws IllegalArgumentException If t or shift is null, or t is negative.
   */
  @Override
  public DiscountCurve withSingleShift(final Double t, final Double shift) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("t was negative");
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    if (data.containsKey(t)) {
      map.put(t, data.get(t) + shift);
      return new InterpolatedDiscountCurve(map, getInterpolator());
    }
    // t is not a node: seed the new node from the interpolated rate.
    map.put(t, getInterestRate(t) + shift);
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with each (maturity -> shift) entry applied; missing
   *         maturities become new nodes seeded from the interpolated rate.
   * @throws IllegalArgumentException If the shift map is null, or contains a
   *           null shift or negative time.
   */
  @Override
  public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
    if (shifts == null)
      throw new IllegalArgumentException("Shift map was null");
    if (shifts.isEmpty()) {
      s_Log.info("Shift map was empty; returning identical curve");
      return new InterpolatedDiscountCurve(getData(), getInterpolator());
    }
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Null shift in shift map");
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Negative time in shift map");
      if (data.containsKey(entry.getKey())) {
        map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
      } else {
        map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
      }
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  // Hash on the rate data and interpolator only; _dfData is derived from
  // _rateData, so including it would be redundant (consistent with equals).
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
    result = prime * result + (_interpolator == null ? 0 : _interpolator.hashCode());
    return result;
  }

  // Equality on rate data and interpolator only; see hashCode.
  @Override
  public boolean equals(final Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
    if (_rateData == null) {
      if (other._rateData != null)
        return false;
    } else if (!_rateData.equals(other._rateData))
      return false;
    if (_interpolator == null) {
      if (other._interpolator != null)
        return false;
    } else if (!_interpolator.equals(other._interpolator))
      return false;
    return true;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("InterpolatedDiscountCurve[");
    sb.append("interpolator=").append(Interpolator1DFactory.getInterpolatorName(getInterpolator())).append(',');
    sb.append("rate_data={");
    for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("},df_data={");
    for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("}]");
    return sb.toString();
  }

  /**
   * Encodes this curve as a Fudge message: interpolator name plus the rate
   * and discount-factor node maps. Field 0 carries the class name.
   */
  public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
    final MutableFudgeFieldContainer message = context.newMessage();
    message.add(null, 0, getClass().getName());
    message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
    message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
    message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
    return message;
  }

  /**
   * Rebuilds a curve from a message produced by {@link #toFudgeMsg}. Uses the
   * trusting protected constructor, so the two maps are not re-validated.
   */
  public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
    final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
    final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
    final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
    return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
  }

  // REVIEW kirk 2010-03-31 -- These probably belong in a utility class
  // methinks.
  // TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
  // encoding
  /**
   * Encodes a Double->Double map as parallel repeated "key"/"value" fields.
   * Relies on the Fudge container preserving field insertion order so that
   * the i-th "key" pairs with the i-th "value" on decode.
   */
  public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", entry.getValue());
    }
    return message;
  }

  /**
   * Decodes the parallel "key"/"value" field lists written by
   * {@link #encodeDoubleDoubleMap} back into a sorted map.
   */
  public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    while (keyIter.hasNext()) {
      // Each "key" field must have a matching "value" field.
      assert valueIter.hasNext();
      final FudgeField keyField = keyIter.next();
      final FudgeField valueField = valueIter.next();
      result.put((Double) keyField.getValue(), (Double) valueField.getValue());
    }
    return result;
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
 * A discount curve backed by discrete (time-to-maturity, rate) nodes and an
 * {@link Interpolator1D} used to obtain values between nodes.
 * <p>
 * Both the node rates and the equivalent discount factors
 * (df = exp(-rate * t)) are stored; interpolation is performed on the
 * discount factors (see {@link #getDiscountFactor}).
 * <p>
 * Note: {@link #hashCode()} and {@link #equals(Object)} deliberately ignore
 * the discount-factor map, which is fully derived from the rate map.
 *
 * @author emcleod
 */
public class InterpolatedDiscountCurve extends DiscountCurve {
  // Field names used in the Fudge message encoding (toFudgeMsg / fromFudgeMsg).
  private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
  private static final String RATE_DATA_FIELD_NAME = "rateData";
  private static final String DF_DATA_FIELD_NAME = "dfData";
  private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
  // Unmodifiable: time to maturity (years) -> rate (decimal, e.g. 0.03).
  private final SortedMap<Double, Double> _rateData;
  // Unmodifiable: time to maturity (years) -> discount factor exp(-rate * t).
  private final SortedMap<Double, Double> _dfData;
  private final Interpolator1D _interpolator;

  /**
   * @param data
   *          A map containing pairs of times to maturity (in years) and
   *          interest rates expressed as decimals (e.g. 3% = 0.03)
   * @param interpolator
   *          An interpolator to get interest rates / discount factors for
   *          maturities that fall in between nodes. Must not be null.
   * @throws IllegalArgumentException
   *           Thrown if the data map is null, contains fewer than two points,
   *           contains a negative time to maturity, or if the interpolator is
   *           null.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
    if (data == null)
      throw new IllegalArgumentException("Data map was null");
    if (interpolator == null)
      throw new IllegalArgumentException("Interpolator was null");
    if (data.size() < 2)
      throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
    final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
    final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Cannot have negative time in a discount curve");
      sortedRates.put(entry.getKey(), entry.getValue());
      // Precompute the discount-factor equivalent of the node: df = exp(-r * t).
      sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
    }
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * Trusting constructor used by {@link #fromFudgeMsg}: the two maps are
   * assumed consistent (sortedDF derived from sortedRates) and no validation
   * is performed.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * @return The rate data sorted by maturity. Note that these are the node
   *         rates, not discount factors.
   */
  public SortedMap<Double, Double> getData() {
    return _rateData;
  }

  /**
   * @return The interpolator for this curve.
   */
  public Interpolator1D getInterpolator() {
    return _interpolator;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The interest rate for time to maturity <i>t</i>, recovered from
   *         the interpolated discount factor as -ln(df) / t.
   *         NOTE(review): t == 0 is not rejected and divides by zero here.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getInterestRate(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return -Math.log(getDiscountFactor(t)) / t;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The discount factor for time to maturity <i>t</i>, interpolated
   *         from the stored discount-factor nodes.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getDiscountFactor(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return _interpolator.interpolate(_dfData, t).getResult();
  }

  @Override
  public Set<Double> getMaturities() {
    return getData().keySet();
  }

  /**
   * @return A new curve with every node rate shifted by the same amount.
   * @throws IllegalArgumentException If the shift is null.
   */
  @Override
  public DiscountCurve withParallelShift(final Double shift) {
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> map = new HashMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
      map.put(entry.getKey(), entry.getValue() + shift);
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with the rate at maturity <i>t</i> shifted. If
   *         <i>t</i> is not an existing node, a new node is added at the
   *         interpolated rate plus the shift.
   * @throws IllegalArgumentException If t or shift is null, or t is negative.
   */
  @Override
  public DiscountCurve withSingleShift(final Double t, final Double shift) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("t was negative");
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    if (data.containsKey(t)) {
      map.put(t, data.get(t) + shift);
      return new InterpolatedDiscountCurve(map, getInterpolator());
    }
    // t is not a node: seed the new node from the interpolated rate.
    map.put(t, getInterestRate(t) + shift);
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with each (maturity -> shift) entry applied; missing
   *         maturities become new nodes seeded from the interpolated rate.
   * @throws IllegalArgumentException If the shift map is null, or contains a
   *           null shift or negative time.
   */
  @Override
  public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
    if (shifts == null)
      throw new IllegalArgumentException("Shift map was null");
    if (shifts.isEmpty()) {
      s_Log.info("Shift map was empty; returning identical curve");
      return new InterpolatedDiscountCurve(getData(), getInterpolator());
    }
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Null shift in shift map");
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Negative time in shift map");
      if (data.containsKey(entry.getKey())) {
        map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
      } else {
        map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
      }
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  // Hash on the rate data and interpolator only; _dfData is derived from
  // _rateData, so including it would be redundant (consistent with equals).
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
    result = prime * result + (_interpolator == null ? 0 : _interpolator.hashCode());
    return result;
  }

  // Equality on rate data and interpolator only; see hashCode.
  @Override
  public boolean equals(final Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
    if (_rateData == null) {
      if (other._rateData != null)
        return false;
    } else if (!_rateData.equals(other._rateData))
      return false;
    if (_interpolator == null) {
      if (other._interpolator != null)
        return false;
    } else if (!_interpolator.equals(other._interpolator))
      return false;
    return true;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("InterpolatedDiscountCurve[");
    sb.append("interpolator=").append(Interpolator1DFactory.getInterpolatorName(getInterpolator())).append(',');
    sb.append("rate_data={");
    for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("},df_data={");
    for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("}]");
    return sb.toString();
  }

  /**
   * Encodes this curve as a Fudge message: interpolator name plus the rate
   * and discount-factor node maps. Field 0 carries the class name.
   */
  public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
    final MutableFudgeFieldContainer message = context.newMessage();
    message.add(null, 0, getClass().getName());
    message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
    message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
    message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
    return message;
  }

  /**
   * Rebuilds a curve from a message produced by {@link #toFudgeMsg}. Uses the
   * trusting protected constructor, so the two maps are not re-validated.
   */
  public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
    final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
    final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
    final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
    return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
  }

  // REVIEW kirk 2010-03-31 -- These probably belong in a utility class
  // methinks.
  // TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
  // encoding
  /**
   * Encodes a Double->Double map as parallel repeated "key"/"value" fields.
   * Relies on the Fudge container preserving field insertion order so that
   * the i-th "key" pairs with the i-th "value" on decode.
   */
  public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", entry.getValue());
    }
    return message;
  }

  /**
   * Decodes the parallel "key"/"value" field lists written by
   * {@link #encodeDoubleDoubleMap} back into a sorted map.
   */
  public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    while (keyIter.hasNext()) {
      // Each "key" field must have a matching "value" field.
      assert valueIter.hasNext();
      final FudgeField keyField = keyIter.next();
      final FudgeField valueField = valueIter.next();
      result.put((Double) keyField.getValue(), (Double) valueField.getValue());
    }
    return result;
  }
}
Left
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
 * A discount curve backed by discrete (time-to-maturity, rate) nodes and an
 * {@link Interpolator1D} used to obtain values between nodes.
 * <p>
 * Both the node rates and the equivalent discount factors
 * (df = exp(-rate * t)) are stored; interpolation is performed on the
 * discount factors (see {@link #getDiscountFactor}).
 * <p>
 * Note: {@link #hashCode()} and {@link #equals(Object)} deliberately ignore
 * the discount-factor map, which is fully derived from the rate map.
 *
 * @author emcleod
 */
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
  // FIX: a Serializable class should declare an explicit serialVersionUID so
  // that the serialized form does not depend on compiler-computed defaults.
  private static final long serialVersionUID = 1L;
  // Field names used in the Fudge message encoding (toFudgeMsg / fromFudgeMsg).
  private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
  private static final String RATE_DATA_FIELD_NAME = "rateData";
  private static final String DF_DATA_FIELD_NAME = "dfData";
  private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
  // Unmodifiable: time to maturity (years) -> rate (decimal, e.g. 0.03).
  private final SortedMap<Double, Double> _rateData;
  // Unmodifiable: time to maturity (years) -> discount factor exp(-rate * t).
  private final SortedMap<Double, Double> _dfData;
  private final Interpolator1D _interpolator;

  /**
   * @param data
   *          A map containing pairs of times to maturity (in years) and
   *          interest rates expressed as decimals (e.g. 3% = 0.03)
   * @param interpolator
   *          An interpolator to get interest rates / discount factors for
   *          maturities that fall in between nodes. Must not be null.
   * @throws IllegalArgumentException
   *           Thrown if the data map is null, contains fewer than two points,
   *           contains a negative time to maturity, or if the interpolator is
   *           null.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
    if (data == null)
      throw new IllegalArgumentException("Data map was null");
    if (interpolator == null)
      throw new IllegalArgumentException("Interpolator was null");
    if (data.size() < 2)
      throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
    final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
    final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Cannot have negative time in a discount curve");
      sortedRates.put(entry.getKey(), entry.getValue());
      // Precompute the discount-factor equivalent of the node: df = exp(-r * t).
      sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
    }
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * Trusting constructor used by {@link #fromFudgeMsg}: the two maps are
   * assumed consistent (sortedDF derived from sortedRates) and no validation
   * is performed.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolator = interpolator;
  }

  /**
   * @return The rate data sorted by maturity. Note that these are the node
   *         rates, not discount factors.
   */
  public SortedMap<Double, Double> getData() {
    return _rateData;
  }

  /**
   * @return The interpolator for this curve.
   */
  public Interpolator1D getInterpolator() {
    return _interpolator;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The interest rate for time to maturity <i>t</i>, recovered from
   *         the interpolated discount factor as -ln(df) / t.
   *         NOTE(review): t == 0 is not rejected and divides by zero here.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getInterestRate(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return -Math.log(getDiscountFactor(t)) / t;
  }

  /**
   * @param t Time to maturity in years; must not be null or negative.
   * @return The discount factor for time to maturity <i>t</i>, interpolated
   *         from the stored discount-factor nodes.
   * @throws IllegalArgumentException
   *           If the time to maturity is null or negative.
   */
  @Override
  public double getDiscountFactor(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    return _interpolator.interpolate(_dfData, t).getResult();
  }

  @Override
  public Set<Double> getMaturities() {
    return getData().keySet();
  }

  /**
   * @return A new curve with every node rate shifted by the same amount.
   * @throws IllegalArgumentException If the shift is null.
   */
  @Override
  public DiscountCurve withParallelShift(final Double shift) {
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> map = new HashMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
      map.put(entry.getKey(), entry.getValue() + shift);
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with the rate at maturity <i>t</i> shifted. If
   *         <i>t</i> is not an existing node, a new node is added at the
   *         interpolated rate plus the shift.
   * @throws IllegalArgumentException If t or shift is null, or t is negative.
   */
  @Override
  public DiscountCurve withSingleShift(final Double t, final Double shift) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("t was negative");
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    if (data.containsKey(t)) {
      map.put(t, data.get(t) + shift);
      return new InterpolatedDiscountCurve(map, getInterpolator());
    }
    // t is not a node: seed the new node from the interpolated rate.
    map.put(t, getInterestRate(t) + shift);
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  /**
   * @return A new curve with each (maturity -> shift) entry applied; missing
   *         maturities become new nodes seeded from the interpolated rate.
   * @throws IllegalArgumentException If the shift map is null, or contains a
   *           null shift or negative time.
   */
  @Override
  public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
    if (shifts == null)
      throw new IllegalArgumentException("Shift map was null");
    if (shifts.isEmpty()) {
      s_Log.info("Shift map was empty; returning identical curve");
      return new InterpolatedDiscountCurve(getData(), getInterpolator());
    }
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Null shift in shift map");
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Negative time in shift map");
      if (data.containsKey(entry.getKey())) {
        map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
      } else {
        map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
      }
    }
    return new InterpolatedDiscountCurve(map, getInterpolator());
  }

  // Hash on the rate data and interpolator only; _dfData is derived from
  // _rateData, so including it would be redundant (consistent with equals).
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
    result = prime * result + (_interpolator == null ? 0 : _interpolator.hashCode());
    return result;
  }

  // Equality on rate data and interpolator only; see hashCode.
  @Override
  public boolean equals(final Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
    if (_rateData == null) {
      if (other._rateData != null)
        return false;
    } else if (!_rateData.equals(other._rateData))
      return false;
    if (_interpolator == null) {
      if (other._interpolator != null)
        return false;
    } else if (!_interpolator.equals(other._interpolator))
      return false;
    return true;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("InterpolatedDiscountCurve[");
    sb.append("interpolator=").append(Interpolator1DFactory.getInterpolatorName(getInterpolator())).append(',');
    sb.append("rate_data={");
    for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("},df_data={");
    for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("}]");
    return sb.toString();
  }

  /**
   * Encodes this curve as a Fudge message: interpolator name plus the rate
   * and discount-factor node maps (encoded via the serialization context).
   * Field 0 carries the class name.
   */
  public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
    final MutableFudgeFieldContainer message = context.newMessage();
    message.add(null, 0, getClass().getName());
    message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
    context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
    context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
    return message;
  }

  /**
   * Rebuilds a curve from a message produced by {@link #toFudgeMsg}. Uses the
   * trusting protected constructor, so the two maps are not re-validated.
   * The unchecked suppression covers the raw Map produced by the context.
   */
  @SuppressWarnings("unchecked")
  public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
    final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
    final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
    final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
    return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final Interpolator1D _interpolator;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolator == null)
throw new IllegalArgumentException("Interpolator was null");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolator = interpolator;
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolator = interpolator;
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
public Interpolator1D getInterpolator() {
return _interpolator;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return _interpolator.interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolator());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolator());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolator());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolator());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolator());
}
/**
 * Hash code consistent with {@link #equals}, combining the rate data and interpolator.
 */
@Override
public int hashCode() {
  final int prime = 31;
  // Equivalent to the classic prime-accumulator form seeded with 1.
  int hash = prime + (_rateData == null ? 0 : _rateData.hashCode());
  hash = prime * hash + (_interpolator == null ? 0 : _interpolator.hashCode());
  return hash;
}
/**
 * Two curves are equal when they are of the same class and have equal rate data and
 * interpolators. Consistent with {@link #hashCode}.
 */
@Override
public boolean equals(final Object obj) {
  if (this == obj)
    return true;
  if (obj == null || getClass() != obj.getClass())
    return false;
  final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
  if (_rateData == null ? other._rateData != null : !_rateData.equals(other._rateData))
    return false;
  return _interpolator == null ? other._interpolator == null : _interpolator.equals(other._interpolator);
}
/**
 * Debug representation listing the interpolator name and both the rate and
 * discount-factor node maps.
 */
@Override
public String toString() {
  final StringBuilder buf = new StringBuilder("InterpolatedDiscountCurve[");
  buf.append("interpolator=").append(Interpolator1DFactory.getInterpolatorName(getInterpolator())).append(',');
  buf.append("rate_data={");
  for (final Map.Entry<Double, Double> node : _rateData.entrySet()) {
    buf.append(node.getKey()).append('=').append(node.getValue()).append(',');
  }
  buf.append("},df_data={");
  for (final Map.Entry<Double, Double> node : _dfData.entrySet()) {
    buf.append(node.getKey()).append('=').append(node.getValue()).append(',');
  }
  return buf.append("}]").toString();
}
/**
 * Serializes this curve to a Fudge message: the class name at ordinal 0, the
 * interpolator by its factory name, and both data maps via the serialization context.
 *
 * @param context the Fudge serialization context; used to build the message and encode the maps
 * @return the populated message
 */
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
// Ordinal 0 carries the concrete class name for polymorphic deserialization.
message.add(null, 0, getClass().getName());
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
return message;
}
/**
 * Rebuilds a curve from a Fudge message written by {@code toFudgeMsg}: the interpolator
 * is looked up by name and both maps are re-sorted into TreeMaps.
 *
 * @param context the Fudge deserialization context used to decode the map fields
 * @param message the message to read
 * @return the reconstructed curve
 */
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
}
}
Right
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
 * A discount curve bootstrapped from discrete (maturity, continuously-compounded rate)
 * nodes. Discount factors are pre-computed as {@code exp(-rate * t)} and interpolation is
 * performed on the discount-factor data; different {@link Interpolator1D}s may be used on
 * different regions of the curve.
 *
 * @author emcleod
 */
public class InterpolatedDiscountCurve extends DiscountCurve {
  private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
  private static final String RATE_DATA_FIELD_NAME = "rateData";
  private static final String DF_DATA_FIELD_NAME = "dfData";
  private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
  /** Maturity in years -> continuously-compounded rate; sorted and unmodifiable. */
  private final SortedMap<Double, Double> _rateData;
  /** Maturity in years -> discount factor exp(-rate * t); sorted and unmodifiable. */
  private final SortedMap<Double, Double> _dfData;
  /** Maximum valid time in years -> interpolator for that region; sorted and unmodifiable. */
  private final SortedMap<Double, Interpolator1D> _interpolators;

  /**
   * @param data
   *          A map containing pairs of maturities in years and interest rates in percent
   *          (e.g. 3% = 0.03)
   * @param interpolator
   *          An interpolator to get interest rates / discount factors for maturities that
   *          fall in between nodes. This cannot be null.
   * @throws IllegalArgumentException
   *           Thrown if the data map is null or has fewer than two points, or if it
   *           contains a negative time to maturity.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
    // A single interpolator is valid for all times, i.e. up to +infinity.
    this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
  }

  /**
   * @param data
   *          A map containing pairs of maturities in years and interest rates in percent
   *          (e.g. 3% = 0.03)
   * @param interpolators
   *          A map of times and interpolators. This allows different interpolators to be
   *          used for different regions of the curve. The time value is the maximum time
   *          in years for which an interpolator is valid.
   * @throws IllegalArgumentException
   *           Thrown if either map is null, the interpolator map is empty, the data map
   *           has fewer than two points, or the data contain a negative time to maturity.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
    if (data == null)
      throw new IllegalArgumentException("Data map was null");
    if (interpolators == null)
      throw new IllegalArgumentException("Interpolator was null");
    if (interpolators.size() == 0)
      throw new IllegalArgumentException("Interpolator map did not contain values");
    if (data.size() < 2)
      throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
    for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
    }
    final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
    final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Cannot have negative time in a discount curve");
      sortedRates.put(entry.getKey(), entry.getValue());
      // Continuous compounding: df(t) = exp(-r * t).
      sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
    }
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
  }

  /**
   * Deserialization constructor: trusts pre-sorted rate and discount-factor maps and wraps
   * them unmodifiably. The single interpolator is registered as valid for all times.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
    sorted.put(Double.POSITIVE_INFINITY, interpolator);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
  }

  /**
   * Deserialization constructor: trusts pre-sorted data and interpolator maps and wraps
   * them unmodifiably.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
  }

  /**
   * @return The rate data sorted by maturity. Note that these are the
   *         continuously-compounded rates, not discount factors.
   */
  public SortedMap<Double, Double> getData() {
    return _rateData;
  }

  /**
   * @return The interpolators for this curve, keyed by the maximum time in years for which
   *         each is valid.
   */
  public Map<Double, Interpolator1D> getInterpolators() {
    return _interpolators;
  }

  /**
   * @return The interest rate for time to maturity <i>t</i>, recovered from the
   *         interpolated discount factor as -ln(df) / t.
   * @throws IllegalArgumentException
   *           If the time to maturity is negative.
   */
  @Override
  public double getInterestRate(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    // NOTE(review): t == 0 yields -ln(df)/0, i.e. NaN or infinity — confirm callers never pass zero.
    return -Math.log(getDiscountFactor(t)) / t;
  }

  /**
   * @return The discount factor for time to maturity <i>t</i>.
   * @throws IllegalArgumentException
   *           If the time to maturity is negative.
   */
  @Override
  public double getDiscountFactor(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    // The interpolator keys are the maximum times for which each interpolator is valid, so
    // the applicable interpolator is the one with the smallest key >= t. The previous
    // headMap(t).lastKey() looked at keys strictly below t and threw NoSuchElementException
    // whenever t was at or below the first boundary — always, for the common
    // single-interpolator curve keyed at +infinity.
    final Double key = _interpolators.tailMap(t).firstKey();
    return _interpolators.get(key).interpolate(_dfData, t).getResult();
  }

  /**
   * @return The maturities (in years) of the curve nodes.
   */
  @Override
  public Set<Double> getMaturities() {
    return getData().keySet();
  }

  /**
   * Returns a new curve with every quoted rate moved by the same amount.
   *
   * @param shift The amount to add to each rate (e.g. 0.01 = 1%); not null
   * @return A new curve with shifted rates and the same interpolators
   * @throws IllegalArgumentException If the shift is null
   */
  @Override
  public DiscountCurve withParallelShift(final Double shift) {
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> map = new HashMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
      map.put(entry.getKey(), entry.getValue() + shift);
    }
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  /**
   * Returns a new curve with the rate at maturity {@code t} moved by {@code shift}. If
   * {@code t} is not an existing node, the interpolated rate is shifted and added as a new
   * node.
   *
   * @param t The maturity in years; not null, not negative
   * @param shift The amount to add to the rate; not null
   * @return A new curve containing the shifted node
   * @throws IllegalArgumentException If t is null or negative, or shift is null
   */
  @Override
  public DiscountCurve withSingleShift(final Double t, final Double shift) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("t was negative");
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    if (data.containsKey(t)) {
      map.put(t, data.get(t) + shift);
      return new InterpolatedDiscountCurve(map, getInterpolators());
    }
    map.put(t, getInterestRate(t) + shift);
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  /**
   * Returns a new curve with each (time, shift) pair applied: rates at existing nodes are
   * shifted in place, other times are added as new nodes at interpolated rate plus shift.
   * An empty map returns a copy of this curve.
   *
   * @param shifts Map of maturity (years) to shift amount; not null, no null values, no negative times
   * @return A new curve with the shifted rates
   * @throws IllegalArgumentException If the map is null, or contains a null shift or negative time
   */
  @Override
  public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
    if (shifts == null)
      throw new IllegalArgumentException("Shift map was null");
    if (shifts.isEmpty()) {
      s_Log.info("Shift map was empty; returning identical curve");
      return new InterpolatedDiscountCurve(getData(), getInterpolators());
    }
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Null shift in shift map");
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Negative time in shift map");
      if (data.containsKey(entry.getKey())) {
        map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
      } else {
        map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
      }
    }
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
    result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
    return result;
  }

  @Override
  public boolean equals(final Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
    if (_rateData == null) {
      if (other._rateData != null)
        return false;
    } else if (!_rateData.equals(other._rateData))
      return false;
    if (_interpolators == null) {
      if (other._interpolators != null)
        return false;
    } else if (!_interpolators.equals(other._interpolators))
      return false;
    return true;
  }

  /** Debug representation listing the interpolators and both node maps. */
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("InterpolatedDiscountCurve[");
    sb.append("interpolators={");
    for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
      sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
    }
    sb.append("},rate_data={");
    for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("},df_data={");
    for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("}]");
    return sb.toString();
  }

  /**
   * Serializes this curve to a Fudge message: class name at ordinal 0, interpolators by
   * factory name, and both data maps as parallel "key"/"value" fields.
   */
  public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
    final MutableFudgeFieldContainer message = context.newMessage();
    message.add(null, 0, getClass().getName());
    message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
    message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
    message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
    return message;
  }

  /**
   * Rebuilds a curve from a Fudge message written by {@link #toFudgeMsg}.
   */
  public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
    final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
    final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
    final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
    return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
  }

  // REVIEW kirk 2010-03-31 -- These probably belong in a utility class methinks.
  // TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map encoding

  /** Encodes a Double-to-Double map as repeated parallel "key"/"value" fields. */
  public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", entry.getValue());
    }
    return message;
  }

  /** Encodes a Double-to-Interpolator1D map, storing each interpolator by its factory name. */
  public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
    }
    return message;
  }

  /** Decodes the repeated "key"/"value" fields written by {@link #encodeDoubleDoubleMap}. */
  public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    // Keys and values were written alternately, so the two field lists are parallel.
    while (keyIter.hasNext()) {
      assert valueIter.hasNext();
      final FudgeField keyField = keyIter.next();
      final FudgeField valueField = valueIter.next();
      result.put((Double) keyField.getValue(), (Double) valueField.getValue());
    }
    return result;
  }

  /** Decodes the fields written by {@link #encodeDoubleInterpolator1DMap}, looking interpolators up by name. */
  public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    FudgeField keyField, valueField;
    while (keyIter.hasNext()) {
      assert valueIter.hasNext();
      keyField = keyIter.next();
      valueField = valueIter.next();
      result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
    }
    return result;
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
 * A discount curve bootstrapped from discrete (maturity, continuously-compounded rate)
 * nodes. Discount factors are pre-computed as {@code exp(-rate * t)} and interpolation is
 * performed on the discount-factor data; different {@link Interpolator1D}s may be used on
 * different regions of the curve.
 *
 * @author emcleod
 */
public class InterpolatedDiscountCurve extends DiscountCurve {
  private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
  private static final String RATE_DATA_FIELD_NAME = "rateData";
  private static final String DF_DATA_FIELD_NAME = "dfData";
  private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
  /** Maturity in years -> continuously-compounded rate; sorted and unmodifiable. */
  private final SortedMap<Double, Double> _rateData;
  /** Maturity in years -> discount factor exp(-rate * t); sorted and unmodifiable. */
  private final SortedMap<Double, Double> _dfData;
  /** Maximum valid time in years -> interpolator for that region; sorted and unmodifiable. */
  private final SortedMap<Double, Interpolator1D> _interpolators;

  /**
   * @param data
   *          A map containing pairs of maturities in years and interest rates in percent
   *          (e.g. 3% = 0.03)
   * @param interpolator
   *          An interpolator to get interest rates / discount factors for maturities that
   *          fall in between nodes. This cannot be null.
   * @throws IllegalArgumentException
   *           Thrown if the data map is null or has fewer than two points, or if it
   *           contains a negative time to maturity.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
    // A single interpolator is valid for all times, i.e. up to +infinity.
    this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
  }

  /**
   * @param data
   *          A map containing pairs of maturities in years and interest rates in percent
   *          (e.g. 3% = 0.03)
   * @param interpolators
   *          A map of times and interpolators. This allows different interpolators to be
   *          used for different regions of the curve. The time value is the maximum time
   *          in years for which an interpolator is valid.
   * @throws IllegalArgumentException
   *           Thrown if either map is null, the interpolator map is empty, the data map
   *           has fewer than two points, or the data contain a negative time to maturity.
   */
  public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
    if (data == null)
      throw new IllegalArgumentException("Data map was null");
    if (interpolators == null)
      throw new IllegalArgumentException("Interpolator was null");
    if (interpolators.size() == 0)
      throw new IllegalArgumentException("Interpolator map did not contain values");
    if (data.size() < 2)
      throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
    for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
    }
    final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
    final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Cannot have negative time in a discount curve");
      sortedRates.put(entry.getKey(), entry.getValue());
      // Continuous compounding: df(t) = exp(-r * t).
      sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
    }
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
  }

  /**
   * Deserialization constructor: trusts pre-sorted rate and discount-factor maps and wraps
   * them unmodifiably. The single interpolator is registered as valid for all times.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
    sorted.put(Double.POSITIVE_INFINITY, interpolator);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
  }

  /**
   * Deserialization constructor: trusts pre-sorted data and interpolator maps and wraps
   * them unmodifiably.
   */
  protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
    _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
    _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
    _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
  }

  /**
   * @return The rate data sorted by maturity. Note that these are the
   *         continuously-compounded rates, not discount factors.
   */
  public SortedMap<Double, Double> getData() {
    return _rateData;
  }

  /**
   * @return The interpolators for this curve, keyed by the maximum time in years for which
   *         each is valid.
   */
  public Map<Double, Interpolator1D> getInterpolators() {
    return _interpolators;
  }

  /**
   * @return The interest rate for time to maturity <i>t</i>, recovered from the
   *         interpolated discount factor as -ln(df) / t.
   * @throws IllegalArgumentException
   *           If the time to maturity is negative.
   */
  @Override
  public double getInterestRate(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    // NOTE(review): t == 0 yields -ln(df)/0, i.e. NaN or infinity — confirm callers never pass zero.
    return -Math.log(getDiscountFactor(t)) / t;
  }

  /**
   * @return The discount factor for time to maturity <i>t</i>.
   * @throws IllegalArgumentException
   *           If the time to maturity is negative.
   */
  @Override
  public double getDiscountFactor(final Double t) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
    // The interpolator keys are the maximum times for which each interpolator is valid, so
    // the applicable interpolator is the one with the smallest key >= t. The previous
    // headMap(t).lastKey() looked at keys strictly below t and threw NoSuchElementException
    // whenever t was at or below the first boundary — always, for the common
    // single-interpolator curve keyed at +infinity.
    final Double key = _interpolators.tailMap(t).firstKey();
    return _interpolators.get(key).interpolate(_dfData, t).getResult();
  }

  /**
   * @return The maturities (in years) of the curve nodes.
   */
  @Override
  public Set<Double> getMaturities() {
    return getData().keySet();
  }

  /**
   * Returns a new curve with every quoted rate moved by the same amount.
   *
   * @param shift The amount to add to each rate (e.g. 0.01 = 1%); not null
   * @return A new curve with shifted rates and the same interpolators
   * @throws IllegalArgumentException If the shift is null
   */
  @Override
  public DiscountCurve withParallelShift(final Double shift) {
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> map = new HashMap<Double, Double>();
    for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
      map.put(entry.getKey(), entry.getValue() + shift);
    }
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  /**
   * Returns a new curve with the rate at maturity {@code t} moved by {@code shift}. If
   * {@code t} is not an existing node, the interpolated rate is shifted and added as a new
   * node.
   *
   * @param t The maturity in years; not null, not negative
   * @param shift The amount to add to the rate; not null
   * @return A new curve containing the shifted node
   * @throws IllegalArgumentException If t is null or negative, or shift is null
   */
  @Override
  public DiscountCurve withSingleShift(final Double t, final Double shift) {
    if (t == null)
      throw new IllegalArgumentException("t was null");
    if (t < 0)
      throw new IllegalArgumentException("t was negative");
    if (shift == null)
      throw new IllegalArgumentException("Shift was null");
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    if (data.containsKey(t)) {
      map.put(t, data.get(t) + shift);
      return new InterpolatedDiscountCurve(map, getInterpolators());
    }
    map.put(t, getInterestRate(t) + shift);
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  /**
   * Returns a new curve with each (time, shift) pair applied: rates at existing nodes are
   * shifted in place, other times are added as new nodes at interpolated rate plus shift.
   * An empty map returns a copy of this curve.
   *
   * @param shifts Map of maturity (years) to shift amount; not null, no null values, no negative times
   * @return A new curve with the shifted rates
   * @throws IllegalArgumentException If the map is null, or contains a null shift or negative time
   */
  @Override
  public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
    if (shifts == null)
      throw new IllegalArgumentException("Shift map was null");
    if (shifts.isEmpty()) {
      s_Log.info("Shift map was empty; returning identical curve");
      return new InterpolatedDiscountCurve(getData(), getInterpolators());
    }
    final Map<Double, Double> data = getData();
    final Map<Double, Double> map = new HashMap<Double, Double>(data);
    for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
      if (entry.getValue() == null)
        throw new IllegalArgumentException("Null shift in shift map");
      if (entry.getKey() < 0)
        throw new IllegalArgumentException("Negative time in shift map");
      if (data.containsKey(entry.getKey())) {
        map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
      } else {
        map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
      }
    }
    return new InterpolatedDiscountCurve(map, getInterpolators());
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
    result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
    return result;
  }

  @Override
  public boolean equals(final Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
    if (_rateData == null) {
      if (other._rateData != null)
        return false;
    } else if (!_rateData.equals(other._rateData))
      return false;
    if (_interpolators == null) {
      if (other._interpolators != null)
        return false;
    } else if (!_interpolators.equals(other._interpolators))
      return false;
    return true;
  }

  /** Debug representation listing the interpolators and both node maps. */
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("InterpolatedDiscountCurve[");
    sb.append("interpolators={");
    for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
      sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
    }
    sb.append("},rate_data={");
    for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("},df_data={");
    for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
      sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
    }
    sb.append("}]");
    return sb.toString();
  }

  /**
   * Serializes this curve to a Fudge message: class name at ordinal 0, interpolators by
   * factory name, and both data maps as parallel "key"/"value" fields.
   */
  public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
    final MutableFudgeFieldContainer message = context.newMessage();
    message.add(null, 0, getClass().getName());
    message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
    message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
    message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
    return message;
  }

  /**
   * Rebuilds a curve from a Fudge message written by {@link #toFudgeMsg}.
   */
  public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
    final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
    final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
    final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
    return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
  }

  // REVIEW kirk 2010-03-31 -- These probably belong in a utility class methinks.
  // TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map encoding

  /** Encodes a Double-to-Double map as repeated parallel "key"/"value" fields. */
  public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Double> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", entry.getValue());
    }
    return message;
  }

  /** Encodes a Double-to-Interpolator1D map, storing each interpolator by its factory name. */
  public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
    final MutableFudgeFieldContainer message = context.newMessage();
    for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
      message.add("key", entry.getKey());
      message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
    }
    return message;
  }

  /** Decodes the repeated "key"/"value" fields written by {@link #encodeDoubleDoubleMap}. */
  public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    // Keys and values were written alternately, so the two field lists are parallel.
    while (keyIter.hasNext()) {
      assert valueIter.hasNext();
      final FudgeField keyField = keyIter.next();
      final FudgeField valueField = valueIter.next();
      result.put((Double) keyField.getValue(), (Double) valueField.getValue());
    }
    return result;
  }

  /** Decodes the fields written by {@link #encodeDoubleInterpolator1DMap}, looking interpolators up by name. */
  public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
    final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
    final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
    final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
    FudgeField keyField, valueField;
    while (keyIter.hasNext()) {
      assert valueIter.hasNext();
      keyField = keyIter.next();
      valueField = valueIter.next();
      result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
    }
    return result;
  }
}
MergeMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
 *
 * @param data
 * A map containing pairs of maturities in years and interest rates
 * in percent (e.g. 3% = 0.03)
 * @param interpolator
 * An interpolator to get interest rates / discount factors for
 * maturities that fall in between nodes. This cannot be null: the
 * delegated constructor rejects null interpolator values.
 * @throws IllegalArgumentException
 * Thrown if the data map is null or empty, or if it contains a
 * negative time to maturity.
 */
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
// A single interpolator is registered as valid for all times, i.e. up to +infinity.
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
 *
 * @param data
 * A map containing pairs of maturities in years and interest rates
 * in percent (e.g. 3% = 0.03)
 * @param interpolators
 * A map of times and interpolators. This allows different interpolators
 * to be used for different regions of the curve. The time value is the
 * maximum time in years for which an interpolator is valid.
 * @throws IllegalArgumentException
 * Thrown if either map is null, the interpolator map is empty, the data
 * map has fewer than two points, or the data contain a negative time to
 * maturity.
 */
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
// Continuous compounding: df(t) = exp(-r * t).
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
/**
 * Deserialization constructor: trusts pre-sorted rate and discount-factor maps and wraps
 * them unmodifiably. The single interpolator is registered as valid for all times.
 */
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
// Internal constructor: trusts pre-sorted, pre-validated inputs; all three maps are
// wrapped unmodifiably without copying.
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
  _interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
  _rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
  _dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
}
/**
 * @return The (maturity, rate) node data sorted by maturity. Note that these are the
 *         continuously-compounded rates, not the derived discount factors (the previous
 *         javadoc stated the opposite of what the code returns).
 */
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
 * @return The interpolators for this curve, keyed by the maximum time in years for which
 *         each interpolator is valid. (An accidentally duplicated copy of this javadoc
 *         block was removed.)
 */
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
 * Recovers the continuously-compounded rate from the interpolated discount factor:
 * r(t) = -ln(df(t)) / t.
 *
 * @return The interest rate for time to maturity <i>t</i>.
 * @throws IllegalArgumentException
 *           If the time to maturity is null or negative.
 */
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
// NOTE(review): t == 0 is not rejected and divides by zero below (NaN/Infinity result).
return -Math.log(getDiscountFactor(t)) / t;
}
/**
 * @return The discount factor for time to maturity <i>t</i>.
 * @throws IllegalArgumentException
 *           If the time to maturity is null or negative.
 */
@Override
public double getDiscountFactor(final Double t) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
  // Each interpolator is keyed by the MAXIMUM time for which it is valid, so the
  // applicable interpolator is the one with the smallest key >= t. The previous code
  // used headMap(t).lastKey(), which searches strictly BELOW t and therefore threw
  // NoSuchElementException whenever t was at or below the first key - in particular
  // always, for single-interpolator curves whose only key is +infinity.
  final Double key = _interpolators.tailMap(t).firstKey();
  return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
/** @return The maturities of the curve's nodes (key view of the rate data). */
@Override
public Set<Double> getMaturities() {
  return _rateData.keySet();
}
/**
 * Builds a new curve with every rate moved by the same amount; this curve is unchanged.
 *
 * @throws IllegalArgumentException if the shift is null.
 */
@Override
public DiscountCurve withParallelShift(final Double shift) {
  if (shift == null) {
    throw new IllegalArgumentException("Shift was null");
  }
  final Map<Double, Double> shifted = new HashMap<Double, Double>();
  for (final Map.Entry<Double, Double> rateEntry : _rateData.entrySet()) {
    shifted.put(rateEntry.getKey(), rateEntry.getValue() + shift);
  }
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
/**
 * Builds a new curve with the rate at maturity {@code t} moved by {@code shift}. If no
 * node exists at {@code t}, the interpolated rate there is shifted and added as a node.
 *
 * @throws IllegalArgumentException if t is null or negative, or the shift is null.
 */
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
  if (t == null) {
    throw new IllegalArgumentException("t was null");
  }
  if (t < 0) {
    throw new IllegalArgumentException("t was negative");
  }
  if (shift == null) {
    throw new IllegalArgumentException("Shift was null");
  }
  final Map<Double, Double> data = getData();
  final Map<Double, Double> shifted = new HashMap<Double, Double>(data);
  // Existing node: shift it in place; otherwise shift the interpolated rate at t.
  final double base = data.containsKey(t) ? data.get(t) : getInterestRate(t);
  shifted.put(t, base + shift);
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
/**
 * Builds a new curve with each (time, amount) entry of {@code shifts} applied: existing
 * nodes are moved, missing nodes are created from the interpolated rate at that time.
 *
 * @throws IllegalArgumentException if the map is null, or contains a null shift or a
 *           negative time.
 */
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
  if (shifts == null) {
    throw new IllegalArgumentException("Shift map was null");
  }
  if (shifts.isEmpty()) {
    s_Log.info("Shift map was empty; returning identical curve");
    return new InterpolatedDiscountCurve(getData(), getInterpolators());
  }
  final Map<Double, Double> data = getData();
  final Map<Double, Double> shifted = new HashMap<Double, Double>(data);
  for (final Map.Entry<Double, Double> shiftEntry : shifts.entrySet()) {
    final Double time = shiftEntry.getKey();
    final Double amount = shiftEntry.getValue();
    if (amount == null) {
      throw new IllegalArgumentException("Null shift in shift map");
    }
    if (time < 0) {
      throw new IllegalArgumentException("Negative time in shift map");
    }
    final double base = data.containsKey(time) ? data.get(time) : getInterestRate(time);
    shifted.put(time, base + amount);
  }
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
// Consistent with equals(): only _rateData and _interpolators participate; _dfData is
// derived from _rateData, so including it would be redundant. The null checks are
// defensive - every constructor assigns non-null maps.
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
// Consistent with hashCode(): compares _rateData and _interpolators only; _dfData is
// derived from _rateData. Uses getClass() (not instanceof), so subclasses never compare
// equal to this class.
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
/** Debug representation listing the interpolators, rate nodes and discount factors. */
@Override
public String toString() {
  final StringBuilder buf = new StringBuilder("InterpolatedDiscountCurve[");
  buf.append("interpolators={");
  for (final Map.Entry<Double, Interpolator1D> interpolatorEntry : _interpolators.entrySet()) {
    buf.append(interpolatorEntry.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(interpolatorEntry.getValue())).append(',');
  }
  buf.append("},rate_data={");
  for (final Map.Entry<Double, Double> rateEntry : _rateData.entrySet()) {
    buf.append(rateEntry.getKey()).append('=').append(rateEntry.getValue()).append(',');
  }
  buf.append("},df_data={");
  for (final Map.Entry<Double, Double> dfEntry : _dfData.entrySet()) {
    buf.append(dfEntry.getKey()).append('=').append(dfEntry.getValue()).append(',');
  }
  buf.append("}]");
  return buf.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
/**
 * Deserializes a curve produced by {@link #toFudgeMsg}, using the static "key"/"value"
 * sub-message decoders. The deserialization context parameter is not used here.
 */
// NOTE(review): decodeSortedDoubleDoubleMap is not defined in the visible class - it is
// presumably inherited or defined elsewhere in the compilation unit; confirm.
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map encoding
/**
 * Encodes a (Double, Interpolator1D) map as repeated parallel "key"/"value" fields; each
 * interpolator is stored by its factory name. Pairing of keys with values on decode
 * relies on Fudge preserving the order of repeated fields.
 */
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
/**
 * Inverse of {@link #encodeDoubleInterpolator1DMap}: rebuilds the sorted interpolator map
 * from repeated parallel "key"/"value" fields. The key/value count match is only checked
 * by {@code assert}, which is disabled unless the JVM runs with -ea.
 */
// NOTE(review): this method needs java.util.Iterator and org.fudgemsg.FudgeField in
// scope; the import lists visible elsewhere in this file do not include them - confirm
// they are imported at the top of this compilation unit.
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
FudgeField keyField, valueField;
while (keyIter.hasNext()) {
assert valueIter.hasNext();
keyField = keyIter.next();
valueField = valueIter.next();
result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
}
return result;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
final Double key = _interpolators.headMap(t).lastKey();
return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
FudgeField keyField, valueField;
while (keyIter.hasNext()) {
assert valueIter.hasNext();
keyField = keyIter.next();
valueField = valueIter.next();
result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
}
return result;
}
}
// KeepBothMethods -- stray merge-resolution marker (not valid Java); commented out so the file parses.
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
final Double key = _interpolators.headMap(t).lastKey();
return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
}
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
  // Encodes the map as parallel repeated "key"/"value" fields; Fudge messages
  // permit duplicate field names, so entry order carries the pairing.
  final MutableFudgeFieldContainer encoded = context.newMessage();
  for (final Map.Entry<Double, Interpolator1D> mapping : data.entrySet()) {
    final String interpolatorName = Interpolator1DFactory.getInterpolatorName(mapping.getValue());
    encoded.add("key", mapping.getKey());
    encoded.add("value", interpolatorName);
  }
  return encoded;
}
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
  // Inverse of encodeDoubleInterpolator1DMap: re-pair the repeated "key" fields
  // with the repeated "value" fields by position.
  final SortedMap<Double, Interpolator1D> decoded = new TreeMap<Double, Interpolator1D>();
  final Iterator<FudgeField> values = msg.getAllByName("value").iterator();
  for (final FudgeField key : msg.getAllByName("key")) {
    assert values.hasNext();
    final String interpolatorName = (String) values.next().getValue();
    decoded.put((Double) key.getValue(), Interpolator1DFactory.getInterpolator(interpolatorName));
  }
  return decoded;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
// --- argument validation ---
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
// Interpolation needs at least two nodes to bracket a query point.
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
// Every region of the curve must have a usable interpolator.
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
// --- build sorted copies: rates as given, discount factors derived as df = exp(-r * t) ---
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
// Store defensive, unmodifiable sorted views so the curve is immutable.
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
// Continuously-compounded rate recovered from the discount factor: r = -ln(df) / t.
// NOTE(review): t == 0 divides by zero here (yielding NaN or infinity); confirm
// callers never query the rate at exactly zero maturity.
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
  // Each interpolator key is the MAXIMUM time for which that interpolator is valid
  // (see the two-arg constructor javadoc), so select the smallest key >= t.
  // The previous headMap(t).lastKey() picked the largest key strictly BELOW t,
  // which contradicts that contract and throws NoSuchElementException for the
  // common single-interpolator case keyed at Double.POSITIVE_INFINITY.
  final Double key = _interpolators.tailMap(t).firstKey();
  return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
}
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
FudgeField keyField, valueField;
while (keyIter.hasNext()) {
assert valueIter.hasNext();
keyField = keyIter.next();
valueField = valueIter.next();
result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
}
return result;
}
}
Safe
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
  // Each interpolator key is the MAXIMUM time for which that interpolator is valid
  // (see the two-arg constructor javadoc), so select the smallest key >= t.
  // The previous headMap(t).lastKey() picked the largest key strictly BELOW t,
  // which contradicts that contract and throws NoSuchElementException for the
  // common single-interpolator case keyed at Double.POSITIVE_INFINITY.
  final Double key = _interpolators.tailMap(t).firstKey();
  return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
<<<<<<< MINE
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
=======
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
}
>>>>>>> YOURS
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
FudgeField keyField, valueField;
while (keyIter.hasNext()) {
assert valueIter.hasNext();
keyField = keyIter.next();
valueField = valueIter.next();
result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
}
return result;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This can be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The data sorted by maturity. Note that these are discount factors,
* not rates.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
 * Returns the continuously-compounded interest rate implied by the
 * interpolated discount factor for the given time to maturity.
 *
 * @param t time to maturity in years; must be non-null and non-negative
 * @return The interest rate for time to maturity <i>t</i>.
 * @throws IllegalArgumentException
 *           If the time to maturity is null or negative.
 */
@Override
public double getInterestRate(final Double t) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
  // Invert df = exp(-r * t) to recover r.
  // NOTE(review): t == 0 divides by zero and yields NaN/Infinity — confirm
  // callers never pass zero.
  final double discountFactor = getDiscountFactor(t);
  return -Math.log(discountFactor) / t;
}
/**
 * Returns the interpolated discount factor for the given time to maturity.
 *
 * @param t time to maturity in years; must be non-null and non-negative
 * @return The discount factor for time to maturity <i>t</i>.
 * @throws IllegalArgumentException
 *           If the time to maturity is null or negative.
 */
@Override
public double getDiscountFactor(final Double t) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
  // Each interpolator is keyed by the MAXIMUM time for which it is valid, so
  // the applicable interpolator is the one with the smallest key >= t.
  // The previous headMap(t).lastKey() selected keys strictly below t and threw
  // NoSuchElementException for the common single-interpolator case (sole key
  // at Double.POSITIVE_INFINITY).
  final Double key = _interpolators.tailMap(t).firstKey();
  return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
/**
 * @return The set of times to maturity for which rate data is held.
 */
@Override
public Set<Double> getMaturities() {
  // getData() returns _rateData directly, so read the field's key set.
  return _rateData.keySet();
}
/**
 * Builds a new curve with every rate shifted by the same amount.
 *
 * @param shift the amount to add to every rate; must not be null
 * @return a new shifted curve; this curve is unchanged
 * @throws IllegalArgumentException if the shift is null
 */
@Override
public DiscountCurve withParallelShift(final Double shift) {
  if (shift == null)
    throw new IllegalArgumentException("Shift was null");
  final Map<Double, Double> shifted = new HashMap<Double, Double>();
  for (final Map.Entry<Double, Double> node : _rateData.entrySet()) {
    shifted.put(node.getKey(), node.getValue() + shift);
  }
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
/**
 * Builds a new curve with the rate at one maturity shifted. If the maturity is
 * not an existing node, a new node is created at the interpolated rate plus
 * the shift.
 *
 * @param t the time to maturity to shift; non-null, non-negative
 * @param shift the amount to add; must not be null
 * @return a new shifted curve; this curve is unchanged
 * @throws IllegalArgumentException if either argument is null or t is negative
 */
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
  if (t == null)
    throw new IllegalArgumentException("t was null");
  if (t < 0)
    throw new IllegalArgumentException("t was negative");
  if (shift == null)
    throw new IllegalArgumentException("Shift was null");
  final Map<Double, Double> data = getData();
  final Map<Double, Double> shifted = new HashMap<Double, Double>(data);
  // Shift the existing node if present, otherwise interpolate a base rate.
  final double base = data.containsKey(t) ? data.get(t) : getInterestRate(t);
  shifted.put(t, base + shift);
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
/**
 * Builds a new curve with several maturities shifted at once. Maturities not
 * already on the curve are added at their interpolated rate plus the shift.
 *
 * @param shifts map of time to maturity to shift amount; must not be null,
 *          contain null values or negative times
 * @return a new shifted curve; this curve is unchanged
 * @throws IllegalArgumentException on null map, null shift or negative time
 */
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
  if (shifts == null)
    throw new IllegalArgumentException("Shift map was null");
  if (shifts.isEmpty()) {
    s_Log.info("Shift map was empty; returning identical curve");
    return new InterpolatedDiscountCurve(getData(), getInterpolators());
  }
  final Map<Double, Double> data = getData();
  final Map<Double, Double> shifted = new HashMap<Double, Double>(data);
  for (final Map.Entry<Double, Double> shiftEntry : shifts.entrySet()) {
    final Double time = shiftEntry.getKey();
    final Double amount = shiftEntry.getValue();
    if (amount == null)
      throw new IllegalArgumentException("Null shift in shift map");
    if (time < 0)
      throw new IllegalArgumentException("Negative time in shift map");
    final double base = data.containsKey(time) ? data.get(time) : getInterestRate(time);
    shifted.put(time, base + amount);
  }
  return new InterpolatedDiscountCurve(shifted, getInterpolators());
}
/**
 * Hash code consistent with {@link #equals(Object)}: based on the rate data
 * and interpolator maps. {@code _dfData} is omitted, presumably because it is
 * derived from {@code _rateData} — TODO confirm.
 */
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
/**
 * Equality is based on the rate data and interpolator maps; {@code _dfData}
 * is not compared (it mirrors the comparison used by {@link #hashCode()}).
 */
@Override
public boolean equals(final Object obj) {
  if (this == obj)
    return true;
  if (obj == null)
    return false;
  if (getClass() != obj.getClass())
    return false;
  final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
  if (_rateData == null ? other._rateData != null : !_rateData.equals(other._rateData))
    return false;
  return _interpolators == null ? other._interpolators == null : _interpolators.equals(other._interpolators);
}
/**
 * Dumps the interpolators (by factory name), rate data and discount-factor
 * data in a bracketed, comma-terminated list format.
 */
@Override
public String toString() {
  final StringBuilder buf = new StringBuilder("InterpolatedDiscountCurve[");
  buf.append("interpolators={");
  for (final Map.Entry<Double, Interpolator1D> entry : _interpolators.entrySet()) {
    buf.append(entry.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(entry.getValue())).append(',');
  }
  buf.append("},rate_data={");
  for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
    buf.append(entry.getKey()).append('=').append(entry.getValue()).append(',');
  }
  buf.append("},df_data={");
  for (final Map.Entry<Double, Double> entry : _dfData.entrySet()) {
    buf.append(entry.getKey()).append('=').append(entry.getValue()).append(',');
  }
  buf.append("}]");
  return buf.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
<<<<<<< MINE
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
=======
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
}
>>>>>>> YOURS
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
/**
 * Encodes a time-to-interpolator map as a sub-message of repeated parallel
 * "key" (time) and "value" (interpolator factory name) fields.
 *
 * @param context the serialization context used to create the message
 * @param data the map to encode
 * @return the encoded message
 */
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
// Interpolators are serialized by factory name, not by object.
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
/**
 * Decodes repeated parallel "key" (time) and "value" (interpolator factory
 * name) fields into a sorted time-to-interpolator map.
 *
 * @param msg the sub-message to decode
 * @return the decoded sorted map
 * @throws IllegalArgumentException if the key and value fields are mismatched
 */
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
  final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
  final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
  final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
  while (keyIter.hasNext()) {
    // Message data is external input: validate explicitly instead of using
    // assert, which is disabled unless the JVM runs with -ea.
    if (!valueIter.hasNext())
      throw new IllegalArgumentException("Message contained more 'key' fields than 'value' fields");
    final FudgeField keyField = keyIter.next();
    final FudgeField valueField = valueIter.next();
    result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
  }
  return result;
}
}
Unstructured
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeField;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This cannot be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The rate data sorted by maturity. Note that these are rates,
* not discount factors.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
final Double key = _interpolators.headMap(t).lastKey();
return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
<<<<<<< MINE
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
=======
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", entry.getValue());
}
return message;
}
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
while (keyIter.hasNext()) {
assert valueIter.hasNext();
final FudgeField keyField = keyIter.next();
final FudgeField valueField = valueIter.next();
result.put((Double) keyField.getValue(), (Double) valueField.getValue());
}
return result;
>>>>>>> YOURS
}
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Interpolator1D> result = new TreeMap<Double, Interpolator1D>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
FudgeField keyField, valueField;
while (keyIter.hasNext()) {
assert valueIter.hasNext();
keyField = keyIter.next();
valueField = valueIter.next();
result.put((Double) keyField.getValue(), Interpolator1DFactory.getInterpolator((String) valueField.getValue()));
}
return result;
}
}/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.interestrate.curve;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.fudgemsg.FudgeFieldContainer;
import org.fudgemsg.MutableFudgeFieldContainer;
import org.fudgemsg.mapping.FudgeDeserializationContext;
import org.fudgemsg.mapping.FudgeSerializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.Interpolator1DFactory;
/**
*
* @author emcleod
*/
public class InterpolatedDiscountCurve extends DiscountCurve implements Serializable {
private static final String INTERPOLATOR_FIELD_NAME = "interpolator";
private static final String RATE_DATA_FIELD_NAME = "rateData";
private static final String DF_DATA_FIELD_NAME = "dfData";
private static final Logger s_Log = LoggerFactory.getLogger(InterpolatedDiscountCurve.class);
private final SortedMap<Double, Double> _rateData;
private final SortedMap<Double, Double> _dfData;
private final SortedMap<Double, Interpolator1D> _interpolators;
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolator
* An interpolator to get interest rates / discount factors for
* maturities that fall in between nodes. This cannot be null.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Interpolator1D interpolator) {
this(data, Collections.<Double, Interpolator1D> singletonMap(Double.POSITIVE_INFINITY, interpolator));
}
/**
*
* @param data
* A map containing pairs of maturities in years and interest rates
* in percent (e.g. 3% = 0.03)
* @param interpolators
* A map of times and interpolators. This allows different interpolators
* to be used for different regions of the curve. The time value is the
* maximum time in years for which an interpolator is valid.
* @throws IllegalArgumentException
* Thrown if the data map is null or empty, or if it contains a
* negative time to maturity.
*/
public InterpolatedDiscountCurve(final Map<Double, Double> data, final Map<Double, Interpolator1D> interpolators) {
if (data == null)
throw new IllegalArgumentException("Data map was null");
if (interpolators == null)
throw new IllegalArgumentException("Interpolator was null");
if (interpolators.size() == 0)
throw new IllegalArgumentException("Interpolator map did not contain values");
if (data.size() < 2)
throw new IllegalArgumentException("Need to have at least two data points for an interpolated curve");
for (final Map.Entry<Double, Interpolator1D> entry : interpolators.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Interpolator for time " + entry.getKey() + " was null");
}
final SortedMap<Double, Double> sortedRates = new TreeMap<Double, Double>();
final SortedMap<Double, Double> sortedDF = new TreeMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
if (entry.getKey() < 0)
throw new IllegalArgumentException("Cannot have negative time in a discount curve");
sortedRates.put(entry.getKey(), entry.getValue());
sortedDF.put(entry.getKey(), Math.exp(-entry.getValue() * entry.getKey()));
}
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(new TreeMap<Double, Interpolator1D>(interpolators));
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final Interpolator1D interpolator) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
final SortedMap<Double, Interpolator1D> sorted = new TreeMap<Double, Interpolator1D>();
sorted.put(Double.POSITIVE_INFINITY, interpolator);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(sorted);
}
protected InterpolatedDiscountCurve(final SortedMap<Double, Double> sortedRates, final SortedMap<Double, Double> sortedDF, final SortedMap<Double, Interpolator1D> interpolators) {
_rateData = Collections.<Double, Double> unmodifiableSortedMap(sortedRates);
_dfData = Collections.<Double, Double> unmodifiableSortedMap(sortedDF);
_interpolators = Collections.<Double, Interpolator1D> unmodifiableSortedMap(interpolators);
}
/**
*
* @return The rate data sorted by maturity. Note that these are rates,
* not discount factors.
*/
public SortedMap<Double, Double> getData() {
return _rateData;
}
/**
*
* @return The interpolator for this curve.
*/
public Map<Double, Interpolator1D> getInterpolators() {
return _interpolators;
}
/**
* @return The interest rate for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getInterestRate(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
return -Math.log(getDiscountFactor(t)) / t;
}
/**
* @return The discount factor for time to maturity <i>t</i>.
* @throws IllegalArgumentException
* If the time to maturity is negative.
*/
@Override
public double getDiscountFactor(final Double t) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("Cannot have a negative time in a DiscountCurve: provided " + t);
final Double key = _interpolators.headMap(t).lastKey();
return _interpolators.get(key).interpolate(_dfData, t).getResult();
}
@Override
public Set<Double> getMaturities() {
return getData().keySet();
}
@Override
public DiscountCurve withParallelShift(final Double shift) {
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> map = new HashMap<Double, Double>();
for (final Map.Entry<Double, Double> entry : _rateData.entrySet()) {
map.put(entry.getKey(), entry.getValue() + shift);
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withSingleShift(final Double t, final Double shift) {
if (t == null)
throw new IllegalArgumentException("t was null");
if (t < 0)
throw new IllegalArgumentException("t was negative");
if (shift == null)
throw new IllegalArgumentException("Shift was null");
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
if (data.containsKey(t)) {
map.put(t, data.get(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
map.put(t, getInterestRate(t) + shift);
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public DiscountCurve withMultipleShifts(final Map<Double, Double> shifts) {
if (shifts == null)
throw new IllegalArgumentException("Shift map was null");
if (shifts.isEmpty()) {
s_Log.info("Shift map was empty; returning identical curve");
return new InterpolatedDiscountCurve(getData(), getInterpolators());
}
final Map<Double, Double> data = getData();
final Map<Double, Double> map = new HashMap<Double, Double>(data);
for (final Map.Entry<Double, Double> entry : shifts.entrySet()) {
if (entry.getValue() == null)
throw new IllegalArgumentException("Null shift in shift map");
if (entry.getKey() < 0)
throw new IllegalArgumentException("Negative time in shift map");
if (data.containsKey(entry.getKey())) {
map.put(entry.getKey(), data.get(entry.getKey()) + entry.getValue());
} else {
map.put(entry.getKey(), getInterestRate(entry.getKey()) + entry.getValue());
}
}
return new InterpolatedDiscountCurve(map, getInterpolators());
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (_rateData == null ? 0 : _rateData.hashCode());
result = prime * result + (_interpolators == null ? 0 : _interpolators.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InterpolatedDiscountCurve other = (InterpolatedDiscountCurve) obj;
if (_rateData == null) {
if (other._rateData != null)
return false;
} else if (!_rateData.equals(other._rateData))
return false;
if (_interpolators == null) {
if (other._interpolators != null)
return false;
} else if (!_interpolators.equals(other._interpolators))
return false;
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("InterpolatedDiscountCurve[");
sb.append("interpolators={");// .append(Interpolator1DFactory.getInterpolatorName(getInterpolators())).append(',');
for (final Map.Entry<Double, Interpolator1D> e : _interpolators.entrySet()) {
sb.append(e.getKey()).append('=').append(Interpolator1DFactory.getInterpolatorName(e.getValue())).append(',');
}
sb.append("},rate_data={");
for (final Map.Entry<Double, Double> e : _rateData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("},df_data={");
for (final Map.Entry<Double, Double> e : _dfData.entrySet()) {
sb.append(e.getKey()).append('=').append(e.getValue()).append(',');
}
sb.append("}]");
return sb.toString();
}
public FudgeFieldContainer toFudgeMsg(final FudgeSerializationContext context) {
final MutableFudgeFieldContainer message = context.newMessage();
message.add(null, 0, getClass().getName());
<<<<<<< MINE
message.add(INTERPOLATOR_FIELD_NAME, Interpolator1DFactory.getInterpolatorName(getInterpolator()));
context.objectToFudgeMsg(message, RATE_DATA_FIELD_NAME, null, _rateData);
context.objectToFudgeMsg(message, DF_DATA_FIELD_NAME, null, _dfData);
=======
message.add(INTERPOLATOR_FIELD_NAME, encodeDoubleInterpolator1DMap(context, _interpolators));
message.add(RATE_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _rateData));
message.add(DF_DATA_FIELD_NAME, encodeDoubleDoubleMap(context, _dfData));
>>>>>>> YOURS
return message;
}
<<<<<<< MINE
@SuppressWarnings("unchecked")
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeDeserializationContext context, final FudgeFieldContainer message) {
final Interpolator1D interpolator = Interpolator1DFactory.getInterpolator(message.getString(INTERPOLATOR_FIELD_NAME));
final SortedMap<Double, Double> rateData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(RATE_DATA_FIELD_NAME)));
final SortedMap<Double, Double> dfData = new TreeMap<Double, Double>(context.fieldValueToObject(Map.class, message.getByName(DF_DATA_FIELD_NAME)));
return new InterpolatedDiscountCurve(rateData, dfData, interpolator);
=======
public static InterpolatedDiscountCurve fromFudgeMsg(final FudgeFieldContainer message) {
final SortedMap<Double, Double> rateData = decodeSortedDoubleDoubleMap(message.getMessage(RATE_DATA_FIELD_NAME));
final SortedMap<Double, Double> dfData = decodeSortedDoubleDoubleMap(message.getMessage(DF_DATA_FIELD_NAME));
final SortedMap<Double, Interpolator1D> interpolators = decodeSortedDoubleInterpolator1DMap(message.getMessage(INTERPOLATOR_FIELD_NAME));
return new InterpolatedDiscountCurve(rateData, dfData, interpolators);
}
// REVIEW kirk 2010-03-31 -- These probably belong in a utility class
// methinks.
// TODO 2010-04-06 Andrew -- Use the FSC/FDC methods for automatic map
// encoding
public static MutableFudgeFieldContainer encodeDoubleDoubleMap(final FudgeSerializationContext context, final Map<Double, Double> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Double> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", entry.getValue());
}
return message;
}
public static MutableFudgeFieldContainer encodeDoubleInterpolator1DMap(final FudgeSerializationContext context, final Map<Double, Interpolator1D> data) {
final MutableFudgeFieldContainer message = context.newMessage();
for (final Map.Entry<Double, Interpolator1D> entry : data.entrySet()) {
message.add("key", entry.getKey());
message.add("value", Interpolator1DFactory.getInterpolatorName(entry.getValue()));
}
return message;
}
public static SortedMap<Double, Double> decodeSortedDoubleDoubleMap(final FudgeFieldContainer msg) {
final SortedMap<Double, Double> result = new TreeMap<Double, Double>();
final Iterator<FudgeField> keyIter = msg.getAllByName("key").iterator();
final Iterator<FudgeField> valueIter = msg.getAllByName("value").iterator();
while (keyIter.hasNext()) {
assert valueIter.hasNext();
final FudgeField keyField = keyIter.next();
final FudgeField valueField = valueIter.next();
result.put((Double) keyField.getValue(), (Double) valueField.getValue());
}
return result;
>>>>>>> YOURS
}
/**
 * Rebuilds a sorted time-to-interpolator map from the parallel "key"/"value"
 * field lists written by {@code encodeDoubleInterpolator1DMap}; the "value"
 * fields hold interpolator names resolved through the factory.
 *
 * @param msg the sub-message to decode, not null
 * @return a sorted map of decoded entries
 */
public static SortedMap<Double, Interpolator1D> decodeSortedDoubleInterpolator1DMap(final FudgeFieldContainer msg) {
  final SortedMap<Double, Interpolator1D> decoded = new TreeMap<Double, Interpolator1D>();
  final Iterator<FudgeField> keys = msg.getAllByName("key").iterator();
  final Iterator<FudgeField> values = msg.getAllByName("value").iterator();
  while (keys.hasNext()) {
    // Fields were written pairwise, so both lists should stay in lockstep.
    assert values.hasNext();
    final Double key = (Double) keys.next().getValue();
    final String interpolatorName = (String) values.next().getValue();
    decoded.put(key, Interpolator1DFactory.getInterpolator(interpolatorName));
  }
  return decoded;
}
}
Diff Result
No diff
Case 70 - java_ogplatform.rev_a37de_e434d..EHCachingHistoricalDataProvider.java
Base
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final boolean INCLUDE_LAST_DAY = true;
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
  // Resolve via the cache: the meta-data key maps to a UniqueIdentifier,
  // which in turn maps to the cached time series.
  MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
  Element element = _cache.get(key);
  if (element != null) {
    Serializable value = element.getValue();
    if (value instanceof UniqueIdentifier) {
      UniqueIdentifier uid = (UniqueIdentifier) value;
      s_logger.debug("retrieved UID: {} from cache", uid);
      LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
      return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
    } else {
      s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
      return null;
    }
  } else {
    Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
    _cache.put(new Element(key, tsPair.getFirst()));
    // BUG FIX: only cache the series under its UID when resolution actually
    // produced one; the underlying source may return a pair whose first
    // element is null, and an Element with a null key would corrupt the cache.
    if (tsPair.getFirst() != null) {
      _cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
    }
    return tsPair;
  }
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return null;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return null;
}
}
/**
 * Composite cache key: identifier bundle plus (dataSource, dataProvider, field).
 * Any of the three strings may be null — getHistoricalData(IdentifierBundle)
 * passes null for all three — so all members must be handled null-safely.
 */
private static class MetaDataKey implements Serializable {
  private final IdentifierBundle _dsids;
  private final String _dataSource;
  private final String _dataProvider;
  private final String _field;
  public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
    _dsids = dsids;
    _dataSource = dataSource;
    _dataProvider = dataProvider;
    _field = field;
  }
  @Override
  public int hashCode() {
    // BUG FIX: the previous implementation (_dsids.hashCode() ^ _field.hashCode())
    // threw NullPointerException whenever _field was null, which happens for
    // every lookup made through getHistoricalData(IdentifierBundle). Mix in all
    // four components null-safely; equal keys still produce equal hashes.
    final int shift = 17;
    int hc = _dsids.hashCode();
    hc *= shift;
    if (_dataSource != null) {
      hc += _dataSource.hashCode();
    }
    hc *= shift;
    if (_dataProvider != null) {
      hc += _dataProvider.hashCode();
    }
    hc *= shift;
    if (_field != null) {
      hc += _field.hashCode();
    }
    return hc;
  }
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof MetaDataKey)) {
      return false;
    }
    MetaDataKey other = (MetaDataKey) obj;
    // Field-by-field null-safe comparison.
    if (_field == null) {
      if (other._field != null) {
        return false;
      }
    } else if (!_field.equals(other._field)) {
      return false;
    }
    if (_dsids == null) {
      if (other._dsids != null) {
        return false;
      }
    } else if (!_dsids.equals(other._dsids)) {
      return false;
    }
    if (_dataProvider == null) {
      if (other._dataProvider != null) {
        return false;
      }
    } else if (!_dataProvider.equals(other._dataProvider)) {
      return false;
    }
    if (_dataSource == null) {
      if (other._dataSource != null) {
        return false;
      }
    } else if (!_dataSource.equals(other._dataSource)) {
      return false;
    }
    return true;
  }
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
return getHistoricalData(identifiers, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
} else {
return null;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final boolean INCLUDE_LAST_DAY = true;
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return null;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return null;
}
}
/**
 * Composite cache key: identifier bundle plus (dataSource, dataProvider, field).
 * Any of the three strings may be null — getHistoricalData(IdentifierBundle)
 * passes null for all three — so all members must be handled null-safely.
 */
private static class MetaDataKey implements Serializable {
  private final IdentifierBundle _dsids;
  private final String _dataSource;
  private final String _dataProvider;
  private final String _field;
  public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
    _dsids = dsids;
    _dataSource = dataSource;
    _dataProvider = dataProvider;
    _field = field;
  }
  @Override
  public int hashCode() {
    // BUG FIX: the previous implementation (_dsids.hashCode() ^ _field.hashCode())
    // threw NullPointerException whenever _field was null, which happens for
    // every lookup made through getHistoricalData(IdentifierBundle). Mix in all
    // four components null-safely; equal keys still produce equal hashes.
    final int shift = 17;
    int hc = _dsids.hashCode();
    hc *= shift;
    if (_dataSource != null) {
      hc += _dataSource.hashCode();
    }
    hc *= shift;
    if (_dataProvider != null) {
      hc += _dataProvider.hashCode();
    }
    hc *= shift;
    if (_field != null) {
      hc += _field.hashCode();
    }
    return hc;
  }
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof MetaDataKey)) {
      return false;
    }
    MetaDataKey other = (MetaDataKey) obj;
    // Field-by-field null-safe comparison.
    if (_field == null) {
      if (other._field != null) {
        return false;
      }
    } else if (!_field.equals(other._field)) {
      return false;
    }
    if (_dsids == null) {
      if (other._dsids != null) {
        return false;
      }
    } else if (!_dsids.equals(other._dsids)) {
      return false;
    }
    if (_dataProvider == null) {
      if (other._dataProvider != null) {
        return false;
      }
    } else if (!_dataProvider.equals(other._dataProvider)) {
      return false;
    }
    if (_dataSource == null) {
      if (other._dataSource != null) {
        return false;
      }
    } else if (!_dataSource.equals(other._dataSource)) {
      return false;
    }
    return true;
  }
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
return getHistoricalData(identifiers, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
} else {
return null;
}
}
}
Left
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final boolean INCLUDE_LAST_DAY = true;
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
} else {
return timeseries;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
/**
 * A {@link HistoricalDataSource} decorator that caches lookups in EHCache.
 * <p>
 * Two kinds of cache entry are maintained:
 * <ul>
 * <li>a {@link MetaDataKey} (identifier bundle + data source/provider/field) mapped to the
 *     {@link UniqueIdentifier} of the matching series — a cached {@code null} value records a
 *     known miss so repeated failed lookups do not hit the underlying source;</li>
 * <li>a {@link UniqueIdentifier} mapped to the time-series data itself.</li>
 * </ul>
 * Date-range variants fetch the full series (cached) and trim it with {@code subSeries}.
 */
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {

  private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
  /** Returned instead of {@code null} when no series data is available. */
  private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
  /** Final argument of {@code subSeries} is "include end"; simple date-range queries include the last day. */
  private static final boolean INCLUDE_LAST_DAY = true;
  /** Name under which the cache is registered with the manager. */
  private static final String CACHE_NAME = "HistoricalDataCache";

  /** The underlying source consulted on a cache miss. */
  private final HistoricalDataSource _underlying;
  /** The cache manager owning {@link #_cache}. */
  private final CacheManager _manager;
  /** The cache holding both meta-data and time-series entries. */
  private final Cache _cache;

  /**
   * Creates a caching provider, registering a fully-configured cache with the manager.
   * All cache-tuning parameters are passed straight through to EHCache.
   *
   * @param underlying  the source consulted on cache misses, not null
   * @param cacheManager  the EHCache manager, not null
   */
  public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
      boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
      RegisteredEventListeners registeredEventListeners) {
    ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
    ArgumentChecker.notNull(cacheManager, "cacheManager");
    _underlying = underlying;
    _manager = cacheManager;
    EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
        diskExpiryThreadIntervalSeconds, registeredEventListeners);
    _cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
  }

  /**
   * Creates a caching provider using a default-configured cache.
   *
   * @param underlying  the source consulted on cache misses, not null
   * @param manager  the EHCache manager, not null
   */
  public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
    ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
    ArgumentChecker.notNull(manager, "Cache Manager");
    _underlying = underlying;
    EHCacheUtils.addCache(manager, CACHE_NAME);
    _cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
    _manager = manager;
  }

  /**
   * @return the underlying
   */
  public HistoricalDataSource getUnderlying() {
    return _underlying;
  }

  /**
   * @return the CacheManager
   */
  public CacheManager getCacheManager() {
    return _manager;
  }

  /**
   * Looks up a series by identifier bundle and meta-data, caching both the resolved
   * unique identifier (or a miss) and the series data.
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
    MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
    Element element = _cache.get(key);
    if (element != null) {
      Serializable value = element.getValue();
      if (value instanceof UniqueIdentifier) {
        UniqueIdentifier uid = (UniqueIdentifier) value;
        s_logger.debug("retrieved UID: {} from cache", uid);
        LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
        return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
      } else if (value == null) {
        // A cached null records a known miss in the underlying source.
        s_logger.debug("cached miss on {}", identifiers);
        return Pair.of(null, EMPTY_TIMESERIES);
      } else {
        s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
        return Pair.of(null, EMPTY_TIMESERIES);
      }
    } else {
      Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
      // Cache the UID (or null, to record the miss) against the meta-data key.
      _cache.put(new Element(key, tsPair.getFirst()));
      if (tsPair.getFirst() != null) {
        _cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
      }
      return tsPair;
    }
  }

  /**
   * Looks up a series by identifier bundle and meta-data, trimmed to [start, end] inclusive.
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
    Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
    if (tsPair != null) {
      LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
      return Pair.of(tsPair.getKey(), timeSeries);
    } else {
      return Pair.of(null, EMPTY_TIMESERIES);
    }
  }

  /**
   * Looks up series data by unique identifier, caching the result. Never returns null:
   * an unexpected cached value yields {@link #EMPTY_TIMESERIES}.
   */
  @Override
  public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
    Element element = _cache.get(uid);
    if (element != null) {
      Serializable value = element.getValue();
      if (value instanceof LocalDateDoubleTimeSeries) {
        LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
        s_logger.debug("retrieved time series: {} from cache", ts);
        return ts;
      } else {
        s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
        return EMPTY_TIMESERIES;
      }
    } else {
      LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
      _cache.put(new Element(uid, ts));
      return ts;
    }
  }

  /**
   * Looks up series data by unique identifier, trimmed to [start, end] inclusive.
   */
  @Override
  public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
    LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
    if (ts != null) {
      return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
    } else {
      return EMPTY_TIMESERIES;
    }
  }

  /**
   * Cache key for a meta-data lookup: identifier bundle + data source/provider/field.
   * All fields may be null except by convention the bundle; null fields are handled
   * consistently by both {@link #equals} and {@link #hashCode}.
   */
  private static class MetaDataKey implements Serializable {
    private final IdentifierBundle _dsids;
    private final String _dataSource;
    private final String _dataProvider;
    private final String _field;

    public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
      _dsids = dsids;
      _dataSource = dataSource;
      _dataProvider = dataProvider;
      _field = field;
    }

    @Override
    public int hashCode() {
      final int shift = 17;
      // Null-safe on every field: equals() permits a null _dsids, so hashCode() must too
      // (previously this would NPE on a null bundle, breaking the equals/hashCode contract).
      int hc = (_dsids != null) ? _dsids.hashCode() : 0;
      hc *= shift;
      if (_dataSource != null) {
        hc += _dataSource.hashCode();
      }
      hc *= shift;
      if (_dataProvider != null) {
        hc += _dataProvider.hashCode();
      }
      hc *= shift;
      if (_field != null) {
        hc += _field.hashCode();
      }
      return hc;
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj == null) {
        return false;
      }
      if (!(obj instanceof MetaDataKey)) {
        return false;
      }
      MetaDataKey other = (MetaDataKey) obj;
      if (_field == null) {
        if (other._field != null) {
          return false;
        }
      } else if (!_field.equals(other._field)) {
        return false;
      }
      if (_dsids == null) {
        if (other._dsids != null) {
          return false;
        }
      } else if (!_dsids.equals(other._dsids)) {
        return false;
      }
      if (_dataProvider == null) {
        if (other._dataProvider != null) {
          return false;
        }
      } else if (!_dataProvider.equals(other._dataProvider)) {
        return false;
      }
      if (_dataSource == null) {
        if (other._dataSource != null) {
          return false;
        }
      } else if (!_dataSource.equals(other._dataSource)) {
        return false;
      }
      return true;
    }
  }

  /**
   * Looks up a series by identifier bundle only (no source/provider/field constraints).
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
    MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
    Element element = _cache.get(key);
    if (element != null) {
      Serializable value = element.getValue();
      if (value instanceof UniqueIdentifier) {
        UniqueIdentifier uid = (UniqueIdentifier) value;
        s_logger.debug("retrieved UID: {} from cache", uid);
        LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
        return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
      } else if (value == null) {
        s_logger.debug("cached miss on {}", identifiers);
        return Pair.of(null, EMPTY_TIMESERIES);
      } else {
        s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
        return Pair.of(null, EMPTY_TIMESERIES);
      }
    } else {
      Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
      _cache.put(new Element(key, tsPair.getFirst()));
      if (tsPair.getFirst() != null) {
        _cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
      }
      return tsPair;
    }
  }

  /**
   * Looks up a series by identifier bundle, trimmed to [start, end] inclusive.
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
    Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
    if (tsPair != null) {
      LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
      return Pair.of(tsPair.getKey(), timeSeries);
    } else {
      return Pair.of(null, EMPTY_TIMESERIES);
    }
  }

  /**
   * Looks up a series by identifier bundle with explicit bound inclusivity.
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
    Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
    if (tsPair != null) {
      // subSeries' final argument is "include end", hence the negation of exclusiveEnd.
      LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
      return Pair.of(tsPair.getKey(), timeSeries);
    } else {
      return Pair.of(null, EMPTY_TIMESERIES);
    }
  }

  /**
   * Looks up a series by identifier bundle and meta-data with explicit bound inclusivity.
   */
  @Override
  public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
      boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
    Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
    if (tsPair != null) {
      LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
      return Pair.of(tsPair.getKey(), timeSeries);
    } else {
      return Pair.of(null, EMPTY_TIMESERIES);
    }
  }

  /**
   * Looks up series data by unique identifier with explicit bound inclusivity.
   */
  @Override
  public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
    LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
    if (!timeseries.isEmpty()) {
      return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
    } else {
      return timeseries;
    }
  }
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
/**
 * Looks up the series for the bundle as of {@code currentDate}, caching the resolved
 * UniqueIdentifier under a meta-data key and the series itself under that UID.
 */
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
// NOTE(review): five arguments here, but the MetaDataKey constructor visible elsewhere in this
// file takes four (bundle, source, provider, field) with no date — confirm this class's
// MetaDataKey has a matching five-argument constructor.
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
// Cache hit on the meta-data key: resolve the series via the UID-keyed entry.
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
// Unexpected cached type. Unlike the sibling lookups this returns null rather than
// an empty pair — callers of this overload null-check the result.
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
// Cache miss: consult the underlying source and populate both cache entries.
// NOTE(review): currentDate is not forwarded to the underlying call — confirm intended.
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
// NOTE(review): no null guard on tsPair.getFirst() before using it as an Element key,
// unlike the guarded puts elsewhere in this file — verify Element tolerates a null key.
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
/**
 * Convenience lookup as of {@code currentDate} with no dataSource/dataProvider/field
 * qualifiers; delegates to the fully-qualified overload with nulls.
 */
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
}
Safe
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
/**
 * Cache key identifying a meta-data lookup: the identifier bundle plus the
 * dataSource/dataProvider/field triple, any component of which may be null.
 */
private static class MetaDataKey implements Serializable {

  private final IdentifierBundle _dsids;
  private final String _dataSource;
  private final String _dataProvider;
  private final String _field;

  public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
    _dsids = dsids;
    _dataSource = dataSource;
    _dataProvider = dataProvider;
    _field = field;
  }

  @Override
  public int hashCode() {
    final int shift = 17;
    // Scale-then-add folding; component order (dataSource, dataProvider, field) is significant.
    int result = _dsids.hashCode();
    result = result * shift + (_dataSource == null ? 0 : _dataSource.hashCode());
    result = result * shift + (_dataProvider == null ? 0 : _dataProvider.hashCode());
    result = result * shift + (_field == null ? 0 : _field.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    // instanceof is false for null, covering the null check as well.
    if (!(obj instanceof MetaDataKey)) {
      return false;
    }
    MetaDataKey that = (MetaDataKey) obj;
    return nullSafeEquals(_field, that._field)
        && nullSafeEquals(_dsids, that._dsids)
        && nullSafeEquals(_dataProvider, that._dataProvider)
        && nullSafeEquals(_dataSource, that._dataSource);
  }

  // Null-tolerant equality for the nullable key components.
  private static boolean nullSafeEquals(Object a, Object b) {
    return (a == b) || (a != null && a.equals(b));
  }
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
/**
 * Creates a caching wrapper around the given source, registering a fully
 * configured EHCache cache named {@code HistoricalDataCache} with the manager.
 *
 * @param underlying the historical data source to decorate, not null
 * @param cacheManager the EHCache manager to register the cache with, not null
 * @throws IllegalArgumentException if either argument is null (via ArgumentChecker)
 */
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
// The cache must be registered with the manager before it can be fetched;
// addCache passes the remaining EHCache tuning parameters straight through.
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
/**
 * Creates a caching wrapper using the manager's default cache configuration
 * for the {@code HistoricalDataCache} cache.
 *
 * @param underlying the historical data source to decorate, not null
 * @param manager the EHCache manager to register the cache with, not null
 * @throws IllegalArgumentException if either argument is null (via ArgumentChecker)
 */
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
// Register before fetch, mirroring the fully configured constructor above.
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
 * Returns the underlying historical data source that this cache decorates.
 *
 * @return the underlying source, not null (enforced by the constructors)
 */
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
 * Returns the EHCache manager holding this provider's cache.
 *
 * @return the cache manager, not null (enforced by the constructors)
 */
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
<<<<<<< MINE
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
=======
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
>>>>>>> YOURS
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
  // Consult the cache first; a hit avoids a round-trip to the underlying source.
  final Element cached = _cache.get(uid);
  if (cached == null) {
    // Miss: load from the underlying source and remember the result,
    // even if the loaded series is empty.
    final LocalDateDoubleTimeSeries loaded = _underlying.getHistoricalData(uid);
    _cache.put(new Element(uid, loaded));
    return loaded;
  }
  final Serializable value = cached.getValue();
  if (!(value instanceof LocalDateDoubleTimeSeries)) {
    // Unexpected payload under this key: report it and fall back to empty.
    s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
    return EMPTY_TIMESERIES;
  }
  final LocalDateDoubleTimeSeries series = (LocalDateDoubleTimeSeries) value;
  s_logger.debug("retrieved time series: {} from cache", series);
  return series;
}
@Override
// NOTE(review): INCLUDE_LAST_DAY is referenced here but is not declared
// anywhere in this copy of the class -- presumably a boolean constant meaning
// the end date is inclusive; confirm it exists (e.g. on HistoricalDataSource)
// or this will not compile.
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
// Delegates to the caching single-argument overload, then trims to the range.
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
<<<<<<< MINE
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
=======
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
>>>>>>> YOURS
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
// NOTE(review): MetaDataKey's only visible constructor takes four arguments
// (bundle, dataSource, dataProvider, field); this five-argument call passing
// currentDate will not compile unless another constructor exists -- confirm.
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
// Cache stores the UID under the meta-data key; the series itself is
// cached separately under the UID via getHistoricalData(uid).
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
// NOTE(review): unlike the other cache-miss paths in this class,
// tsPair.getFirst() is not null-checked before being used as a cache key;
// a miss from the underlying source would cache an Element with a null key.
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
// Convenience overload: no data source / provider / field constraints; simply
// delegates to the five-argument caching lookup with nulls for the triple.
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static class MetaDataKey implements Serializable {
  // Cache key identifying a time-series lookup by identifier bundle plus the
  // data source / data provider / data field triple (any component may be null).
  private static final long serialVersionUID = 1L;
  private final IdentifierBundle _dsids;
  private final String _dataSource;
  private final String _dataProvider;
  private final String _field;

  public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
    _dsids = dsids;
    _dataSource = dataSource;
    _dataProvider = dataProvider;
    _field = field;
  }

  // Null-safe equality helper shared by equals(); avoids repeating the
  // four-way null ladder of the original.
  private static boolean eq(Object a, Object b) {
    return (a == null) ? (b == null) : a.equals(b);
  }

  @Override
  public int hashCode() {
    final int shift = 17;
    // BUG FIX: equals() treats a null _dsids as legal (see eq()), so
    // hashCode() must be null-safe too; the original dereferenced _dsids
    // unconditionally and would NPE for a key built with a null bundle.
    int hc = (_dsids == null) ? 0 : _dsids.hashCode();
    hc *= shift;
    if (_dataSource != null) {
      hc += _dataSource.hashCode();
    }
    hc *= shift;
    if (_dataProvider != null) {
      hc += _dataProvider.hashCode();
    }
    hc *= shift;
    if (_field != null) {
      hc += _field.hashCode();
    }
    return hc;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(obj instanceof MetaDataKey)) {
      return false;
    }
    MetaDataKey other = (MetaDataKey) obj;
    return eq(_field, other._field)
        && eq(_dsids, other._dsids)
        && eq(_dataProvider, other._dataProvider)
        && eq(_dataSource, other._dataSource);
  }
}
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
<<<<<<< MINE
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
=======
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
>>>>>>> YOURS
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
  // Consult the cache first; a hit avoids a round-trip to the underlying source.
  final Element cached = _cache.get(uid);
  if (cached == null) {
    // Miss: load from the underlying source and remember the result,
    // even if the loaded series is empty.
    final LocalDateDoubleTimeSeries loaded = _underlying.getHistoricalData(uid);
    _cache.put(new Element(uid, loaded));
    return loaded;
  }
  final Serializable value = cached.getValue();
  if (!(value instanceof LocalDateDoubleTimeSeries)) {
    // Unexpected payload under this key: report it and fall back to empty.
    s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
    return EMPTY_TIMESERIES;
  }
  final LocalDateDoubleTimeSeries series = (LocalDateDoubleTimeSeries) value;
  s_logger.debug("retrieved time series: {} from cache", series);
  return series;
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
<<<<<<< MINE
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
=======
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
>>>>>>> YOURS
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
}
Unstructured
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
<<<<<<< MINE
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final boolean INCLUDE_LAST_DAY = true;
=======
>>>>>>> YOURS
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
<<<<<<< MINE
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
@Override
=======
>>>>>>> YOURS
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
}/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.historicaldata;
import java.io.Serializable;
import javax.time.calendar.LocalDate;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import net.sf.ehcache.event.RegisteredEventListeners;
import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
import org.apache.commons.lang.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.id.IdentifierBundle;
import com.opengamma.id.UniqueIdentifier;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ehcache.EHCacheUtils;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
*
*/
public class EHCachingHistoricalDataProvider implements HistoricalDataSource {
private static final Logger s_logger = LoggerFactory.getLogger(EHCachingHistoricalDataProvider.class);
<<<<<<< MINE
private static final LocalDateDoubleTimeSeries EMPTY_TIMESERIES = new ArrayLocalDateDoubleTimeSeries();
private static final boolean INCLUDE_LAST_DAY = true;
=======
>>>>>>> YOURS
private static final String CACHE_NAME = "HistoricalDataCache";
private final HistoricalDataSource _underlying;
private final CacheManager _manager;
private final Cache _cache;
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager cacheManager, int maxElementsInMemory, MemoryStoreEvictionPolicy memoryStoreEvictionPolicy,
boolean overflowToDisk, String diskStorePath, boolean eternal, long timeToLiveSeconds, long timeToIdleSeconds, boolean diskPersistent, long diskExpiryThreadIntervalSeconds,
RegisteredEventListeners registeredEventListeners) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(cacheManager, "cacheManager");
_underlying = underlying;
_manager = cacheManager;
EHCacheUtils.addCache(_manager, CACHE_NAME, maxElementsInMemory, memoryStoreEvictionPolicy, overflowToDisk, diskStorePath, eternal, timeToLiveSeconds, timeToIdleSeconds, diskPersistent,
diskExpiryThreadIntervalSeconds, registeredEventListeners);
_cache = EHCacheUtils.getCacheFromManager(_manager, CACHE_NAME);
}
public EHCachingHistoricalDataProvider(HistoricalDataSource underlying, CacheManager manager) {
ArgumentChecker.notNull(underlying, "Underlying Historical Data Provider");
ArgumentChecker.notNull(manager, "Cache Manager");
_underlying = underlying;
EHCacheUtils.addCache(manager, CACHE_NAME);
_cache = EHCacheUtils.getCacheFromManager(manager, CACHE_NAME);
_manager = manager;
}
/**
* @return the underlying
*/
public HistoricalDataSource getUnderlying() {
return _underlying;
}
/**
* @return the CacheManager
*/
public CacheManager getCacheManager() {
return _manager;
}
@Override
<<<<<<< MINE
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field) {
MetaDataKey key = new MetaDataKey(identifiers, dataSource, dataProvider, field);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, field);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle dsids, String dataSource, String dataProvider, String field, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(dsids, dataSource, dataProvider, field);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
=======
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String dataField) {
return getHistoricalData(identifiers, (LocalDate) null, dataSource, dataProvider, dataField);
>>>>>>> YOURS
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid) {
Element element = _cache.get(uid);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof LocalDateDoubleTimeSeries) {
LocalDateDoubleTimeSeries ts = (LocalDateDoubleTimeSeries) value;
s_logger.debug("retrieved time series: {} from cache", ts);
return ts;
} else {
s_logger.error("returned object {} from cache, not a LocalDateDoubleTimeSeries", value);
return EMPTY_TIMESERIES;
}
} else {
LocalDateDoubleTimeSeries ts = _underlying.getHistoricalData(uid);
_cache.put(new Element(uid, ts));
return ts;
}
}
@Override
<<<<<<< MINE
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, LocalDate end) {
LocalDateDoubleTimeSeries ts = getHistoricalData(uid);
if (ts != null) {
return (LocalDateDoubleTimeSeries) ts.subSeries(start, true, end, INCLUDE_LAST_DAY);
} else {
return EMPTY_TIMESERIES;
}
}
private static class MetaDataKey implements Serializable {
private final IdentifierBundle _dsids;
private final String _dataSource;
private final String _dataProvider;
private final String _field;
public MetaDataKey(IdentifierBundle dsids, String dataSource, String dataProvider, String field) {
_dsids = dsids;
_dataSource = dataSource;
_dataProvider = dataProvider;
_field = field;
}
@Override
public int hashCode() {
final int shift = 17;
int hc = _dsids.hashCode();
hc *= shift;
if (_dataSource != null) {
hc += _dataSource.hashCode();
}
hc *= shift;
if (_dataProvider != null) {
hc += _dataProvider.hashCode();
}
hc *= shift;
if (_field != null) {
hc += _field.hashCode();
}
return hc;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MetaDataKey)) {
return false;
}
MetaDataKey other = (MetaDataKey) obj;
if (_field == null) {
if (other._field != null) {
return false;
}
} else if (!_field.equals(other._field)) {
return false;
}
if (_dsids == null) {
if (other._dsids != null) {
return false;
}
} else if (!_dsids.equals(other._dsids)) {
return false;
}
if (_dataProvider == null) {
if (other._dataProvider != null) {
return false;
}
} else if (!_dataProvider.equals(other._dataProvider)) {
return false;
}
if (_dataSource == null) {
if (other._dataSource != null) {
return false;
}
} else if (!_dataSource.equals(other._dataSource)) {
return false;
}
return true;
}
}
@Override
=======
>>>>>>> YOURS
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers) {
<<<<<<< MINE
MetaDataKey key = new MetaDataKey(identifiers, null, null, null);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else if (value == null) {
s_logger.debug("cached miss on {}", identifiers);
return Pair.of(null, EMPTY_TIMESERIES);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return Pair.of(null, EMPTY_TIMESERIES);
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers);
_cache.put(new Element(key, tsPair.getFirst()));
if (tsPair.getFirst() != null) {
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
}
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, LocalDate end) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, true, end, INCLUDE_LAST_DAY);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
=======
return getHistoricalData(identifiers, null, null, null, null);
>>>>>>> YOURS
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, String dataSource, String dataProvider, String field, LocalDate start,
boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, dataSource, dataProvider, field);
<<<<<<< MINE
if (tsPair != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getSecond().subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return Pair.of(null, EMPTY_TIMESERIES);
}
}
@Override
public LocalDateDoubleTimeSeries getHistoricalData(UniqueIdentifier uid, LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
LocalDateDoubleTimeSeries timeseries = getHistoricalData(uid);
<<<<<<< MINE
if (!timeseries.isEmpty()) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, !exclusiveEnd);
=======
if (timeseries != null) {
return (LocalDateDoubleTimeSeries) timeseries.subSeries(start, inclusiveStart, end, exclusiveEnd);
>>>>>>> YOURS
} else {
return timeseries;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String dataField) {
MetaDataKey key = new MetaDataKey(currentDate, identifiers, dataSource, dataProvider, dataField);
Element element = _cache.get(key);
if (element != null) {
Serializable value = element.getValue();
if (value instanceof UniqueIdentifier) {
UniqueIdentifier uid = (UniqueIdentifier) value;
s_logger.debug("retrieved UID: {} from cache", uid);
LocalDateDoubleTimeSeries timeSeries = getHistoricalData(uid);
return new ObjectsPair<UniqueIdentifier, LocalDateDoubleTimeSeries>(uid, timeSeries);
} else {
s_logger.warn("returned object {} from cache, not a UniqueIdentifier", value);
return null;
}
} else {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = _underlying.getHistoricalData(identifiers, dataSource, dataProvider, dataField);
_cache.put(new Element(key, tsPair.getFirst()));
_cache.put(new Element(tsPair.getFirst(), tsPair.getSecond()));
return tsPair;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, String dataSource, String dataProvider, String field,
LocalDate start, boolean inclusiveStart, LocalDate end, boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate, dataSource, dataProvider, field);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate) {
return getHistoricalData(identifiers, currentDate, null, null, null);
}
@Override
public Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> getHistoricalData(IdentifierBundle identifiers, LocalDate currentDate, LocalDate start, boolean inclusiveStart, LocalDate end,
boolean exclusiveEnd) {
Pair<UniqueIdentifier, LocalDateDoubleTimeSeries> tsPair = getHistoricalData(identifiers, currentDate);
if (tsPair != null && tsPair.getValue() != null) {
LocalDateDoubleTimeSeries timeSeries = (LocalDateDoubleTimeSeries) tsPair.getValue().subSeries(start, inclusiveStart, end, exclusiveEnd);
return Pair.of(tsPair.getKey(), timeSeries);
} else {
return null;
}
}
}
Diff Result
No diff
Case 71 - java_ogplatform.rev_a93a7_a6e9b..MarkovChain.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, MersenneTwister64.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[] simulate(double timeToExpiry, double probState1, int n) {
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, MersenneTwister64.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[] simulate(double timeToExpiry, double probState1, int n) {
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] {m1, m2, m3 };
}
/**
 * Simulates n paths of the two-state volatility Markov chain up to the given expiry and
 * returns, for each path, the root-mean-square volatility over [0, timeToExpiry].
 *
 * Each path starts in state 1 with probability _probState1; holding times in each state are
 * exponential with rate _lambda12 (state 1) or _lambda21 (state 2).
 *
 * @param timeToExpiry the horizon over which variance is accumulated
 * @param n the number of paths to simulate
 * @return per-path effective volatilities sqrt(integrated variance / timeToExpiry)
 */
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
// draw the initial state from the supplied initial-state probability
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// exponential holding time via inverse-transform sampling
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
// full holding interval fits before expiry: accrue it and switch state
var += tau * vol * vol;
state1 = !state1;
} else {
// interval straddles expiry: accrue only the remaining time, no switch needed
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
// running total of integrated variance, only used by the debug output below
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
/**
 * Simulates n volatility paths observed at each expiry, drawing the first holding time
 * from the full uniform range [0, 1) (i.e. no stratification of the first jump).
 *
 * @param expiries strictly increasing observation times
 * @param n the number of paths
 * @return effective volatilities indexed [expiry][path]
 */
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
/**
 * Simulates n paths of the two-state volatility chain, recording the effective (RMS)
 * volatility at each of the supplied expiries.
 *
 * The first holding time of each path is drawn with its uniform variate restricted to
 * [a, b) — presumably to allow stratified sampling of the first jump across batches of
 * calls; TODO confirm intended use with callers.
 *
 * NOTE(review): unlike the single-expiry simulate, the state is flipped even when the drawn
 * interval crosses the final expiry — harmless there since the loop then exits, but worth
 * confirming the two methods are intentionally different.
 *
 * @param expiries strictly increasing, non-empty observation times
 * @param n the number of paths
 * @param a lower bound (inclusive) of the uniform variate for the first jump, in [0, 1)
 * @param b upper bound of the uniform variate for the first jump, must satisfy a < b <= 1
 * @return effective volatilities indexed [expiry][path]
 */
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
// expiries must be strictly increasing
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
// double[] sum = new double[m];
for (int i = 0; i < n; i++) {
// j indexes the next expiry still to be recorded for this path
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// if (t == 0) {
// double x = (0.5 + i) / n;
// tau = -Math.log(x) / lambda;
// } else {
// first jump uses the restricted uniform range [a, b); later jumps use the full range
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
// interval ends before the next expiry: accrue the whole interval
var += tau * vol * vol;
} else {
// interval crosses expiry j: accrue only up to expiries[j], record the RMS vol there
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
// the same interval may span several further expiries; record each in turn
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
// accrue the tail of the interval past the last expiry crossed
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] {m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
// double[] sum = new double[m];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// if (t == 0) {
// double x = (0.5 + i) / n;
// tau = -Math.log(x) / lambda;
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, MersenneTwister.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
/**
 * Simulates n paths of the two-state volatility Markov chain up to the given expiry and
 * returns, for each path, the root-mean-square volatility over [0, timeToExpiry].
 * In this variant the initial-state probability is passed per call rather than being
 * stored on the instance.
 *
 * @param timeToExpiry the horizon over which variance is accumulated
 * @param probState1 probability that a path starts in state 1, in [0, 1]
 * @param n the number of paths to simulate
 * @return per-path effective volatilities sqrt(integrated variance / timeToExpiry)
 */
public double[] simulate(double timeToExpiry, double probState1, int n) {
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
// draw the initial state
boolean state1 = probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// exponential holding time via inverse-transform sampling
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
// full holding interval fits before expiry: accrue it and switch state
var += tau * vol * vol;
state1 = !state1;
} else {
// interval straddles expiry: accrue only the remaining time
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
// running total of integrated variance, only used by the debug output below
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, MersenneTwister.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[] simulate(double timeToExpiry, double probState1, int n) {
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates a chain with the default random seed.
 *
 * BUG FIX: this constructor previously called {@code this(vol1, vol2, lambda12, lambda21,
 * MersenneTwister.DEFAULT_SEED)}, which drops {@code probState1} and resolves to this very
 * 5-double-argument constructor (the seed widens to double) — a recursive constructor
 * invocation, which does not compile. It must delegate to the 6-argument constructor,
 * forwarding probState1 and passing the default seed explicitly.
 *
 * @param vol1 volatility in state 1, >= 0
 * @param vol2 volatility in state 2, >= 0
 * @param lambda12 transition rate from state 1 to state 2, >= 0
 * @param lambda21 transition rate from state 2 to state 1, >= 0
 * @param probState1 probability of starting in state 1, in [0, 1]
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
  this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister.DEFAULT_SEED);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] { m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates a chain with the default random seed.
 *
 * BUG FIX: this constructor previously called {@code this(vol1, vol2, lambda12, lambda21,
 * MersenneTwister.DEFAULT_SEED)}, which drops {@code probState1} and resolves to this very
 * 5-double-argument constructor (the seed widens to double) — a recursive constructor
 * invocation, which does not compile. It must delegate to the 6-argument constructor,
 * forwarding probState1 and passing the default seed explicitly.
 *
 * @param vol1 volatility in state 1, >= 0
 * @param vol2 volatility in state 2, >= 0
 * @param lambda12 transition rate from state 1 to state 2, >= 0
 * @param lambda21 transition rate from state 2 to state 1, >= 0
 * @param probState1 probability of starting in state 1, in [0, 1]
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
  this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister.DEFAULT_SEED);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] { m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates a chain with the default random seed and a default initial-state probability.
 *
 * BUG FIX: in this merged version of the class the 5-argument constructor takes five
 * doubles (the last being probState1), so the previous call
 * {@code this(vol1, vol2, lambda12, lambda21, MersenneTwister.DEFAULT_SEED)} passed the
 * (large, int-to-double widened) seed as probState1 and failed the
 * {@code probState1 <= 1.0} validation at runtime. We now delegate to the full
 * 6-argument constructor, defaulting probState1 to the chain's stationary probability of
 * state 1, lambda21 / (lambda12 + lambda21), with the default seed.
 *
 * @param vol1 volatility in state 1, >= 0
 * @param vol2 volatility in state 2, >= 0
 * @param lambda12 transition rate from state 1 to state 2, >= 0
 * @param lambda21 transition rate from state 2 to state 1, >= 0
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
  this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] { m1, m2, m3 };
}
/**
 * Simulates {@code n} paths of the two-state volatility Markov chain up to
 * {@code timeToExpiry} and returns, for each path, the root-mean-square
 * volatility realized over the period.
 *
 * @param timeToExpiry the horizon of each path
 * @param n the number of paths to simulate
 * @return array of length {@code n} of realized (time-averaged) volatilities
 */
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
// Draw the initial state: state 1 with probability _probState1.
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0; // accumulated variance (integral of vol^2 dt) along this path
while (t < timeToExpiry) {
// Pick the volatility and the transition intensity of the current state.
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// Exponentially distributed sojourn time in the current state.
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
// Full sojourn fits before expiry: accumulate and switch state.
var += tau * vol * vol;
state1 = !state1;
} else {
// Sojourn straddles expiry: only count the time up to expiry.
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var; // running total of path variances (debug only, see below)
}
sum /= n; // mean realized variance; unused except by the commented-out debug print
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
/**
 * Simulates realized volatilities at several expiries; delegates to the four-argument
 * overload with (a, b) = (0, 1), i.e. the first sojourn time of each path is drawn
 * from the full uniform range (no stratification of the first draw).
 *
 * @param expiries strictly increasing expiries
 * @param n the number of paths
 * @return matrix of realized volatilities indexed [expiry][path]
 */
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
/**
 * Simulates {@code n} paths of the two-state volatility chain and records the
 * realized (time-averaged) volatility of each path at every expiry in
 * {@code expiries}.
 *
 * The first sojourn time of each path maps the uniform draw into the
 * sub-interval (a, b) — presumably so callers can stratify the first jump
 * across batches of paths; TODO confirm against callers.
 *
 * @param expiries strictly increasing, non-empty expiries
 * @param n the number of paths
 * @param a lower bound of the uniform sub-range for the first draw, in [0, 1)
 * @param b upper bound of the uniform sub-range for the first draw, in (a, 1]
 * @return matrix of realized volatilities indexed [expiry][path]
 */
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
// Expiries must be strictly increasing for the bucket bookkeeping below.
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
for (int i = 0; i < n; i++) {
int j = 0; // index of the next expiry to record
// Draw the initial state: state 1 with probability _probState1.
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0; // accumulated variance (integral of vol^2 dt) along this path
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// Exponential sojourn time; the first draw is restricted to (a, b).
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
// Sojourn ends before the next expiry: accumulate in full.
var += tau * vol * vol;
} else {
// Sojourn crosses expiries[j]: accumulate up to the expiry
// (t - tau is the sojourn start), record the realized vol, ...
var += (expiries[j] - t + tau) * vol * vol;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
// ... record every further expiry the sojourn also crosses ...
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
// ... then carry the remainder of the sojourn past the last crossed
// expiry. NOTE(review): when the loop above exits with j == m this
// still reads expiries[j - 1] == expiries[m - 1], which is in range,
// but the added remainder is never used — confirm intended.
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
/**
 * Creates the chain with the default Mersenne-twister seed.
 *
 * @param vol1 volatility of state 1, not negative
 * @param vol2 volatility of state 2, not negative
 * @param lambda12 transition intensity from state 1 to state 2, not negative
 * @param lambda21 transition intensity from state 2 to state 1, not negative
 * @param probState1 probability of starting in state 1, in [0, 1]
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
/**
 * Creates the chain with an explicit random-number-generator seed.
 *
 * @param vol1 volatility of state 1, not negative
 * @param vol2 volatility of state 2, not negative
 * @param lambda12 transition intensity from state 1 to state 2, not negative
 * @param lambda21 transition intensity from state 2 to state 1, not negative
 * @param probState1 probability of starting in state 1, in [0, 1]
 * @param seed seed for the Mersenne-twister generator
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
// Stationary probability of state 1 (currently unused outside debug code).
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates the chain with the default seed and the stationary probability
 * pi1 = lambda21 / (lambda12 + lambda21) as the initial probability of state 1.
 *
 * NOTE(review): the previous body passed {@code MersenneTwister.DEFAULT_SEED} into
 * the {@code probState1} slot of the five-argument constructor, so this constructor
 * always failed the {@code 0 <= probState1 <= 1} validation at runtime. Starting
 * from the stationary distribution is the natural default — confirm against callers.
 *
 * @param vol1 volatility of state 1, not negative
 * @param vol2 volatility of state 2, not negative
 * @param lambda12 transition intensity from state 1 to state 2, not negative
 * @param lambda21 transition intensity from state 2 to state 1, not negative
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
/**
 * Black price of a European call, averaged over the supplied volatility scenarios.
 *
 * @param forward the forward of the underlying
 * @param df the discount factor
 * @param strike the option strike
 * @param timeToExiry the time to expiry
 * @param sigmas the volatility scenarios to average over; must be non-empty
 * @return the arithmetic mean of the per-scenario Black prices
 */
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
  final EuropeanVanillaOption call = new EuropeanVanillaOption(strike, timeToExiry, true);
  final Function1D<BlackFunctionData, Double> pricer = new BlackPriceFunction().getPriceFunction(call);
  double total = 0.0;
  for (final double sigma : sigmas) {
    total += pricer.evaluate(new BlackFunctionData(forward, df, sigma));
  }
  return total / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] { m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates the chain with the default seed and the stationary probability
 * pi1 = lambda21 / (lambda12 + lambda21) as the initial probability of state 1.
 *
 * NOTE(review): this region held an unresolved merge conflict. Both constructors are
 * kept (their signatures differ). The four-argument variant previously delegated the
 * default seed into the probState1 slot, which always failed validation; it now
 * delegates to the six-argument constructor with the stationary probability —
 * confirm against callers.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
/**
 * Creates the chain with the default Mersenne-twister seed.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] {m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
// double[] sum = new double[m];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// if (t == 0) {
// double x = (0.5 + i) / n;
// tau = -Math.log(x) / lambda;
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
/**
 * Creates the chain with the default seed and the stationary probability
 * pi1 = lambda21 / (lambda12 + lambda21) as the initial probability of state 1.
 *
 * NOTE(review): this region held an unresolved merge conflict. Both constructors are
 * kept (their signatures differ). The four-argument variant previously delegated the
 * default seed into the probState1 slot, which always failed validation; it now
 * delegates to the six-argument constructor with the stationary probability —
 * confirm against callers.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
/**
 * Creates the chain with the default Mersenne-twister seed.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] {m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
// double[] sum = new double[m];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// if (t == 0) {
// double x = (0.5 + i) / n;
// tau = -Math.log(x) / lambda;
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
/**
 * Creates the chain with the default Mersenne-twister seed.
 *
 * NOTE(review): this region held an unresolved merge conflict. Both constructors are
 * kept (their signatures differ).
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
/**
 * Creates the chain with the default seed and the stationary probability
 * pi1 = lambda21 / (lambda12 + lambda21) as the initial probability of state 1.
 * The merged four-argument variant delegated the default seed into the probState1
 * slot, which always failed validation; it now delegates to the six-argument
 * constructor with the stationary probability — confirm against callers.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
CEVPriceFunction func = new CEVPriceFunction();
Function1D<CEVFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
CEVFunctionData data = new CEVFunctionData(forward, df, sigma, beta);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
int nTime = forwards.length;
int nStrikes = strike.length;
Validate.isTrue(nTime == df.length);
Validate.isTrue(nTime == expiries.length);
Validate.isTrue(nTime == sigmas.length);
BlackPriceFunction func = new BlackPriceFunction();
double[][] price = new double[nTime][nStrikes];
double t, k;
for (int j = 0; j < nTime; j++) {
t = expiries[j];
double[] tSigmas = sigmas[j];
for (int i = 0; i < nStrikes; i++) {
k = strike[i];
EuropeanVanillaOption option = new EuropeanVanillaOption(k, t, true);
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : tSigmas) {
BlackFunctionData data = new BlackFunctionData(forwards[j], df[j], sigma);
sum += priceFunc.evaluate(data);
}
price[j][i] = sum / tSigmas.length;
}
}
return price;
}
public double[] getMoments(double t, double[] sigmas) {
double sum1 = 0;
double sum2 = 0;
double sum3 = 0;
for (double sigma : sigmas) {
double var = sigma * sigma;
sum1 += var;
sum2 += var * var;
sum3 += var * var * var;
}
int n = sigmas.length;
double m1 = sum1 / n;
double m2 = (sum2 - n * m1 * m1) / (n - 1);
double m3 = (sum3 - 3 * m1 * sum2 + 2 * n * m1 * m1 * m1) / n;
//System.out.println("MC m1: " + m1 + " m2: " + m2 + " m3: " + m3);
return new double[] {m1, m2, m3 };
}
public double[] simulate(double timeToExpiry, int n) {
double vol, lambda, tau;
double[] vols = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (t < timeToExpiry) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
tau = -Math.log(_rand.nextDouble()) / lambda;
if (t + tau < timeToExpiry) {
var += tau * vol * vol;
state1 = !state1;
} else {
var += (timeToExpiry - t) * vol * vol;
}
t += tau;
}
vols[i] = Math.sqrt(var / timeToExpiry);
sum += var;
}
sum /= n;
// debug
// double ave = _pi1 * timeToExpiry + (probState1 - _pi1) / (_lambda12 + _lambda21) * (1 - Math.exp(-(_lambda12 + _lambda21) * timeToExpiry));
// double exvar = _vol2 * _vol2 * timeToExpiry + (_vol1 * _vol1 - _vol2 * _vol2) * ave;
// System.out.println("debug " + "\t" + sum + "\t" + exvar);
return vols;
}
public double[][] simulate(double[] expiries, int n) {
return simulate(expiries, n, 0.0, 1.0);
}
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
Validate.notNull(expiries);
Validate.isTrue(b > a, "need b > a");
Validate.isTrue(a >= 0.0, "Nedd a >= 0.0");
Validate.isTrue(b <= 1.0, "Nedd b <= 1.0");
int m = expiries.length;
Validate.isTrue(m > 0);
for (int j = 1; j < m; j++) {
Validate.isTrue(expiries[j] > expiries[j - 1]);
}
double vol, lambda, tau;
double[][] vols = new double[m][n];
// double[] sum = new double[m];
for (int i = 0; i < n; i++) {
int j = 0;
boolean state1 = _probState1 > _rand.nextDouble();
double t = 0;
double var = 0.0;
while (j < m && t < expiries[m - 1]) {
if (state1) {
vol = _vol1;
lambda = _lambda12;
} else {
vol = _vol2;
lambda = _lambda21;
}
// if (t == 0) {
// double x = (0.5 + i) / n;
// tau = -Math.log(x) / lambda;
// } else {
if (t == 0) {
tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
} else {
tau = -Math.log(_rand.nextDouble()) / lambda;
}
state1 = !state1;
t += tau;
if (t < expiries[j]) {
var += tau * vol * vol;
} else {
var += (expiries[j] - t + tau) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
while (j < m && t > expiries[j]) {
var += (expiries[j] - expiries[j - 1]) * vol * vol;
// sum[j] += var;
vols[j][i] = Math.sqrt(var / expiries[j]);
j++;
}
var += (t - expiries[j - 1]) * vol * vol;
}
}
}
return vols;
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.model.finitedifference;
import org.apache.commons.lang.Validate;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.MersenneTwister64;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.BlackPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVFunctionData;
import com.opengamma.financial.model.option.pricing.analytic.formula.CEVPriceFunction;
import com.opengamma.financial.model.option.pricing.analytic.formula.EuropeanVanillaOption;
import com.opengamma.math.function.Function1D;
/**
*
*/
public class MarkovChain {
private final double _vol1;
private final double _vol2;
private final double _lambda12;
private final double _lambda21;
private final double _probState1;
@SuppressWarnings("unused")
private final double _pi1;
private final MersenneTwister _rand;
/**
 * Creates the chain with the default Mersenne-twister seed.
 *
 * NOTE(review): this region held an unresolved merge conflict. Both constructors are
 * kept (their signatures differ).
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1) {
this(vol1, vol2, lambda12, lambda21, probState1, MersenneTwister64.DEFAULT_SEED);
}
/**
 * Creates the chain with the default seed and the stationary probability
 * pi1 = lambda21 / (lambda12 + lambda21) as the initial probability of state 1.
 * The merged four-argument variant delegated the default seed into the probState1
 * slot, which always failed validation; it now delegates to the six-argument
 * constructor with the stationary probability — confirm against callers.
 */
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21) {
this(vol1, vol2, lambda12, lambda21, lambda21 / (lambda12 + lambda21), MersenneTwister.DEFAULT_SEED);
}
public MarkovChain(final double vol1, final double vol2, final double lambda12, final double lambda21, final double probState1, int seed) {
Validate.isTrue(vol1 >= 0);
Validate.isTrue(vol2 >= 0);
Validate.isTrue(lambda12 >= 0);
Validate.isTrue(lambda21 >= 0);
Validate.isTrue(probState1 >= 0 && probState1 <= 1.0);
_vol1 = vol1;
_vol2 = vol2;
_lambda12 = lambda12;
_lambda21 = lambda21;
_probState1 = probState1;
_pi1 = lambda21 / (lambda12 + lambda21);
_rand = new MersenneTwister64(seed);
}
public double price(final double forward, final double df, final double strike, final double timeToExiry, double[] sigmas) {
EuropeanVanillaOption option = new EuropeanVanillaOption(strike, timeToExiry, true);
BlackPriceFunction func = new BlackPriceFunction();
Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
double sum = 0;
for (double sigma : sigmas) {
BlackFunctionData data = new BlackFunctionData(forward, df, sigma);
sum += priceFunc.evaluate(data);
}
return sum / sigmas.length;
}
/**
 * Prices a European call by averaging the CEV price over the supplied volatility scenarios.
 *
 * @param forward the forward
 * @param df the discount factor
 * @param strike the strike
 * @param timeToExiry the time to expiry
 * @param beta the CEV elasticity parameter
 * @param sigmas the volatility scenarios to average over, not empty
 * @return the scenario-average CEV price of the call
 */
public double priceCEV(final double forward, final double df, final double strike, final double timeToExiry, final double beta, double[] sigmas) {
    final EuropeanVanillaOption call = new EuropeanVanillaOption(strike, timeToExiry, true);
    final Function1D<CEVFunctionData, Double> cevPrice = new CEVPriceFunction().getPriceFunction(call);
    double total = 0.0;
    for (int i = 0; i < sigmas.length; i++) {
        total += cevPrice.evaluate(new CEVFunctionData(forward, df, sigmas[i], beta));
    }
    return total / sigmas.length;
}
/**
 * Prices a grid of European calls (one per expiry/strike pair) by averaging the Black
 * price over each expiry's volatility scenarios.
 *
 * @param forwards the forward for each expiry, not null
 * @param df the discount factor for each expiry, same length as forwards
 * @param strike the strikes, not null
 * @param expiries the expiries, same length as forwards
 * @param sigmas the volatility scenarios per expiry, same outer length as forwards
 * @return an expiries-by-strikes matrix of scenario-average prices
 */
public double[][] price(final double[] forwards, final double[] df, final double[] strike, final double[] expiries, double[][] sigmas) {
    final int nTime = forwards.length;
    final int nStrikes = strike.length;
    Validate.isTrue(nTime == df.length);
    Validate.isTrue(nTime == expiries.length);
    Validate.isTrue(nTime == sigmas.length);
    final BlackPriceFunction func = new BlackPriceFunction();
    final double[][] price = new double[nTime][nStrikes];
    for (int j = 0; j < nTime; j++) {
        final double expiry = expiries[j];
        final double[] scenarioVols = sigmas[j];
        for (int i = 0; i < nStrikes; i++) {
            final EuropeanVanillaOption option = new EuropeanVanillaOption(strike[i], expiry, true);
            final Function1D<BlackFunctionData, Double> priceFunc = func.getPriceFunction(option);
            double total = 0.0;
            for (final double sigma : scenarioVols) {
                total += priceFunc.evaluate(new BlackFunctionData(forwards[j], df[j], sigma));
            }
            price[j][i] = total / scenarioVols.length;
        }
    }
    return price;
}
/**
 * Computes moments of the squared volatilities (variances): the mean, the (n-1)-normalised
 * sample variance, and the third central moment.
 * <p>
 * Note: the parameter {@code t} is not used in the computation.
 *
 * @param t unused
 * @param sigmas the volatilities whose squares are analysed, length at least 2 for a finite second moment
 * @return array of {mean, sample variance, third central moment} of the variances
 */
public double[] getMoments(double t, double[] sigmas) {
    final int n = sigmas.length;
    double s1 = 0.0;
    double s2 = 0.0;
    double s3 = 0.0;
    for (final double sigma : sigmas) {
        final double v = sigma * sigma;
        s1 += v;
        s2 += v * v;
        s3 += v * v * v;
    }
    final double m1 = s1 / n;
    final double m2 = (s2 - n * m1 * m1) / (n - 1);
    final double m3 = (s3 - 3 * m1 * s2 + 2 * n * m1 * m1 * m1) / n;
    return new double[] {m1, m2, m3 };
}
/**
 * Simulates n paths of the chain to a single expiry and returns the realised volatility
 * sqrt(integrated variance / timeToExpiry) of each path.
 *
 * @param timeToExpiry the expiry time of the paths, positive
 * @param n the number of Monte Carlo paths
 * @return the realised volatility of each path, length n
 */
public double[] simulate(double timeToExpiry, int n) {
    // Removed: a 'sum' accumulator that was computed and normalised but only read by
    // commented-out debug code (dead code); RNG consumption is unchanged.
    double vol, lambda, tau;
    double[] vols = new double[n];
    for (int i = 0; i < n; i++) {
        // Draw the initial regime for this path.
        boolean state1 = _probState1 > _rand.nextDouble();
        double t = 0;
        double var = 0.0;
        while (t < timeToExpiry) {
            if (state1) {
                vol = _vol1;
                lambda = _lambda12;
            } else {
                vol = _vol2;
                lambda = _lambda21;
            }
            // Exponentially distributed holding time in the current regime.
            tau = -Math.log(_rand.nextDouble()) / lambda;
            if (t + tau < timeToExpiry) {
                // Regime switch occurs before expiry: accrue variance and flip state.
                var += tau * vol * vol;
                state1 = !state1;
            } else {
                // Holding period runs past expiry: accrue only up to expiry.
                var += (timeToExpiry - t) * vol * vol;
            }
            t += tau;
        }
        vols[i] = Math.sqrt(var / timeToExpiry);
    }
    return vols;
}
/**
 * Simulates paths recording the realised volatility at each expiry, drawing the first
 * jump time from the full uniform range; equivalent to {@code simulate(expiries, n, 0.0, 1.0)}.
 *
 * @param expiries the expiries to record at, strictly increasing, not null or empty
 * @param n the number of Monte Carlo paths
 * @return an expiries.length by n array of realised volatilities
 */
public double[][] simulate(double[] expiries, int n) {
    return simulate(expiries, n, 0.0, 1.0);
}
/**
 * Simulates n paths of the chain, recording the realised volatility
 * sqrt(integrated variance / expiry) of each path at every requested expiry.
 * <p>
 * The first holding time of each path is drawn from a uniform restricted to [a, b)
 * rather than [0, 1); with (a, b) = (0, 1) this is plain sampling, narrower ranges
 * restrict the first jump time.
 *
 * @param expiries the expiries to record at, strictly increasing, not null or empty
 * @param n the number of Monte Carlo paths
 * @param a lower bound of the uniform draw for the first jump, 0 &lt;= a &lt; b
 * @param b upper bound of the uniform draw for the first jump, a &lt; b &lt;= 1
 * @return an expiries.length by n array of realised volatilities
 */
public double[][] simulate(final double[] expiries, final int n, final double a, final double b) {
    // Fix: validation messages read "Nedd" instead of "Need"; logic otherwise unchanged.
    Validate.notNull(expiries);
    Validate.isTrue(b > a, "need b > a");
    Validate.isTrue(a >= 0.0, "Need a >= 0.0");
    Validate.isTrue(b <= 1.0, "Need b <= 1.0");
    int m = expiries.length;
    Validate.isTrue(m > 0);
    for (int j = 1; j < m; j++) {
        Validate.isTrue(expiries[j] > expiries[j - 1]);
    }
    double vol, lambda, tau;
    double[][] vols = new double[m][n];
    for (int i = 0; i < n; i++) {
        int j = 0;
        boolean state1 = _probState1 > _rand.nextDouble();
        double t = 0;
        double var = 0.0;
        while (j < m && t < expiries[m - 1]) {
            if (state1) {
                vol = _vol1;
                lambda = _lambda12;
            } else {
                vol = _vol2;
                lambda = _lambda21;
            }
            // Exponential holding time; the very first draw per path is restricted to [a, b).
            if (t == 0) {
                tau = -Math.log(a + (b - a) * _rand.nextDouble()) / lambda;
            } else {
                tau = -Math.log(_rand.nextDouble()) / lambda;
            }
            state1 = !state1;
            t += tau;
            if (t < expiries[j]) {
                // Jump occurs before the next expiry: accrue the whole holding period.
                var += tau * vol * vol;
            } else {
                // Jump passes one or more expiries: record realised vol at each expiry passed,
                // accruing variance at the pre-jump volatility throughout.
                // (expiries[j] - t + tau) is expiries[j] minus the pre-jump time, as t was already advanced.
                var += (expiries[j] - t + tau) * vol * vol;
                vols[j][i] = Math.sqrt(var / expiries[j]);
                j++;
                while (j < m && t > expiries[j]) {
                    var += (expiries[j] - expiries[j - 1]) * vol * vol;
                    vols[j][i] = Math.sqrt(var / expiries[j]);
                    j++;
                }
                var += (t - expiries[j - 1]) * vol * vol;
            }
        }
    }
    return vols;
}
}
Diff Result
No diff
Case 72 - java_ogplatform.rev_b23d5_e4471..DbHistoricalTimeSeriesMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers, distinguishing them from
 * document identifiers that share the same scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data field names.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data source names.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data provider names.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation time names.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Creates an instance.
 *
 * @param dbConnector the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
    super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
    setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
    // All dimension tables share the single "hts_dimension_seq" id sequence.
    _nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
    _dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
    _dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
    _dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
    _observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
    return _nameTable;
}
/**
 * Gets the dimension table helper for data field names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
    return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data source names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
    return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data provider names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
    return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation time names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
    return _observationTimeTable;
}
//-------------------------------------------------------------------------
/**
 * Gets the available meta-data (known dimension values), populating only the
 * sections the request asked for.
 *
 * @param request the meta-data request, not null
 * @return the populated meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
    // Only hit the dimension tables that were requested.
    if (request.isDataFields()) {
        metaData.setDataFields(getDataFieldTable().names());
    }
    if (request.isDataSources()) {
        metaData.setDataSources(getDataSourceTable().names());
    }
    if (request.isDataProviders()) {
        metaData.setDataProviders(getDataProviderTable().names());
    }
    if (request.isObservationTimes()) {
        metaData.setObservationTimes(getObservationTimeTable().names());
    }
    return metaData;
}
//-------------------------------------------------------------------------
/**
 * Searches for documents matching the request.
 * <p>
 * Fix: the loop binding the external-id search parameters appeared twice (apparent
 * merge residue); the unguarded first copy, which bound the same values again, is removed.
 * The remaining copy is guarded by {@code alwaysMatches() == false} and also binds the
 * generated SQL fragment and search-type arguments the query template uses.
 *
 * @param request the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
    ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
    s_logger.debug("search {}", request);
    final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
    final List<ObjectId> objectIds = request.getObjectIds();
    final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
    // Short-circuit requests that can never match anything.
    if ((objectIds != null && objectIds.size() == 0) ||
        (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
        result.setPaging(Paging.of(request.getPagingRequest(), 0));
        return result;
    }
    final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
    final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
    args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
    args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
    args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
    args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
    args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
    args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
    args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
    args.addDateNullIgnored("id_validity_date", request.getValidityDate());
    args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
    if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
        // Bind each external id once, plus the generated WHERE fragment and search type.
        int i = 0;
        for (ExternalId id : externalIdSearch) {
            args.addValue("key_scheme" + i, id.getScheme().getName());
            args.addValue("key_value" + i, id.getValue());
            i++;
        }
        args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
        args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
        args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
    }
    if (objectIds != null) {
        // Comma-separated list of row oids, spliced into the SQL by the template.
        StringBuilder buf = new StringBuilder(objectIds.size() * 10);
        for (ObjectId objectId : objectIds) {
            checkScheme(objectId);
            buf.append(extractOid(objectId)).append(", ");
        }
        buf.setLength(buf.length() - 2);
        args.addValue("sql_search_object_ids", buf.toString());
    }
    args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
    args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
    String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
    searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
    return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism, so the WHERE fragment is built here,
 * referencing the numbered {@code :key_scheme<i>}/{@code :key_value<i>} bind parameters.
 *
 * @param idSearch the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
    final StringBuilder where = new StringBuilder();
    for (int i = 0; i < idSearch.size(); i++) {
        if (i > 0) {
            where.append("OR ");
        }
        where.append("(key_scheme = :key_scheme").append(i).append(" AND key_value = :key_value").append(i).append(") ");
    }
    return where.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 *
 * @param uniqueId the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    // Data-point style versions embed an ISO-8601 duration (see createTimeSeriesUniqueId),
    // which always contains 'P'; such ids are resolved as a version-correction locator.
    // NOTE(review): the contains("P") test is a heuristic — confirm plain row versions never contain 'P'.
    if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
        VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
        return get(uniqueId.getObjectId(), vc);
    }
    return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at the given version-correction.
 *
 * @param objectId the object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
    return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a time-series information document.
 *
 * @param request the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
    return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Inserts a new document.
 * <p>
 * Writes the main row, ensures each dimension value exists in its lookup table,
 * and links the document to its external identifiers, creating id-key rows only
 * when they are not already present.
 *
 * @param document the document, not null
 * @return the new document, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
    // All descriptive meta-data fields are mandatory.
    ArgumentChecker.notNull(document.getInfo(), "document.info");
    ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
    ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
    ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
    ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
    ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
    final long docId = nextId("hts_master_seq");
    // keep the existing object id when the document already has one, otherwise the row id doubles as the oid
    final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
    // the arguments for inserting into the table
    final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
    final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
        .addValue("doc_id", docId)
        .addValue("doc_oid", docOid)
        .addTimestamp("ver_from_instant", document.getVersionFromInstant())
        .addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
        .addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
        .addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
        // ensure() returns the dimension row id, inserting the value if it is new
        .addValue("name_id", getNameTable().ensure(info.getName()))
        .addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
        .addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
        .addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
        .addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
    // the arguments for inserting into the idkey tables
    final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
    final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
    final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
    for (ExternalIdWithDates id : info.getExternalIdBundle()) {
        final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
            .addValue("doc_id", docId)
            .addValue("key_scheme", id.getExternalId().getScheme().getName())
            .addValue("key_value", id.getExternalId().getValue())
            .addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
            .addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
        assocList.add(assocArgs);
        if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
            // select avoids creating unnecessary id, but id may still not be used
            final long idKeyId = nextId("hts_idkey_seq");
            final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
                .addValue("idkey_id", idKeyId)
                .addValue("key_scheme", id.getExternalId().getScheme().getName())
                .addValue("key_value", id.getExternalId().getValue());
            idKeyList.add(idkeyArgs);
        }
    }
    // insert the document row, any new id keys, then the document-to-key associations
    final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
    final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
    final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
    getJdbcTemplate().update(sqlDoc, docArgs);
    getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
    getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
    // set the uniqueId
    final UniqueId uniqueId = createUniqueId(docOid, docId);
    info.setUniqueId(uniqueId);
    document.setUniqueId(uniqueId);
    // expose the matching data-point object id, prefixed to distinguish it from the document oid
    document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
    return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series for a unique identifier, restricted to the given date range.
 * A versioned data-point style id carries its own version-correction instants;
 * any other id is resolved at the latest version-correction.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the inclusive start date, null for unbounded
 * @param toDateInclusive the inclusive end date, null for unbounded
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    checkScheme(uniqueId);
    final boolean versionedDataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
    final VersionCorrection vc = versionedDataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
    return getTimeSeries(uniqueId, vc, fromDateInclusive, toDateInclusive);
}
/**
 * Gets the time-series (meta-data plus data points) for an object identifier,
 * restricted to the requested date range.
 *
 * @param objectId the time-series object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @param fromDateInclusive the inclusive start date, null for unbounded
 * @param toDateInclusive the inclusive end date, null for unbounded
 * @return the time-series, not null
 * @throws DataNotFoundException if the time-series does not exist
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
    final long oid = extractOid(objectId);
    final VersionCorrection vc = versionCorrection.withLatestFixed(now());
    final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
        .addValue("doc_oid", oid)
        .addTimestamp("version_as_of_instant", vc.getVersionAsOf())
        .addTimestamp("corrected_to_instant", vc.getCorrectedTo())
        .addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
        .addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
    final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
    // get metadata
    final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
    ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
    if (result == null) {
        throw new DataNotFoundException("Unable to find time-series: " + objectId);
    }
    // get data points; skipped entirely when the requested range is empty (to < from)
    if (toDateInclusive == null || fromDateInclusive == null || !toDateInclusive.isBefore(fromDateInclusive)) {
        final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
        LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
        result.setTimeSeries(series);
    } else {
        //TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
        result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
    }
    return result;
}
//-------------------------------------------------------------------------
/**
 * Get a single data point from a hts as defined by the query argument.
 *
 * @param objectId the time-series object identifier, not null
 * @param versionCorrection the version-correction locator to search at, not null
 * @param query the SQL query that returns one row with two columns: LocalDate and Double, not null
 * @return a pair containing the LocalDate and the Double value of the data point, not null
 * @throws DataNotFoundException if the query fails or returns no rows
 */
protected Pair<LocalDate, Double> getHTSValue(ObjectIdentifiable objectId, VersionCorrection versionCorrection, String query) {
    final long oid = extractOid(objectId);
    versionCorrection = versionCorrection.withLatestFixed(now());
    final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
        .addValue("doc_oid", oid)
        .addTimestamp("version_as_of_instant", versionCorrection.getVersionAsOf())
        .addTimestamp("corrected_to_instant", versionCorrection.getCorrectedTo());
    final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
    List<Map<String, Object>> result;
    try {
        result = namedJdbc.queryForList(query, args);
    } catch (Exception e) {
        throw new DataNotFoundException("Unable to fetch earliest/latest date/value from time series " + objectId.getObjectId());
    }
    // Fix: an empty result set previously escaped as IndexOutOfBoundsException from
    // result.get(0); report it as the documented DataNotFoundException instead.
    if (result.isEmpty()) {
        throw new DataNotFoundException("Unable to fetch earliest/latest date/value from time series " + objectId.getObjectId());
    }
    final Map<String, Object> row = result.get(0);
    return new ObjectsPair<LocalDate, Double>(
        (LocalDate) DbDateUtils.fromSqlDateAllowNull((Date) row.get("point_date")),
        (Double) row.get("point_value"));
}
/**
 * Gets the summary (earliest/latest date and value) for a time-series by unique
 * identifier. A versioned data-point style id carries its own version-correction
 * instants; any other id is resolved at the latest version-correction.
 *
 * @param uniqueId the unique identifier, not null
 * @return the summary, not null
 */
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "uniqueId");
    checkScheme(uniqueId);
    final boolean versionedDataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
    final VersionCorrection vc = versionedDataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
    return getSummary(uniqueId.getObjectId(), vc);
}
/**
 * Gets the summary (earliest/latest date and value) for a time-series by object
 * identifier at the given version-correction.
 *
 * @param objectId the time-series object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @return the summary, not null
 */
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    final Pair<LocalDate, Double> earliest =
        getHTSValue(objectId, versionCorrection, getExtSqlBundle().getSql("SelectEarliestDataPoint"));
    final Pair<LocalDate, Double> latest =
        getHTSValue(objectId, versionCorrection, getExtSqlBundle().getSql("SelectLatestDataPoint"));
    final HistoricalTimeSeriesSummary summary = new HistoricalTimeSeriesSummary();
    summary.setEarliestDate(earliest.getFirst());
    summary.setEarliestValue(earliest.getSecond());
    summary.setLatestDate(latest.getFirst());
    summary.setLatestValue(latest.getSecond());
    return summary;
}
//-------------------------------------------------------------------------
/**
 * Appends new data points to an existing time-series, retrying on concurrent
 * write conflicts and firing an UPDATED change event on success.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the data points to append, not null (an empty series is a no-op)
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(series, "series");
    s_logger.debug("add time-series data points to {}", objectId);
    // retry to handle concurrent conflicts
    for (int retry = 0; true; retry++) {
        final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
        if (series.isEmpty()) {
            return uniqueId;
        }
        try {
            final Instant now = now();
            UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
                @Override
                public UniqueId doInTransaction(final TransactionStatus status) {
                    // Reject points dated before the latest stored point, then insert.
                    insertDataPointsCheckMaxDate(uniqueId, series);
                    return insertDataPoints(uniqueId, series, now);
                }
            });
            changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
            return resultId;
        } catch (DataIntegrityViolationException ex) {
            // A concurrent writer hit a constraint; retry up to the configured maximum.
            if (retry == getMaxRetries()) {
                throw ex;
            }
        } catch (DataAccessException ex) {
            throw fixSQLExceptionCause(ex);
        }
    }
}
/**
 * Checks the data points can be inserted.
 * <p>
 * New points may only be appended: the update must not start before the latest
 * data point already stored for the series.
 *
 * @param uniqueId the unique identifier, not null
 * @param series the time-series data points, not empty, not null
 * @throws IllegalArgumentException if the first new point pre-dates the stored maximum
 */
protected void insertDataPointsCheckMaxDate(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series) {
    final Long docOid = extractOid(uniqueId);
    final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
    final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addTimestamp("ver_instant", vc.getVersionAsOf())
        .addTimestamp("corr_instant", vc.getCorrectedTo());
    final String sql = getExtSqlBundle().getSql("SelectMaxPointDate", queryArgs);
    // null result means the series has no stored points yet, so any start date is fine.
    Date result = getDbConnector().getJdbcTemplate().queryForObject(sql, Date.class, queryArgs);
    if (result != null) {
        LocalDate maxDate = DbDateUtils.fromSqlDateAllowNull(result);
        // NOTE(review): an update starting exactly on maxDate passes this check —
        // confirm whether re-writing the latest point is intended.
        if (series.getTimeAt(0).isBefore(maxDate)) {
            throw new IllegalArgumentException("Unable to update data points of time-series " + uniqueId +
                " as the update starts at " + series.getTimeAt(0) +
                " which is before the latest data point in the database at " + maxDate);
        }
    }
}
/**
 * Inserts the data points.
 * <p>
 * Each point is written with both its version and correction instants set to {@code now}.
 *
 * @param uniqueId the unique identifier, not null
 * @param series the time-series data points, not empty, not null
 * @param now the current instant, not null
 * @return the unique identifier, not null
 * @throws IllegalArgumentException if the series contains a null date or value
 */
protected UniqueId insertDataPoints(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series, final Instant now) {
    final Long docOid = extractOid(uniqueId);
    final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
    final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
    for (Entry<LocalDate, Double> entry : series) {
        LocalDate date = entry.getKey();
        Double value = entry.getValue();
        if (date == null || value == null) {
            throw new IllegalArgumentException("Time-series must not contain a null value");
        }
        final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
            .addValue("doc_oid", docOid)
            .addDate("point_date", date)
            .addValue("ver_instant", nowTS)
            .addValue("corr_instant", nowTS)
            .addValue("point_value", value);
        argsList.add(args);
    }
    final String sqlInsert = getExtSqlBundle().getSql("InsertDataPoint");
    getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
    // The returned id encodes the new version/correction instants.
    return createTimeSeriesUniqueId(docOid, now, now);
}
//-------------------------------------------------------------------------
/**
 * Corrects existing data points of a time-series, retrying on concurrent write
 * conflicts and firing a CORRECTED change event on success.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the corrected data points, not null (an empty series is a no-op)
 * @return the unique identifier of the corrected series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(series, "series");
    s_logger.debug("add time-series data points to {}", objectId);
    // retry to handle concurrent conflicts
    for (int retry = 0; true; retry++) {
        final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
        if (series.isEmpty()) {
            return uniqueId;
        }
        try {
            final Instant now = now();
            UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
                @Override
                public UniqueId doInTransaction(final TransactionStatus status) {
                    return correctDataPoints(uniqueId, series, now);
                }
            });
            changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, resultId, now);
            return resultId;
        } catch (DataIntegrityViolationException ex) {
            // A concurrent writer hit a constraint; retry up to the configured maximum.
            if (retry == getMaxRetries()) {
                throw ex;
            }
        } catch (DataAccessException ex) {
            throw fixSQLExceptionCause(ex);
        }
    }
}
/**
 * Corrects the data points.
 * <p>
 * Each correction is inserted as a new row carrying only a correction instant
 * (unlike {@code insertDataPoints}, no version instant is bound).
 *
 * @param uniqueId the unique identifier, not null
 * @param series the time-series data points, not empty, not null
 * @param now the current instant, not null
 * @return the unique identifier, not null
 * @throws IllegalArgumentException if the series contains a null date or value
 */
protected UniqueId correctDataPoints(UniqueId uniqueId, LocalDateDoubleTimeSeries series, Instant now) {
    final Long docOid = extractOid(uniqueId);
    final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
    final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
    for (Entry<LocalDate, Double> entry : series) {
        LocalDate date = entry.getKey();
        Double value = entry.getValue();
        if (date == null || value == null) {
            throw new IllegalArgumentException("Time-series must not contain a null value");
        }
        final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
            .addValue("doc_oid", docOid)
            .addDate("point_date", date)
            .addValue("corr_instant", nowTS)
            .addValue("point_value", value);
        argsList.add(args);
    }
    final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
    getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
    return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Removes data points in the given date range, retrying on concurrent write
 * conflicts and firing an UPDATED change event on success.
 *
 * @param objectId the time-series object identifier, not null
 * @param fromDateInclusive the inclusive start date, null for unbounded
 * @param toDateInclusive the inclusive end date, null for unbounded
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
    ArgumentChecker.notNull(objectId, "objectId");
    if (fromDateInclusive != null && toDateInclusive != null) {
        ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
    }
    s_logger.debug("removing time-series data points from {}", objectId);
    // retry to handle concurrent conflicts
    for (int retry = 0; true; retry++) {
        final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
        try {
            final Instant now = now();
            UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
                @Override
                public UniqueId doInTransaction(final TransactionStatus status) {
                    return removeDataPoints(uniqueId, fromDateInclusive, toDateInclusive, now);
                }
            });
            changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
            return resultId;
        } catch (DataIntegrityViolationException ex) {
            // A concurrent writer hit a constraint; retry up to the configured maximum.
            if (retry == getMaxRetries()) {
                throw ex;
            }
        } catch (DataAccessException ex) {
            throw fixSQLExceptionCause(ex);
        }
    }
}
/**
 * Removes data points.
 * <p>
 * Removal is implemented as a correction: for each date in the range a new row is
 * inserted with a null value, so history is preserved rather than rows deleted.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the start date to remove from, not null
 * @param toDateInclusive the end date to remove to, not null
 * @param now the current instant, not null
 * @return the unique identifier, not null
 */
protected UniqueId removeDataPoints(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive, Instant now) {
    final Long docOid = extractOid(uniqueId);
    // query dates to remove
    final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
        .addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
    final String sqlRemove = getExtSqlBundle().getSql("SelectRemoveDataPoints");
    final List<Map<String, Object>> dates = getJdbcTemplate().queryForList(sqlRemove, queryArgs);
    // insert new rows to remove them
    final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
    final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
    for (Map<String, Object> date : dates) {
        final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
            .addValue("doc_oid", docOid)
            .addValue("point_date", date.get("POINT_DATE"))
            .addValue("corr_instant", nowTS)
            // null value marks the point as removed
            .addValue("point_value", null, Types.DOUBLE);
        argsList.add(args);
    }
    final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
    getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
    return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Creates a data-point unique identifier.
 * <p>
 * The value is the object id prefixed by {@code DATA_POINT_PREFIX}; the version
 * is the version instant followed by the ISO-8601 duration up to the correction
 * instant, allowing both instants to be recovered later.
 *
 * @param oid  the object identifier
 * @param verInstant  the version instant, not null
 * @param corrInstant  the correction instant, not null
 * @return the unique identifier
 */
protected UniqueId createTimeSeriesUniqueId(long oid, Instant verInstant, Instant corrInstant) {
  final String value = DATA_POINT_PREFIX + oid;
  final Duration correctionOffset = Duration.between(verInstant, corrInstant);
  final String version = verInstant.toString() + correctionOffset.toString();
  return UniqueId.of(getUniqueIdScheme(), value, version);
}
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * Accepts both document identifiers and data-point identifiers; a leading
 * {@code DATA_POINT_PREFIX} ("DP") is stripped before parsing.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the object id value is not numeric
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
String value = objectId.getObjectId().getValue();
if (value.startsWith(DATA_POINT_PREFIX)) {
value = value.substring(DATA_POINT_PREFIX.length());
}
try {
return Long.parseLong(value);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
}
}
/**
 * Extracts the instants from a data-point unique identifier.
 * <p>
 * Inverse of {@code createTimeSeriesUniqueId}: the version string is an instant
 * followed by an ISO-8601 duration (starting with 'P') to the correction instant.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the version-correction pair, not null
 * @throws IllegalArgumentException if the version cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
/**
 * Extracts the row id from the unique identifier.
 * <p>
 * Data-point identifiers encode a duration in their version (containing 'P');
 * these are first resolved back to the underlying document identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the row id
 */
@Override
protected long extractRowId(UniqueId uniqueId) {
int pos = uniqueId.getVersion().indexOf('P');
if (pos < 0) {
return super.extractRowId(uniqueId);
}
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
HistoricalTimeSeriesInfoDocument doc = get(uniqueId.getObjectId(), vc); // not very efficient, but works
return super.extractRowId(doc.getUniqueId());
}
//-------------------------------------------------------------------------
/**
 * Resolves an object identifier to a data-point unique identifier.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @return the unique identifier of the time-series, not null
 * @throws DataNotFoundException if no matching time-series exists
 */
protected UniqueId resolveObjectId(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  checkScheme(objectId);
  final long oid = extractOid(objectId);
  // pin LATEST to a concrete instant so the query is reproducible
  versionCorrection = versionCorrection.withLatestFixed(now());
  final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource();
  queryArgs.addValue("doc_oid", oid);
  queryArgs.addTimestamp("version_as_of_instant", versionCorrection.getVersionAsOf());
  queryArgs.addTimestamp("corrected_to_instant", versionCorrection.getCorrectedTo());
  final String sql = getExtSqlBundle().getSql("SelectUniqueIdByVersionCorrection", queryArgs);
  final NamedParameterJdbcOperations jdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
  final UniqueId resolved = jdbc.query(sql, queryArgs, new UniqueIdExtractor(oid));
  if (resolved == null) {
    throw new DataNotFoundException("Unable to find time-series: " + objectId.getObjectId());
  }
  return resolved;
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * The query returns one row per (document, external id) pair, ordered so that
 * all rows for a document are adjacent; this extractor starts a new document
 * whenever DOC_ID changes and accumulates external ids onto the current one.
 * Stateful and not thread-safe; use a fresh instance per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// DOC_ID of the document currently being built; -1 before the first row
private long _lastDocId = -1;
// info bean of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
// completed documents, in result-set order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
// first row of a new document: build the document shell
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// every row may carry one external id for the current document
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
/**
 * Builds a document from the first row of a new DOC_ID and appends it to the result list.
 *
 * @param rs  the result set positioned on the row, not null
 * @param docId  the document row id
 * @throws SQLException if a column cannot be read
 */
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the time-series object id is the document oid with the "DP" prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a LocalDateDoubleTimeSeries.
 * <p>
 * Rows are expected ordered by date; for each date only the first row (the most
 * relevant correction) is taken, and a null value means the point was removed.
 */
protected final class DataPointsExtractor implements ResultSetExtractor<LocalDateDoubleTimeSeries> {
  @Override
  public LocalDateDoubleTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
    final List<LocalDate> dates = new ArrayList<LocalDate>(256);
    final List<Double> values = new ArrayList<Double>(256);
    LocalDate previousDate = null;
    while (rs.next()) {
      final LocalDate pointDate = DbDateUtils.fromSqlDateAllowNull(rs.getDate("POINT_DATE"));
      if (!pointDate.equals(previousDate)) {
        // first (winning) row for this date; later rows for the same date are superseded corrections
        previousDate = pointDate;
        final Double pointValue = (Double) rs.getObject("POINT_VALUE");
        if (pointValue != null) {
          dates.add(pointDate);
          values.add(pointValue);
        }
      }
    }
    return new ArrayLocalDateDoubleTimeSeries(dates, values);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a UniqueId.
 * <p>
 * Only the first row is used; returns null when the result set is empty.
 */
protected final class UniqueIdExtractor implements ResultSetExtractor<UniqueId> {
  private final long _objectId;
  public UniqueIdExtractor(final long objectId) {
    _objectId = objectId;
  }
  @Override
  public UniqueId extractData(final ResultSet rs) throws SQLException, DataAccessException {
    if (rs.next() == false) {
      return null;  // no matching row
    }
    // prefer the max data-point instants; fall back to the document instants when no points exist
    Timestamp version = rs.getTimestamp("max_ver_instant");
    Timestamp correction = rs.getTimestamp("max_corr_instant");
    if (version == null) {
      version = rs.getTimestamp("ver_from_instant");
      correction = rs.getTimestamp("corr_from_instant");
    }
    final Instant versionInstant = DbDateUtils.fromSqlTimestamp(version);
    final Instant correctionInstant = (correction != null ? DbDateUtils.fromSqlTimestamp(correction) : versionInstant);
    return createTimeSeriesUniqueId(_objectId, versionInstant, correctionInstant);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a ManageableHistoricalTimeSeries.
 * <p>
 * Only the first row is used; returns null when the result set is empty.
 * The time-series data itself is populated separately by the caller.
 */
protected final class ManageableHTSExtractor implements ResultSetExtractor<ManageableHistoricalTimeSeries> {
// the object row id used to build the resulting unique identifier
private final long _objectId;
public ManageableHTSExtractor(final long objectId) {
_objectId = objectId;
}
@Override
public ManageableHistoricalTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
// prefer the max data-point instants; fall back to document instants when no points exist
Timestamp ver = rs.getTimestamp("max_ver_instant");
Timestamp corr = rs.getTimestamp("max_corr_instant");
if (ver == null) {
ver = rs.getTimestamp("ver_from_instant");
corr = rs.getTimestamp("corr_from_instant");
}
Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
Instant corrInstant = (corr != null ? DbDateUtils.fromSqlTimestamp(corr) : verInstant);
ManageableHistoricalTimeSeries hts = new ManageableHistoricalTimeSeries();
hts.setUniqueId(createTimeSeriesUniqueId(_objectId, verInstant, corrInstant));
hts.setVersionInstant(verInstant);
hts.setCorrectionInstant(corrInstant);
// NOTE(review): summary fields below are intentionally disabled; getSummary() provides
// this data separately — confirm before re-enabling.
// hts.setEarliestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("min_point_date")));
// hts.setLatestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("max_point_date")));
// hts.setEarliestValue(rs.getDouble("earliest_point_value"));
// hts.setLatestValue(rs.getDouble("latest_point_value"));
return hts;
}
return null;
}
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Creates an instance.
 *
 * @param dbConnector  the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share a single database sequence, hts_dimension_seq
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
}
//-------------------------------------------------------------------------
/**
 * Gets the name dimension table helper.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
 * Gets the data-field dimension table helper.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
 * Gets the data-source dimension table helper.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
 * Gets the data-provider dimension table helper.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
 * Gets the observation-time dimension table helper.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
//-------------------------------------------------------------------------
/**
 * Gets meta-data about the master, populating only the sections the request asks for.
 *
 * @param request  the meta-data request, not null
 * @return the requested dimension names, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
result.setDataFields(getDataFieldTable().names());
}
if (request.isDataSources()) {
result.setDataSources(getDataSourceTable().names());
}
if (request.isDataProviders()) {
result.setDataProviders(getDataProviderTable().names());
}
if (request.isObservationTimes()) {
result.setObservationTimes(getObservationTimeTable().names());
}
return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series information documents matching the request.
 * <p>
 * Short-circuits with an empty result when the request can never match.
 * Builds the SQL parameter source, then delegates paging to {@code searchWithPaging}.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // an empty object-id list or an unmatchable id search can never return rows
  if ((objectIds != null && objectIds.isEmpty()) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    // the key parameters are only referenced by the clause built in sqlSelectIdKeys,
    // so they are only needed (and now only added once) in this branch
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  if (objectIds != null) {
    // build a comma-separated oid list for direct SQL injection into the IN clause
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);  // safe: empty list returned early above
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * Builds an OR-joined list of (key_scheme, key_value) equality clauses, one
 * pair of named parameters per id; too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder buf = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      buf.append("OR ");
    }
    buf.append("(key_scheme = :key_scheme").append(i).append(" AND key_value = :key_value").append(i).append(") ");
  }
  return buf.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets an information document by unique identifier.
 * <p>
 * Data-point identifiers (version contains a duration starting with 'P') are
 * redirected to a version-correction lookup of the owning document.
 * NOTE(review): this checks {@code getVersion().contains("P")} whereas other
 * methods check {@code getValue().startsWith(DATA_POINT_PREFIX)} — confirm the
 * two conditions are intended to be equivalent here.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
return get(uniqueId.getObjectId(), vc);
}
return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets an information document by object identifier and version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets the history of an information document.
 *
 * @param request  the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Inserts a new document.
 * <p>
 * Writes the main document row, then the id-key rows (reusing existing keys
 * where possible) and the document-to-key association rows, and finally sets
 * the generated unique identifiers back onto the document and its info bean.
 *
 * @param document  the document, not null
 * @return the new document, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
final long docId = nextId("hts_master_seq");
// a brand-new document uses the row id as its oid; a new version keeps the existing oid
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then new id keys, then the associations referencing them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series data points by unique identifier.
 * <p>
 * Versioned data-point identifiers carry their own version-correction; any
 * other identifier is resolved at the latest version-correction.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the inclusive start date, null for far past
 * @param toDateInclusive  the inclusive end date, null for far future
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean isDataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = (isDataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST);
  return getTimeSeries(uniqueId, vc, fromDateInclusive, toDateInclusive);
}
/**
 * Gets the time-series data points by object identifier and version-correction.
 * <p>
 * First loads the series metadata (instants and unique id); then, unless the
 * requested date range is inverted, loads the data points themselves.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param fromDateInclusive  the inclusive start date, null for far past
 * @param toDateInclusive  the inclusive end date, null for far future
 * @return the time-series, not null
 * @throws DataNotFoundException if the time-series cannot be found
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// get data points, unless the range is inverted (to > from means an intentionally empty request)
if (toDateInclusive == null || fromDateInclusive == null || !toDateInclusive.isBefore(fromDateInclusive)) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
//-------------------------------------------------------------------------
/**
 * Get a single data point from a hts as defined by the query argument.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @param query  the SQL query that returns one row with two columns: LocalDate and Double, not null
 * @return a pair containing the LocalDate and the Double value of the data point, not null
 * @throws DataNotFoundException if the query fails or returns no rows
 */
protected Pair<LocalDate, Double> getHTSValue(ObjectIdentifiable objectId, VersionCorrection versionCorrection, String query) {
  final long oid = extractOid(objectId);
  versionCorrection = versionCorrection.withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
      .addValue("doc_oid", oid)
      .addTimestamp("version_as_of_instant", versionCorrection.getVersionAsOf())
      .addTimestamp("corrected_to_instant", versionCorrection.getCorrectedTo());
  final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
  List<Map<String, Object>> result;
  try {
    result = namedJdbc.queryForList(query, args);
  } catch (Exception e) {
    // preserve the cause so the underlying SQL problem remains diagnosable
    throw new DataNotFoundException("Unable to fetch earliest/latest date/value from time series " + objectId.getObjectId(), e);
  }
  if (result.isEmpty()) {
    // previously fell through to get(0) and threw a raw IndexOutOfBoundsException
    throw new DataNotFoundException("No data points found in time series " + objectId.getObjectId());
  }
  final Map<String, Object> row = result.get(0);
  final LocalDate date = (LocalDate) DbDateUtils.fromSqlDateAllowNull((Date) row.get("point_date"));
  final Double value = (Double) row.get("point_value");
  return new ObjectsPair<LocalDate, Double>(date, value);
}
/**
 * Gets a summary of the time-series by unique identifier.
 * <p>
 * Versioned data-point identifiers carry their own version-correction; any
 * other identifier is resolved at the latest version-correction.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the summary, not null
 */
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean isDataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = (isDataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST);
  return getSummary(uniqueId.getObjectId(), vc);
}
/**
 * Gets a summary of the time-series by object identifier and version-correction.
 * <p>
 * Runs two single-row queries, one for the earliest and one for the latest point.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the summary, not null
 */
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  final Pair<LocalDate, Double> earliest =
      getHTSValue(objectId, versionCorrection, getExtSqlBundle().getSql("SelectEarliestDataPoint"));
  final Pair<LocalDate, Double> latest =
      getHTSValue(objectId, versionCorrection, getExtSqlBundle().getSql("SelectLatestDataPoint"));
  final HistoricalTimeSeriesSummary result = new HistoricalTimeSeriesSummary();
  result.setEarliestDate(earliest.getFirst());
  result.setEarliestValue(earliest.getSecond());
  result.setLatestDate(latest.getFirst());
  result.setLatestValue(latest.getSecond());
  return result;
}
//-------------------------------------------------------------------------
/**
 * Appends data points to a time-series.
 * <p>
 * The new points must start after the latest existing point (checked inside the
 * transaction). The operation is retried on concurrent-update conflicts.
 *
 * @param objectId  the time-series object identifier, not null
 * @param series  the data points to add, not null
 * @return the unique identifier of the updated time-series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(series, "series");
s_logger.debug("add time-series data points to {}", objectId);
// retry to handle concurrent conflicts
for (int retry = 0; true; retry++) {
// re-resolve to the latest version on every attempt, as a concurrent writer may have advanced it
final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
if (series.isEmpty()) {
return uniqueId;
}
try {
final Instant now = now();
UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
@Override
public UniqueId doInTransaction(final TransactionStatus status) {
insertDataPointsCheckMaxDate(uniqueId, series);
return insertDataPoints(uniqueId, series, now);
}
});
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
return resultId;
} catch (DataIntegrityViolationException ex) {
// likely a concurrent conflict; rethrow only once the retry budget is exhausted
if (retry == getMaxRetries()) {
throw ex;
}
} catch (DataAccessException ex) {
throw fixSQLExceptionCause(ex);
}
}
}
/**
 * Checks the data points can be inserted.
 * <p>
 * The new series must not start before the latest point already stored for
 * the version-correction encoded in the identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @throws IllegalArgumentException if the series starts before the latest stored point
 */
protected void insertDataPointsCheckMaxDate(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series) {
final Long docOid = extractOid(uniqueId);
final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addTimestamp("ver_instant", vc.getVersionAsOf())
.addTimestamp("corr_instant", vc.getCorrectedTo());
final String sql = getExtSqlBundle().getSql("SelectMaxPointDate", queryArgs);
// NOTE(review): assumes the aggregate SQL always yields exactly one (possibly null) row;
// an empty result set would make queryForObject throw rather than return null - confirm
Date result = getDbConnector().getJdbcTemplate().queryForObject(sql, Date.class, queryArgs);
if (result != null) {
LocalDate maxDate = DbDateUtils.fromSqlDateAllowNull(result);
if (series.getTimeAt(0).isBefore(maxDate)) {
throw new IllegalArgumentException("Unable to update data points of time-series " + uniqueId +
" as the update starts at " + series.getTimeAt(0) +
" which is before the latest data point in the database at " + maxDate);
}
}
}
/**
 * Inserts the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier, not null
 */
protected UniqueId insertDataPoints(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series, final Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp insertTS = DbDateUtils.toSqlTimestamp(now);
  // one parameter source per point; version and correction instants are both 'now'
  final List<DbMapSqlParameterSource> batch = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate pointDate = point.getKey();
    final Double pointValue = point.getValue();
    if (pointDate == null || pointValue == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    batch.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", pointDate)
        .addValue("ver_instant", insertTS)
        .addValue("corr_instant", insertTS)
        .addValue("point_value", pointValue));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, batch.toArray(new DbMapSqlParameterSource[batch.size()]));
  return createTimeSeriesUniqueId(docOid, now, now);
}
//-------------------------------------------------------------------------
/**
 * Corrects existing data points of a time-series.
 * <p>
 * The correction is retried on concurrent modification up to the configured maximum.
 *
 * @param objectId  the time-series object identifier, not null
 * @param series  the corrected data points, not null (an empty series is a no-op)
 * @return the unique identifier of the corrected time-series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(series, "series");
  // fixed copy-paste: the message previously read "add time-series data points"
  s_logger.debug("correct time-series data points of {}", objectId);
  // retry to handle concurrent conflicts
  for (int retry = 0; true; retry++) {
    final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
    if (series.isEmpty()) {
      return uniqueId;
    }
    try {
      final Instant now = now();
      UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
        @Override
        public UniqueId doInTransaction(final TransactionStatus status) {
          return correctDataPoints(uniqueId, series, now);
        }
      });
      changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, resultId, now);
      return resultId;
    } catch (DataIntegrityViolationException ex) {
      // another writer conflicted; rethrow only once the retry budget is exhausted
      if (retry == getMaxRetries()) {
        throw ex;
      }
    } catch (DataAccessException ex) {
      throw fixSQLExceptionCause(ex);
    }
  }
}
/**
 * Corrects the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier, not null
 */
protected UniqueId correctDataPoints(UniqueId uniqueId, LocalDateDoubleTimeSeries series, Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp correctionTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> batch = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate pointDate = point.getKey();
    final Double pointValue = point.getValue();
    if (pointDate == null || pointValue == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    // a correction row carries only a correction instant, unlike a fresh insert
    batch.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", pointDate)
        .addValue("corr_instant", correctionTS)
        .addValue("point_value", pointValue));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, batch.toArray(new DbMapSqlParameterSource[batch.size()]));
  return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Removes data points from a time-series over a date range.
 * <p>
 * The removal is retried on concurrent modification up to the configured maximum.
 *
 * @param objectId  the time-series object identifier, not null
 * @param fromDateInclusive  the start date to remove from, null means far past
 * @param toDateInclusive  the end date to remove to, null means far future
 * @return the unique identifier of the updated time-series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectId, "objectId");
// either bound may be null (unbounded); when both are set they must be in order
if (fromDateInclusive != null && toDateInclusive != null) {
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
}
s_logger.debug("removing time-series data points from {}", objectId);
// retry to handle concurrent conflicts
for (int retry = 0; true; retry++) {
final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
try {
final Instant now = now();
UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
@Override
public UniqueId doInTransaction(final TransactionStatus status) {
return removeDataPoints(uniqueId, fromDateInclusive, toDateInclusive, now);
}
});
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
return resultId;
} catch (DataIntegrityViolationException ex) {
// another writer conflicted; rethrow only once the retry budget is exhausted
if (retry == getMaxRetries()) {
throw ex;
}
} catch (DataAccessException ex) {
throw fixSQLExceptionCause(ex);
}
}
}
/**
 * Removes data points.
 * <p>
 * Removal is implemented as a correction: for each stored date in the range a
 * new row with a null value is inserted, superseding the original point.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the start date to remove from, null means far past
 * @param toDateInclusive  the end date to remove to, null means far future
 * @param now  the current instant, not null
 * @return the unique identifier, not null
 */
protected UniqueId removeDataPoints(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive, Instant now) {
final Long docOid = extractOid(uniqueId);
// query dates to remove
final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
final String sqlRemove = getExtSqlBundle().getSql("SelectRemoveDataPoints");
final List<Map<String, Object>> dates = getJdbcTemplate().queryForList(sqlRemove, queryArgs);
// insert new rows to remove them; a null point_value marks a removed point
final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
for (Map<String, Object> date : dates) {
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addValue("point_date", date.get("POINT_DATE"))
.addValue("corr_instant", nowTS)
.addValue("point_value", null, Types.DOUBLE);
argsList.add(args);
}
final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Creates a unique identifier for a set of data points.
 * <p>
 * The version string is the version instant followed by the offset to the
 * correction instant expressed as an ISO-8601 duration (always starts with 'P').
 *
 * @param oid  the object identifier
 * @param verInstant  the version instant, not null
 * @param corrInstant  the correction instant, not null
 * @return the unique identifier
 */
protected UniqueId createTimeSeriesUniqueId(long oid, Instant verInstant, Instant corrInstant) {
  final String value = DATA_POINT_PREFIX + oid;
  final String version = verInstant.toString() + Duration.between(verInstant, corrInstant).toString();
  return UniqueId.of(getUniqueIdScheme(), value, version);
}
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * The original javadoc ("the date, null if no point date") was wrong - this
 * returns the numeric row id, stripping any data-point prefix first.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the identifier is not from this master
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  String value = objectId.getObjectId().getValue();
  // data-point identifiers carry a "DP" prefix in front of the numeric oid
  if (value.startsWith(DATA_POINT_PREFIX)) {
    value = value.substring(DATA_POINT_PREFIX.length());
  }
  try {
    return Long.parseLong(value);
  } catch (NumberFormatException ex) {
    // narrowest exception Long.parseLong can throw; anything else should propagate
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the instants from the unique identifier.
 * <p>
 * The version string is the version instant followed by an ISO-8601 duration
 * (starting with 'P') giving the offset to the correction instant, as written
 * by {@code createTimeSeriesUniqueId}.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the instants, version, correction, not null
 * @throws IllegalArgumentException if the version cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
try {
int pos = uniqueId.getVersion().indexOf('P');
String verStr = uniqueId.getVersion().substring(0, pos);
String corrStr = uniqueId.getVersion().substring(pos);
Instant ver = OffsetDateTime.parse(verStr).toInstant();
Instant corr = ver.plus(Duration.parse(corrStr));
return VersionCorrection.of(ver, corr);
} catch (RuntimeException ex) {
// any index/parse failure (missing 'P', bad date, bad duration) means
// the identifier was not produced by this master
throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
}
}
/**
 * Extracts the row id, handling both document and data-point style versions.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the row id
 */
@Override
protected long extractRowId(UniqueId uniqueId) {
  final int durationPos = uniqueId.getVersion().indexOf('P');
  if (durationPos < 0) {
    // plain document version - handled by the superclass
    return super.extractRowId(uniqueId);
  }
  // data-point style version: resolve the encoded instants to the owning document first
  final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
  final HistoricalTimeSeriesInfoDocument doc = get(uniqueId.getObjectId(), vc);  // not very efficient, but works
  return super.extractRowId(doc.getUniqueId());
}
//-------------------------------------------------------------------------
/**
 * Resolves an object identifier to a unique identifier.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @return the unique identifier of the time-series, not null
 * @throws DataNotFoundException if no time-series exists at the locator
 */
protected UniqueId resolveObjectId(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
checkScheme(objectId);
final long oid = extractOid(objectId);
// pin "latest" to a concrete instant so the query parameters are well-defined
versionCorrection = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", versionCorrection.getVersionAsOf())
.addTimestamp("corrected_to_instant", versionCorrection.getCorrectedTo());
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
final UniqueIdExtractor extractor = new UniqueIdExtractor(oid);
final String sql = getExtSqlBundle().getSql("SelectUniqueIdByVersionCorrection", args);
// extractor returns null for an empty result set
final UniqueId uniqueId = namedJdbc.query(sql, args, extractor);
if (uniqueId == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId.getObjectId());
}
return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * Each distinct DOC_ID starts a new document; subsequent rows with the same
 * DOC_ID contribute external identifiers to that document. This assumes rows
 * for a document are contiguous - the grouping relies on the SQL ordering.
 * Stateful, so a fresh instance must be used for each query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// doc id of the document currently being built; -1 until the first row is seen
private long _lastDocId = -1;
// info of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
// first row of a new document
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// accumulate the external identifier carried by this row, if any
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// builds a new document from the current row and appends it to the result list
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the data-point object id is the document object id with the "DP" prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a LocalDateDoubleTimeSeries.
 * <p>
 * Consecutive rows sharing a date are collapsed to the first occurrence -
 * presumably the effective value after corrections, the ordering coming from
 * the SQL (TODO confirm). Rows whose value is null are excluded entirely.
 */
protected final class DataPointsExtractor implements ResultSetExtractor<LocalDateDoubleTimeSeries> {
  @Override
  public LocalDateDoubleTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
    final List<LocalDate> dates = new ArrayList<LocalDate>(256);
    final List<Double> values = new ArrayList<Double>(256);
    LocalDate previousDate = null;
    while (rs.next()) {
      final LocalDate pointDate = DbDateUtils.fromSqlDateAllowNull(rs.getDate("POINT_DATE"));
      // only the first row seen for each date contributes to the series
      if (!pointDate.equals(previousDate)) {
        previousDate = pointDate;
        final Double pointValue = (Double) rs.getObject("POINT_VALUE");
        // a null value marks a removed point and is skipped
        if (pointValue != null) {
          dates.add(pointDate);
          values.add(pointValue);
        }
      }
    }
    return new ArrayLocalDateDoubleTimeSeries(dates, values);
  }
}
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a UniqueId.
*/
protected final class UniqueIdExtractor implements ResultSetExtractor<UniqueId> {
private final long _objectId;
public UniqueIdExtractor(final long objectId) {
_objectId = objectId;
}
@Override
public UniqueId extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
Timestamp ver = rs.getTimestamp("max_ver_instant");
Timestamp corr = rs.getTimestamp("max_corr_instant");
if (ver == null) {
ver = rs.getTimestamp("ver_from_instant");
corr = rs.getTimestamp("corr_from_instant");
}
Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
Instant corrInstant = (corr != null ? DbDateUtils.fromSqlTimestamp(corr) : verInstant);
return createTimeSeriesUniqueId(_objectId, verInstant, corrInstant);
}
return null;
}
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a ManageableHistoricalTimeSeries.
 * <p>
 * Only the first row is used; null is returned for an empty result set.
 */
protected final class ManageableHTSExtractor implements ResultSetExtractor<ManageableHistoricalTimeSeries> {
// object id of the series, baked into the resulting unique identifier
private final long _objectId;
public ManageableHTSExtractor(final long objectId) {
_objectId = objectId;
}
@Override
public ManageableHistoricalTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
Timestamp ver = rs.getTimestamp("max_ver_instant");
Timestamp corr = rs.getTimestamp("max_corr_instant");
if (ver == null) {
// no data-point instants present - fall back to the document instants
ver = rs.getTimestamp("ver_from_instant");
corr = rs.getTimestamp("corr_from_instant");
}
Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
Instant corrInstant = (corr != null ? DbDateUtils.fromSqlTimestamp(corr) : verInstant);
ManageableHistoricalTimeSeries hts = new ManageableHistoricalTimeSeries();
hts.setUniqueId(createTimeSeriesUniqueId(_objectId, verInstant, corrInstant));
hts.setVersionInstant(verInstant);
hts.setCorrectionInstant(corrInstant);
// NOTE(review): summary fields left commented out - presumably superseded by
// the getSummary methods; confirm before deleting
// hts.setEarliestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("min_point_date")));
// hts.setLatestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("max_point_date")));
// hts.setEarliestValue(rs.getDouble("earliest_point_value"));
// hts.setLatestValue(rs.getDouble("latest_point_value"));
return hts;
}
return null;
}
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 * Distinguishes time-series (data point) identifiers from document identifiers.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Creates an instance.
 *
 * @param dbConnector  the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share the single hts_dimension_seq sequence
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
//-------------------------------------------------------------------------
/**
 * Queries the meta-data (available dimension values).
 *
 * @param request  the meta-data request, not null
 * @return the meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  // populate only the dimensions the caller explicitly asked for
  final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
  if (request.isDataFields()) {
    metaData.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    metaData.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    metaData.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    metaData.setObservationTimes(getObservationTimeTable().names());
  }
  return metaData;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series documents matching the request.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit when the request can never match anything
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
// pin "latest" to a concrete instant so the query parameters are well-defined
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
// NOTE(review): this loop adds the same key_scheme/key_value parameters as the
// block below (same keys, values simply overwritten) - looks redundant; confirm
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
// inline the object-id restriction as a comma-separated list of oids
if (objectIds != null) {
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null (empty string for an empty search)
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder buf = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      buf.append("OR ");
    }
    // each clause (and the final one) carries a trailing space, matching the joined form
    buf.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return buf.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier, handling data-point style identifiers.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  // a version containing a duration marker ('P') is a data-point identifier
  // and must be resolved via its encoded instants rather than fetched directly
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier and version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a time-series document.
 *
 * @param request  the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
* Inserts a new document.
*
* @param document the document, not null
* @return the new document, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series for a unique identifier, restricted to a date range.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the inclusive start date, null for unbounded
 * @param toDateInclusive  the inclusive end date, null for unbounded
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point identifier encodes its own version-correction instants;
  // any other identifier resolves to the latest version
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId, vc, fromDateInclusive, toDateInclusive);
}
/**
 * Gets the time-series for an object identifier at a version-correction,
 * restricted to a date range.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @param fromDateInclusive  the inclusive start date, null for unbounded
 * @param toDateInclusive  the inclusive end date, null for unbounded
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  // express the date range as a get filter and delegate
  final HistoricalTimeSeriesGetFilter filter = HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive);
  return getTimeSeries(objectId, versionCorrection, filter);
}
//-------------------------------------------------------------------------
/**
 * Gets the full time-series for a unique identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point identifier encodes its own version-correction instants
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc);
}
/**
 * Gets the full time-series for an object identifier at a version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // no date restriction: request the entire range of points
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
/**
 * Gets the time-series for a unique identifier, restricted by a filter.
 *
 * @param uniqueId  the unique identifier, not null
 * @param filter  the filter restricting the result, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point identifier encodes its own version-correction instants
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets the time-series metadata and data points for an object identifier at a
 * version-correction, restricted by the supplied filter.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @param filter  the filter restricting the date range and number of points, not null
 * @return the time-series, not null
 * @throws DataNotFoundException if the time-series is not found
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
  final long oid = extractOid(objectId);
  final VersionCorrection vc = versionCorrection.withLatestFixed(now());
  // null range bounds map to far-past/far-future sentinel dates for the SQL
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
      .addValue("doc_oid", oid)
      .addTimestamp("version_as_of_instant", vc.getVersionAsOf())
      .addTimestamp("corrected_to_instant", vc.getCorrectedTo())
      .addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
      .addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
  final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
  // get metadata
  final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
  ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
  if (result == null) {
    throw new DataNotFoundException("Unable to find time-series: " + objectId);
  }
  // set up limit on number of points to return
  if (filter.getMaxPoints() == null) {
    // return all points (limit all)
    args.addValue("order", "ASC");
  } else if (filter.getMaxPoints() > 0) {
    // return first few points
    args.addValue("paging_fetch", filter.getMaxPoints());
    args.addValue("order", "ASC");
  } else if (filter.getMaxPoints() < 0) {
    // return last few points
    args.addValue("paging_fetch", -filter.getMaxPoints());
    args.addValue("order", "DESC");
  } else {
    // Zero datapoints requested: skip the data-point query entirely
    result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
    return result;
  }
  // get data points, unless the requested range is inverted (latest before earliest)
  if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
    final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
    LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
    result.setTimeSeries(series);
  } else {
    //TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
    result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Appends data points to an existing time-series, retrying on concurrent
 * modification conflicts.
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(series, "series");
  s_logger.debug("add time-series data points to {}", objectId);
  // retry to handle concurrent conflicts
  for (int retry = 0; true; retry++) {
    final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
    if (series.isEmpty()) {
      // nothing to insert; the current identifier is already correct
      return uniqueId;
    }
    try {
      final Instant now = now();
      UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
        @Override
        public UniqueId doInTransaction(final TransactionStatus status) {
          // validate the new points start after the latest stored point, then insert
          insertDataPointsCheckMaxDate(uniqueId, series);
          return insertDataPoints(uniqueId, series, now);
        }
      });
      changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
      return resultId;
    } catch (DataIntegrityViolationException ex) {
      // another writer got in first; retry up to the configured maximum
      if (retry == getMaxRetries()) {
        throw ex;
      }
    } catch (DataAccessException ex) {
      throw fixSQLExceptionCause(ex);
    }
  }
}
/**
 * Checks the data points can be inserted.
 * <p>
 * An update must not overlap existing data: the first new point has to be
 * strictly after the latest point already stored for this series version.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @throws IllegalArgumentException if the update starts before the latest stored point
 */
protected void insertDataPointsCheckMaxDate(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series) {
  final Long docOid = extractOid(uniqueId);
  final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
  final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
      .addValue("doc_oid", docOid)
      .addTimestamp("ver_instant", vc.getVersionAsOf())
      .addTimestamp("corr_instant", vc.getCorrectedTo());
  final String sql = getExtSqlBundle().getSql("SelectMaxPointDate", queryArgs);
  // null result means the series has no points yet, so any start date is valid
  Date result = getDbConnector().getJdbcTemplate().queryForObject(sql, Date.class, queryArgs);
  if (result != null) {
    LocalDate maxDate = DbDateUtils.fromSqlDateAllowNull(result);
    if (series.getTimeAt(0).isBefore(maxDate)) {
      throw new IllegalArgumentException("Unable to update data points of time-series " + uniqueId +
          " as the update starts at " + series.getTimeAt(0) +
          " which is before the latest data point in the database at " + maxDate);
    }
  }
}
/**
 * Inserts the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier of the updated series, not null
 * @throws IllegalArgumentException if the series contains a null date or value
 */
protected UniqueId insertDataPoints(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series, final Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> insertArgs = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate date = point.getKey();
    final Double value = point.getValue();
    if (date == null || value == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    // each new point is versioned and corrected at the same instant
    insertArgs.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", date)
        .addValue("ver_instant", nowTS)
        .addValue("corr_instant", nowTS)
        .addValue("point_value", value));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, insertArgs.toArray(new DbMapSqlParameterSource[insertArgs.size()]));
  return createTimeSeriesUniqueId(docOid, now, now);
}
//-------------------------------------------------------------------------
/**
 * Corrects data points of an existing time-series, retrying on concurrent
 * modification conflicts.
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(series, "series");
  // fixed copy-paste from updateTimeSeriesDataPoints: this method corrects, not adds
  s_logger.debug("correct time-series data points of {}", objectId);
  // retry to handle concurrent conflicts
  for (int retry = 0; true; retry++) {
    final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
    if (series.isEmpty()) {
      // nothing to correct; the current identifier is already correct
      return uniqueId;
    }
    try {
      final Instant now = now();
      UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
        @Override
        public UniqueId doInTransaction(final TransactionStatus status) {
          return correctDataPoints(uniqueId, series, now);
        }
      });
      changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, resultId, now);
      return resultId;
    } catch (DataIntegrityViolationException ex) {
      // another writer got in first; retry up to the configured maximum
      if (retry == getMaxRetries()) {
        throw ex;
      }
    } catch (DataAccessException ex) {
      throw fixSQLExceptionCause(ex);
    }
  }
}
/**
 * Corrects the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier of the corrected series, not null
 * @throws IllegalArgumentException if the series contains a null date or value
 */
protected UniqueId correctDataPoints(UniqueId uniqueId, LocalDateDoubleTimeSeries series, Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> correctionArgs = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate date = point.getKey();
    final Double value = point.getValue();
    if (date == null || value == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    // a correction is a new row for the same point date with a later correction instant
    correctionArgs.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", date)
        .addValue("corr_instant", nowTS)
        .addValue("point_value", value));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, correctionArgs.toArray(new DbMapSqlParameterSource[correctionArgs.size()]));
  return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Removes data points in a date range from an existing time-series, retrying
 * on concurrent modification conflicts.
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
  ArgumentChecker.notNull(objectId, "objectId");
  if (fromDateInclusive != null && toDateInclusive != null) {
    ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
  }
  s_logger.debug("removing time-series data points from {}", objectId);
  // retry to handle concurrent conflicts
  for (int retry = 0; true; retry++) {
    final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
    try {
      final Instant now = now();
      UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
        @Override
        public UniqueId doInTransaction(final TransactionStatus status) {
          return removeDataPoints(uniqueId, fromDateInclusive, toDateInclusive, now);
        }
      });
      changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
      return resultId;
    } catch (DataIntegrityViolationException ex) {
      // another writer got in first; retry up to the configured maximum
      if (retry == getMaxRetries()) {
        throw ex;
      }
    } catch (DataAccessException ex) {
      throw fixSQLExceptionCause(ex);
    }
  }
}
/**
 * Removes data points.
 * <p>
 * Removal is implemented as a correction: each affected date gets a new row
 * with a null value, preserving the audit history of the deleted points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the start date to remove from, null means far-past
 * @param toDateInclusive  the end date to remove to, null means far-future
 * @param now  the current instant, not null
 * @return the unique identifier, not null
 */
protected UniqueId removeDataPoints(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive, Instant now) {
  final Long docOid = extractOid(uniqueId);
  // query dates to remove
  final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
      .addValue("doc_oid", docOid)
      .addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
      .addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
  final String sqlRemove = getExtSqlBundle().getSql("SelectRemoveDataPoints");
  final List<Map<String, Object>> dates = getJdbcTemplate().queryForList(sqlRemove, queryArgs);
  // insert new rows to remove them
  final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
  for (Map<String, Object> date : dates) {
    final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addValue("point_date", date.get("POINT_DATE"))
        .addValue("corr_instant", nowTS)
        .addValue("point_value", null, Types.DOUBLE);
    argsList.add(args);
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
  return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Creates a unique identifier for the time-series (as opposed to the document).
 * <p>
 * The version string encodes both instants: the version instant in ISO-8601
 * form followed by the ISO-8601 duration from the version instant to the
 * correction instant. This format is parsed back by
 * {@link #extractTimeSeriesInstants}.
 *
 * @param oid  the object identifier
 * @param verInstant  the version instant, not null
 * @param corrInstant  the correction instant, not null
 * @return the unique identifier
 */
protected UniqueId createTimeSeriesUniqueId(long oid, Instant verInstant, Instant corrInstant) {
  // the data-point prefix distinguishes a time-series id from a document id
  String oidStr = DATA_POINT_PREFIX + oid;
  Duration dur = Duration.between(verInstant, corrInstant);
  String verStr = verInstant.toString() + dur.toString();
  return UniqueId.of(getUniqueIdScheme(), oidStr, verStr);
}
/**
 * Extracts the object row id from the object identifier, accepting both
 * document identifiers and data-point prefixed time-series identifiers.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the identifier is not numeric after
 *  stripping any data-point prefix
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  String value = objectId.getObjectId().getValue();
  if (value.startsWith(DATA_POINT_PREFIX)) {
    // time-series style identifier: strip the prefix to reach the numeric oid
    value = value.substring(DATA_POINT_PREFIX.length());
  }
  try {
    return Long.parseLong(value);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the instants from the unique identifier.
 * <p>
 * Reverses the encoding of {@link #createTimeSeriesUniqueId}: the version
 * string is an ISO-8601 instant immediately followed by an ISO-8601 duration
 * (starting with 'P') giving the offset of the correction instant.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the instants, version, correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    // 'P' starts the ISO-8601 duration and cannot appear in the instant part
    int pos = uniqueId.getVersion().indexOf('P');
    String verStr = uniqueId.getVersion().substring(0, pos);
    String corrStr = uniqueId.getVersion().substring(pos);
    Instant ver = OffsetDateTime.parse(verStr).toInstant();
    Instant corr = ver.plus(Duration.parse(corrStr));
    return VersionCorrection.of(ver, corr);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
/**
 * Extracts the row id from the unique identifier, handling both the document
 * form and the data-point time-series form (whose version contains a duration
 * starting with 'P').
 */
@Override
protected long extractRowId(UniqueId uniqueId) {
  int pos = uniqueId.getVersion().indexOf('P');
  if (pos < 0) {
    // plain document identifier: the superclass can handle it directly
    return super.extractRowId(uniqueId);
  }
  // time-series identifier: resolve the document at the encoded instants first
  VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
  HistoricalTimeSeriesInfoDocument doc = get(uniqueId.getObjectId(), vc); // not very efficient, but works
  return super.extractRowId(doc.getUniqueId());
}
//-------------------------------------------------------------------------
/**
 * Resolves an object identifier to a unique identifier.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @return the unique identifier of the time-series, not null
 * @throws DataNotFoundException if the time-series is not found
 */
protected UniqueId resolveObjectId(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  checkScheme(objectId);
  final long oid = extractOid(objectId);
  // pin "latest" to a concrete instant before querying
  final VersionCorrection vc = versionCorrection.withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
      .addValue("doc_oid", oid)
      .addTimestamp("version_as_of_instant", vc.getVersionAsOf())
      .addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
  final String sql = getExtSqlBundle().getSql("SelectUniqueIdByVersionCorrection", args);
  final UniqueId uniqueId = namedJdbc.query(sql, args, new UniqueIdExtractor(oid));
  if (uniqueId == null) {
    throw new DataNotFoundException("Unable to find time-series: " + objectId.getObjectId());
  }
  return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * The result set contains one row per external identifier, with the document
 * columns repeated; rows for the same document are merged by tracking the
 * last seen document id. Stateful, so a fresh instance is needed per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
  // id of the document built most recently, -1 before the first row
  private long _lastDocId = -1;
  // info of the document currently being assembled
  private ManageableHistoricalTimeSeriesInfo _info;
  private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  @Override
  public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
    while (rs.next()) {
      final long docId = rs.getLong("DOC_ID");
      if (_lastDocId != docId) {
        // first row of a new document: build the document shell
        _lastDocId = docId;
        buildHistoricalTimeSeries(rs, docId);
      }
      // every row may carry one external identifier for the current document
      final String idScheme = rs.getString("KEY_SCHEME");
      final String idValue = rs.getString("KEY_VALUE");
      final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
      final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
      if (idScheme != null && idValue != null) {
        ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
        _info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
      }
    }
    return _documents;
  }
  // builds the document and info object from the document-level columns of the current row
  private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
    final long docOid = rs.getLong("DOC_OID");
    final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
    final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
    final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
    final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
    final String name = rs.getString("NAME");
    final String dataField = rs.getString("DATA_FIELD");
    final String dataSource = rs.getString("DATA_SOURCE");
    final String dataProvider = rs.getString("DATA_PROVIDER");
    final String observationTime = rs.getString("OBSERVATION_TIME");
    UniqueId uniqueId = createUniqueId(docOid, docId);
    _info = new ManageableHistoricalTimeSeriesInfo();
    _info.setUniqueId(uniqueId);
    _info.setName(name);
    _info.setDataField(dataField);
    _info.setDataSource(dataSource);
    _info.setDataProvider(dataProvider);
    _info.setObservationTime(observationTime);
    _info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
    // the time-series object id is the document oid with the data-point prefix
    _info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
    HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
    doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
    doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
    doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
    doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
    _documents.add(doc);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a LocalDateDoubleTimeSeries.
 */
protected final class DataPointsExtractor implements ResultSetExtractor<LocalDateDoubleTimeSeries> {
  @Override
  public LocalDateDoubleTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
    final List<LocalDate> dates = new ArrayList<LocalDate>(256);
    final List<Double> values = new ArrayList<Double>(256);
    // only the first row seen for each date is used (duplicate dates are
    // assumed adjacent in the result set — TODO confirm against the SQL order);
    // a null value marks a removed point and is skipped
    LocalDate previousDate = null;
    while (rs.next()) {
      final LocalDate date = DbDateUtils.fromSqlDateAllowNull(rs.getDate("POINT_DATE"));
      if (!date.equals(previousDate)) {
        previousDate = date;
        final Double value = (Double) rs.getObject("POINT_VALUE");
        if (value != null) {
          dates.add(date);
          values.add(value);
        }
      }
    }
    return new ArrayLocalDateDoubleTimeSeries(dates, values);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a UniqueId.
 */
protected final class UniqueIdExtractor implements ResultSetExtractor<UniqueId> {
  private final long _objectId;
  public UniqueIdExtractor(final long objectId) {
    _objectId = objectId;
  }
  @Override
  public UniqueId extractData(final ResultSet rs) throws SQLException, DataAccessException {
    // only the first row (if any) is relevant
    if (rs.next() == false) {
      return null;
    }
    // prefer the latest data-point instants; fall back to the document instants
    Timestamp ver = rs.getTimestamp("max_ver_instant");
    Timestamp corr = rs.getTimestamp("max_corr_instant");
    if (ver == null) {
      ver = rs.getTimestamp("ver_from_instant");
      corr = rs.getTimestamp("corr_from_instant");
    }
    final Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
    final Instant corrInstant = (corr == null ? verInstant : DbDateUtils.fromSqlTimestamp(corr));
    return createTimeSeriesUniqueId(_objectId, verInstant, corrInstant);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a ManageableHistoricalTimeSeries.
 * <p>
 * Only the identifier and instants are populated here; the data points are
 * loaded by a separate query.
 */
protected final class ManageableHTSExtractor implements ResultSetExtractor<ManageableHistoricalTimeSeries> {
  private final long _objectId;
  public ManageableHTSExtractor(final long objectId) {
    _objectId = objectId;
  }
  @Override
  public ManageableHistoricalTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
    // only the first row (if any) is relevant
    if (rs.next() == false) {
      return null;
    }
    // prefer the latest data-point instants; fall back to the document instants
    Timestamp ver = rs.getTimestamp("max_ver_instant");
    Timestamp corr = rs.getTimestamp("max_corr_instant");
    if (ver == null) {
      ver = rs.getTimestamp("ver_from_instant");
      corr = rs.getTimestamp("corr_from_instant");
    }
    final Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
    final Instant corrInstant = (corr == null ? verInstant : DbDateUtils.fromSqlTimestamp(corr));
    final ManageableHistoricalTimeSeries hts = new ManageableHistoricalTimeSeries();
    hts.setUniqueId(createTimeSeriesUniqueId(_objectId, verInstant, corrInstant));
    hts.setVersionInstant(verInstant);
    hts.setCorrectionInstant(corrInstant);
    return hts;
  }
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
import com.opengamma.util.tuple.ObjectsPair;
import com.opengamma.util.tuple.Pair;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers, distinguishing them
 * from document unique identifiers in the same scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Creates an instance.
 *
 * @param dbConnector  the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
  super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
  setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
  // all dimension tables share one id sequence but use distinct tables
  _nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
  _dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
  _dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
  _dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
  _observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
  return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
  return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
  return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
  return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
  return _observationTimeTable;
}
//-------------------------------------------------------------------------
/**
 * Gets the available meta-data, populating only the categories the request
 * asks for; each category comes from its own dimension table.
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  if (request.isDataFields()) {
    result.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    result.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    result.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    result.setObservationTimes(getObservationTimeTable().names());
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series information documents matching the request,
 * building named SQL parameters and delegating to paged search.
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // short-circuit: an empty object-id list or an unsatisfiable id search can never match
  if ((objectIds != null && objectIds.size() == 0) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  // wildcard-adjust text criteria; null criteria are omitted from the SQL
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  // NOTE(review): this loop appears to duplicate the one in the block below,
  // which re-adds the same key_scheme/key_value parameters — confirm whether
  // one of the two is redundant
  if (externalIdSearch != null) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
  }
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  if (objectIds != null) {
    // build a comma-separated oid list for an IN clause
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    // each clause binds a numbered pair of named parameters
    sql.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  // a version containing a duration marker is a data-point style identifier
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  // delegate to the shared versioned-get logic with the document extractor
  final HistoricalTimeSeriesDocumentExtractor extractor = new HistoricalTimeSeriesDocumentExtractor();
  return doGetByOidInstants(objectId, versionCorrection, extractor, "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
  // delegate to the shared history logic with a fresh result and extractor
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  return doHistory(request, result, new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Inserts a new document.
 * <p>
 * Allocates a new row id, stores the document row with normalized dimension
 * references, and links each external identifier via the idkey tables.
 *
 * @param document the document, not null
 * @return the new document, with unique identifier set, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// new row id; reuse the existing object id when the document already has one
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
// dimension strings are normalized into lookup tables; ensure() returns the id, creating it if absent
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then batch-insert new idkeys and the doc-to-idkey associations
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
// the linked data-point series shares the oid but carries the DP prefix
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point id carries its own instants; otherwise use latest
  final boolean dataPointStyle = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointStyle ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId, vc, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  // express the date range as a get filter and delegate
  final HistoricalTimeSeriesGetFilter filter = HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive);
  return getTimeSeries(objectId, versionCorrection, filter);
}
//-------------------------------------------------------------------------
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point id carries its own instants; otherwise use latest
  final boolean dataPointStyle = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointStyle ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc);
}
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // unbounded range: fetch every data point
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point id carries its own instants; otherwise use latest
  final boolean dataPointStyle = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointStyle ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets a time-series, applying the given filter to the data points.
 * <p>
 * The metadata is loaded first; the filter then controls which points
 * (date range, count, and direction) are fetched.
 *
 * @param objectId the time-series object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @param filter the filter of dates and point count, not null
 * @return the time-series, not null
 * @throws DataNotFoundException if the time-series is not found
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points, fetched in reverse date order
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested - metadata only, empty series
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, unless the requested range is empty (latest strictly before earliest)
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
//-------------------------------------------------------------------------
/**
 * Appends data points to the end of an existing time-series,
 * retrying on concurrent modification conflicts.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the data points to add, not null (may be empty)
 * @return the unique identifier of the updated time-series version, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(series, "series");
s_logger.debug("add time-series data points to {}", objectId);
// retry to handle concurrent conflicts
for (int retry = 0; true; retry++) {
final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
// nothing to add: return the current version unchanged
if (series.isEmpty()) {
return uniqueId;
}
try {
final Instant now = now();
UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
@Override
public UniqueId doInTransaction(final TransactionStatus status) {
// validate the new points start after the stored maximum, then insert atomically
insertDataPointsCheckMaxDate(uniqueId, series);
return insertDataPoints(uniqueId, series, now);
}
});
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
return resultId;
} catch (DataIntegrityViolationException ex) {
// concurrent writer conflict: retry up to the configured maximum
if (retry == getMaxRetries()) {
throw ex;
}
} catch (DataAccessException ex) {
throw fixSQLExceptionCause(ex);
}
}
}
/**
 * Checks the data points can be inserted.
 * <p>
 * The update must start after the latest data point already stored.
 *
 * @param uniqueId the unique identifier, not null
 * @param series the time-series data points, not empty, not null
 * @throws IllegalArgumentException if the series starts before the stored maximum date
 */
protected void insertDataPointsCheckMaxDate(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series) {
final Long docOid = extractOid(uniqueId);
final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addTimestamp("ver_instant", vc.getVersionAsOf())
.addTimestamp("corr_instant", vc.getCorrectedTo());
final String sql = getExtSqlBundle().getSql("SelectMaxPointDate", queryArgs);
// NOTE(review): argument order (sql, type, params) differs from Spring's usual (sql, params, type) - confirm this overload exists on the template type returned here
Date result = getDbConnector().getJdbcTemplate().queryForObject(sql, Date.class, queryArgs);
if (result != null) {
LocalDate maxDate = DbDateUtils.fromSqlDateAllowNull(result);
// only the first point is compared - assumes the series is date-ordered; TODO confirm
if (series.getTimeAt(0).isBefore(maxDate)) {
throw new IllegalArgumentException("Unable to update data points of time-series " + uniqueId +
" as the update starts at " + series.getTimeAt(0) +
" which is before the latest data point in the database at " + maxDate);
}
}
}
/**
 * Inserts the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the time-series data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier of the new version, not null
 */
protected UniqueId insertDataPoints(final UniqueId uniqueId, final LocalDateDoubleTimeSeries series, final Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> batch = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate date = point.getKey();
    final Double value = point.getValue();
    if (date == null || value == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    // one parameter row per data point, stamped with the new version/correction instant
    batch.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", date)
        .addValue("ver_instant", nowTS)
        .addValue("corr_instant", nowTS)
        .addValue("point_value", value));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, batch.toArray(new DbMapSqlParameterSource[batch.size()]));
  return createTimeSeriesUniqueId(docOid, now, now);
}
//-------------------------------------------------------------------------
/**
 * Corrects existing data points of a time-series,
 * retrying on concurrent modification conflicts.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the corrected data points, not null (may be empty)
 * @return the unique identifier of the corrected time-series version, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(series, "series");
  // log message previously said "add" - copy-paste from updateTimeSeriesDataPoints
  s_logger.debug("correct time-series data points of {}", objectId);
  // retry to handle concurrent conflicts
  for (int retry = 0; true; retry++) {
    final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
    // nothing to correct: return the current version unchanged
    if (series.isEmpty()) {
      return uniqueId;
    }
    try {
      final Instant now = now();
      UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
        @Override
        public UniqueId doInTransaction(final TransactionStatus status) {
          return correctDataPoints(uniqueId, series, now);
        }
      });
      changeManager().entityChanged(ChangeType.CORRECTED, uniqueId, resultId, now);
      return resultId;
    } catch (DataIntegrityViolationException ex) {
      // concurrent writer conflict: retry up to the configured maximum
      if (retry == getMaxRetries()) {
        throw ex;
      }
    } catch (DataAccessException ex) {
      throw fixSQLExceptionCause(ex);
    }
  }
}
/**
 * Corrects the data points.
 *
 * @param uniqueId  the unique identifier, not null
 * @param series  the replacement data points, not empty, not null
 * @param now  the current instant, not null
 * @return the unique identifier of the corrected version, not null
 */
protected UniqueId correctDataPoints(UniqueId uniqueId, LocalDateDoubleTimeSeries series, Instant now) {
  final Long docOid = extractOid(uniqueId);
  final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
  final List<DbMapSqlParameterSource> batch = new ArrayList<DbMapSqlParameterSource>();
  for (Entry<LocalDate, Double> point : series) {
    final LocalDate date = point.getKey();
    final Double value = point.getValue();
    if (date == null || value == null) {
      throw new IllegalArgumentException("Time-series must not contain a null value");
    }
    // one correction row per point, stamped with the correction instant only
    batch.add(new DbMapSqlParameterSource()
        .addValue("doc_oid", docOid)
        .addDate("point_date", date)
        .addValue("corr_instant", nowTS)
        .addValue("point_value", value));
  }
  final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
  getJdbcTemplate().batchUpdate(sqlInsert, batch.toArray(new DbMapSqlParameterSource[batch.size()]));
  return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Removes data points in a date range,
 * retrying on concurrent modification conflicts.
 *
 * @param objectId the time-series object identifier, not null
 * @param fromDateInclusive the start date to remove from, null for far past
 * @param toDateInclusive the end date to remove to, null for far future
 * @return the unique identifier of the updated time-series version, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
ArgumentChecker.notNull(objectId, "objectId");
if (fromDateInclusive != null && toDateInclusive != null) {
ArgumentChecker.inOrderOrEqual(fromDateInclusive, toDateInclusive, "fromDateInclusive", "toDateInclusive");
}
s_logger.debug("removing time-series data points from {}", objectId);
// retry to handle concurrent conflicts
for (int retry = 0; true; retry++) {
final UniqueId uniqueId = resolveObjectId(objectId, VersionCorrection.LATEST);
try {
final Instant now = now();
UniqueId resultId = getTransactionTemplate().execute(new TransactionCallback<UniqueId>() {
@Override
public UniqueId doInTransaction(final TransactionStatus status) {
return removeDataPoints(uniqueId, fromDateInclusive, toDateInclusive, now);
}
});
changeManager().entityChanged(ChangeType.UPDATED, uniqueId, resultId, now);
return resultId;
} catch (DataIntegrityViolationException ex) {
// concurrent writer conflict: retry up to the configured maximum
if (retry == getMaxRetries()) {
throw ex;
}
} catch (DataAccessException ex) {
throw fixSQLExceptionCause(ex);
}
}
}
/**
 * Removes data points.
 * <p>
 * Points are not physically deleted: a null-valued correction row is
 * inserted for each affected date, preserving history.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the start date to remove from, null for far past
 * @param toDateInclusive the end date to remove to, null for far future
 * @param now the current instant, not null
 * @return the unique identifier of the new version, not null
 */
protected UniqueId removeDataPoints(UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive, Instant now) {
final Long docOid = extractOid(uniqueId);
// query dates to remove
final DbMapSqlParameterSource queryArgs = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(fromDateInclusive))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(toDateInclusive));
final String sqlRemove = getExtSqlBundle().getSql("SelectRemoveDataPoints");
final List<Map<String, Object>> dates = getJdbcTemplate().queryForList(sqlRemove, queryArgs);
// insert new rows to remove them (null point_value marks a removed point)
final Timestamp nowTS = DbDateUtils.toSqlTimestamp(now);
final List<DbMapSqlParameterSource> argsList = new ArrayList<DbMapSqlParameterSource>();
for (Map<String, Object> date : dates) {
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", docOid)
.addValue("point_date", date.get("POINT_DATE"))
.addValue("corr_instant", nowTS)
.addValue("point_value", null, Types.DOUBLE);
argsList.add(args);
}
final String sqlInsert = getExtSqlBundle().getSql("InsertCorrectDataPoint");
getJdbcTemplate().batchUpdate(sqlInsert, argsList.toArray(new DbMapSqlParameterSource[argsList.size()]));
return resolveObjectId(uniqueId, VersionCorrection.of(now, now));
}
//-------------------------------------------------------------------------
/**
 * Creates a unique identifier for a time-series version.
 *
 * @param oid  the object identifier
 * @param verInstant  the version instant, not null
 * @param corrInstant  the correction instant, not null
 * @return the unique identifier
 */
protected UniqueId createTimeSeriesUniqueId(long oid, Instant verInstant, Instant corrInstant) {
  // value carries the data-point prefix; version encodes the version instant
  // followed by the correction offset as an ISO-8601 duration
  final String value = DATA_POINT_PREFIX + oid;
  final String version = verInstant.toString() + Duration.between(verInstant, corrInstant).toString();
  return UniqueId.of(getUniqueIdScheme(), value, version);
}
/**
 * Extracts the object row id from the object identifier.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the identifier is not from this master
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  // both document-style and data-point-style (DP-prefixed) ids resolve to the same row
  final String raw = objectId.getObjectId().getValue();
  final String numeric = raw.startsWith(DATA_POINT_PREFIX) ? raw.substring(DATA_POINT_PREFIX.length()) : raw;
  try {
    return Long.parseLong(numeric);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the instants from the unique identifier.
 * <p>
 * The version string is an instant followed by an ISO-8601 duration
 * giving the correction offset.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the instants, version then correction, not null
 * @throws IllegalArgumentException if the version cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  final String version = uniqueId.getVersion();
  try {
    final int durationStart = version.indexOf('P');
    final Instant ver = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant corr = ver.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(ver, corr);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
@Override
protected long extractRowId(UniqueId uniqueId) {
  // document-style id: no duration marker, handled by the superclass
  if (uniqueId.getVersion().indexOf('P') < 0) {
    return super.extractRowId(uniqueId);
  }
  // data-point style id: resolve to the matching document first (not very efficient, but works)
  final VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
  final HistoricalTimeSeriesInfoDocument doc = get(uniqueId.getObjectId(), vc);
  return super.extractRowId(doc.getUniqueId());
}
//-------------------------------------------------------------------------
/**
 * Resolves an object identifier to a unique identifier.
 *
 * @param objectId  the time-series object identifier, not null
 * @param versionCorrection  the version-correction locator to search at, not null
 * @return the unique identifier of the time-series, not null
 * @throws DataNotFoundException if no matching time-series exists
 */
protected UniqueId resolveObjectId(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  checkScheme(objectId);
  final long oid = extractOid(objectId);
  final VersionCorrection vc = versionCorrection.withLatestFixed(now());
  final DbMapSqlParameterSource params = new DbMapSqlParameterSource()
      .addValue("doc_oid", oid)
      .addTimestamp("version_as_of_instant", vc.getVersionAsOf())
      .addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  final String sql = getExtSqlBundle().getSql("SelectUniqueIdByVersionCorrection", params);
  final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
  final UniqueId uniqueId = namedJdbc.query(sql, params, new UniqueIdExtractor(oid));
  if (uniqueId == null) {
    throw new DataNotFoundException("Unable to find time-series: " + objectId.getObjectId());
  }
  return uniqueId;
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * Each row carries one external identifier; rows for a document are assumed
 * contiguous (grouping relies on consecutive DOC_ID values). Stateful, so a
 * new instance must be used per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// id of the document currently being built (-1 before the first row)
private long _lastDocId = -1;
// info of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
// completed documents in result-set order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
// start a new document when the id changes
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// accumulate this row's external identifier onto the current document's bundle
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// builds a new document from the current row and appends it to the result list
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the linked data-point series shares the oid but carries the DP prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a LocalDateDoubleTimeSeries.
 * <p>
 * Repeated dates are collapsed to the first row seen; assumes the SQL
 * orders rows so the first row per date is the desired correction.
 */
protected final class DataPointsExtractor implements ResultSetExtractor<LocalDateDoubleTimeSeries> {
@Override
public LocalDateDoubleTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
final List<LocalDate> dates = new ArrayList<LocalDate>(256);
final List<Double> values = new ArrayList<Double>(256);
LocalDate last = null;
while (rs.next()) {
// NOTE(review): fromSqlDateAllowNull may return null, which would NPE on the next line - confirm POINT_DATE is non-nullable
LocalDate date = DbDateUtils.fromSqlDateAllowNull(rs.getDate("POINT_DATE"));
if (date.equals(last) == false) {
last = date;
// a null value marks a removed point and is skipped
Double value = (Double) rs.getObject("POINT_VALUE");
if (value != null) {
dates.add(date);
values.add(value);
}
}
}
return new ArrayLocalDateDoubleTimeSeries(dates, values);
}
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a UniqueId.
 * <p>
 * Only the first row is examined; returns null if the result set is empty.
 */
protected final class UniqueIdExtractor implements ResultSetExtractor<UniqueId> {
  private final long _objectId;
  public UniqueIdExtractor(final long objectId) {
    _objectId = objectId;
  }
  @Override
  public UniqueId extractData(final ResultSet rs) throws SQLException, DataAccessException {
    if (rs.next() == false) {
      return null;  // no matching row
    }
    Timestamp ver = rs.getTimestamp("max_ver_instant");
    Timestamp corr = rs.getTimestamp("max_corr_instant");
    if (ver == null) {
      // no aggregate instants: fall back to the document's own instants
      ver = rs.getTimestamp("ver_from_instant");
      corr = rs.getTimestamp("corr_from_instant");
    }
    final Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
    final Instant corrInstant = (corr == null ? verInstant : DbDateUtils.fromSqlTimestamp(corr));
    return createTimeSeriesUniqueId(_objectId, verInstant, corrInstant);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a ManageableHistoricalTimeSeries.
 * <p>
 * Builds the metadata (identifier and instants) from the first row only;
 * the data points are loaded by a separate query.
 */
protected final class ManageableHTSExtractor implements ResultSetExtractor<ManageableHistoricalTimeSeries> {
// the object row id used to build the unique identifier
private final long _objectId;
public ManageableHTSExtractor(final long objectId) {
_objectId = objectId;
}
@Override
public ManageableHistoricalTimeSeries extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
Timestamp ver = rs.getTimestamp("max_ver_instant");
Timestamp corr = rs.getTimestamp("max_corr_instant");
// no aggregate instants: fall back to the document's own instants
if (ver == null) {
ver = rs.getTimestamp("ver_from_instant");
corr = rs.getTimestamp("corr_from_instant");
}
Instant verInstant = DbDateUtils.fromSqlTimestamp(ver);
Instant corrInstant = (corr != null ? DbDateUtils.fromSqlTimestamp(corr) : verInstant);
ManageableHistoricalTimeSeries hts = new ManageableHistoricalTimeSeries();
hts.setUniqueId(createTimeSeriesUniqueId(_objectId, verInstant, corrInstant));
hts.setVersionInstant(verInstant);
hts.setCorrectionInstant(corrInstant);
// hts.setEarliestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("min_point_date")));
// hts.setLatestDate(DbDateUtils.fromSqlDateAllowNull(rs.getDate("max_point_date")));
// hts.setEarliestValue(rs.getDouble("earliest_point_value"));
// hts.setLatestValue(rs.getDouble("latest_point_value"));
// returns after the first row; subsequent rows are ignored
return hts;
}
return null;
}
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* The SQL is stored externally in {@code DbHistoricalTimeSeriesMaster.extsql}.
* Alternate databases or specific SQL requirements can be handled using database
* specific overrides, such as {@code DbHistoricalTimeSeriesMaster-MySpecialDB.extsql}.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 * Distinguishes time-series object ids (e.g. "DP1234") from the plain numeric
 * document object ids within the same identifier scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data field names.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data source names.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data provider names.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation time names.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Worker to which all data-point (time-series values) operations are delegated.
 */
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
  return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
  return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
  return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
  return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
  return _observationTimeTable;
}
/**
 * Gets the worker handling all data-point database operations.
 *
 * @return the worker, not null
 */
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
  return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Gets the meta-data lists (data fields, sources, providers, observation times)
 * that the request asks for, reading each from its dimension table.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
  // query only the dimension tables the caller opted into
  if (request.isDataFields()) {
    metaData.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    metaData.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    metaData.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    metaData.setObservationTimes(getObservationTimeTable().names());
  }
  return metaData;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series information documents matching the request.
 * <p>
 * Builds the named SQL parameters from the request, then delegates to the
 * paged search using the externally-defined "Search"/"SearchCount" SQL.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // an empty object-id list or an unsatisfiable id search can never match; skip the database entirely
  if ((objectIds != null && objectIds.size() == 0) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  // register the key_scheme/key_value parameters exactly once, and only when the id search
  // actually constrains the SQL (previously the same parameters were added by two
  // back-to-back loops, the first running even when alwaysMatches() made them unused)
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  if (objectIds != null) {
    // the list is non-empty here (the empty case returned above), so the trailing ", " strip is safe
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL fragment matching any of the ids in a single bundle.
 * <p>
 * Produces "(key_scheme = :key_scheme0 AND key_value = :key_value0) OR (...) ..."
 * referencing the numbered parameters registered by the caller.
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null (empty for an empty search)
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder buf = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      buf.append("OR ");
    }
    buf.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return buf.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * <p>
 * A version containing 'P' is treated as an encoded version-correction pair
 * (the format used for data-point identifiers) and resolved by object id;
 * otherwise the identifier is looked up directly.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at the given version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the version-correction history of a time-series information document.
 *
 * @param request  the history request, not null
 * @return the requested history, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
  return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new time-series information document, rejecting near-duplicates.
 * <p>
 * Before delegating to the standard add, an exact-match search on data field,
 * data source, data provider, observation time and external id bundle is run;
 * any hit causes the add to be refused.
 *
 * @param document  the document to add, not null
 * @return the added document with identifiers set, not null
 * @throws DataDuplicationException if an equivalent document already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
  ArgumentChecker.notNull(info, "document.info");
  ArgumentChecker.notNull(info.getName(), "document.info.name");
  ArgumentChecker.notNull(info.getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(info.getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(info.getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(info.getObservationTime(), "document.info.observationTime");
  final HistoricalTimeSeriesInfoSearchRequest duplicateCheck = new HistoricalTimeSeriesInfoSearchRequest();
  duplicateCheck.setDataField(info.getDataField());
  duplicateCheck.setDataSource(info.getDataSource());
  duplicateCheck.setDataProvider(info.getDataProvider());
  duplicateCheck.setObservationTime(info.getObservationTime());
  duplicateCheck.setExternalIdSearch(new ExternalIdSearch(info.getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  final HistoricalTimeSeriesInfoSearchResult matches = search(duplicateCheck);
  if (matches.getDocuments().isEmpty() == false) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + matches.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
/**
 * Inserts a new version of a document into the database.
 * <p>
 * Writes the main document row plus the external-id association rows,
 * creating new idkey rows only for scheme/value pairs not already stored,
 * then stamps the document and its info with the new unique identifier.
 *
 * @param document  the document, not null
 * @return the same document instance, updated with identifiers, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document.getInfo(), "document.info");
  ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
  ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
  // doc_id is unique per version; doc_oid is the stable object id, reused from the
  // existing unique identifier when this is a new version of an existing document
  final long docId = nextId("hts_master_seq");
  final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
  // the arguments for inserting into the table
  final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
  final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
      .addValue("doc_id", docId)
      .addValue("doc_oid", docOid)
      .addTimestamp("ver_from_instant", document.getVersionFromInstant())
      .addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
      .addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
      .addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
      // ensure() inserts the dimension value if absent and returns its row id
      .addValue("name_id", getNameTable().ensure(info.getName()))
      .addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
      .addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
      .addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
      .addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
  // the arguments for inserting into the idkey tables
  final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
  final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
  final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
  for (ExternalIdWithDates id : info.getExternalIdBundle()) {
    // one association row per external id, carrying its validity window
    final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
        .addValue("doc_id", docId)
        .addValue("key_scheme", id.getExternalId().getScheme().getName())
        .addValue("key_value", id.getExternalId().getValue())
        .addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
        .addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
    assocList.add(assocArgs);
    if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
      // select avoids creating unnecessary id, but id may still not be used
      final long idKeyId = nextId("hts_idkey_seq");
      final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
          .addValue("idkey_id", idKeyId)
          .addValue("key_scheme", id.getExternalId().getScheme().getName())
          .addValue("key_value", id.getExternalId().getValue());
      idKeyList.add(idkeyArgs);
    }
  }
  // insert: main row first, then new idkeys, then the doc-to-idkey associations
  final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
  final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
  final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
  getJdbcTemplate().update(sqlDoc, docArgs);
  getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
  getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
  // set the uniqueId; the time-series object id is the document's with the "DP" prefix
  final UniqueId uniqueId = createUniqueId(docOid, docId);
  info.setUniqueId(uniqueId);
  document.setUniqueId(uniqueId);
  document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
  return document;
}
//-------------------------------------------------------------------------
// All data-point operations delegate to the DbHistoricalTimeSeriesDataPointsWorker.
/**
 * Gets the time-series data points for a unique identifier within a date range.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the inclusive start date of the range
 * @param toDateInclusive  the inclusive end date of the range
 * @return the time-series, as returned by the worker
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
/**
 * Gets the time-series data points for an object identifier at a version-correction,
 * within a date range.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param fromDateInclusive  the inclusive start date of the range
 * @param toDateInclusive  the inclusive end date of the range
 * @return the time-series, as returned by the worker
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
}
/**
 * Gets summary information about the time-series identified by the unique identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the summary, as returned by the worker
 */
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
  return getDataPointsWorker().getSummary(uniqueId);
}
/**
 * Gets summary information about the time-series at a version-correction.
 * NOTE(review): this overload has no @Override — presumably it is not declared
 * on the HistoricalTimeSeriesMaster interface; confirm.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the summary, as returned by the worker
 */
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
/**
 * Updates (appends to) the data points of a time-series.
 *
 * @param objectId  the object identifier, not null
 * @param series  the data points to add
 * @return the new unique identifier of the series, as returned by the worker
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
/**
 * Corrects existing data points of a time-series.
 *
 * @param objectId  the object identifier, not null
 * @param series  the corrected data points
 * @return the new unique identifier of the series, as returned by the worker
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
/**
 * Removes the data points of a time-series within a date range.
 *
 * @param objectId  the object identifier, not null
 * @param fromDateInclusive  the inclusive start date of the range
 * @param toDateInclusive  the inclusive end date of the range
 * @return the new unique identifier of the series, as returned by the worker
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
  return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * Accepts both document object ids and data-point object ids; the latter
 * carry the "DP" prefix, which is stripped before parsing.
 * (Original javadoc incorrectly described the return as a date.)
 *
 * @param objectId  the object identifier, not null
 * @return the database row id
 * @throws IllegalArgumentException if the value is not numeric, i.e. not from this master
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  String value = objectId.getObjectId().getValue();
  if (value.startsWith(DATA_POINT_PREFIX)) {
    value = value.substring(DATA_POINT_PREFIX.length());
  }
  try {
    return Long.parseLong(value);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the version-correction instants encoded in a unique identifier's version.
 * <p>
 * The version has the form {@code <version-instant>P<correction-duration>}: the text
 * before the 'P' parses as an OffsetDateTime, and the remainder (starting at 'P') is
 * an ISO-8601 duration giving the correction instant's offset from the version instant.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the version-correction instants, not null
 * @throws IllegalArgumentException if the version cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * The query yields one row per (document, external id) combination; this
 * extractor groups consecutive rows with the same DOC_ID back into a single
 * document, accumulating the external ids into the info's bundle.
 * NOTE(review): correctness relies on the SQL ordering rows by DOC_ID — confirm
 * against the extsql. Stateful, so a fresh instance must be used per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
  // DOC_ID of the most recently built document; used to detect group boundaries
  private long _lastDocId = -1;
  // info of the document currently being accumulated
  private ManageableHistoricalTimeSeriesInfo _info;
  // completed documents, in row order
  private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
  @Override
  public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
    while (rs.next()) {
      final long docId = rs.getLong("DOC_ID");
      if (_lastDocId != docId) {
        // first row of a new document: build the document shell
        _lastDocId = docId;
        buildHistoricalTimeSeries(rs, docId);
      }
      // accumulate the external id carried by this row, if any; scheme/value may be
      // null (presumably a join row for a document without ids — confirm in the SQL)
      final String idScheme = rs.getString("KEY_SCHEME");
      final String idValue = rs.getString("KEY_VALUE");
      final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
      final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
      if (idScheme != null && idValue != null) {
        ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
        _info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
      }
    }
    return _documents;
  }
  /**
   * Builds the document shell for the current row and appends it to the result list.
   * Leaves the external id bundle empty; ids are added row by row in extractData.
   *
   * @param rs  the result set positioned on the first row of the document, not null
   * @param docId  the document row id of the current group
   * @throws SQLException if a column cannot be read
   */
  private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
    final long docOid = rs.getLong("DOC_OID");
    final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
    final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
    final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
    final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
    final String name = rs.getString("NAME");
    final String dataField = rs.getString("DATA_FIELD");
    final String dataSource = rs.getString("DATA_SOURCE");
    final String dataProvider = rs.getString("DATA_PROVIDER");
    final String observationTime = rs.getString("OBSERVATION_TIME");
    UniqueId uniqueId = createUniqueId(docOid, docId);
    _info = new ManageableHistoricalTimeSeriesInfo();
    _info.setUniqueId(uniqueId);
    _info.setName(name);
    _info.setDataField(dataField);
    _info.setDataSource(dataSource);
    _info.setDataProvider(dataProvider);
    _info.setObservationTime(observationTime);
    _info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
    // the linked time-series object id reuses the document oid with the "DP" prefix
    _info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
    HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
    doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
    doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
    doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
    doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
    _documents.add(doc);
  }
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* The SQL is stored externally in {@code DbHistoricalTimeSeriesMaster.extsql}.
* Alternate databases or specific SQL requirements can be handled using database
* specific overrides, such as {@code DbHistoricalTimeSeriesMaster-MySpecialDB.extsql}.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 * Distinguishes time-series object ids (e.g. "DP1234") from the plain numeric
 * document object ids within the same identifier scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names.
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data field names.
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data source names.
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data provider names.
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation time names.
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Worker to which all data-point (time-series values) operations are delegated.
 */
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
  return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
  return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
  return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
  return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
  return _observationTimeTable;
}
/**
 * Gets the worker handling all data-point database operations.
 *
 * @return the worker, not null
 */
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
  return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Gets the meta-data lists (data fields, sources, providers, observation times)
 * that the request asks for, reading each from its dimension table.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
  // query only the dimension tables the caller opted into
  if (request.isDataFields()) {
    metaData.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    metaData.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    metaData.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    metaData.setObservationTimes(getObservationTimeTable().names());
  }
  return metaData;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series information documents matching the request.
 * <p>
 * Builds the named SQL parameters from the request, then delegates to the
 * paged search using the externally-defined "Search"/"SearchCount" SQL.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // an empty object-id list or an unsatisfiable id search can never match; skip the database entirely
  if ((objectIds != null && objectIds.size() == 0) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  // register the key_scheme/key_value parameters exactly once, and only when the id search
  // actually constrains the SQL (previously the same parameters were added by two
  // back-to-back loops, the first running even when alwaysMatches() made them unused)
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  if (objectIds != null) {
    // the list is non-empty here (the empty case returned above), so the trailing ", " strip is safe
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL fragment matching any of the ids in a single bundle.
 * <p>
 * Produces "(key_scheme = :key_scheme0 AND key_value = :key_value0) OR (...) ..."
 * referencing the numbered parameters registered by the caller.
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null (empty for an empty search)
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder buf = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      buf.append("OR ");
    }
    buf.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return buf.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * <p>
 * A version containing 'P' is treated as an encoded version-correction pair
 * (the format used for data-point identifiers) and resolved by object id;
 * otherwise the identifier is looked up directly.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at the given version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the version-correction history of a time-series information document.
 *
 * @param request  the history request, not null
 * @return the requested history, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
  return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
  ArgumentChecker.notNull(info, "document.info");
  ArgumentChecker.notNull(info.getName(), "document.info.name");
  ArgumentChecker.notNull(info.getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(info.getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(info.getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(info.getObservationTime(), "document.info.observationTime");
  // reject the add when an equivalent active series already exists
  HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
  request.setDataField(info.getDataField());
  request.setDataSource(info.getDataSource());
  request.setDataProvider(info.getDataProvider());
  request.setObservationTime(info.getObservationTime());
  request.setExternalIdSearch(new ExternalIdSearch(info.getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  HistoricalTimeSeriesInfoSearchResult result = search(request);
  if (result.getDocuments().isEmpty() == false) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
/**
* Inserts a new document.
* <p>
* Writes the main document row, then the document-to-idkey association rows,
* creating idkey rows only for scheme/value pairs not already stored.
* Finally stamps the generated unique identifier back onto the document.
*
* @param document the document, not null
* @return the new document, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// new row id always allocated; object id is reused when the document already has one (version insert)
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table; dimension values are interned via ensure()
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating an unnecessary id, but the id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then new idkey rows, then associations referencing them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId, and derive the linked data-point object id (same oid, "DP" prefix)
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
// Data-point operations: all delegate to DbHistoricalTimeSeriesDataPointsWorker.
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
}
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
// NOTE(review): no @Override here - presumably not part of the master interface; confirm
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
// appends/overwrites data points at the latest version
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
// applies corrections to existing data points
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
// removes data points in the inclusive date range
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * Accepts both the document form and the data-point form (value prefixed
 * with {@code DP}); the prefix is stripped before parsing.
 *
 * @param objectId the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the identifier is not from this master
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  String value = objectId.getObjectId().getValue();
  if (value.startsWith(DATA_POINT_PREFIX)) {
    value = value.substring(DATA_POINT_PREFIX.length());
  }
  try {
    return Long.parseLong(value);
  } catch (NumberFormatException ex) {
    // parseLong is the only statement in the try and only throws NumberFormatException,
    // so the previous catch of RuntimeException was over-broad; the javadoc previously
    // claimed a date was returned, which was a copy-paste error
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the instants from the unique identifier.
 * <p>
 * The version string is encoded as a version instant directly followed by a
 * correction duration in ISO-8601 form (leading {@code P}).
 *
 * @param uniqueId the unique identifier, not null
 * @return the instants, version, correction, not null
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    // split at the duration's leading 'P'
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
* <p>
* Rows must arrive grouped by document id: the first row of each group
* builds the document, and every row of the group contributes one
* external-id entry. Stateful - use a fresh instance per query.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// doc id of the group currently being built (-1 before the first row)
private long _lastDocId = -1;
// info object of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
// accumulated result, in row order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
// first row of a new document group
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// each row also carries one external id (may be null on an outer join)
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// Builds one document from the current row and appends it to the result list.
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// derive the linked data-point object id (same oid, "DP" prefix)
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
// NOTE(review): "MergeMethods" was merge-tool residue - a second, conflicting copy of this file follows; the duplication must be resolved before this compiles
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
* The default scheme for unique identifiers.
*/
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
* The prefix used for data point unique identifiers.
*/
protected static final String DATA_POINT_PREFIX = "DP";
/**
* Dimension table for time-series names.
*/
private final NamedDimensionDbTable _nameTable;
/**
* Dimension table for data fields.
*/
private final NamedDimensionDbTable _dataFieldTable;
/**
* Dimension table for data sources.
*/
private final NamedDimensionDbTable _dataSourceTable;
/**
* Dimension table for data providers.
*/
private final NamedDimensionDbTable _dataProviderTable;
/**
* Dimension table for observation times.
*/
private final NamedDimensionDbTable _observationTimeTable;
/**
* Worker that handles data-point storage and retrieval.
*/
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share one id sequence
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
* Gets the dimension table helper for time-series names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
* Gets the dimension table helper for data fields.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
* Gets the dimension table helper for data sources.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
* Gets the dimension table helper for data providers.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
* Gets the dimension table helper for observation times.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
* Gets the data points worker.
*
* @return the worker, not null
*/
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  // populate only the dimensions the request flags ask for
  if (request.isDataFields()) {
    result.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    result.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    result.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    result.setObservationTimes(getObservationTimeTable().names());
  }
  return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit searches that can never match (empty id list or unmatchable external-id search)
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
// pin "latest" to a concrete instant so paging queries are consistent
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
// NOTE(review): this block duplicates the parameter binding done again just below
// when the search is not always-matching - likely merge residue; the second pass
// overwrites identical values so behavior is unaffected, but one copy should go
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
// build a comma-separated oid list for direct SQL inclusion (oids are numeric, not user input)
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
// two statements: the page query and the total-count query
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism, so the WHERE fragment is
 * built by hand as scheme/value equality clauses joined with {@code OR}.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    sql.append("(key_scheme = :key_scheme").append(i)
        .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version == null || version.contains("P") == false) {
    // plain document identifier - direct lookup
    return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
  }
  // a 'P' in the version marks an instant+duration encoding; resolve via version-correction
  return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  // resolve the object id at the requested version-correction instants
  final HistoricalTimeSeriesDocumentExtractor extractor = new HistoricalTimeSeriesDocumentExtractor();
  return doGetByOidInstants(objectId, versionCorrection, extractor, "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
  // standard history query handled by the base class
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  return doHistory(request, result, new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
* Adds a document, rejecting the add when an equivalent active series
* (same field/source/provider/observation-time and exact id bundle) exists.
*
* @param document the document, not null
* @return the added document, not null
* @throws DataDuplicationException if a similar series already exists
*/
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// duplicate check via an exact-match search before delegating to the base add
HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
request.setDataField(document.getInfo().getDataField());
request.setDataSource(document.getInfo().getDataSource());
request.setDataProvider(document.getInfo().getDataProvider());
request.setObservationTime(document.getInfo().getObservationTime());
request.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
HistoricalTimeSeriesInfoSearchResult result = search(request);
if (result.getDocuments().size() > 0) {
throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
}
return super.add(document);
}
//-------------------------------------------------------------------------
/**
* Inserts a new document.
* <p>
* Writes the main document row, then the document-to-idkey association rows,
* creating idkey rows only for scheme/value pairs not already stored.
* Finally stamps the generated unique identifier back onto the document.
*
* @param document the document, not null
* @return the new document, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// new row id always allocated; object id is reused when the document already has one (version insert)
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table; dimension values are interned via ensure()
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating an unnecessary id, but the id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then new idkey rows, then associations referencing them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId, and derive the linked data-point object id (same oid, "DP" prefix)
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  // data-point retrieval for an explicit date range is handled by the worker
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // no date bounds: an open range filter fetches the whole series
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  // a versioned data-point identifier carries its own version-correction encoding;
  // anything else is read at the latest version
  final boolean encodesInstants = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = encodesInstants ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
* Gets the time-series, applying the filter's date range and max-points limit.
* <p>
* Fetches the series metadata first, then the data points in a second query.
* NOTE(review): NamedParameterJdbcOperations, DataNotFoundException,
* ArrayLocalDateDoubleTimeSeries, DataPointsExtractor and ManageableHTSExtractor
* do not appear in this file's visible import block - confirm they resolve
* (likely merge residue in the imports).
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator, not null
* @param filter the retrieval filter, not null
* @return the time-series, not null
*/
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return: null = all, positive = first N,
// negative = last N, zero = none
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, skipping the query when the requested range is empty
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
/**
 * Gets the full time-series for the latest version of a document.
 *
 * @param uniqueId the unique identifier, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
  // fixed merge artifact: this previously returned getDataPointsWorker().getSummary(uniqueId),
  // whose summary result cannot satisfy the declared ManageableHistoricalTimeSeries return
  // type; delegate to the unbounded filter-based overload instead
  return getTimeSeries(uniqueId, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
// Gets summary information for the series identified by the object id at the
// given version-correction, delegating to the data-points worker.
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
//-------------------------------------------------------------------------
@Override
// Appends data points to the series, delegating to the data-points worker;
// returns the unique identifier of the updated series.
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
/**
* Checks the data points can be inserted.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
*/
/**
* Inserts the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
@Override
// Corrects existing data points of the series, delegating to the data-points worker;
// returns the unique identifier of the corrected series.
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
/**
* Corrects the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
@Override
// Removes the data points in the inclusive date range, delegating to the
// data-points worker; returns the unique identifier of the updated series.
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
/**
* Removes data points.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the start date to remove from, not null
* @param toDateInclusive the end date to remove to, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
 * Extracts the object row id from the object identifier.
 * A leading data-point prefix ("DP") is stripped so that both document
 * and data-point identifiers resolve to the same database row.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the value is not numeric
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  final String raw = objectId.getObjectId().getValue();
  final String digits = raw.startsWith(DATA_POINT_PREFIX) ? raw.substring(DATA_POINT_PREFIX.length()) : raw;
  try {
    return Long.parseLong(digits);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the version and correction instants from the unique identifier.
 * The version string is an offset date-time followed by an ISO-8601 duration
 * (beginning with 'P') giving the correction offset from the version instant.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the version-correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
* Resolves an object identifier to a unique identifier.
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator to search at, not null
* @return the time-series, not null
*/
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// Doc id of the last row processed; -1 until the first row is seen.
private long _lastDocId = -1;
// Info object of the document currently being populated with external ids.
private ManageableHistoricalTimeSeriesInfo _info;
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
// One document may span several rows (one per external id); a change in DOC_ID
// starts a new document and subsequent rows with the same DOC_ID add ids to it.
// NOTE(review): this assumes the rows arrive ordered by DOC_ID - verify the SQL.
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
// Rows with no external-id key columns contribute no id to the bundle.
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// Builds a new document from the current row and appends it to the result list.
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
// Bundle starts empty; external ids are appended row by row in extractData.
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// The data-point object id shares the scheme but prefixes the value with "DP".
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names (hts_name).
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields (hts_data_field).
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources (hts_data_source).
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers (hts_data_provider).
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times (hts_observation_time).
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Worker handling the data-point operations for this master.
 */
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
 * Creates an instance.
 *
 * @param dbConnector  the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
// Externally-managed SQL, resolved per database dialect.
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// All dimension tables share the hts_dimension_seq sequence.
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for the time-series name dimension.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
 * Gets the dimension table helper for the data-field dimension.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
 * Gets the dimension table helper for the data-source dimension.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
 * Gets the dimension table helper for the data-provider dimension.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
 * Gets the dimension table helper for the observation-time dimension.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
 * Gets the worker that handles the data-point operations.
 *
 * @return the worker, not null
 */
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Gets the meta-data of the master, loading only the dimensions that the
 * request explicitly asks for.
 *
 * @param request  the meta-data request, not null
 * @return the populated meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
  if (request.isDataFields()) {
    metaData.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    metaData.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    metaData.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    metaData.setObservationTimes(getObservationTimeTable().names());
  }
  return metaData;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// Short-circuit: an empty object-id list or an unmatchable external-id search can never match.
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
// Text criteria are wildcard-adjusted for the database dialect; null values are skipped.
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
// NOTE(review): this loop is repeated verbatim in the block just below, registering the same
// key_scheme/key_value parameters twice with identical values - redundant; one copy could go.
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
// The id-matching SQL fragment is built in Java (too complex for the extsql mechanism).
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
// Build a comma-separated list of row ids for an SQL IN clause.
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  // Emits "(key_scheme = :key_schemeN AND key_value = :key_valueN) " clauses joined by "OR ".
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    sql.append("(key_scheme = :key_scheme").append(i).append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * A version string containing a duration marker ('P') is a data-point style
 * identifier: its version-correction is decoded and the lookup is performed
 * by object id instead.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
@Override
// Gets the document for the object id as seen at the given version-correction.
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
@Override
// Gets the version-correction history of a document matching the request.
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new time-series document, rejecting duplicates.
 *
 * @param document  the document to add, not null
 * @return the added document with unique identifier set, not null
 * @throws DataDuplicationException if an equivalent series already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// Duplicate check: search for an existing document with the same data field, source,
// provider and observation time whose external id bundle matches exactly.
HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
request.setDataField(document.getInfo().getDataField());
request.setDataSource(document.getInfo().getDataSource());
request.setDataProvider(document.getInfo().getDataProvider());
request.setObservationTime(document.getInfo().getObservationTime());
request.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
HistoricalTimeSeriesInfoSearchResult result = search(request);
if (result.getDocuments().size() > 0) {
throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
}
return super.add(document);
}
//-------------------------------------------------------------------------
/**
 * Inserts a new document row plus its external-id key and association rows.
 *
 * @param document  the document, not null
 * @return the new document with unique identifier set, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// New row id from the sequence; the object id is preserved when re-inserting
// a new version of an existing document.
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert the document row, then batch-insert the new id keys and associations
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both the document and the contained info
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
@Override
// Gets the time-series for the unique identifier restricted to the inclusive
// date range, delegating to the data-points worker.
public ManageableHistoricalTimeSeries getTimeSeries(
UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
/**
 * Gets the full time-series (no date restriction) for the object id at the
 * given version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
/**
 * Gets a time-series for the unique identifier, applying the given filter.
 * A versioned data-point style identifier carries its own version-correction;
 * otherwise LATEST is used.
 *
 * @param uniqueId  the unique identifier, not null
 * @param filter  the filter restricting the result, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets a time-series for the object id at the given version-correction,
 * restricted by the supplied filter (date range and maximum number of points).
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param filter  the filter restricting dates and number of points, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata; null means no matching series row exists
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points (fetch descending, count negated)
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, but only when the requested date range is non-empty
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
// NOTE(review): despite the name, this delegates to getSummary(uniqueId) on the worker -
// confirm the worker's getSummary(UniqueId) really returns a ManageableHistoricalTimeSeries,
// or whether this should call the worker's getTimeSeries instead.
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
// Gets summary information for the series identified by the object id at the
// given version-correction, delegating to the data-points worker.
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
//-------------------------------------------------------------------------
@Override
// Appends data points to the series, delegating to the data-points worker;
// returns the unique identifier of the updated series.
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
/**
* Checks the data points can be inserted.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
*/
/**
* Inserts the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
@Override
// Corrects existing data points of the series, delegating to the data-points worker;
// returns the unique identifier of the corrected series.
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
/**
* Corrects the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
@Override
// Removes the data points in the inclusive date range, delegating to the
// data-points worker; returns the unique identifier of the updated series.
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
/**
* Removes data points.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the start date to remove from, not null
* @param toDateInclusive the end date to remove to, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
 * Extracts the object row id from the object identifier.
 * A leading data-point prefix ("DP") is stripped so that both document
 * and data-point identifiers resolve to the same database row.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the value is not numeric
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  final String raw = objectId.getObjectId().getValue();
  final String digits = raw.startsWith(DATA_POINT_PREFIX) ? raw.substring(DATA_POINT_PREFIX.length()) : raw;
  try {
    return Long.parseLong(digits);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the version and correction instants from the unique identifier.
 * The version string is an offset date-time followed by an ISO-8601 duration
 * (beginning with 'P') giving the correction offset from the version instant.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the version-correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
* Resolves an object identifier to a unique identifier.
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator to search at, not null
* @return the time-series, not null
*/
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
private long _lastDocId = -1;
private ManageableHistoricalTimeSeriesInfo _info;
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
// One document may span several rows (one per external id); a change in DOC_ID
// starts a new document and subsequent rows with the same DOC_ID add ids to it.
// NOTE(review): this assumes the rows arrive ordered by DOC_ID - verify the SQL.
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
// Rows with no external-id key columns contribute no id to the bundle.
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
// NOTE(review): stray "KeepBothMethods" merge-tool marker commented out — it is not valid Java.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import com.opengamma.DataDuplicationException;
import com.opengamma.DataNotFoundException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* .
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
* The default scheme for unique identifiers.
*/
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
* The prefix used for data point unique identifiers.
*/
protected static final String DATA_POINT_PREFIX = "DP";
/**
* Dimension table for the time-series name.
*/
private final NamedDimensionDbTable _nameTable;
/**
* Dimension table for the data field.
*/
private final NamedDimensionDbTable _dataFieldTable;
/**
* Dimension table for the data source.
*/
private final NamedDimensionDbTable _dataSourceTable;
/**
* Dimension table for the data provider.
*/
private final NamedDimensionDbTable _dataProviderTable;
/**
* Dimension table for the observation time.
*/
private final NamedDimensionDbTable _observationTimeTable;
/**
* Worker that handles the data-point operations for this master.
*/
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
* <p>
* All dimension tables share the single sequence "hts_dimension_seq".
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
* Gets the name dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
* Gets the data-field dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
* Gets the data-source dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
* Gets the data-provider dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
* Gets the observation-time dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
* Gets the data points worker.
*
* @return the worker, not null
*/
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Queries the meta-data dimensions supported by this master.
 *
 * @param request the meta-data request selecting which dimensions to return, not null
 * @return the populated meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult metaData = new HistoricalTimeSeriesInfoMetaDataResult();
  // each dimension is loaded from its table only when the request asks for it
  if (request.isDataFields()) {
    metaData.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    metaData.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    metaData.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    metaData.setObservationTimes(getObservationTimeTable().names());
  }
  return metaData;
}
//-------------------------------------------------------------------------
/**
 * Searches for documents matching the request.
 * <p>
 * Fix: the external-id scheme/value parameters were bound twice — once
 * unconditionally and once inside the {@code alwaysMatches() == false} guard.
 * Only the guarded binding is kept, matching the SQL which only references the
 * key parameters when {@code sql_search_external_ids} is set.
 *
 * @param request the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // short-circuit when the request can never match anything
  if ((objectIds != null && objectIds.size() == 0) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  // bind each external-id scheme/value pair exactly once, only when filtering is needed
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  // object-id filter as a comma-separated list of row ids
  if (objectIds != null) {
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);  // drop trailing ", "
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL to match all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    // each clause references the bound parameters :key_schemeN / :key_valueN
    sql.append("(key_scheme = :key_scheme").append(i)
        .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * <p>
 * A version string containing 'P' encodes explicit version-correction instants,
 * which are decoded and used for an object-id lookup instead.
 *
 * @param uniqueId the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at a version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a document.
 *
 * @param request  the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new document, rejecting near-duplicates.
 * <p>
 * An exact search on field/source/provider/observation-time/id-bundle is run
 * first; if any match exists the add is refused.
 *
 * @param document the document to add, not null
 * @return the added document, not null
 * @throws DataDuplicationException if a similar row already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
  ArgumentChecker.notNull(info, "document.info");
  ArgumentChecker.notNull(info.getName(), "document.info.name");
  ArgumentChecker.notNull(info.getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(info.getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(info.getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(info.getObservationTime(), "document.info.observationTime");
  // look for an existing row with the same key fields and exact id bundle
  final HistoricalTimeSeriesInfoSearchRequest searchRequest = new HistoricalTimeSeriesInfoSearchRequest();
  searchRequest.setDataField(info.getDataField());
  searchRequest.setDataSource(info.getDataSource());
  searchRequest.setDataProvider(info.getDataProvider());
  searchRequest.setObservationTime(info.getObservationTime());
  searchRequest.setExternalIdSearch(new ExternalIdSearch(info.getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  final HistoricalTimeSeriesInfoSearchResult duplicates = search(searchRequest);
  if (duplicates.getDocuments().isEmpty() == false) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + duplicates.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
//-------------------------------------------------------------------------
/**
* Inserts a new document.
* <p>
* Writes one row into the master table plus one association row per external
* id, creating id-key rows only when the scheme/value pair is not yet stored.
*
* @param document the document, not null
* @return the new document, with unique identifiers set, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// a new version row always gets a fresh doc id; the oid is reused when updating
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: master row first, then missing id keys, then the associations
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both the info and the document
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series data points for a unique identifier over a date range.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the inclusive start date, null for unbounded
 * @param toDateInclusive the inclusive end date, null for unbounded
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  // delegate straight to the data-points worker
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
//-------------------------------------------------------------------------
/**
 * Gets the complete time-series for a unique identifier.
 * <p>
 * A versioned data-point identifier carries its own version-correction
 * instants; otherwise the latest version-correction is used.
 *
 * @param uniqueId the unique identifier, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc);
}
/**
 * Gets the complete time-series for an object identifier at a version-correction.
 *
 * @param objectId the object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // an unbounded range filter selects every data point
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
/**
 * Gets a filtered time-series for a unique identifier.
 * <p>
 * A versioned data-point identifier carries its own version-correction
 * instants; otherwise the latest version-correction is used.
 *
 * @param uniqueId the unique identifier, not null
 * @param filter the filter restricting dates and point count, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean dataPointId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = dataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets a filtered time-series for an object identifier at a version-correction.
 * <p>
 * Loads the series metadata first, then the data points, applying the filter's
 * date range and max-points limit.
 * NOTE(review): DataNotFoundException, NamedParameterJdbcOperations,
 * ArrayLocalDateDoubleTimeSeries and ManageableHTSExtractor are not imported or
 * declared in this file — presumably provided elsewhere; verify the imports.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param filter  the filter restricting dates and point count, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, unless the requested range is provably empty
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
/**
 * Gets a summary of the time-series for a unique identifier.
 * NOTE(review): HistoricalTimeSeriesSummary is not imported in this file — verify.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the summary, not null
 */
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
/**
 * Gets a summary of the time-series for an object identifier at a version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the summary, not null
 */
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
//-------------------------------------------------------------------------
/**
 * Updates (appends) data points of an existing time-series.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the data points to add, not null
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  // delegate straight to the data-points worker
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.updateTimeSeriesDataPoints(objectId, series);
}
/**
* Checks the data points can be inserted.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
*/
/**
* Inserts the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Corrects existing data points of a time-series.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the corrected data points, not null
 * @return the unique identifier of the corrected series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  // delegate straight to the data-points worker
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.correctTimeSeriesDataPoints(objectId, series);
}
/**
* Corrects the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Removes data points from a time-series over a date range.
 *
 * @param objectId the time-series object identifier, not null
 * @param fromDateInclusive the inclusive start date, not null
 * @param toDateInclusive the inclusive end date, not null
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
  // delegate straight to the data-points worker
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
/**
* Removes data points.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the start date to remove from, not null
* @param toDateInclusive the end date to remove to, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
* Extracts the object row id from the object identifier.
*
* @param objectId the object identifier, not null
* @return the object row id decoded from the identifier value
*/
@Override
protected long extractOid(ObjectIdentifiable objectId) {
// strip the data-point prefix, if present, leaving the numeric row id
String value = objectId.getObjectId().getValue();
if (value.startsWith(DATA_POINT_PREFIX)) {
value = value.substring(DATA_POINT_PREFIX.length());
}
try {
return Long.parseLong(value);
} catch (RuntimeException ex) {
// non-numeric remainder means the identifier was not issued by this master
throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
}
}
/**
 * Decodes the version and correction instants from a unique identifier's version string.
 * <p>
 * The version string is an ISO-8601 offset date-time followed by an ISO-8601
 * duration (starting at the first 'P') giving the correction offset.
 *
 * @param uniqueId the unique identifier, not null
 * @return the version-correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    // keep all parsing inside the try so any failure maps to IllegalArgumentException
    final String version = uniqueId.getVersion();
    final int durationPos = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationPos)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationPos)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
* Resolves an object identifier to a unique identifier.
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator to search at, not null
* @return the time-series, not null
*/
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * Rows are expected grouped by DOC_ID: the first row of each group creates the
 * document, and every row (including the first) may contribute one external-id
 * key to the current document's bundle. Stateful — use a fresh instance per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// doc id of the group currently being accumulated (-1 before the first row)
private long _lastDocId = -1;
// info object of the document currently being accumulated
private ManageableHistoricalTimeSeriesInfo _info;
// completed documents, in row order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
// first row of a new document group: build the document shell
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// each row may also carry one external-id key for the current document
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// Builds one document from the current row and appends it to the result list.
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the time-series object id is the document oid with the "DP" prefix applied
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import com.opengamma.DataDuplicationException;
import com.opengamma.DataNotFoundException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.ArrayLocalDateDoubleTimeSeries;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* .
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 * Distinguishes time-series object identifiers ("DP" + row id) from
 * document object identifiers (plain row id) within the same scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names (hts_name).
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields (hts_data_field).
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources (hts_data_source).
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers (hts_data_provider).
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times (hts_observation_time).
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Worker that handles the data-point (time-series value) operations.
 */
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
 * Creates an instance.
 *
 * @param dbConnector the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share one id sequence (hts_dimension_seq)
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
 * Gets the data points worker.
 *
 * @return the worker, not null
 */
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Returns the requested meta-data dimensions (data fields, sources, providers,
 * observation times) by listing the contents of the dimension tables.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult out = new HistoricalTimeSeriesInfoMetaDataResult();
  if (request.isDataFields()) {
    out.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    out.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    out.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    out.setObservationTimes(getObservationTimeTable().names());
  }
  return out;
}
//-------------------------------------------------------------------------
/**
 * Searches for time-series info documents matching the request.
 * <p>
 * Fix: the original bound the external-id key parameters twice — once
 * unconditionally and once inside the {@code alwaysMatches() == false}
 * branch; the redundant first loop has been removed.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
  ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
  s_logger.debug("search {}", request);
  final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
  final List<ObjectId> objectIds = request.getObjectIds();
  final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
  // short-circuit: an empty object-id list or an unmatchable id search can never return rows
  if ((objectIds != null && objectIds.size() == 0) ||
      (ExternalIdSearch.canMatch(externalIdSearch) == false)) {
    result.setPaging(Paging.of(request.getPagingRequest(), 0));
    return result;
  }
  final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
  final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
  args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
  args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
  args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
  args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
  args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
  args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
  args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
  args.addDateNullIgnored("id_validity_date", request.getValidityDate());
  args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
  // bind the external-id keys only when the search actually constrains the result
  if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
    int i = 0;
    for (ExternalId id : externalIdSearch) {
      args.addValue("key_scheme" + i, id.getScheme().getName());
      args.addValue("key_value" + i, id.getValue());
      i++;
    }
    args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
    args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
    args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
  }
  if (objectIds != null) {
    // build a comma-separated oid list; list is non-empty (checked above)
    StringBuilder buf = new StringBuilder(objectIds.size() * 10);
    for (ObjectId objectId : objectIds) {
      checkScheme(objectId);
      buf.append(extractOid(objectId)).append(", ");
    }
    buf.setLength(buf.length() - 2);  // drop trailing ", "
    args.addValue("sql_search_object_ids", buf.toString());
  }
  args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
  args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
  String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
  searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
  return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * This is too complex for the extsql mechanism.
 *
 * @param idSearch the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    // each clause references the parameters bound by search()
    sql.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * <p>
 * A version string containing 'P' encodes a version/correction instant pair
 * (see {@code extractTimeSeriesInstants}), so such identifiers are routed
 * through the object-id/version-correction lookup instead.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
return get(uniqueId.getObjectId(), vc);
}
return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at a version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a single document.
 *
 * @param request  the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new time-series info document, rejecting duplicates.
 * <p>
 * A duplicate is a document with the same data field, source, provider,
 * observation time and an exactly matching external identifier bundle.
 *
 * @param document  the document to add, not null
 * @return the added document with identifiers set, not null
 * @throws DataDuplicationException if a similar row already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getInfo(), "document.info");
  ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
  ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
  // look for an existing row with the same meta-data and identifiers
  final HistoricalTimeSeriesInfoSearchRequest duplicateSearch = new HistoricalTimeSeriesInfoSearchRequest();
  duplicateSearch.setDataField(document.getInfo().getDataField());
  duplicateSearch.setDataSource(document.getInfo().getDataSource());
  duplicateSearch.setDataProvider(document.getInfo().getDataProvider());
  duplicateSearch.setObservationTime(document.getInfo().getObservationTime());
  duplicateSearch.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  final HistoricalTimeSeriesInfoSearchResult existing = search(duplicateSearch);
  if (existing.getDocuments().isEmpty() == false) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + existing.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
//-------------------------------------------------------------------------
/**
 * Inserts a new document row plus its external-id association rows.
 * <p>
 * Allocates a new document id, binds all column values, then inserts the
 * document row, any missing id-key rows, and the document-to-id-key links.
 * Also derives and sets the linked time-series object id ("DP" prefix).
 *
 * @param document the document, not null
 * @return the new document, not null
 */
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// a fresh row id; the oid is reused from the document when re-versioning
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then new id-keys, then the associations
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both document and info, plus the "DP"-prefixed series oid
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the data points of a series by unique identifier over a date range.
 * Delegates to the data-points worker.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the inclusive start date, may be null
 * @param toDateInclusive  the inclusive end date, may be null
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
//-------------------------------------------------------------------------
/**
 * Gets all data points of a series by unique identifier.
 * <p>
 * A versioned "DP"-prefixed identifier carries its own version/correction
 * instants; any other identifier is resolved at latest.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean versionedSeriesId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = versionedSeriesId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc);
}
/**
 * Gets all data points of a series by object identifier.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // an unbounded range filter selects every data point
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
/**
 * Gets filtered data points of a series by unique identifier.
 * <p>
 * A versioned "DP"-prefixed identifier carries its own version/correction
 * instants; any other identifier is resolved at latest.
 *
 * @param uniqueId  the unique identifier, not null
 * @param filter  the data-point filter, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean versionedSeriesId = uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc = versionedSeriesId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets filtered data points of a series by object identifier.
 * <p>
 * Loads the series metadata first, then fetches data points according to
 * the filter's date range and max-points limit. A positive max-points
 * returns the earliest points, a negative one the latest; zero or an
 * inverted date range yields an empty series.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param filter  the data-point filter, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, unless the requested range is inverted (empty)
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
@Override
/**
 * Gets a summary of a time-series by unique identifier; delegates to the worker.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the summary, not null
 */
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
/**
 * Gets a summary of a time-series by object identifier; delegates to the worker.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the summary, not null
 */
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
//-------------------------------------------------------------------------
/**
 * Appends data points to a series; delegates to the data-points worker.
 *
 * @param objectId  the series object identifier, not null
 * @param series  the data points to add, not null
 * @return the new unique identifier of the series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
/**
* Checks the data points can be inserted.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
*/
/**
* Inserts the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Corrects existing data points of a series; delegates to the data-points worker.
 *
 * @param objectId  the series object identifier, not null
 * @param series  the corrected data points, not null
 * @return the new unique identifier of the series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
/**
* Corrects the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Removes data points of a series in a date range; delegates to the data-points worker.
 *
 * @param objectId  the series object identifier, not null
 * @param fromDateInclusive  the inclusive start date, may be null
 * @param toDateInclusive  the inclusive end date, may be null
 * @return the new unique identifier of the series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
/**
* Removes data points.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the start date to remove from, not null
* @param toDateInclusive the end date to remove to, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * Accepts both document identifiers (numeric value) and time-series
 * identifiers ("DP" prefix followed by the numeric value).
 *
 * @param objectId the object identifier, not null
 * @return the database row id
 * @throws IllegalArgumentException if the value is not numeric
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
String value = objectId.getObjectId().getValue();
if (value.startsWith(DATA_POINT_PREFIX)) {
value = value.substring(DATA_POINT_PREFIX.length());
}
try {
return Long.parseLong(value);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
}
}
/**
 * Extracts the instants from the unique identifier.
 * <p>
 * The version string is an {@code OffsetDateTime} (the version instant)
 * followed by an ISO-8601 duration starting with 'P' (the offset from the
 * version instant to the correction instant).
 *
 * @param uniqueId the unique identifier, not null
 * @return the instants, version, correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
try {
// split at the first 'P': datetime before, duration from 'P' onwards
int pos = uniqueId.getVersion().indexOf('P');
String verStr = uniqueId.getVersion().substring(0, pos);
String corrStr = uniqueId.getVersion().substring(pos);
Instant ver = OffsetDateTime.parse(verStr).toInstant();
Instant corr = ver.plus(Duration.parse(corrStr));
return VersionCorrection.of(ver, corr);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
}
}
//-------------------------------------------------------------------------
/**
* Resolves an object identifier to a unique identifier.
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator to search at, not null
* @return the time-series, not null
*/
//-------------------------------------------------------------------------
/**
 * Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
 * <p>
 * Rows are expected grouped by DOC_ID; the first row of each group builds
 * the document and subsequent rows append external identifiers to it.
 * Stateful — use a fresh instance per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// document id of the row group currently being assembled
private long _lastDocId = -1;
// info object of the document currently being assembled
private ManageableHistoricalTimeSeriesInfo _info;
// accumulated result documents, in row order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
// first row of a new document group
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
// append this row's external identifier to the current document
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
/**
 * Builds a document from the current row and adds it to the result list.
 *
 * @param rs  the result set positioned on the group's first row, not null
 * @param docId  the document row id of the group
 * @throws SQLException if a column cannot be read
 */
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// linked time-series identifier uses the "DP" prefix convention
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* .
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
 * The default scheme for unique identifiers.
 */
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
 * The prefix used for data point unique identifiers.
 * Distinguishes time-series object identifiers ("DP" + row id) from
 * document object identifiers (plain row id) within the same scheme.
 */
protected static final String DATA_POINT_PREFIX = "DP";
/**
 * Dimension table for time-series names (hts_name).
 */
private final NamedDimensionDbTable _nameTable;
/**
 * Dimension table for data fields (hts_data_field).
 */
private final NamedDimensionDbTable _dataFieldTable;
/**
 * Dimension table for data sources (hts_data_source).
 */
private final NamedDimensionDbTable _dataSourceTable;
/**
 * Dimension table for data providers (hts_data_provider).
 */
private final NamedDimensionDbTable _dataProviderTable;
/**
 * Dimension table for observation times (hts_observation_time).
 */
private final NamedDimensionDbTable _observationTimeTable;
/**
 * Worker that handles the data-point (time-series value) operations.
 */
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
 * Creates an instance.
 *
 * @param dbConnector the database connector, not null
 */
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share one id sequence (hts_dimension_seq)
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
 * Gets the dimension table helper for time-series names.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
 * Gets the dimension table helper for data fields.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
 * Gets the dimension table helper for data sources.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
 * Gets the dimension table helper for data providers.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
 * Gets the dimension table helper for observation times.
 *
 * @return the table, not null
 */
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
 * Gets the data points worker.
 *
 * @return the worker, not null
 */
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
// each dimension's name list is loaded only when explicitly requested
if (request.isDataFields()) {
result.setDataFields(getDataFieldTable().names());
}
if (request.isDataSources()) {
result.setDataSources(getDataSourceTable().names());
}
if (request.isDataProviders()) {
result.setDataProviders(getDataProviderTable().names());
}
if (request.isObservationTimes()) {
result.setObservationTimes(getObservationTimeTable().names());
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit: an empty object-id list or an unmatchable id search can return nothing
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
// null-valued criteria are simply omitted from the parameter source
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
// NOTE(review): this block re-adds the same key_scheme/key_value parameters bound in the
// loop above — redundant but harmless since identical values are written; confirm before removing
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
// strip the trailing ", " (list is known non-empty: the empty case returned early above)
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
* Gets the SQL to find all the ids for a single bundle.
* <p>
* This is too complex for the extsql mechanism.
*
* @param idSearch the identifier search, not null
* @return the SQL, not null
*/
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  // build "(key_scheme = :key_scheme0 AND key_value = :key_value0) OR (...) " directly,
  // one clause per identifier in the search
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    sql.append("(key_scheme = :key_scheme").append(i)
        .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
// a version string containing 'P' encodes instant+duration; decode it to a version-correction
// (see extractTimeSeriesInstants) and perform an object-id lookup instead
if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
return get(uniqueId.getObjectId(), vc);
}
return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
* Gets a document by object identifier at the given version-correction.
*
* @param objectId the object identifier, not null
* @param versionCorrection the version-correction locator, not null
* @return the document, not null
*/
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
* Queries the history of a single document, delegating to the base-class implementation.
*
* @param request the history request, not null
* @return the document history, not null
*/
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// duplicate guard: search for an existing row with the same field/source/provider/time
// and an exact external-id bundle match before inserting
HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
request.setDataField(document.getInfo().getDataField());
request.setDataSource(document.getInfo().getDataSource());
request.setDataProvider(document.getInfo().getDataProvider());
request.setObservationTime(document.getInfo().getObservationTime());
request.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
HistoricalTimeSeriesInfoSearchResult result = search(request);
if (result.getDocuments().size() > 0) {
throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
}
return super.add(document);
}
//-------------------------------------------------------------------------
/**
* Inserts a new document.
* <p>
* Writes the main document row plus the external-id association rows, creating
* new id-key rows only when no matching key already exists.
*
* @param document the document, not null
* @return the new document, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
final long docId = nextId("hts_master_seq");
// reuse the existing object id when updating, otherwise the row id doubles as the oid
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
// ensure() resolves the dimension name to its id, inserting the name if absent
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: document row first, then new id keys, then doc-to-key associations
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId; the time-series object id uses the "DP" prefixed variant of the same oid
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
* Gets the time-series data points for a versioned identifier over a date range,
* delegating to the data-points worker.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the inclusive start date, may be null
* @param toDateInclusive the inclusive end date, may be null
* @return the time-series, not null
*/
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
/**
* Gets the complete time-series (no date restriction) for an object identifier.
*
* @param objectId the object identifier, not null
* @param versionCorrection the version-correction locator, not null
* @return the time-series, not null
*/
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
// an open-ended range filter returns every data point
HistoricalTimeSeriesGetFilter filter = HistoricalTimeSeriesGetFilter.ofRange(null, null);
return getTimeSeries(objectId, versionCorrection, filter);
}
/**
* Gets a filtered time-series for a unique identifier.
*
* @param uniqueId the unique identifier, not null
* @param filter the data-point filter, not null
* @return the time-series, not null
*/
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
// versioned "DP"-prefixed ids encode their version/correction instants in the version string
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
* Gets a filtered time-series for an object identifier at a version-correction.
* <p>
* Loads the series metadata first, then the data points. The filter's max-points
* value selects all points (null), the first N (positive), the last N (negative)
* or no points at all (zero).
*
* @param objectId the object identifier, not null
* @param versionCorrection the version-correction locator, not null
* @param filter the data-point filter, not null
* @return the time-series, not null
* @throws DataNotFoundException if the time-series is not found
*/
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, skipping the query entirely when the date range is inverted (empty)
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
<<<<<<< MINE
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
=======
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc);
}
>>>>>>> YOURS
<<<<<<< MINE
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
=======
>>>>>>> YOURS
//-------------------------------------------------------------------------
/**
* Appends new data points to a time-series, delegating to the data-points worker.
*
* @param objectId the time-series object identifier, not null
* @param series the data points to add, not null
* @return the resulting unique identifier, not null
*/
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
// NOTE: the data-point validation ("checks the data points can be inserted") and the
// insertion logic formerly documented here are implemented in
// DbHistoricalTimeSeriesDataPointsWorker; this master delegates via getDataPointsWorker().
//-------------------------------------------------------------------------
/**
* Corrects existing data points of a time-series, delegating to the data-points worker.
*
* @param objectId the time-series object identifier, not null
* @param series the corrected data points, not null
* @return the resulting unique identifier, not null
*/
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
// NOTE: the data-point correction logic formerly documented here is implemented in
// DbHistoricalTimeSeriesDataPointsWorker (see correctTimeSeriesDataPoints above).
//-------------------------------------------------------------------------
/**
* Removes data points from a time-series over a date range, delegating to the
* data-points worker.
*
* @param objectId the time-series object identifier, not null
* @param fromDateInclusive the inclusive start date to remove from, may be null
* @param toDateInclusive the inclusive end date to remove to, may be null
* @return the resulting unique identifier, not null
*/
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
// NOTE: the data-point removal logic formerly documented here is implemented in
// DbHistoricalTimeSeriesDataPointsWorker (see removeTimeSeriesDataPoints above).
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
* Extracts the object row id from the object identifier, stripping any
* data-point ("DP") prefix.
*
* @param objectId the object identifier, not null
* @return the object row id
*/
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  // drop the "DP" data-point prefix, if present, before parsing the numeric row id
  final String raw = objectId.getObjectId().getValue();
  final String numeric = raw.startsWith(DATA_POINT_PREFIX) ? raw.substring(DATA_POINT_PREFIX.length()) : raw;
  try {
    return Long.parseLong(numeric);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
* Extracts the instants from the unique identifier.
* <p>
* The version string is encoded as an OffsetDateTime (the version instant)
* immediately followed by an ISO-8601 duration starting with 'P' (the offset
* of the correction instant from the version instant).
*
* @param uniqueId the unique identifier, not null
* @return the instants, version, correction, not null
* @throws IllegalArgumentException if the version string cannot be parsed
*/
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
try {
// split at the 'P' that begins the ISO-8601 duration
int pos = uniqueId.getVersion().indexOf('P');
String verStr = uniqueId.getVersion().substring(0, pos);
String corrStr = uniqueId.getVersion().substring(pos);
Instant ver = OffsetDateTime.parse(verStr).toInstant();
Instant corr = ver.plus(Duration.parse(corrStr));
return VersionCorrection.of(ver, corr);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
}
}
//-------------------------------------------------------------------------
// NOTE: resolution of a time-series object identifier to a unique identifier at a
// version-correction, formerly documented here, is handled elsewhere (worker/base class).
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
* <p>
* Expects the result set grouped by document id: the first row of each group
* creates the document, subsequent rows contribute additional external ids.
* Stateful — use a fresh instance per query.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// doc id of the row group currently being accumulated
private long _lastDocId = -1;
// info object of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
// a new doc id starts a new document
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// every row may carry one external id (with validity dates) for the current document
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// Builds a new document from the current row and appends it to the result list.
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the data-point series shares the oid with a "DP" prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* This class uses SQL via JDBC. The SQL may be changed by subclassing the relevant methods.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
* The default scheme for unique identifiers.
*/
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
* The prefix used for data point unique identifiers.
*/
protected static final String DATA_POINT_PREFIX = "DP";
/**
* Dimension table mapping time-series names to ids (hts_name).
*/
private final NamedDimensionDbTable _nameTable;
/**
* Dimension table mapping data-field names to ids (hts_data_field).
*/
private final NamedDimensionDbTable _dataFieldTable;
/**
* Dimension table mapping data-source names to ids (hts_data_source).
*/
private final NamedDimensionDbTable _dataSourceTable;
/**
* Dimension table mapping data-provider names to ids (hts_data_provider).
*/
private final NamedDimensionDbTable _dataProviderTable;
/**
* Dimension table mapping observation-time names to ids (hts_observation_time).
*/
private final NamedDimensionDbTable _observationTimeTable;
/**
* Worker that implements the data-point operations this master delegates to.
*/
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
// external SQL templates are resolved per-dialect from the bundle keyed on this class
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share the single hts_dimension_seq sequence
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
* Gets the dimension table helper for time-series names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
* Gets the dimension table helper for data-field names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
* Gets the dimension table helper for data-source names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
* Gets the dimension table helper for data-provider names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
* Gets the dimension table helper for observation-time names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
* Gets the worker that implements the data-point operations.
*
* @return the worker, not null
*/
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
// each dimension's name list is loaded only when explicitly requested
if (request.isDataFields()) {
result.setDataFields(getDataFieldTable().names());
}
if (request.isDataSources()) {
result.setDataSources(getDataSourceTable().names());
}
if (request.isDataProviders()) {
result.setDataProviders(getDataProviderTable().names());
}
if (request.isObservationTimes()) {
result.setObservationTimes(getObservationTimeTable().names());
}
return result;
}
//-------------------------------------------------------------------------
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit: an empty object-id list or an unmatchable id search can return nothing
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
// null-valued criteria are simply omitted from the parameter source
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
// NOTE(review): this block re-adds the same key_scheme/key_value parameters bound in the
// loop above — redundant but harmless since identical values are written; confirm before removing
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
// strip the trailing ", " (list is known non-empty: the empty case returned early above)
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
* Gets the SQL to find all the ids for a single bundle.
* <p>
* This is too complex for the extsql mechanism.
*
* @param idSearch the identifier search, not null
* @return the SQL, not null
*/
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  // build "(key_scheme = :key_scheme0 AND key_value = :key_value0) OR (...) " directly,
  // one clause per identifier in the search
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    sql.append("(key_scheme = :key_scheme").append(i)
        .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
// a version string containing 'P' encodes the version/correction instants
// (see extractTimeSeriesInstants), so resolve via the object-id lookup instead
if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
return get(uniqueId.getObjectId(), vc);
}
return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at the given version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a single document.
 *
 * @param request  the history request, not null
 * @return the document history, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new document, rejecting duplicates.
 * <p>
 * A search is performed first for an existing time-series with the same data field,
 * data source, data provider, observation time and an exactly matching identifier
 * bundle; if one exists the add is rejected.
 *
 * @param document  the document to add, not null
 * @return the added document, not null
 * @throws DataDuplicationException if an equivalent time-series already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getInfo(), "document.info");
  ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
  ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
  // search for an equivalent existing time-series before inserting
  HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
  request.setDataField(document.getInfo().getDataField());
  request.setDataSource(document.getInfo().getDataSource());
  request.setDataProvider(document.getInfo().getDataProvider());
  request.setObservationTime(document.getInfo().getObservationTime());
  request.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  HistoricalTimeSeriesInfoSearchResult result = search(request);
  if (!result.getDocuments().isEmpty()) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
//-------------------------------------------------------------------------
/**
* Inserts a new document.
*
* @param document the document, not null
* @return the new document, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
// allocate a new row id; reuse the existing oid when versioning an existing document
final long docId = nextId("hts_master_seq");
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()))
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
// every identifier gets a document-to-idkey association row
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: idkey rows must exist before the association rows that reference them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both the info and the document, and derive the data-points object id
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series data points for a unique identifier over a date range.
 * Delegates to the data points worker.
 *
 * @param uniqueId  the unique identifier, not null
 * @param fromDateInclusive  the inclusive start date, may be null
 * @param toDateInclusive  the inclusive end date, may be null
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
return getDataPointsWorker().getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
>>>>>>> YOURS
}
/**
 * Gets the complete time-series for an object identifier.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // an unbounded range filter returns every data point
  return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(null, null));
}
/**
 * Gets a filtered time-series for a unique identifier.
 * <p>
 * A versioned data-point identifier carries its own version/correction
 * instants; any other identifier is resolved at the latest version.
 *
 * @param uniqueId  the unique identifier, not null
 * @param filter  the filter restricting the data points returned, not null
 * @return the time-series, not null
 */
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  checkScheme(uniqueId);
  final boolean versionedDataPointId =
      uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX);
  final VersionCorrection vc =
      versionedDataPointId ? extractTimeSeriesInstants(uniqueId) : VersionCorrection.LATEST;
  return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
/**
 * Gets a filtered time-series for an object identifier, loading the metadata
 * and the data points directly via SQL.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @param filter  the filter restricting the data points returned, not null
 * @return the time-series, not null
 * @throws DataNotFoundException if the time-series cannot be found
 */
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return:
// null = all points, positive = first N, negative = last N, zero = none
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points, skipping the query entirely when the range is inverted (empty)
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
}
<<<<<<< MINE
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
}
=======
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc);
}
>>>>>>> YOURS
<<<<<<< MINE
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
}
=======
>>>>>>> YOURS
//-------------------------------------------------------------------------
/**
 * Updates (appends) data points of an existing time-series.
 * Delegates to the data points worker.
 *
 * @param objectId  the time-series object identifier, not null
 * @param series  the data points to add, not null
 * @return the unique identifier of the updated time-series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
/**
* Checks the data points can be inserted.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
*/
/**
* Inserts the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Corrects data points of an existing time-series.
 * Delegates to the data points worker.
 *
 * @param objectId  the time-series object identifier, not null
 * @param series  the corrected data points, not null
 * @return the unique identifier of the corrected time-series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
/**
* Corrects the data points.
*
* @param uniqueId the unique identifier, not null
* @param series the time-series data points, not empty, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
 * Removes data points from an existing time-series over a date range.
 * Delegates to the data points worker.
 *
 * @param objectId  the time-series object identifier, not null
 * @param fromDateInclusive  the inclusive start date, may be null
 * @param toDateInclusive  the inclusive end date, may be null
 * @return the unique identifier of the updated time-series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
/**
* Removes data points.
*
* @param uniqueId the unique identifier, not null
* @param fromDateInclusive the start date to remove from, not null
* @param toDateInclusive the end date to remove to, not null
* @param now the current instant, not null
* @return the unique identifier, not null
*/
//-------------------------------------------------------------------------
/**
* Creates a unique identifier.
*
* @param oid the object identifier
* @param verInstant the version instant, not null
* @param corrInstant the correction instant, not null
* @return the unique identifier
*/
/**
 * Extracts the object row id from the object identifier.
 * <p>
 * A leading data-point prefix ({@code DP}) is stripped before parsing,
 * so both document and data-point identifiers are accepted.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the object id is not numeric
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
  String value = objectId.getObjectId().getValue();
  if (value.startsWith(DATA_POINT_PREFIX)) {
    value = value.substring(DATA_POINT_PREFIX.length());
  }
  try {
    return Long.parseLong(value);
  } catch (NumberFormatException ex) {
    // narrowed from RuntimeException: parseLong only throws NumberFormatException
    throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
  }
}
/**
 * Extracts the version and correction instants from the unique identifier.
 * <p>
 * The version string is expected to be an offset date-time (the version instant)
 * immediately followed by a duration starting with 'P' (the correction offset
 * from the version instant).
 *
 * @param uniqueId  the unique identifier, not null
 * @return the instants, version, correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
try {
// split at the 'P' that starts the ISO-8601 duration
int pos = uniqueId.getVersion().indexOf('P');
String verStr = uniqueId.getVersion().substring(0, pos);
String corrStr = uniqueId.getVersion().substring(pos);
Instant ver = OffsetDateTime.parse(verStr).toInstant();
Instant corr = ver.plus(Duration.parse(corrStr));
return VersionCorrection.of(ver, corr);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
}
}
//-------------------------------------------------------------------------
/**
* Resolves an object identifier to a unique identifier.
*
* @param objectId the time-series object identifier, not null
* @param versionCorrection the version-correction locator to search at, not null
* @return the time-series, not null
*/
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// id of the document row currently being accumulated; -1 before the first row
private long _lastDocId = -1;
// info object of the document currently being accumulated
private ManageableHistoricalTimeSeriesInfo _info;
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
// rows are one-per-external-id; consecutive rows with the same DOC_ID belong
// to the same document, so a new document is started only when the id changes
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// accumulate this row's external id (if any) onto the current document
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// builds a new document from the current row and appends it to _documents
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the data-points series shares the oid with a DP prefix on the value
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import com.opengamma.DataDuplicationException;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* The SQL is stored externally in {@code DbHistoricalTimeSeriesMaster.extsql}.
* Alternate databases or specific SQL requirements can be handled using database
* specific overrides, such as {@code DbHistoricalTimeSeriesMaster-MySpecialDB.extsql}.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
* The default scheme for unique identifiers.
*/
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
* The prefix used for data point unique identifiers.
*/
protected static final String DATA_POINT_PREFIX = "DP";
/**
* Dimension table.
*/
private final NamedDimensionDbTable _nameTable;
/**
* Dimension table.
*/
private final NamedDimensionDbTable _dataFieldTable;
/**
* Dimension table.
*/
private final NamedDimensionDbTable _dataSourceTable;
/**
* Dimension table.
*/
private final NamedDimensionDbTable _dataProviderTable;
/**
* Dimension table.
*/
private final NamedDimensionDbTable _observationTimeTable;
/**
* Worker.
*/
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
// all dimension tables share one sequence for their ids
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
// worker handling data-point level operations
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
* Gets the dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getNameTable() {
return _nameTable;  // dimension table for time-series names
}
/**
* Gets the dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;  // dimension table for data fields
}
/**
* Gets the dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;  // dimension table for data sources
}
/**
* Gets the dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;  // dimension table for data providers
}
/**
* Gets the dimension table helper.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;  // dimension table for observation times
}
/**
* Gets the data points worker.
*
* @return the worker, not null
*/
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;  // worker for data-point level operations
}
//-------------------------------------------------------------------------
/**
 * Queries the meta-data, returning only the dimension name lists
 * that the request asks for (each flag triggers a table query).
 *
 * @param request  the meta-data request, not null
 * @return the meta-data result, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
if (request.isDataFields()) {
result.setDataFields(getDataFieldTable().names());
}
if (request.isDataSources()) {
result.setDataSources(getDataSourceTable().names());
}
if (request.isDataProviders()) {
result.setDataProviders(getDataProviderTable().names());
}
if (request.isObservationTimes()) {
result.setObservationTimes(getObservationTimeTable().names());
}
return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for documents matching the request, with paging.
 *
 * @param request  the search request, not null
 * @return the search result, not null
 */
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit: an explicit empty object-id list or an unmatchable id search
// can never return rows, so skip the database entirely
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
// NOTE(review): this loop duplicates the one in the alwaysMatches()==false branch
// below with identical key/value pairs — looks redundant; confirm whether the SQL
// references these parameters when alwaysMatches() is true before removing
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
// build a comma-separated oid list; safe because the early return above
// guarantees the list is non-empty here
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
 * Builds the SQL fragment that matches any one of the identifiers in the search.
 * <p>
 * This clause is too complex for the extsql mechanism, so it is generated here.
 * Each identifier is bound to the named parameters {@code key_schemeN}/{@code key_valueN}.
 *
 * @param idSearch  the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder sql = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      sql.append("OR ");
    }
    sql.append("(key_scheme = :key_scheme").append(i)
       .append(" AND key_value = :key_value").append(i).append(") ");
  }
  return sql.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 *
 * @param uniqueId  the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
// a version string containing 'P' encodes the version/correction instants
// (see extractTimeSeriesInstants), so resolve via the object-id lookup instead
if (uniqueId.getVersion() != null && uniqueId.getVersion().contains("P")) {
VersionCorrection vc = extractTimeSeriesInstants(uniqueId);
return get(uniqueId.getObjectId(), vc);
}
return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at the given version-correction.
 *
 * @param objectId  the object identifier, not null
 * @param versionCorrection  the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
return doGetByOidInstants(objectId, versionCorrection, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Queries the history of a single document.
 *
 * @param request  the history request, not null
 * @return the document history, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
return doHistory(request, new HistoricalTimeSeriesInfoHistoryResult(), new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new document, rejecting duplicates.
 * <p>
 * A search is performed first for an existing time-series with the same data field,
 * data source, data provider, observation time and an exactly matching identifier
 * bundle; if one exists the add is rejected.
 *
 * @param document  the document to add, not null
 * @return the added document, not null
 * @throws DataDuplicationException if an equivalent time-series already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getInfo(), "document.info");
  ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
  ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
  // search for an equivalent existing time-series before inserting
  HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
  request.setDataField(document.getInfo().getDataField());
  request.setDataSource(document.getInfo().getDataSource());
  request.setDataProvider(document.getInfo().getDataProvider());
  request.setObservationTime(document.getInfo().getObservationTime());
  request.setExternalIdSearch(new ExternalIdSearch(document.getInfo().getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  HistoricalTimeSeriesInfoSearchResult result = search(request);
  if (!result.getDocuments().isEmpty()) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
/**
* Inserts a new document row together with its external-id key rows.
* <p>
* Dimension values (name, data field, data source, data provider, observation time)
* are normalised through their dimension tables via {@code ensure}, and external-id
* keys are de-duplicated against the shared idkey table before insertion.
*
* @param document the document to insert, not null
* @return the same document, updated with its new unique identifier, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
final long docId = nextId("hts_master_seq");
// reuse the existing object id when this insert is a new version of an existing document
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
// one doc-to-idkey association row per external id, with its validity window
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: idkey rows must exist before the doc-to-idkey association rows reference them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both the info and the document; the time-series object id
// is the document object id with the "DP" prefix added
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series data points for a unique identifier within a date range,
 * delegating to the data-points worker.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the inclusive start date
 * @param toDateInclusive the inclusive end date
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
}
//-------------------------------------------------------------------------
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc);
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
}
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
>>>>>>> YOURS
}
<<<<<<< MINE
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
HistoricalTimeSeriesGetFilter filter = HistoricalTimeSeriesGetFilter.ofRange(null, null);
return getTimeSeries(objectId, versionCorrection, filter);
}
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
=======
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
>>>>>>> YOURS
}
/**
 * Updates the data points of a time-series, delegating to the data-points worker.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the series of points to apply, not null
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.updateTimeSeriesDataPoints(objectId, series);
}

/**
 * Corrects the data points of a time-series, delegating to the data-points worker.
 *
 * @param objectId the time-series object identifier, not null
 * @param series the series of corrections to apply, not null
 * @return the unique identifier of the corrected series, not null
 */
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.correctTimeSeriesDataPoints(objectId, series);
}

/**
 * Removes data points from a time-series, delegating to the data-points worker.
 *
 * @param objectId the time-series object identifier, not null
 * @param fromDateInclusive the inclusive start of the range to remove
 * @param toDateInclusive the inclusive end of the range to remove
 * @return the unique identifier of the updated series, not null
 */
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
* Extracts the object row id from the object identifier.
* <p>
* Data-point identifiers share the scheme but carry a "DP" prefix, which is
* stripped before the numeric row id is parsed.
*
* @param objectId the object identifier, not null
* @return the object row id
* @throws IllegalArgumentException if the value is non-numeric, i.e. not from this master
*/
@Override
protected long extractOid(ObjectIdentifiable objectId) {
String value = objectId.getObjectId().getValue();
// strip the data-point prefix so both id forms resolve to the same row
if (value.startsWith(DATA_POINT_PREFIX)) {
value = value.substring(DATA_POINT_PREFIX.length());
}
try {
return Long.parseLong(value);
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
}
}
/**
 * Extracts the version and correction instants encoded in a unique identifier.
 * <p>
 * The version string is an ISO-8601 version instant followed by the correction
 * offset as an ISO-8601 duration (starting with 'P').
 *
 * @param uniqueId the unique identifier, not null
 * @return the version-correction, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
  try {
    final String version = uniqueId.getVersion();
    // the duration part always begins at the 'P' of the ISO-8601 period
    final int durationStart = version.indexOf('P');
    final Instant versionInstant = OffsetDateTime.parse(version.substring(0, durationStart)).toInstant();
    final Instant correctionInstant = versionInstant.plus(Duration.parse(version.substring(durationStart)));
    return VersionCorrection.of(versionInstant, correctionInstant);
  } catch (RuntimeException ex) {
    throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
  }
}
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
* <p>
* The result set is ordered by document id: the first row of each document
* creates the document, and subsequent rows with the same id only contribute
* additional external-id entries. Not thread-safe; use one instance per query.
*/
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// id of the document currently being accumulated; -1 before the first row
private long _lastDocId = -1;
// info of the document currently being accumulated
private ManageableHistoricalTimeSeriesInfo _info;
// completed documents, in result-set order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
// start a new document whenever the id changes
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// every row may carry one external id for the current document
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// creates the document for the current row and appends it to the result list
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
// external ids are accumulated row by row in extractData
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the time-series object id is the document object id with the "DP" prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.masterdb.historicaltimeseries;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;

import javax.time.Duration;
import javax.time.Instant;
import javax.time.calendar.LocalDate;
import javax.time.calendar.OffsetDateTime;

import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;

// merge conflict resolved: both branches' imports are retained, since the merged
// code references DataDuplicationException, DataNotFoundException and
// HistoricalTimeSeriesSummary (ChangeType kept from the other branch as well)
import com.opengamma.DataDuplicationException;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSummary;
import com.opengamma.extsql.ExtSqlBundle;
import com.opengamma.id.ExternalId;
import com.opengamma.id.ExternalIdBundleWithDates;
import com.opengamma.id.ExternalIdSearch;
import com.opengamma.id.ExternalIdSearchType;
import com.opengamma.id.ExternalIdWithDates;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesGetFilter;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoDocument;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoHistoryResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoMetaDataResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchRequest;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesInfoSearchResult;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeries;
import com.opengamma.master.historicaltimeseries.ManageableHistoricalTimeSeriesInfo;
import com.opengamma.masterdb.AbstractDocumentDbMaster;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.db.DbConnector;
import com.opengamma.util.db.DbDateUtils;
import com.opengamma.util.db.DbMapSqlParameterSource;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.timeseries.localdate.LocalDateDoubleTimeSeries;
/**
* A time-series master implementation using a database for persistence.
* <p>
* This is a full implementation of the time-series master using an SQL database.
* Full details of the API are in {@link HistoricalTimeSeriesMaster}.
* <p>
* This implementation uses two linked unique identifiers, one for the document
* and one for the time-series. They share the same scheme, but have different values
* and versions. All the methods accept both formats although where possible they
* should be treated separately.
* <p>
* The SQL is stored externally in {@code DbHistoricalTimeSeriesMaster.extsql}.
* Alternate databases or specific SQL requirements can be handled using database
* specific overrides, such as {@code DbHistoricalTimeSeriesMaster-MySpecialDB.extsql}.
* <p>
* This class is mutable but must be treated as immutable after configuration.
*/
public class DbHistoricalTimeSeriesMaster extends AbstractDocumentDbMaster<HistoricalTimeSeriesInfoDocument> implements HistoricalTimeSeriesMaster {
/** Logger. */
private static final Logger s_logger = LoggerFactory.getLogger(DbHistoricalTimeSeriesMaster.class);
/**
* The default scheme for unique identifiers.
*/
public static final String IDENTIFIER_SCHEME_DEFAULT = "DbHts";
/**
* The prefix used for data point unique identifiers.
*/
protected static final String DATA_POINT_PREFIX = "DP";
/**
* Dimension table for time-series names.
*/
private final NamedDimensionDbTable _nameTable;
/**
* Dimension table for data fields.
*/
private final NamedDimensionDbTable _dataFieldTable;
/**
* Dimension table for data sources.
*/
private final NamedDimensionDbTable _dataSourceTable;
/**
* Dimension table for data providers.
*/
private final NamedDimensionDbTable _dataProviderTable;
/**
* Dimension table for observation times.
*/
private final NamedDimensionDbTable _observationTimeTable;
/**
* Worker that reads and writes the actual data points.
*/
private final DbHistoricalTimeSeriesDataPointsWorker _dataPointsWorker;
/**
* Creates an instance.
*
* @param dbConnector the database connector, not null
*/
public DbHistoricalTimeSeriesMaster(final DbConnector dbConnector) {
super(dbConnector, IDENTIFIER_SCHEME_DEFAULT);
setExtSqlBundle(ExtSqlBundle.of(dbConnector.getDialect().getExtSqlConfig(), DbHistoricalTimeSeriesMaster.class));
_nameTable = new NamedDimensionDbTable(dbConnector, "name", "hts_name", "hts_dimension_seq");
_dataFieldTable = new NamedDimensionDbTable(dbConnector, "data_field", "hts_data_field", "hts_dimension_seq");
_dataSourceTable = new NamedDimensionDbTable(dbConnector, "data_source", "hts_data_source", "hts_dimension_seq");
_dataProviderTable = new NamedDimensionDbTable(dbConnector, "data_provider", "hts_data_provider", "hts_dimension_seq");
_observationTimeTable = new NamedDimensionDbTable(dbConnector, "observation_time", "hts_observation_time", "hts_dimension_seq");
_dataPointsWorker = new DbHistoricalTimeSeriesDataPointsWorker(this);
}
//-------------------------------------------------------------------------
/**
* Gets the dimension table helper for time-series names.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getNameTable() {
return _nameTable;
}
/**
* Gets the dimension table helper for data fields.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataFieldTable() {
return _dataFieldTable;
}
/**
* Gets the dimension table helper for data sources.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataSourceTable() {
return _dataSourceTable;
}
/**
* Gets the dimension table helper for data providers.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getDataProviderTable() {
return _dataProviderTable;
}
/**
* Gets the dimension table helper for observation times.
*
* @return the table, not null
*/
protected NamedDimensionDbTable getObservationTimeTable() {
return _observationTimeTable;
}
/**
* Gets the data points worker.
*
* @return the worker, not null
*/
protected DbHistoricalTimeSeriesDataPointsWorker getDataPointsWorker() {
return _dataPointsWorker;
}
//-------------------------------------------------------------------------
/**
 * Gets the meta-data values available for searching, one list per requested dimension.
 *
 * @param request the meta-data request, not null
 * @return the result populated with the requested dimension names, not null
 */
@Override
public HistoricalTimeSeriesInfoMetaDataResult metaData(HistoricalTimeSeriesInfoMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final HistoricalTimeSeriesInfoMetaDataResult result = new HistoricalTimeSeriesInfoMetaDataResult();
  // each flag pulls the distinct values of one dimension table
  if (request.isDataFields()) {
    result.setDataFields(getDataFieldTable().names());
  }
  if (request.isDataSources()) {
    result.setDataSources(getDataSourceTable().names());
  }
  if (request.isDataProviders()) {
    result.setDataProviders(getDataProviderTable().names());
  }
  if (request.isObservationTimes()) {
    result.setObservationTimes(getObservationTimeTable().names());
  }
  return result;
}
//-------------------------------------------------------------------------
/**
* Searches for time-series info documents matching the request, with paging.
*
* @param request the search request, not null
* @return the paged search result, not null
*/
@Override
public HistoricalTimeSeriesInfoSearchResult search(final HistoricalTimeSeriesInfoSearchRequest request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getPagingRequest(), "request.pagingRequest");
ArgumentChecker.notNull(request.getVersionCorrection(), "request.versionCorrection");
s_logger.debug("search {}", request);
final HistoricalTimeSeriesInfoSearchResult result = new HistoricalTimeSeriesInfoSearchResult();
final List<ObjectId> objectIds = request.getObjectIds();
final ExternalIdSearch externalIdSearch = request.getExternalIdSearch();
// short-circuit requests that can never match anything
if ((objectIds != null && objectIds.size() == 0) ||
(ExternalIdSearch.canMatch(externalIdSearch) == false)) {
result.setPaging(Paging.of(request.getPagingRequest(), 0));
return result;
}
final VersionCorrection vc = request.getVersionCorrection().withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource();
args.addTimestamp("version_as_of_instant", vc.getVersionAsOf());
args.addTimestamp("corrected_to_instant", vc.getCorrectedTo());
// free-text criteria are wildcard-adjusted for the database dialect; nulls are skipped
args.addValueNullIgnored("name", getDialect().sqlWildcardAdjustValue(request.getName()));
args.addValueNullIgnored("data_field", getDialect().sqlWildcardAdjustValue(request.getDataField()));
args.addValueNullIgnored("data_source", getDialect().sqlWildcardAdjustValue(request.getDataSource()));
args.addValueNullIgnored("data_provider", getDialect().sqlWildcardAdjustValue(request.getDataProvider()));
args.addValueNullIgnored("observation_time", getDialect().sqlWildcardAdjustValue(request.getObservationTime()));
args.addDateNullIgnored("id_validity_date", request.getValidityDate());
args.addValueNullIgnored("external_id_value", getDialect().sqlWildcardAdjustValue(request.getExternalIdValue()));
// NOTE(review): this loop is repeated in the branch below for the non-always-matching
// case; the second pass overwrites the same parameters with identical values, so
// behaviour is unchanged, but one of the two loops looks redundant - confirm against
// the Search SQL templates before removing either.
if (externalIdSearch != null) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
}
if (externalIdSearch != null && externalIdSearch.alwaysMatches() == false) {
int i = 0;
for (ExternalId id : externalIdSearch) {
args.addValue("key_scheme" + i, id.getScheme().getName());
args.addValue("key_value" + i, id.getValue());
i++;
}
args.addValue("sql_search_external_ids_type", externalIdSearch.getSearchType());
args.addValue("sql_search_external_ids", sqlSelectIdKeys(externalIdSearch));
args.addValue("id_search_size", externalIdSearch.getExternalIds().size());
}
if (objectIds != null) {
// build a comma-separated list of row ids for the object-id restriction
StringBuilder buf = new StringBuilder(objectIds.size() * 10);
for (ObjectId objectId : objectIds) {
checkScheme(objectId);
buf.append(extractOid(objectId)).append(", ");
}
buf.setLength(buf.length() - 2);
args.addValue("sql_search_object_ids", buf.toString());
}
args.addValue("paging_offset", request.getPagingRequest().getFirstItem());
args.addValue("paging_fetch", request.getPagingRequest().getPagingSize());
String[] sql = {getExtSqlBundle().getSql("Search", args), getExtSqlBundle().getSql("SearchCount", args)};
searchWithPaging(request.getPagingRequest(), sql, args, new HistoricalTimeSeriesDocumentExtractor(), result);
return result;
}
/**
 * Gets the SQL to find all the ids for a single bundle.
 * <p>
 * Produces one {@code (key_scheme = :key_schemeN AND key_value = :key_valueN)}
 * clause per identifier, joined with {@code OR}. This is too complex for the
 * extsql mechanism.
 *
 * @param idSearch the identifier search, not null
 * @return the SQL, not null
 */
protected String sqlSelectIdKeys(final ExternalIdSearch idSearch) {
  final StringBuilder buf = new StringBuilder();
  for (int i = 0; i < idSearch.size(); i++) {
    if (i > 0) {
      buf.append("OR ");
    }
    buf.append("(key_scheme = :key_scheme").append(i).append(" AND key_value = :key_value").append(i).append(") ");
  }
  return buf.toString();
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier.
 * <p>
 * Identifiers whose version contains 'P' encode the version-correction
 * instants directly and are resolved by object id instead.
 *
 * @param uniqueId the unique identifier, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final String version = uniqueId.getVersion();
  if (version != null && version.contains("P")) {
    return get(uniqueId.getObjectId(), extractTimeSeriesInstants(uniqueId));
  }
  return doGet(uniqueId, new HistoricalTimeSeriesDocumentExtractor(), "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier at a version-correction.
 *
 * @param objectId the object identifier, not null
 * @param versionCorrection the version-correction locator, not null
 * @return the document, not null
 */
@Override
public HistoricalTimeSeriesInfoDocument get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  final HistoricalTimeSeriesDocumentExtractor extractor = new HistoricalTimeSeriesDocumentExtractor();
  return doGetByOidInstants(objectId, versionCorrection, extractor, "HistoricalTimeSeries");
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a document over versions and corrections.
 *
 * @param request the history request, not null
 * @return the history result, not null
 */
@Override
public HistoricalTimeSeriesInfoHistoryResult history(final HistoricalTimeSeriesInfoHistoryRequest request) {
  final HistoricalTimeSeriesInfoHistoryResult result = new HistoricalTimeSeriesInfoHistoryResult();
  return doHistory(request, result, new HistoricalTimeSeriesDocumentExtractor());
}
//-------------------------------------------------------------------------
/**
 * Adds a new document, rejecting duplicates.
 * <p>
 * A document is treated as a duplicate when a row already exists with the same
 * data field, data source, data provider, observation time and exactly the
 * same external id bundle.
 *
 * @param document the document to add, not null
 * @return the added document, not null
 * @throws DataDuplicationException if a similar row already exists
 */
@Override
public HistoricalTimeSeriesInfoDocument add(HistoricalTimeSeriesInfoDocument document) {
  ArgumentChecker.notNull(document, "document");
  final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
  ArgumentChecker.notNull(info, "document.info");
  ArgumentChecker.notNull(info.getName(), "document.info.name");
  ArgumentChecker.notNull(info.getDataField(), "document.info.dataField");
  ArgumentChecker.notNull(info.getDataSource(), "document.info.dataSource");
  ArgumentChecker.notNull(info.getDataProvider(), "document.info.dataProvider");
  ArgumentChecker.notNull(info.getObservationTime(), "document.info.observationTime");
  // search for an identical row before inserting
  final HistoricalTimeSeriesInfoSearchRequest request = new HistoricalTimeSeriesInfoSearchRequest();
  request.setDataField(info.getDataField());
  request.setDataSource(info.getDataSource());
  request.setDataProvider(info.getDataProvider());
  request.setObservationTime(info.getObservationTime());
  request.setExternalIdSearch(new ExternalIdSearch(info.getExternalIdBundle().toBundle(), ExternalIdSearchType.EXACT));
  final HistoricalTimeSeriesInfoSearchResult result = search(request);
  if (result.getDocuments().isEmpty() == false) {
    throw new DataDuplicationException("Unable to add as similar row exists already: " + result.getDocuments().get(0).getObjectId());
  }
  return super.add(document);
}
/**
* Inserts a new document row together with its external-id key rows.
* <p>
* Dimension values (name, data field, data source, data provider, observation time)
* are normalised through their dimension tables via {@code ensure}, and external-id
* keys are de-duplicated against the shared idkey table before insertion.
*
* @param document the document to insert, not null
* @return the same document, updated with its new unique identifier, not null
*/
@Override
protected HistoricalTimeSeriesInfoDocument insert(final HistoricalTimeSeriesInfoDocument document) {
ArgumentChecker.notNull(document.getInfo(), "document.info");
ArgumentChecker.notNull(document.getInfo().getName(), "document.info.name");
ArgumentChecker.notNull(document.getInfo().getDataField(), "document.info.dataField");
ArgumentChecker.notNull(document.getInfo().getDataSource(), "document.info.dataSource");
ArgumentChecker.notNull(document.getInfo().getDataProvider(), "document.info.dataProvider");
ArgumentChecker.notNull(document.getInfo().getObservationTime(), "document.info.observationTime");
final long docId = nextId("hts_master_seq");
// reuse the existing object id when this insert is a new version of an existing document
final long docOid = (document.getUniqueId() != null ? extractOid(document.getUniqueId()) : docId);
// the arguments for inserting into the table
final ManageableHistoricalTimeSeriesInfo info = document.getInfo();
final DbMapSqlParameterSource docArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("doc_oid", docOid)
.addTimestamp("ver_from_instant", document.getVersionFromInstant())
.addTimestampNullFuture("ver_to_instant", document.getVersionToInstant())
.addTimestamp("corr_from_instant", document.getCorrectionFromInstant())
.addTimestampNullFuture("corr_to_instant", document.getCorrectionToInstant())
.addValue("name_id", getNameTable().ensure(info.getName()))
.addValue("data_field_id", getDataFieldTable().ensure(info.getDataField()))
.addValue("data_source_id", getDataSourceTable().ensure(info.getDataSource()))
.addValue("data_provider_id", getDataProviderTable().ensure(info.getDataProvider()))
.addValue("observation_time_id", getObservationTimeTable().ensure(info.getObservationTime()));
// the arguments for inserting into the idkey tables
final List<DbMapSqlParameterSource> assocList = new ArrayList<DbMapSqlParameterSource>();
final List<DbMapSqlParameterSource> idKeyList = new ArrayList<DbMapSqlParameterSource>();
final String sqlSelectIdKey = getExtSqlBundle().getSql("SelectIdKey");
for (ExternalIdWithDates id : info.getExternalIdBundle()) {
// one doc-to-idkey association row per external id, with its validity window
final DbMapSqlParameterSource assocArgs = new DbMapSqlParameterSource()
.addValue("doc_id", docId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue())
.addValue("valid_from", DbDateUtils.toSqlDateNullFarPast(id.getValidFrom()))
.addValue("valid_to", DbDateUtils.toSqlDateNullFarFuture(id.getValidTo()));
assocList.add(assocArgs);
if (getJdbcTemplate().queryForList(sqlSelectIdKey, assocArgs).isEmpty()) {
// select avoids creating unnecessary id, but id may still not be used
final long idKeyId = nextId("hts_idkey_seq");
final DbMapSqlParameterSource idkeyArgs = new DbMapSqlParameterSource()
.addValue("idkey_id", idKeyId)
.addValue("key_scheme", id.getExternalId().getScheme().getName())
.addValue("key_value", id.getExternalId().getValue());
idKeyList.add(idkeyArgs);
}
}
// insert: idkey rows must exist before the doc-to-idkey association rows reference them
final String sqlDoc = getExtSqlBundle().getSql("Insert", docArgs);
final String sqlIdKey = getExtSqlBundle().getSql("InsertIdKey");
final String sqlDoc2IdKey = getExtSqlBundle().getSql("InsertDoc2IdKey");
getJdbcTemplate().update(sqlDoc, docArgs);
getJdbcTemplate().batchUpdate(sqlIdKey, idKeyList.toArray(new DbMapSqlParameterSource[idKeyList.size()]));
getJdbcTemplate().batchUpdate(sqlDoc2IdKey, assocList.toArray(new DbMapSqlParameterSource[assocList.size()]));
// set the uniqueId on both the info and the document; the time-series object id
// is the document object id with the "DP" prefix added
final UniqueId uniqueId = createUniqueId(docOid, docId);
info.setUniqueId(uniqueId);
document.setUniqueId(uniqueId);
document.getInfo().setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets the time-series data points for a unique identifier within a date range,
 * delegating to the data-points worker.
 *
 * @param uniqueId the unique identifier, not null
 * @param fromDateInclusive the inclusive start date
 * @param toDateInclusive the inclusive end date
 * @return the time-series, not null
 */
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
    UniqueId uniqueId, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
  final DbHistoricalTimeSeriesDataPointsWorker worker = getDataPointsWorker();
  return worker.getTimeSeries(uniqueId, fromDateInclusive, toDateInclusive);
}
@Override
public ManageableHistoricalTimeSeries getTimeSeries(
ObjectIdentifiable objectId, VersionCorrection versionCorrection, LocalDate fromDateInclusive, LocalDate toDateInclusive) {
<<<<<<< MINE
return getTimeSeries(objectId, versionCorrection, HistoricalTimeSeriesGetFilter.ofRange(fromDateInclusive, toDateInclusive));
}
//-------------------------------------------------------------------------
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc);
=======
return getDataPointsWorker().getTimeSeries(objectId, versionCorrection, fromDateInclusive, toDateInclusive);
}
@Override
public HistoricalTimeSeriesSummary getSummary(UniqueId uniqueId) {
return getDataPointsWorker().getSummary(uniqueId);
>>>>>>> YOURS
}
<<<<<<< MINE
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
HistoricalTimeSeriesGetFilter filter = HistoricalTimeSeriesGetFilter.ofRange(null, null);
return getTimeSeries(objectId, versionCorrection, filter);
}
public ManageableHistoricalTimeSeries getTimeSeries(UniqueId uniqueId, HistoricalTimeSeriesGetFilter filter) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
checkScheme(uniqueId);
final VersionCorrection vc;
if (uniqueId.isVersioned() && uniqueId.getValue().startsWith(DATA_POINT_PREFIX)) {
vc = extractTimeSeriesInstants(uniqueId);
} else {
vc = VersionCorrection.LATEST;
}
return getTimeSeries(uniqueId.getObjectId(), vc, filter);
}
public ManageableHistoricalTimeSeries getTimeSeries(ObjectIdentifiable objectId, VersionCorrection versionCorrection, HistoricalTimeSeriesGetFilter filter) {
final long oid = extractOid(objectId);
final VersionCorrection vc = versionCorrection.withLatestFixed(now());
final DbMapSqlParameterSource args = new DbMapSqlParameterSource()
.addValue("doc_oid", oid)
.addTimestamp("version_as_of_instant", vc.getVersionAsOf())
.addTimestamp("corrected_to_instant", vc.getCorrectedTo())
.addValue("start_date", DbDateUtils.toSqlDateNullFarPast(filter.getEarliestDate()))
.addValue("end_date", DbDateUtils.toSqlDateNullFarFuture(filter.getLatestDate()));
final NamedParameterJdbcOperations namedJdbc = getDbConnector().getJdbcTemplate().getNamedParameterJdbcOperations();
// get metadata
final String sqlCommon = getExtSqlBundle().getSql("SelectDataPointsCommon", args);
ManageableHistoricalTimeSeries result = namedJdbc.query(sqlCommon, args, new ManageableHTSExtractor(oid));
if (result == null) {
throw new DataNotFoundException("Unable to find time-series: " + objectId);
}
// set up limit on number of points to return
if (filter.getMaxPoints() == null) {
// return all points (limit all)
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() > 0) {
// return first few points
args.addValue("paging_fetch", filter.getMaxPoints());
args.addValue("order", "ASC");
} else if (filter.getMaxPoints() < 0) {
// return last few points
args.addValue("paging_fetch", -filter.getMaxPoints());
args.addValue("order", "DESC");
} else {
// Zero datapoints requested
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
return result;
}
// get data points
if (filter.getLatestDate() == null || filter.getEarliestDate() == null || !filter.getLatestDate().isBefore(filter.getEarliestDate())) {
final String sqlPoints = getExtSqlBundle().getSql("SelectDataPoints", args);
LocalDateDoubleTimeSeries series = namedJdbc.query(sqlPoints, args, new DataPointsExtractor());
result.setTimeSeries(series);
} else {
//TODO: this is a hack, most of the places that call with this condition want some kind of metadata, which it would be cheaper for us to expose specifically
result.setTimeSeries(new ArrayLocalDateDoubleTimeSeries());
}
return result;
=======
public HistoricalTimeSeriesSummary getSummary(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
return getDataPointsWorker().getSummary(objectId, versionCorrection);
>>>>>>> YOURS
}
// Adds/updates data points on an existing series; delegated to the data-points worker.
@Override
public UniqueId updateTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().updateTimeSeriesDataPoints(objectId, series);
}
// Corrects existing data points (a correction, not a new version); delegated to the worker.
@Override
public UniqueId correctTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDateDoubleTimeSeries series) {
return getDataPointsWorker().correctTimeSeriesDataPoints(objectId, series);
}
// Removes data points in the inclusive date range; delegated to the worker.
@Override
public UniqueId removeTimeSeriesDataPoints(final ObjectIdentifiable objectId, final LocalDate fromDateInclusive, final LocalDate toDateInclusive) {
return getDataPointsWorker().removeTimeSeriesDataPoints(objectId, fromDateInclusive, toDateInclusive);
}
//-------------------------------------------------------------------------
/**
 * Extracts the object row id from the object identifier.
 * Data-point identifiers carry a prefix which is stripped before parsing.
 *
 * @param objectId  the object identifier, not null
 * @return the object row id
 * @throws IllegalArgumentException if the identifier is not from this master
 */
@Override
protected long extractOid(ObjectIdentifiable objectId) {
String value = objectId.getObjectId().getValue();
if (value.startsWith(DATA_POINT_PREFIX)) {
value = value.substring(DATA_POINT_PREFIX.length());
}
try {
return Long.parseLong(value);
// broad catch is deliberate: any runtime failure here means the id is foreign
} catch (RuntimeException ex) {
throw new IllegalArgumentException("UniqueId is not from this master (non-numeric object id): " + objectId, ex);
}
}
/**
 * Extracts the version-correction instants from the unique identifier.
 * The version string is an ISO offset date-time (the version instant)
 * immediately followed by an ISO duration starting with 'P' (the offset
 * from version to correction instant).
 *
 * @param uniqueId  the unique identifier, not null
 * @return the version-correction encoded in the identifier, not null
 * @throws IllegalArgumentException if the version string cannot be parsed
 */
protected VersionCorrection extractTimeSeriesInstants(UniqueId uniqueId) {
try {
// split at the first 'P', which starts the ISO-8601 duration part
final int durationStart = uniqueId.getVersion().indexOf('P');
final String versionPart = uniqueId.getVersion().substring(0, durationStart);
final String correctionPart = uniqueId.getVersion().substring(durationStart);
final Instant versionInstant = OffsetDateTime.parse(versionPart).toInstant();
final Instant correctionInstant = versionInstant.plus(Duration.parse(correctionPart));
return VersionCorrection.of(versionInstant, correctionInstant);
} catch (RuntimeException ex) {
// any parse/index failure means the id was not minted by this master
throw new IllegalArgumentException("UniqueId is not from this master (invalid version): " + uniqueId, ex);
}
}
//-------------------------------------------------------------------------
/**
* Mapper from SQL rows to a HistoricalTimeSeriesInfoDocument.
*/
/**
 * Spring {@code ResultSetExtractor} mapping SQL rows to {@code HistoricalTimeSeriesInfoDocument}s.
 * Rows are expected ordered by DOC_ID: a new document is started whenever the
 * DOC_ID changes, and subsequent rows with the same DOC_ID contribute external
 * identifiers to that document's bundle. Stateful — use one instance per query.
 */
protected final class HistoricalTimeSeriesDocumentExtractor implements ResultSetExtractor<List<HistoricalTimeSeriesInfoDocument>> {
// last DOC_ID seen; -1 sentinel forces document creation on the first row
private long _lastDocId = -1;
// info object of the document currently being built
private ManageableHistoricalTimeSeriesInfo _info;
// accumulated documents, in result-set order
private List<HistoricalTimeSeriesInfoDocument> _documents = new ArrayList<HistoricalTimeSeriesInfoDocument>();
@Override
public List<HistoricalTimeSeriesInfoDocument> extractData(final ResultSet rs) throws SQLException, DataAccessException {
while (rs.next()) {
final long docId = rs.getLong("DOC_ID");
// row belongs to a new document: build the document skeleton
if (_lastDocId != docId) {
_lastDocId = docId;
buildHistoricalTimeSeries(rs, docId);
}
// every row (including the first of a document) may carry an external id
final String idScheme = rs.getString("KEY_SCHEME");
final String idValue = rs.getString("KEY_VALUE");
final LocalDate validFrom = DbDateUtils.fromSqlDateNullFarPast(rs.getDate("KEY_VALID_FROM"));
final LocalDate validTo = DbDateUtils.fromSqlDateNullFarFuture(rs.getDate("KEY_VALID_TO"));
if (idScheme != null && idValue != null) {
ExternalIdWithDates id = ExternalIdWithDates.of(ExternalId.of(idScheme, idValue), validFrom, validTo);
_info.setExternalIdBundle(_info.getExternalIdBundle().withExternalId(id));
}
}
return _documents;
}
// Builds a new document from the current row and appends it to _documents,
// leaving _info pointing at it so later rows can add external ids.
private void buildHistoricalTimeSeries(final ResultSet rs, final long docId) throws SQLException {
final long docOid = rs.getLong("DOC_OID");
final Timestamp versionFrom = rs.getTimestamp("VER_FROM_INSTANT");
final Timestamp versionTo = rs.getTimestamp("VER_TO_INSTANT");
final Timestamp correctionFrom = rs.getTimestamp("CORR_FROM_INSTANT");
final Timestamp correctionTo = rs.getTimestamp("CORR_TO_INSTANT");
final String name = rs.getString("NAME");
final String dataField = rs.getString("DATA_FIELD");
final String dataSource = rs.getString("DATA_SOURCE");
final String dataProvider = rs.getString("DATA_PROVIDER");
final String observationTime = rs.getString("OBSERVATION_TIME");
UniqueId uniqueId = createUniqueId(docOid, docId);
_info = new ManageableHistoricalTimeSeriesInfo();
_info.setUniqueId(uniqueId);
_info.setName(name);
_info.setDataField(dataField);
_info.setDataSource(dataSource);
_info.setDataProvider(dataProvider);
_info.setObservationTime(observationTime);
_info.setExternalIdBundle(ExternalIdBundleWithDates.EMPTY);
// the data-point object id mirrors the document id with the data-point prefix
_info.setTimeSeriesObjectId(uniqueId.getObjectId().withValue(DATA_POINT_PREFIX + uniqueId.getValue()));
HistoricalTimeSeriesInfoDocument doc = new HistoricalTimeSeriesInfoDocument(_info);
doc.setVersionFromInstant(DbDateUtils.fromSqlTimestamp(versionFrom));
doc.setVersionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(versionTo));
doc.setCorrectionFromInstant(DbDateUtils.fromSqlTimestamp(correctionFrom));
doc.setCorrectionToInstant(DbDateUtils.fromSqlTimestampNullFarFuture(correctionTo));
_documents.add(doc);
}
}
}
Diff Result
No diff
Case 73 - java_ogplatform.rev_c6191_9fafc..YieldCurveFittingSetup.java
Base
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static com.opengamma.math.interpolation.Interpolator1DFactory.FLAT_EXTRAPOLATOR;
import static com.opengamma.math.interpolation.Interpolator1DFactory.LINEAR_EXTRAPOLATOR;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.ParRateCurveSensitivityCalculator;
import com.opengamma.financial.interestrate.ParRateDifferenceCalculator;
import com.opengamma.financial.interestrate.PresentValueCalculator;
import com.opengamma.financial.interestrate.PresentValueSensitivityCalculator;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
 * Abstract base class providing shared fixtures and helper routines for
 * yield-curve fitting root-finder tests: calculators, interpolators with
 * sensitivity variants, instrument lists, and single/double curve finder
 * functions. Subclasses populate the instruments and knot points.
 */
public abstract class YieldCurveFittingSetup {
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
// calculators for par rates, par-rate differences and present values, plus their sensitivities
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator
.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator
.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator
.getInstance();
// numeric tolerance for assertions and iteration cap for root finders
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
// benchmark configuration, set by subclasses
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
protected InterestRateDerivativeVisitor<Double> _marketValueCalculator = null;
protected InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> _marketValueSensitivityCalculator = null;
// NOTE(review): "_interolatorName" is a typo for "interpolator"; kept as subclasses may assign it
protected String _interolatorName = null;
// NOTE(review): upper-case non-final fields below break naming conventions but are part of the subclass contract
protected Interpolator1D<? extends Interpolator1DDataBundle> EXTRAPOLATOR;
protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_SENSITIVITY;
protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_FD_SENSITIVITY;
protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
protected double[] _marketRates;
protected final double[] _knotPoints = null;
protected DoubleMatrix1D _startPosition = null;
MultipleYieldCurveFinderDataBundle _yieldFinderData;
// residual functions and Jacobians for the single- and double-curve problems
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
// curve names, knots and yields for up to two curves
protected final String _curve1Name = "Curve 1";
protected final String _curve2Name = "Curve 2";
protected YieldAndDiscountCurve _curve1;
protected YieldAndDiscountCurve _curve2;
protected double[] _curve1Knots = null;
protected double[] _curve2Knots = null;
protected double[] _curve1Yields = null;
protected double[] _curve2Yields = null;
// Initialises the instrument lists and builds the three interpolator variants
// (plain, analytic sensitivity, finite-difference sensitivity) from _interolatorName,
// all with linear left-extrapolation and flat right-extrapolation.
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR,
FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory
.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory
.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
// Subclasses populate SINGLE_CURVE_INSTRUMENTS / DOUBLE_CURVE_INSTRUMENTS here.
protected abstract void setupSingleCurveInstruments();
protected abstract void setupDoubleCurveInstruments();
/**
 * Builds the finder data bundle for the single-curve problem: curve 1 only,
 * using the supplied interpolator and sensitivity calculator at the
 * configured knot points.
 *
 * @param instruments  the instruments to fit
 * @param extrapolator  the interpolator/extrapolator for the unknown curve
 * @param extrapolatorWithSense  the matching node-sensitivity calculator
 * @return the data bundle, not null
 */
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(
List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
final LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> interpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
final LinkedHashMap<String, double[]> nodes = new LinkedHashMap<String, double[]>();
final LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> sensitivities = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
interpolators.put(_curve1Name, extrapolator);
nodes.put(_curve1Name, _curve1Knots);
sensitivities.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, nodes, interpolators, sensitivities);
}
/**
 * Builds the finder data bundle for the double-curve problem: curves 1 and 2,
 * sharing the same interpolator and sensitivity calculator but with their own
 * knot points.
 *
 * @param instruments  the instruments to fit
 * @param extrapolator  the interpolator/extrapolator for both unknown curves
 * @param extrapolatorWithSense  the matching node-sensitivity calculator
 * @return the data bundle, not null
 */
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(
List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
final LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> interpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
final LinkedHashMap<String, double[]> nodes = new LinkedHashMap<String, double[]>();
final LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> sensitivities = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
// insertion order matters for a LinkedHashMap: curve 1 first, then curve 2
interpolators.put(_curve1Name, extrapolator);
interpolators.put(_curve2Name, extrapolator);
nodes.put(_curve1Name, _curve1Knots);
nodes.put(_curve2Name, _curve2Knots);
sensitivities.put(_curve1Name, extrapolatorWithSense);
sensitivities.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, nodes, interpolators, sensitivities);
}
// Builds the single-curve residual function and both Jacobian variants
// (analytic interpolator sensitivity, then finite-difference sensitivity).
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS,
EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
// rebuild the bundle with the FD sensitivity calculator for the FD Jacobian
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data,
_marketValueSensitivityCalculator);
}
// Double-curve analogue of setupSingleCurveFinder().
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS,
EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data,
_marketValueSensitivityCalculator);
}
// Benchmark entry point for a root finder; every scenario is currently
// commented out, so this method only constructs an (unused) FD differentiator.
// NOTE(review): dead code — either re-enable one scenario or remove the method
// body; fd_jac_calculator is only referenced by the commented-out calls.
public void testRootFindingMethods(NewtonVectorRootFinder rootFinder, String name) {
final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
// Convenience overload: benchmarks against the single-curve problem.
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
/**
 * Runs warm-up solves followed by timed benchmark solves of the root finder.
 * (Parameter renamed from "JacobianFunction" to follow lowerCamelCase.)
 *
 * @param rootFinder  the root finder under test
 * @param name  label used in the timing log message
 * @param function  the residual function to solve
 * @param jacobianFunction  the Jacobian of the residual function
 * @param doubleCurveTest  true for the double-curve variant; must not be null (unboxed below)
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
for (int i = 0; i < _hotspotWarmupCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
timer.finished();
}
}
// Dispatches a single solve to the single- or double-curve check.
private void doTest(final NewtonVectorRootFinder rootFinder,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
if (doubleCurveTest) {
doTestForDoubleCurve(rootFinder, function, jacobianFunction);
} else {
doTestForSingleCurve(rootFinder, function, jacobianFunction);
}
}
// Solves the single-curve problem and asserts every residual is ~0 at the root.
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
// Solves the double-curve problem and asserts every residual is ~0 at the root.
// The solution vector is curve-1 yields followed by curve-2 yields.
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// NOTE(review): the two slices below feed only the commented-out assertions
// and are currently unused — remove them or re-enable the checks.
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes
.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
// Cross-checks the analytic single-curve Jacobian against both the
// FD-interpolator-sensitivity variant and a full finite-difference Jacobian.
public void testSingleCurveJacobian(DoubleMatrix1D position) {
final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(SINGLE_CURVE_FINDER);
final DoubleMatrix2D jacExact = SINGLE_CURVE_JACOBIAN.evaluate(position);
final DoubleMatrix2D jacFDSensitivity = SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
assertMatrixEquals(jacExact, jacFD, 1e-6);
}
// Double-curve analogue; currently disabled (entire body commented out).
// NOTE(review): dead code — re-enable or delete.
public void testDoubleCurveJacobian(DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
// Builds an interpolated yield curve from parallel arrays of yields and times.
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times,
final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
final int n = yields.length;
if (n != times.length) {
throw new IllegalArgumentException("rates and times different lengths");
}
return new InterpolatedYieldCurve(times, yields, interpolator);
}
// Copies a finder data bundle, replacing only the instrument list.
// NOTE(review): "upDateInstruments" is a typo for "updateInstruments"; kept
// because the method is protected and may be called by subclasses.
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
}
// Returns a copy of the swap whose fixed leg carries the given rate;
// the floating leg and all other fixed-leg details are reused unchanged.
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
// Converts a maturity in years to a number of semi-annual fixed payments.
protected static FixedFloatSwap setupSwap(final double time, final String fundCurveName, final String liborCurveName) {
final int index = (int) Math.round(2 * time);
return setupSwap(index, fundCurveName, liborCurveName);
}
/**
 * Builds a fixed-for-float swap with the given number of semi-annual fixed
 * payments and twice as many quarterly floating payments, jittering all
 * payment/fixing/maturity times by up to ~2 days (sigma = 4/365) so tests do
 * not rely on exactly regular schedules.
 *
 * @param payments  the number of fixed-leg payments (floating leg has 2x)
 * @param fundingCurveName  the funding (discount) curve name
 * @param liborCurveName  the forward (Libor) curve name
 * @return the swap, not null
 */
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName,
final String liborCurveName) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// jitter magnitude: roughly four calendar days expressed in years
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
// every second floating payment coincides with a fixed payment
floating[2 * i + 1] = fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
// first fixing is jittered forward only, so it cannot go negative
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
// assumes ConstantCouponAnnuity(paymentTimes, rate, curveName) — rate 0.0 here; TODO confirm against its ctor
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Asserts two matrices have the same dimensions and entry-wise equal values
 * within {@code eps}.
 *
 * <p>Bug fix: the original iterated {@code i} up to the column count and
 * {@code j} up to the row count while calling {@code getEntry(i, j)} with
 * {@code i} as the row index — for non-square matrices this either threw an
 * out-of-bounds error or silently skipped entries. Indices now match their
 * bounds: rows for {@code i}, columns for {@code j}.
 *
 * @param m1  the expected matrix
 * @param m2  the actual matrix
 * @param eps  the per-entry tolerance
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
final int rows = m1.getNumberOfRows();
final int cols = m1.getNumberOfColumns();
assertEquals(m2.getNumberOfRows(), rows);
assertEquals(m2.getNumberOfColumns(), cols);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
}
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static com.opengamma.math.interpolation.Interpolator1DFactory.FLAT_EXTRAPOLATOR;
import static com.opengamma.math.interpolation.Interpolator1DFactory.LINEAR_EXTRAPOLATOR;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.ParRateCurveSensitivityCalculator;
import com.opengamma.financial.interestrate.ParRateDifferenceCalculator;
import com.opengamma.financial.interestrate.PresentValueCalculator;
import com.opengamma.financial.interestrate.PresentValueSensitivityCalculator;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator
.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator
.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator
.getInstance();
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
protected InterestRateDerivativeVisitor<Double> _marketValueCalculator = null;
protected InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> _marketValueSensitivityCalculator = null;
protected String _interolatorName = null;
protected Interpolator1D<? extends Interpolator1DDataBundle> EXTRAPOLATOR;
protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_SENSITIVITY;
protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_FD_SENSITIVITY;
protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
protected double[] _marketRates;
protected final double[] _knotPoints = null;
protected DoubleMatrix1D _startPosition = null;
MultipleYieldCurveFinderDataBundle _yieldFinderData;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected final String _curve1Name = "Curve 1";
protected final String _curve2Name = "Curve 2";
protected YieldAndDiscountCurve _curve1;
protected YieldAndDiscountCurve _curve2;
protected double[] _curve1Knots = null;
protected double[] _curve2Knots = null;
protected double[] _curve1Yields = null;
protected double[] _curve2Yields = null;
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR,
FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory
.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory
.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
protected abstract void setupSingleCurveInstruments();
protected abstract void setupDoubleCurveInstruments();
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(
List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators);
}
/**
 * Builds the finder data bundle for the two-curve problem: curves 1 and 2 are both
 * unknown, each registered (in that order) with the same interpolator and
 * node-sensitivity calculator; no known curves are supplied.
 */
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(
    List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
    final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
  // LinkedHashMaps preserve insertion order: curve 1 first, then curve 2.
  final LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> interpolatorsByCurve =
      new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
  final LinkedHashMap<String, double[]> knotsByCurve = new LinkedHashMap<String, double[]>();
  final LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> sensitivitiesByCurve =
      new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
  interpolatorsByCurve.put(_curve1Name, extrapolator);
  interpolatorsByCurve.put(_curve2Name, extrapolator);
  knotsByCurve.put(_curve1Name, _curve1Knots);
  knotsByCurve.put(_curve2Name, _curve2Knots);
  sensitivitiesByCurve.put(_curve1Name, extrapolatorWithSense);
  sensitivitiesByCurve.put(_curve2Name, extrapolatorWithSense);
  return new MultipleYieldCurveFinderDataBundle(instruments, null, knotsByCurve, interpolatorsByCurve, sensitivitiesByCurve);
}
/**
 * Wires up the single-curve objective function plus two Jacobians: one using the
 * analytic interpolator sensitivities and one using finite-difference sensitivities.
 * Must run after setupExtrapolator() and setupSingleCurveInstruments().
 */
protected void setupSingleCurveFinder() {
  final MultipleYieldCurveFinderDataBundle analyticBundle =
      getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
  SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(analyticBundle, _marketValueCalculator);
  SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(analyticBundle, _marketValueSensitivityCalculator);
  final MultipleYieldCurveFinderDataBundle fdBundle =
      getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
  SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY =
      new MultipleYieldCurveFinderJacobian(fdBundle, _marketValueSensitivityCalculator);
}
/**
 * Wires up the two-curve objective function plus two Jacobians: analytic
 * interpolator sensitivities and finite-difference interpolator sensitivities.
 * Must run after setupExtrapolator() and setupDoubleCurveInstruments().
 */
protected void setupDoubleCurveFinder() {
  final MultipleYieldCurveFinderDataBundle analyticBundle =
      getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
  DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(analyticBundle, _marketValueCalculator);
  DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(analyticBundle, _marketValueSensitivityCalculator);
  final MultipleYieldCurveFinderDataBundle fdBundle =
      getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
  DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY =
      new MultipleYieldCurveFinderJacobian(fdBundle, _marketValueSensitivityCalculator);
}
/**
 * Driver that exercises a root finder against the single- and double-curve fits.
 * NOTE(review): every doHotSpot call below is currently commented out, so this method is a
 * no-op; the live allocation of the finite-difference differentiator was therefore dead code
 * and has been removed (it appears, commented, below for when the calls are re-enabled).
 */
public void testRootFindingMethods(NewtonVectorRootFinder rootFinder, String name) {
  // final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
  // doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
  // doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
  // .derivative(SINGLE_CURVE_FINDER));
  // doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
  // SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
  // doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
  // doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
  // .derivative(DOUBLE_CURVE_FINDER), true);
  // doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
  // DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
/** Convenience overload for single-curve runs: delegates with doubleCurveTest = false. */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name,
    final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobian) {
  doHotSpot(rootFinder, name, function, jacobian, false);
}
/**
 * Runs _hotspotWarmupCycles untimed iterations of the fit (to let the JIT warm up),
 * then, if configured, _benchmarkCycles timed iterations under an OperationTimer.
 * Fix: parameter "JacobianFunction" renamed to lowerCamelCase "jacobianFunction"
 * (Java naming convention; parameter names are not part of the call interface).
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name,
    final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
  for (int i = 0; i < _hotspotWarmupCycles; i++) {
    doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
  }
  if (_benchmarkCycles > 0) {
    final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
    for (int i = 0; i < _benchmarkCycles; i++) {
      doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
    }
    timer.finished();
  }
}
/**
 * Dispatches one fit-and-check iteration to the single- or double-curve variant.
 * NOTE(review): doubleCurveTest is a boxed Boolean and is auto-unboxed in the if;
 * a null argument would NPE — all current callers pass a literal/primitive.
 * Fix: parameter "JacobianFunction" renamed to lowerCamelCase "jacobianFunction".
 */
private void doTest(final NewtonVectorRootFinder rootFinder,
    final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
  if (doubleCurveTest) {
    doTestForDoubleCurve(rootFinder, function, jacobianFunction);
  } else {
    doTestForSingleCurve(rootFinder, function, jacobianFunction);
  }
}
/**
 * Solves the single-curve problem from _startPosition and asserts that every
 * component of the residual (model-minus-market) vector vanishes to within EPS.
 */
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder,
    final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
  final DoubleMatrix1D root = rootFinder.getRoot(f, j, _startPosition);
  final DoubleMatrix1D residuals = f.evaluate(root);
  final int size = residuals.getNumberOfElements();
  for (int k = 0; k < size; k++) {
    assertEquals(0.0, residuals.getEntry(k), EPS);
  }
}
// Solves the two-curve problem from _startPosition and asserts the residual vector vanishes.
// The root vector concatenates curve-1 yields followed by curve-2 yields, so it is split at
// _curve1Knots.length. The split results (fundingYields/liborYields) are currently unused live
// code — they feed only the commented-out assertions below, and are kept for when those are
// re-enabled.
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder,
final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Split the solution into the two curves' node yields (curve 1 first, then curve 2).
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes
.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
/**
 * Cross-checks the single-curve Jacobian three ways at the given position: the
 * analytic Jacobian must agree (to 1e-6) with both the FD-interpolator-sensitivity
 * Jacobian and a full finite-difference Jacobian of the objective function.
 */
public void testSingleCurveJacobian(DoubleMatrix1D position) {
  final VectorFieldFirstOrderDifferentiator differentiator = new VectorFieldFirstOrderDifferentiator();
  final Function1D<DoubleMatrix1D, DoubleMatrix2D> fdJacobianFunction = differentiator.derivative(SINGLE_CURVE_FINDER);
  final DoubleMatrix2D analytic = SINGLE_CURVE_JACOBIAN.evaluate(position);
  final DoubleMatrix2D fdInterpolatorSensitivity = SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
  final DoubleMatrix2D fullFiniteDifference = fdJacobianFunction.evaluate(position);
  assertMatrixEquals(analytic, fdInterpolatorSensitivity, 1e-6);
  assertMatrixEquals(analytic, fullFiniteDifference, 1e-6);
}
// Intended two-curve analogue of testSingleCurveJacobian; the entire body is currently
// commented out, so this method is a deliberate no-op placeholder.
public void testDoubleCurveJacobian(DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel arrays of times and yields.
 *
 * @param yields yields at the knot times
 * @param times knot times; must be the same length as yields
 * @param interpolator interpolator/extrapolator used between and beyond the knots
 * @return the interpolated curve
 * @throws IllegalArgumentException if the arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times,
    final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
  final int n = yields.length;
  if (n != times.length) {
    // Fix: include the actual lengths in the message so a failure is diagnosable.
    throw new IllegalArgumentException("rates and times different lengths: " + n + " vs " + times.length);
  }
  return new InterpolatedYieldCurve(times, yields, interpolator);
}
// Returns a copy of the bundle with the instrument list replaced; all other components
// (known curves, node points, interpolators, sensitivity calculators) are carried over.
// NOTE: the misspelled name ("upDate") is kept — it is part of the callable interface.
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
}
// Returns a copy of the swap whose fixed leg carries the given coupon rate; the floating
// leg and all fixed-leg schedule data (payment times, notional, year fractions, curve name)
// are reused unchanged from the input swap.
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
// Convenience overload: converts a maturity in years to a number of semi-annual fixed
// payments (rounded to the nearest whole payment) and delegates to setupSwap(int, ...).
protected static FixedFloatSwap setupSwap(final double time, final String fundCurveName, final String liborCurveName) {
final int index = (int) Math.round(2 * time);
return setupSwap(index, fundCurveName, liborCurveName);
}
/**
 * Builds a test fixed-vs-floating swap with the given number of (nominally semi-annual)
 * fixed payments and twice as many (nominally quarterly) floating payments. All schedule
 * times are jittered by up to +/- sigma/2 (sigma = 4/365, about two days) via the shared
 * RANDOM engine. Every second floating payment time coincides exactly with a fixed one.
 * The fixed-leg rate argument passed to ConstantCouponAnnuity is 0.0 here — presumably
 * the coupon, set later via setParSwapRate (TODO confirm against the annuity constructor).
 *
 * @param payments number of fixed-leg payments
 * @param fundingCurveName name of the discounting curve
 * @param liborCurveName name of the forward curve for the floating leg
 * @return the constructed swap
 */
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName,
    final String liborCurveName) {
  final double[] fixedTimes = new double[payments];
  final double[] floatingTimes = new double[2 * payments];
  final double[] indexFixing = new double[2 * payments];
  final double[] indexMaturity = new double[2 * payments];
  final double[] yearFrac = new double[2 * payments];
  final double sigma = 4.0 / 365.0;
  for (int i = 0; i < payments; i++) {
    // Semi-annual fixed payment, jittered; the coincident floating payment reuses the same time.
    fixedTimes[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    floatingTimes[2 * i + 1] = fixedTimes[i];
  }
  for (int i = 0; i < 2 * payments; i++) {
    if (i % 2 == 0) {
      // The in-between (even-index) floating payments get their own jittered quarterly times.
      floatingTimes[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    }
    yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
    // First fixing is jittered only forward so it cannot go negative.
    indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
    indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
  }
  final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixedTimes, 0.0, fundingCurveName);
  final VariableAnnuity floatingLeg = new VariableAnnuity(floatingTimes, indexFixing, indexMaturity, yearFrac, 1.0,
      fundingCurveName, liborCurveName);
  return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Asserts that two matrices have the same dimensions and entry-wise equal values to within eps.
 * Fix: the original outer loop ran the first index over the COLUMN count (n) and the inner
 * over the ROW count (m) while calling getEntry(i, j) — assuming the standard getEntry(row,
 * column) convention (TODO confirm against DoubleMatrix2D), that throws or under-checks for
 * any non-square matrix; it only happened to work because the Jacobians here are square.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}
Left
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static com.opengamma.math.interpolation.Interpolator1DFactory.FLAT_EXTRAPOLATOR;
import static com.opengamma.math.interpolation.Interpolator1DFactory.LINEAR_EXTRAPOLATOR;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.ParRateCurveSensitivityCalculator;
import com.opengamma.financial.interestrate.ParRateDifferenceCalculator;
import com.opengamma.financial.interestrate.PresentValueCalculator;
import com.opengamma.financial.interestrate.PresentValueSensitivityCalculator;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
/**
 * Shared fixture for yield-curve-fitting root-finder tests: subclasses supply the
 * instruments, knots and interpolator name; this class wires up the objective
 * functions and Jacobians and provides the fit-and-check drivers and swap builders.
 * Only change vs the original: assertMatrixEquals iterated its first index over the
 * column count while indexing getEntry(row, column), which breaks non-square matrices.
 */
public abstract class YieldCurveFittingSetup {
  //CSOFF
  protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
  protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
  protected static final double EPS = 1e-8;
  protected static final int STEPS = 100;
  // Mutable configuration — populated by subclasses before the setup*() methods run.
  protected Logger _logger = null;
  protected int _hotspotWarmupCycles;
  protected int _benchmarkCycles;
  protected InterestRateDerivativeVisitor<Double> _marketValueCalculator = null;
  protected InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> _marketValueSensitivityCalculator = null;
  protected String _interolatorName = null; // NOTE: misspelling retained — part of the subclass-visible interface
  protected Interpolator1D<? extends Interpolator1DDataBundle> EXTRAPOLATOR;
  protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_SENSITIVITY;
  protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_FD_SENSITIVITY;
  protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
  protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
  protected double[] _marketRates;
  protected final double[] _knotPoints = null; // NOTE(review): final and always null — looks vestigial; confirm before removing
  protected DoubleMatrix1D _startPosition = null;
  MultipleYieldCurveFinderDataBundle _yieldFinderData;
  protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
  protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
  protected final String _curve1Name = "Curve 1";
  protected final String _curve2Name = "Curve 2";
  protected YieldAndDiscountCurve _curve1;
  protected YieldAndDiscountCurve _curve2;
  protected double[] _curve1Knots = null;
  protected double[] _curve2Knots = null;
  protected double[] _curve1Yields = null;
  protected double[] _curve2Yields = null;
  //CSON

  /** Initialises the instrument lists and the three interpolator/sensitivity variants. */
  protected void setupExtrapolator() {
    SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
    DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
    EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
    EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
    EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
  }

  // Subclass hooks: populate the instrument lists for the one- and two-curve fits.
  protected abstract void setupSingleCurveInstruments();

  protected abstract void setupDoubleCurveInstruments();

  /** Builds the finder data bundle for the single-curve problem (curve 1 only, no known curves). */
  protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
        new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    unknownCurveInterpolators.put(_curve1Name, extrapolator);
    unknownCurveNodes.put(_curve1Name, _curve1Knots);
    unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
    return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
  }

  /** Builds the finder data bundle for the two-curve problem (curves 1 and 2 unknown, in that order). */
  protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
        new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    unknownCurveInterpolators.put(_curve1Name, extrapolator);
    unknownCurveInterpolators.put(_curve2Name, extrapolator);
    unknownCurveNodes.put(_curve1Name, _curve1Knots);
    unknownCurveNodes.put(_curve2Name, _curve2Knots);
    unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
    unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
    return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
  }

  /** Wires up the single-curve objective function plus analytic and FD-sensitivity Jacobians. */
  protected void setupSingleCurveFinder() {
    MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
    SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
    SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
    data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
    SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
  }

  /** Wires up the two-curve objective function plus analytic and FD-sensitivity Jacobians. */
  protected void setupDoubleCurveFinder() {
    MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
    DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
    DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
    data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
    DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
  }

  /** Driver for exercising a root finder; all doHotSpot calls are currently disabled (no-op). */
  public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
    //final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
    // doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
    // doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
    // .derivative(SINGLE_CURVE_FINDER));
    // doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
    // SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
    // doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
    // doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
    // .derivative(DOUBLE_CURVE_FINDER), true);
    // doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
    // DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
  }

  /** Convenience overload for single-curve runs; delegates with doubleCurveTest = false. */
  @SuppressWarnings("unused")
  private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
      final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
    doHotSpot(rootFinder, name, functor, jacobianFunction, false);
  }

  /** Runs warm-up cycles, then optional timed benchmark cycles under an OperationTimer. */
  private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
      final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
    for (int i = 0; i < _hotspotWarmupCycles; i++) {
      doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
    }
    if (_benchmarkCycles > 0) {
      final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
      for (int i = 0; i < _benchmarkCycles; i++) {
        doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
      }
      timer.finished();
    }
  }

  /** Dispatches one fit-and-check iteration; doubleCurveTest is unboxed (must not be null). */
  private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
      final Boolean doubleCurveTest) {
    if (doubleCurveTest) {
      doTestForDoubleCurve(rootFinder, function, jacobianFunction);
    } else {
      doTestForSingleCurve(rootFinder, function, jacobianFunction);
    }
  }

  /** Solves the single-curve problem from _startPosition; residuals must vanish within EPS. */
  private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
    final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
  }

  /**
   * Solves the two-curve problem from _startPosition; residuals must vanish within EPS.
   * The solution vector concatenates curve-1 yields then curve-2 yields; the suppressed
   * locals feed only the commented-out assertions below.
   */
  private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
    final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
    @SuppressWarnings("unused")
    final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
    @SuppressWarnings("unused")
    final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
    // for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
    // assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
    // }
    // for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
    // assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
    // }
    // //
    // final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
    // final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
    // final YieldCurveBundle bundle = new YieldCurveBundle();
    // bundle.setCurve(_curve1Name, liborCurve);
    // bundle.setCurve(_curve2Name, fundingCurve);
    //
    // for (int i = 0; i < _marketRates.length; i++) {
    // assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
    // }
  }

  /** Cross-checks the analytic single-curve Jacobian against FD-sensitivity and full-FD variants. */
  public void testSingleCurveJacobian(DoubleMatrix1D position) {
    final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(SINGLE_CURVE_FINDER);
    final DoubleMatrix2D jacExact = SINGLE_CURVE_JACOBIAN.evaluate(position);
    final DoubleMatrix2D jacFDSensitivity = SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
    final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
    assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
    assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /** Intended two-curve analogue of testSingleCurveJacobian; currently fully disabled (no-op). */
  public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
    // final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    // final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
    // final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
    // final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
    // final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
    // assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
    // assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /**
   * Builds an interpolated yield curve from parallel arrays of times and yields.
   * @throws IllegalArgumentException if the arrays differ in length
   */
  protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
    final int n = yields.length;
    if (n != times.length) {
      throw new IllegalArgumentException("rates and times different lengths");
    }
    return new InterpolatedYieldCurve(times, yields, interpolator);
  }

  /** Returns a copy of the bundle with the instrument list replaced; everything else carried over. */
  protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
    return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
        .getUnknownCurveNodeSensitivityCalculators());
  }

  /** Returns a copy of the swap whose fixed leg carries the given rate; floating leg reused as-is. */
  protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
    VariableAnnuity floatingLeg = swap.getFloatingLeg();
    ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
    ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
    return new FixedFloatSwap(newLeg, floatingLeg);
  }

  /** Converts a maturity in years to a semi-annual payment count and delegates. */
  protected static FixedFloatSwap setupSwap(final double time, final String fundCurveName, final String liborCurveName) {
    final int index = (int) Math.round(2 * time);
    return setupSwap(index, fundCurveName, liborCurveName);
  }

  /**
   * Builds a test swap: {@code payments} semi-annual fixed payments vs twice as many quarterly
   * floating payments, all schedule times jittered by up to +/- 2 days via the shared RANDOM engine.
   */
  protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
    final double[] fixed = new double[payments];
    final double[] floating = new double[2 * payments];
    final double[] indexFixing = new double[2 * payments];
    final double[] indexMaturity = new double[2 * payments];
    final double[] yearFrac = new double[2 * payments];
    final double sigma = 4.0 / 365.0;
    for (int i = 0; i < payments; i++) {
      fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      floating[2 * i + 1] = fixed[i];
    }
    for (int i = 0; i < 2 * payments; i++) {
      if (i % 2 == 0) {
        floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      }
      yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
      indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
      indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    }
    final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
    final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
    return new FixedFloatSwap(fixedLeg, floatingLeg);
  }

  /**
   * Asserts both matrices have the same dimensions and entry-wise equal values within eps.
   * Fix: the original ran the first loop index over the column count and the second over the
   * row count while indexing getEntry(i, j) — assuming the standard getEntry(row, column)
   * convention (TODO confirm against DoubleMatrix2D), that breaks non-square matrices.
   */
  protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
    final int m = m1.getNumberOfRows();
    final int n = m1.getNumberOfColumns();
    assertEquals(m2.getNumberOfRows(), m);
    assertEquals(m2.getNumberOfColumns(), n);
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
      }
    }
  }
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static com.opengamma.math.interpolation.Interpolator1DFactory.FLAT_EXTRAPOLATOR;
import static com.opengamma.math.interpolation.Interpolator1DFactory.LINEAR_EXTRAPOLATOR;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.ParRateCurveSensitivityCalculator;
import com.opengamma.financial.interestrate.ParRateDifferenceCalculator;
import com.opengamma.financial.interestrate.PresentValueCalculator;
import com.opengamma.financial.interestrate.PresentValueSensitivityCalculator;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
/**
 * Common scaffolding for yield-curve fitting tests.  Concrete subclasses supply the
 * instruments and the interpolator name; this class wires up the single- and
 * double-curve finder functions and Jacobians, and provides hot-spot warm-up /
 * benchmark drivers that solve for the curves with a {@link NewtonVectorRootFinder}
 * and assert the quality of the fit.
 */
public abstract class YieldCurveFittingSetup {
  //CSOFF
  /** Deterministic RNG (fixed seed) so generated instrument cash-flow times are repeatable. */
  protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
  protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
  protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
  /** Absolute tolerance used by every assertion in this harness. */
  protected static final double EPS = 1e-8;
  protected static final int STEPS = 100;
  protected Logger _logger = null;
  protected int _hotspotWarmupCycles;
  protected int _benchmarkCycles;
  protected InterestRateDerivativeVisitor<Double> _marketValueCalculator = null;
  protected InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> _marketValueSensitivityCalculator = null;
  // NOTE(review): field name is misspelled ("interolator") but is part of the
  // protected interface used by subclasses, so it is kept unchanged.
  protected String _interolatorName = null;
  protected Interpolator1D<? extends Interpolator1DDataBundle> EXTRAPOLATOR;
  protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_SENSITIVITY;
  protected Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> EXTRAPOLATOR_WITH_FD_SENSITIVITY;
  protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
  protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
  protected double[] _marketRates;
  // Never assigned (always null); retained only because it is part of the protected surface.
  protected final double[] _knotPoints = null;
  protected DoubleMatrix1D _startPosition = null;
  MultipleYieldCurveFinderDataBundle _yieldFinderData;
  protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
  protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
  protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
  protected final String _curve1Name = "Curve 1";
  protected final String _curve2Name = "Curve 2";
  protected YieldAndDiscountCurve _curve1;
  protected YieldAndDiscountCurve _curve2;
  protected double[] _curve1Knots = null;
  protected double[] _curve2Knots = null;
  protected double[] _curve1Yields = null;
  protected double[] _curve2Yields = null;
  //CSON

  /**
   * Builds the combined interpolator/extrapolator (and its analytic and
   * finite-difference sensitivity calculators) from {@code _interolatorName},
   * and resets the instrument lists.  Must be called before the curve finders
   * are set up.
   */
  protected void setupExtrapolator() {
    SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
    DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
    EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
    EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
    EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
  }

  /** Populates {@code SINGLE_CURVE_INSTRUMENTS}; supplied by the concrete test. */
  protected abstract void setupSingleCurveInstruments();

  /** Populates {@code DOUBLE_CURVE_INSTRUMENTS}; supplied by the concrete test. */
  protected abstract void setupDoubleCurveInstruments();

  /**
   * Builds a finder data bundle with one unknown curve ({@code _curve1Name} /
   * {@code _curve1Knots}) and no known curves.
   */
  protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
        new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    unknownCurveInterpolators.put(_curve1Name, extrapolator);
    unknownCurveNodes.put(_curve1Name, _curve1Knots);
    unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
    return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
  }

  /**
   * Builds a finder data bundle with two unknown curves ({@code _curve1Name} and
   * {@code _curve2Name}) sharing the same interpolator and sensitivity calculator.
   */
  protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
        new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    unknownCurveInterpolators.put(_curve1Name, extrapolator);
    unknownCurveInterpolators.put(_curve2Name, extrapolator);
    unknownCurveNodes.put(_curve1Name, _curve1Knots);
    unknownCurveNodes.put(_curve2Name, _curve2Knots);
    unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
    unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
    return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
  }

  /** Builds the single-curve finder function plus its analytic and FD-sensitivity Jacobians. */
  protected void setupSingleCurveFinder() {
    MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
    SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
    SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
    data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
    SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
  }

  /** Builds the double-curve finder function plus its analytic and FD-sensitivity Jacobians. */
  protected void setupDoubleCurveFinder() {
    MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
    DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
    DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
    data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
    DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
  }

  /**
   * Entry point for exercising a root finder against all curve-finder variants.
   * Currently a no-op: every sub-test is commented out pending re-enablement.
   */
  public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
    //final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
    // doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
    // doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
    // .derivative(SINGLE_CURVE_FINDER));
    // doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
    // SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
    // doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
    // doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
    // .derivative(DOUBLE_CURVE_FINDER), true);
    // doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
    // DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
  }

  /** Single-curve convenience overload of the benchmark driver below. */
  @SuppressWarnings("unused")
  private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
      final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
    doHotSpot(rootFinder, name, functor, jacobianFunction, false);
  }

  /**
   * Runs {@code _hotspotWarmupCycles} unmeasured solves (to let the JIT warm up),
   * then {@code _benchmarkCycles} timed solves reported through {@code _logger}.
   */
  private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
      final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
    for (int i = 0; i < _hotspotWarmupCycles; i++) {
      doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
    }
    if (_benchmarkCycles > 0) {
      final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
      for (int i = 0; i < _benchmarkCycles; i++) {
        doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
      }
      timer.finished();
    }
  }

  /** Dispatches to the single- or double-curve solve-and-assert routine. */
  private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
      final Boolean doubleCurveTest) {
    if (doubleCurveTest) {
      doTestForDoubleCurve(rootFinder, function, jacobianFunction);
    } else {
      doTestForSingleCurve(rootFinder, function, jacobianFunction);
    }
  }

  /** Solves for the single curve and asserts the residuals are zero to within EPS. */
  private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
    final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
  }

  /**
   * Solves for both curves and asserts the residuals are zero to within EPS.
   * The unpacked per-curve yields are computed but the follow-up checks against
   * the true yields/market rates are currently commented out.
   */
  private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
    final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
    // The solution vector is laid out curve-1 nodes first, then curve-2 nodes.
    @SuppressWarnings("unused")
    final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
    @SuppressWarnings("unused")
    final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
    // for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
    // assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
    // }
    // for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
    // assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
    // }
    // //
    // final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
    // final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
    // final YieldCurveBundle bundle = new YieldCurveBundle();
    // bundle.setCurve(_curve1Name, liborCurve);
    // bundle.setCurve(_curve2Name, fundingCurve);
    //
    // for (int i = 0; i < _marketRates.length; i++) {
    // assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
    // }
  }

  /**
   * Checks the analytic single-curve Jacobian against both the FD-interpolator-
   * sensitivity variant and a full finite-difference Jacobian at {@code position}.
   */
  public void testSingleCurveJacobian(DoubleMatrix1D position) {
    final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(SINGLE_CURVE_FINDER);
    final DoubleMatrix2D jacExact = SINGLE_CURVE_JACOBIAN.evaluate(position);
    final DoubleMatrix2D jacFDSensitivity = SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
    final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
    assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
    assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /** Double-curve Jacobian check; currently disabled (body commented out). */
  public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
    // final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    // final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
    // final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
    // final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
    // final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
    // assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
    // assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /**
   * Builds an interpolated yield curve from parallel arrays of yields and times.
   *
   * @throws IllegalArgumentException if the arrays differ in length
   */
  protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
    final int n = yields.length;
    if (n != times.length) {
      throw new IllegalArgumentException("rates and times different lengths");
    }
    return new InterpolatedYieldCurve(times, yields, interpolator);
  }

  /**
   * Returns a copy of {@code old} with its instrument list replaced.
   * NOTE(review): method name is misspelled ("upDate") but kept for source compatibility.
   */
  protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
    return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
        .getUnknownCurveNodeSensitivityCalculators());
  }

  /** Returns a copy of {@code swap} whose fixed leg pays the given coupon rate. */
  protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
    VariableAnnuity floatingLeg = swap.getFloatingLeg();
    ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
    ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
    return new FixedFloatSwap(newLeg, floatingLeg);
  }

  /** Converts a maturity in years to a semi-annual payment count and delegates. */
  protected static FixedFloatSwap setupSwap(final double time, final String fundCurveName, final String liborCurveName) {
    final int index = (int) Math.round(2 * time);
    return setupSwap(index, fundCurveName, liborCurveName);
  }

  /**
   * Builds a fixed/float swap with {@code payments} semi-annual fixed payments and
   * quarterly floating payments, with all dates jittered by up to ~2 business days
   * (sigma = 4/365) using the shared deterministic RNG.  The RANDOM call order is
   * significant for reproducibility — do not reorder these loops.
   */
  protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
    final double[] fixed = new double[payments];
    final double[] floating = new double[2 * payments];
    final double[] indexFixing = new double[2 * payments];
    final double[] indexMaturity = new double[2 * payments];
    final double[] yearFrac = new double[2 * payments];
    final double sigma = 4.0 / 365.0;
    for (int i = 0; i < payments; i++) {
      fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      floating[2 * i + 1] = fixed[i]; // floating payment coincides with each fixed payment
    }
    for (int i = 0; i < 2 * payments; i++) {
      if (i % 2 == 0) {
        floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      }
      yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
      // First fixing is jittered forward only so it cannot fall before time zero.
      indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
      indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    }
    final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
    final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
    return new FixedFloatSwap(fixedLeg, floatingLeg);
  }

  /**
   * Asserts two matrices have identical dimensions and element-wise equal entries
   * to within {@code eps}.
   * Fix: the original iterated {@code i} over the COLUMN count but passed it as the
   * ROW index to {@code getEntry(i, j)} (and vice versa), which breaks or silently
   * mis-compares for non-square matrices.  Rows and columns are now paired correctly.
   */
  protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
    final int m = m1.getNumberOfRows();
    final int n = m1.getNumberOfColumns();
    assertEquals(m2.getNumberOfRows(), m);
    assertEquals(m2.getNumberOfColumns(), n);
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
      }
    }
  }
}
Right
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
/**
 * Common scaffolding for yield-curve fitting tests.  A concrete subclass packages
 * its instruments, knots and interpolators into a {@link YieldCurveFittingTestDataBundle};
 * this class then solves for the curves with a {@link NewtonVectorRootFinder} and
 * asserts residuals, recovered market rates and (when known) the true yields.
 */
public abstract class YieldCurveFittingSetup {
  /** Deterministic RNG (fixed seed) so generated swap cash-flow times are repeatable. */
  protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
  /** Absolute tolerance used by every assertion in this harness. */
  protected static final double EPS = 1e-8;
  protected static final int STEPS = 100;
  protected Logger _logger = null;
  protected int _hotspotWarmupCycles;
  protected int _benchmarkCycles;

  /**
   * Assembles and validates a test data bundle from parallel curve-name/knot lists.
   * The total knot count must equal the instrument count (square system).
   *
   * @param curveYields the true yields per curve, or null when fitting from market
   *        data with no known answer (the yields check is then skipped)
   */
  protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
      List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
      final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
      InterestRateDerivativeVisitor<Double> marketValueCalculator,
      InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
      double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
    Validate.notNull(curveNames);
    Validate.notNull(curvesKnots);
    Validate.notNull(instruments);
    Validate.notNull(extrapolator);
    Validate.notNull(extrapolatorWithSense);
    int n = curveNames.size();
    Validate.isTrue(n == curvesKnots.size());
    int count = 0;
    for (int i = 0; i < n; i++) {
      Validate.notNull(curvesKnots.get(i));
      count += curvesKnots.get(i).length;
    }
    // One instrument per unknown node so the root-finding problem is square.
    Validate.isTrue(count == instruments.size());
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    for (int i = 0; i < n; i++) {
      unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
      unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
      unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
    }
    if (curveYields == null) {
      return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
          unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
          marketValueSensitivityCalculator, marketRates, startPosition);
    }
    Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
    HashMap<String, double[]> yields = new HashMap<String, double[]>();
    for (int i = 0; i < n; i++) {
      yields.put(curveNames.get(i), curveYields.get(i));
    }
    return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
        unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
        startPosition, yields);
  }

  /**
   * Runs {@code _hotspotWarmupCycles} unmeasured solves (to let the JIT warm up),
   * then {@code _benchmarkCycles} timed solves reported through {@code _logger}.
   */
  public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
    for (int i = 0; i < _hotspotWarmupCycles; i++) {
      doTestForCurveFinding(rootFinder, data);
    }
    if (_benchmarkCycles > 0) {
      final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
      for (int i = 0; i < _benchmarkCycles; i++) {
        doTestForCurveFinding(rootFinder, data);
      }
      timer.finished();
    }
  }

  /**
   * Solves the curve-finding problem (with an analytic or finite-difference Jacobian,
   * per the bundle's TestType) and asserts: zero residuals, recovered market rates,
   * and — when true yields are supplied — the fitted node yields themselves.
   */
  private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
    Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
        .getMarketValueCalculator());
    Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
    if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
      jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
    } else if (data.getTestType() == TestType.FD_JACOBIAN) {
      final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
      jac = fdJacCalculator.derivative(func);
    } else {
      throw new IllegalArgumentException("unknown TestType " + data.getTestType());
    }
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
    final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
    HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
    final YieldCurveBundle bundle = new YieldCurveBundle();
    for (String name : data.getCurveNames()) {
      YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
          .getInterpolatorForCurve(name));
      bundle.setCurve(name, curve);
    }
    if (data.getKnownCurves() != null) {
      bundle.addAll(data.getKnownCurves());
    }
    //this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
    //the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
    for (int i = 0; i < data.getMarketRates().length; i++) {
      assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
          EPS);
    }
    //this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
    if (data.getCurveYields() != null) {
      for (String name : data.getCurveNames()) {
        double[] trueYields = data.getCurveYields().get(name);
        double[] fittedYields = yields.get(name);
        for (int i = 0; i < trueYields.length; i++) {
          assertEquals(trueYields[i], fittedYields[i], EPS);
        }
      }
    }
  }

  /**
   * Splits the flat solution vector back into per-curve yield arrays, in the
   * bundle's curve-name order (curves are concatenated in that order).
   */
  private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
      DoubleMatrix1D yieldCurveNodes) {
    HashMap<String, double[]> res = new HashMap<String, double[]>();
    int start = 0;
    int end = 0;
    for (String name : data.getCurveNames()) {
      end += data.getCurveNodePointsForCurve(name).length;
      double[] temp = Arrays.copyOfRange(yieldCurveNodes.getData(), start, end);
      res.put(name, temp);
      start = end;
    }
    return res;
  }

  /**
   * Checks the analytic Jacobian against a finite-difference Jacobian at the
   * bundle's start position.
   */
  public void testJacobian(YieldCurveFittingTestDataBundle data) {
    MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
    MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
        .getMarketValueSensitivityCalculator());
    final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
    final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
    final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
    assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /**
   * Builds an interpolated yield curve from parallel arrays of yields and times.
   *
   * @throws IllegalArgumentException if the arrays differ in length
   */
  protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times,
      final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
    final int n = yields.length;
    if (n != times.length) {
      throw new IllegalArgumentException("rates and times different lengths");
    }
    return new InterpolatedYieldCurve(times, yields, interpolator);
  }

  /** Returns a copy of {@code old} with its instrument list replaced. */
  protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
      final List<InterestRateDerivative> instruments) {
    return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
        old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
  }

  /**
   * Factory dispatch: builds an at-market instrument of the given type
   * ("cash", "libor", "fra", "future" or "swap") priced off {@code curves}.
   *
   * @throws IllegalArgumentException for an unrecognised type
   */
  protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
      final String indexCurveName, final YieldCurveBundle curves) {
    if ("cash".equals(type)) {
      return makeCash(maturity, fundCurveName, curves);
    } else if ("libor".equals(type)) {
      return makeLibor(maturity, indexCurveName, curves);
    } else if ("fra".equals(type)) {
      return makeFRA(maturity, fundCurveName, indexCurveName, curves);
    } else if ("future".equals(type)) {
      return makeFutrure(maturity, indexCurveName, curves);
    } else if ("swap".equals(type)) {
      return makeSwap(maturity, fundCurveName, indexCurveName, curves);
    }
    throw new IllegalArgumentException("unknown IRD type " + type);
  }

  /** Builds a cash deposit whose rate is set to its par rate off {@code curves}. */
  protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
      final YieldCurveBundle curves) {
    InterestRateDerivative ird = new Cash(time, 0.0, fundCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new Cash(time, rate, fundCurveName);
  }

  /** Builds a Libor deposit whose rate is set to its par rate off {@code curves}. */
  protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
      final YieldCurveBundle curves) {
    InterestRateDerivative ird = new Libor(time, 0.0, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new Libor(time, rate, indexCurveName);
  }

  /** Builds a 3-month FRA maturing at {@code time} with its rate set to par. */
  protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
      final String indexCurveName, final YieldCurveBundle curves) {
    InterestRateDerivative ird = new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new ForwardRateAgreement(time - 0.25, time, rate, fundCurveName, indexCurveName);
  }

  /**
   * Builds a 3-month interest-rate future settling at {@code time} with its rate set to par.
   * NOTE(review): method name is misspelled ("Futrure") but kept for source compatibility;
   * prefer {@link #makeFuture}.
   */
  protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
      final YieldCurveBundle curves) {
    InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
  }

  /** Correctly-spelled alias for {@link #makeFutrure}; delegates unchanged. */
  protected static InterestRateDerivative makeFuture(final double time, final String indexCurveName,
      final YieldCurveBundle curves) {
    return makeFutrure(time, indexCurveName, curves);
  }

  /** Converts a maturity in years to a semi-annual payment count and delegates. */
  protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
      final YieldCurveBundle curves) {
    final int index = (int) Math.round(2 * time);
    return makeSwap(index, fundCurveName, liborCurveName, curves);
  }

  // protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
  // VariableAnnuity floatingLeg = swap.getFloatingLeg();
  // ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
  // ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
  // fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
  // return new FixedFloatSwap(newLeg, floatingLeg);
  // }

  /**
   * Builds a par fixed/float swap with {@code payments} semi-annual fixed payments
   * and quarterly floating payments, dates jittered by up to ~2 business days
   * (sigma = 4/365) with the shared deterministic RNG, and the fixed rate set to
   * the par rate implied by {@code curves}.  The RANDOM call order is significant
   * for reproducibility — do not reorder these loops.
   *
   * @param payments number of semi-annual fixed-leg payments
   * @param fundingCurveName name of the discounting curve
   * @param liborCurveName name of the forward (index) curve
   * @param curves bundle used to compute the par rate
   * @return a fixed/float swap struck at its par rate
   */
  protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
      final String liborCurveName, final YieldCurveBundle curves) {
    final double[] fixed = new double[payments];
    final double[] floating = new double[2 * payments];
    final double[] indexFixing = new double[2 * payments];
    final double[] indexMaturity = new double[2 * payments];
    final double[] yearFrac = new double[2 * payments];
    final double sigma = 4.0 / 365.0;
    for (int i = 0; i < payments; i++) {
      fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      floating[2 * i + 1] = fixed[i]; // floating payment coincides with each fixed payment
    }
    for (int i = 0; i < 2 * payments; i++) {
      if (i % 2 == 0) {
        floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      }
      yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
      // First fixing is jittered forward only so it cannot fall before time zero.
      indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
      indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    }
    final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
    final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
        fundingCurveName, liborCurveName);
    InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
        fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
    return new FixedFloatSwap(newLeg, floatingLeg);
  }

  /**
   * Asserts two matrices have identical dimensions and element-wise equal entries
   * to within {@code eps}.
   * Fix: the original iterated {@code i} over the COLUMN count but passed it as the
   * ROW index to {@code getEntry(i, j)} (and vice versa), which breaks or silently
   * mis-compares for non-square matrices.  Rows and columns are now paired correctly.
   */
  protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
    final int m = m1.getNumberOfRows();
    final int n = m1.getNumberOfColumns();
    assertEquals(m2.getNumberOfRows(), m);
    assertEquals(m2.getNumberOfColumns(), n);
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
      }
    }
  }
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
 * Shared harness for yield-curve-fitting tests. Concrete subclasses supply a
 * logger and cycle counts; this base class builds the fitting problem
 * ({@link YieldCurveFittingTestDataBundle}), drives a
 * {@link NewtonVectorRootFinder} through warm-up/benchmark cycles, verifies
 * that the fitted curves reprice the input instruments, and provides factory
 * methods that construct simple instruments already set to their par rate.
 */
public abstract class YieldCurveFittingSetup {
  /** Fixed-seed RNG so randomly perturbed instrument schedules are reproducible. */
  protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
  /** Absolute tolerance used by every numeric assertion in this harness. */
  protected static final double EPS = 1e-8;
  /** Root-finder step budget available to subclasses. */
  protected static final int STEPS = 100;
  /** Logger used for benchmark timing; concrete subclasses must assign it. */
  protected Logger _logger = null;
  /** Number of untimed warm-up iterations run before benchmarking. */
  protected int _hotspotWarmupCycles;
  /** Number of timed iterations; zero disables the benchmark phase entirely. */
  protected int _benchmarkCycles;

  /**
   * Assembles the data bundle that describes one curve-fitting problem.
   * <p>
   * The i-th curve name is paired with the i-th knot array; the total knot
   * count must equal the instrument count (one instrument per unknown node).
   *
   * @param instruments the calibration instruments, not null
   * @param knownCurves curves held fixed during fitting, may be null
   * @param curveNames names of the curves to fit, not null
   * @param curvesKnots node times per curve, parallel to {@code curveNames}, not null
   * @param extrapolator interpolator/extrapolator used for every fitted curve, not null
   * @param extrapolatorWithSense node-sensitivity calculator matching {@code extrapolator}, not null
   * @param marketValueCalculator visitor producing the model value of an instrument
   * @param marketValueSensitivityCalculator visitor producing curve sensitivities
   * @param marketRates observed market rates, one per instrument
   * @param startPosition root-finder starting vector
   * @param curveYields true yields per curve for regression checks, or null if unknown
   * @return the populated test data bundle
   */
  protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
      List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
      final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
      final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
      InterestRateDerivativeVisitor<Double> marketValueCalculator,
      InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
      double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
    Validate.notNull(curveNames);
    Validate.notNull(curvesKnots);
    Validate.notNull(instruments);
    Validate.notNull(extrapolator);
    Validate.notNull(extrapolatorWithSense);
    int n = curveNames.size();
    Validate.isTrue(n == curvesKnots.size());
    // The fitting problem is square: one instrument per unknown curve node.
    int count = 0;
    for (int i = 0; i < n; i++) {
      Validate.notNull(curvesKnots.get(i));
      count += curvesKnots.get(i).length;
    }
    Validate.isTrue(count == instruments.size());
    LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
    LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
    LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
    // LinkedHashMaps preserve curve order, which fixes the layout of the
    // concatenated node vector consumed by the root finder.
    for (int i = 0; i < n; i++) {
      unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
      unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
      unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
    }
    if (curveYields == null) {
      return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
          unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
          marketValueSensitivityCalculator, marketRates, startPosition);
    }
    Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
    HashMap<String, double[]> yields = new HashMap<String, double[]>();
    for (int i = 0; i < n; i++) {
      yields.put(curveNames.get(i), curveYields.get(i));
    }
    return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
        unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
        startPosition, yields);
  }

  /**
   * Runs the curve-finding test {@code _hotspotWarmupCycles} times untimed
   * (to let the JIT warm up), then {@code _benchmarkCycles} times under an
   * {@link OperationTimer} if benchmarking is enabled.
   *
   * @param rootFinder the root finder under test
   * @param data the fitting problem
   * @param name label used in the timing log message
   */
  public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
    for (int i = 0; i < _hotspotWarmupCycles; i++) {
      doTestForCurveFinding(rootFinder, data);
    }
    if (_benchmarkCycles > 0) {
      final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
      for (int i = 0; i < _benchmarkCycles; i++) {
        doTestForCurveFinding(rootFinder, data);
      }
      timer.finished();
    }
  }

  /**
   * Solves the fitting problem and asserts: (1) the residual at the root is
   * ~0, (2) the fitted curves reprice every instrument at its market rate,
   * and (3) if true yields are supplied, the fitted node yields match them.
   */
  private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
    Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
        .getMarketValueCalculator());
    Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
    if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
      jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
    } else if (data.getTestType() == TestType.FD_JACOBIAN) {
      final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
      jac = fdJacCalculator.derivative(func);
    } else {
      throw new IllegalArgumentException("unknown TestType " + data.getTestType());
    }
    final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
    final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
    for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
      assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
    }
    // Rebuild the fitted curves from the solved node vector.
    HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
    final YieldCurveBundle bundle = new YieldCurveBundle();
    for (String name : data.getCurveNames()) {
      YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
          .getInterpolatorForCurve(name));
      bundle.setCurve(name, curve);
    }
    if (data.getKnownCurves() != null) {
      bundle.addAll(data.getKnownCurves());
    }
    //this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
    //the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
    for (int i = 0; i < data.getMarketRates().length; i++) {
      assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
          EPS);
    }
    //this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
    if (data.getCurveYields() != null) {
      for (String name : data.getCurveNames()) {
        double[] trueYields = data.getCurveYields().get(name);
        double[] fittedYields = yields.get(name);
        for (int i = 0; i < trueYields.length; i++) {
          assertEquals(trueYields[i], fittedYields[i], EPS);
        }
      }
    }
  }

  /**
   * Splits the concatenated root-finder solution vector back into one yield
   * array per curve, in the curve order fixed by the data bundle.
   */
  private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
      DoubleMatrix1D yieldCurveNodes) {
    HashMap<String, double[]> res = new HashMap<String, double[]>();
    int start = 0;
    int end = 0;
    for (String name : data.getCurveNames()) {
      end += data.getCurveNodePointsForCurve(name).length;
      double[] temp = Arrays.copyOfRange(yieldCurveNodes.getData(), start, end);
      res.put(name, temp);
      start = end;
    }
    return res;
  }

  /**
   * Checks the analytic Jacobian against a finite-difference Jacobian at the
   * bundle's start position, entry by entry to a tolerance of 1e-6.
   */
  public void testJacobian(YieldCurveFittingTestDataBundle data) {
    MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
    MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
        .getMarketValueSensitivityCalculator());
    final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
    final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
    final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
    assertMatrixEquals(jacExact, jacFD, 1e-6);
  }

  /**
   * Builds an interpolated yield curve from parallel arrays of node yields
   * and node times.
   *
   * @throws IllegalArgumentException if the arrays differ in length
   */
  protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times,
      final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
    final int n = yields.length;
    if (n != times.length) {
      throw new IllegalArgumentException("rates and times different lengths");
    }
    return new InterpolatedYieldCurve(times, yields, interpolator);
  }

  /**
   * Returns a copy of {@code old} with its instrument list replaced; all
   * other bundle components are carried over unchanged.
   */
  protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
      final List<InterestRateDerivative> instruments) {
    return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
        old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
  }

  /**
   * Dispatches to the appropriate instrument factory by type keyword
   * ("cash", "libor", "fra", "future" or "swap"); the returned instrument is
   * already set at its par rate with respect to {@code curves}.
   *
   * @throws IllegalArgumentException for an unrecognised type keyword
   */
  protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
      final String indexCurveName, final YieldCurveBundle curves) {
    if ("cash".equals(type)) {
      return makeCash(maturity, fundCurveName, curves);
    } else if ("libor".equals(type)) {
      return makeLibor(maturity, indexCurveName, curves);
    } else if ("fra".equals(type)) {
      return makeFRA(maturity, fundCurveName, indexCurveName, curves);
    } else if ("future".equals(type)) {
      return makeFutrure(maturity, indexCurveName, curves);
    } else if ("swap".equals(type)) {
      return makeSwap(maturity, fundCurveName, indexCurveName, curves);
    }
    throw new IllegalArgumentException("unknown IRD type " + type);
  }

  /** Creates a cash deposit of the given maturity set at its par rate. */
  protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
      final YieldCurveBundle curves) {
    // Price a zero-rate instrument first, then rebuild it at the implied par rate.
    InterestRateDerivative ird = new Cash(time, 0.0, fundCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new Cash(time, rate, fundCurveName);
  }

  /** Creates a Libor deposit of the given maturity set at its par rate. */
  protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
      final YieldCurveBundle curves) {
    InterestRateDerivative ird = new Libor(time, 0.0, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new Libor(time, rate, indexCurveName);
  }

  /**
   * Creates a 3-month (0.25 year) FRA maturing at {@code time}, set at its
   * par rate.
   */
  protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
      final String indexCurveName, final YieldCurveBundle curves) {
    InterestRateDerivative ird = new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new ForwardRateAgreement(time - 0.25, time, rate, fundCurveName, indexCurveName);
  }

  /**
   * Creates a 3-month interest-rate future starting at {@code time}, set at
   * its par rate.
   * <p>
   * NOTE(review): the name is a long-standing typo ("Futrure"); it is kept
   * because {@link #makeIRD} and potentially subclasses dispatch to it.
   */
  protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
      final YieldCurveBundle curves) {
    InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
  }

  /**
   * Creates a fixed-for-floating swap of approximately {@code time} years
   * maturity by rounding to the nearest whole number of semi-annual fixed
   * payments.
   */
  protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
      final YieldCurveBundle curves) {
    final int index = (int) Math.round(2 * time);
    return makeSwap(index, fundCurveName, liborCurveName, curves);
  }

  /**
   * Creates a fixed-for-floating swap with {@code payments} semi-annual fixed
   * payments (and twice as many quarterly floating payments), set at its par
   * rate. Payment, fixing and maturity times are jittered by up to ~2 days
   * (sigma = 4/365) using the class's seeded RNG to avoid perfectly regular
   * schedules.
   *
   * @param payments number of semi-annual fixed payments
   * @param fundingCurveName curve used to discount both legs
   * @param liborCurveName curve used to project the floating leg
   * @param curves curve bundle used to imply the par swap rate
   * @return the par swap
   */
  protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
      final String liborCurveName, final YieldCurveBundle curves) {
    final double[] fixed = new double[payments];
    final double[] floating = new double[2 * payments];
    final double[] indexFixing = new double[2 * payments];
    final double[] indexMaturity = new double[2 * payments];
    final double[] yearFrac = new double[2 * payments];
    final double sigma = 4.0 / 365.0;
    // Fixed payments are semi-annual; every second floating payment coincides
    // with a fixed payment date.
    for (int i = 0; i < payments; i++) {
      fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      floating[2 * i + 1] = fixed[i];
    }
    for (int i = 0; i < 2 * payments; i++) {
      if (i % 2 == 0) {
        floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
      }
      yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
      // The first fixing is jittered forward only so it cannot go negative.
      indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
      indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
    }
    final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
    final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
        fundingCurveName, liborCurveName);
    // Imply the par rate from a zero-coupon version of the swap, then rebuild
    // the fixed leg at that rate.
    InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
    double rate = ParRateCalculator.getInstance().getValue(ird, curves);
    ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
        fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
    return new FixedFloatSwap(newLeg, floatingLeg);
  }

  /**
   * Asserts that two matrices have identical dimensions and that every pair
   * of corresponding entries agrees to within {@code eps}.
   *
   * @param m1 the expected matrix
   * @param m2 the actual matrix
   * @param eps absolute tolerance applied entry-by-entry
   */
  protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
    final int rows = m1.getNumberOfRows();
    final int cols = m1.getNumberOfColumns();
    assertEquals(m2.getNumberOfRows(), rows);
    assertEquals(m2.getNumberOfColumns(), cols);
    // Bug fix: the original iterated i over the COLUMN count and j over the
    // ROW count but indexed getEntry(i, j) as (row, column) — only correct
    // for square matrices. Iterate rows with the row index instead.
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
      }
    }
  }
}
MergeMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
<<<<<<< MINE
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
for (int i = 0; i < _hotspotWarmupCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
timer.finished();
}
}
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
final Boolean doubleCurveTest) {
if (doubleCurveTest) {
doTestForDoubleCurve(rootFinder, function, jacobianFunction);
} else {
doTestForSingleCurve(rootFinder, function, jacobianFunction);
}
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
for (int i = 0; i < _hotspotWarmupCycles; i++) {
doTestForCurveFinding(rootFinder, data);
}
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
doTestForCurveFinding(rootFinder, data);
}
timer.finished();
}
}
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted yields into one array per curve, sliced in
 * the order the curve names appear in the data bundle.
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
DoubleMatrix1D yieldCurveNodes) {
final HashMap<String, double[]> yieldsByCurve = new HashMap<String, double[]>();
int from = 0;
for (String curveName : data.getCurveNames()) {
final int to = from + data.getCurveNodePointsForCurve(curveName).length;
yieldsByCurve.put(curveName, Arrays.copyOfRange(yieldCurveNodes.getData(), from, to));
from = to;
}
return yieldsByCurve;
}
/**
 * Checks the analytic Jacobian of the curve-finding function against a
 * finite-difference approximation, both evaluated at the start position.
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
final MultipleYieldCurveFinderFunction curveFunction =
new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
final MultipleYieldCurveFinderJacobian analyticJacobian =
new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
final Function1D<DoubleMatrix1D, DoubleMatrix2D> fdJacobian =
new VectorFieldFirstOrderDifferentiator().derivative(curveFunction);
final DoubleMatrix2D exact = analyticJacobian.evaluate(data.getStartPosition());
final DoubleMatrix2D approx = fdJacobian.evaluate(data.getStartPosition());
assertMatrixEquals(exact, approx, 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel arrays of times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
if (yields.length != times.length) {
throw new IllegalArgumentException("rates and times different lengths");
}
return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
>>>>>>> YOURS
/**
 * Builds a fixed-for-floating swap with {@code payments} semi-annual fixed
 * payments against quarterly floating payments.  Every schedule time is
 * jittered by up to +/- sigma/2 so curve node points never coincide exactly
 * across generated instruments.
 */
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// roughly four calendar days expressed in years; jitter width for all times
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
// fixed payments near 0.5, 1.0, 1.5, ... years
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
// every second floating payment time coincides with a fixed payment time
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
// the first fixing is shifted forward only, keeping it non-negative
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Copies {@code old} with its instrument list replaced; all other bundle
 * contents are carried over unchanged.
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(
instruments,
old.getKnownCurves(),
old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(),
old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory for a par-rate instrument of the given type ("cash", "libor", "fra",
 * "future" or "swap") with the given maturity.
 *
 * @throws IllegalArgumentException for an unrecognised type
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
// literal-first equals() keeps each comparison null-safe
if ("cash".equals(type)) {
return makeCash(maturity, fundCurveName, curves);
}
if ("libor".equals(type)) {
return makeLibor(maturity, indexCurveName, curves);
}
if ("fra".equals(type)) {
return makeFRA(maturity, fundCurveName, indexCurveName, curves);
}
if ("future".equals(type)) {
return makeFutrure(maturity, indexCurveName, curves);
}
if ("swap".equals(type)) {
return makeSwap(maturity, fundCurveName, indexCurveName, curves);
}
throw new IllegalArgumentException("unknown IRD type " + type);
}
/**
 * Creates a cash deposit whose rate equals its par rate under {@code curves}.
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
final YieldCurveBundle curves) {
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate = ParRateCalculator.getInstance().getValue(new Cash(time, 0.0, fundCurveName), curves);
return new Cash(time, parRate, fundCurveName);
}
/**
 * Creates a Libor deposit whose rate equals its par rate under {@code curves}.
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate = ParRateCalculator.getInstance().getValue(new Libor(time, 0.0, indexCurveName), curves);
return new Libor(time, parRate, indexCurveName);
}
/**
 * Creates a 3M FRA settling at {@code time} whose rate equals its par rate
 * under {@code curves}.
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
final double start = time - 0.25;
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate =
ParRateCalculator.getInstance().getValue(new ForwardRateAgreement(start, time, 0.0, fundCurveName, indexCurveName), curves);
return new ForwardRateAgreement(start, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Creates a 3M interest-rate future starting at {@code time} whose rate is its
 * par rate under {@code curves}.
 * NOTE(review): the name is a typo for "makeFuture", but it is part of the
 * protected interface (called from makeIRD), so it is left unchanged here.
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Convenience overload: converts a maturity in years into the number of
 * semi-annual fixed payments and delegates to the integer version.
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
final YieldCurveBundle curves) {
final int payments = (int) Math.round(2 * time);
return makeSwap(payments, fundCurveName, liborCurveName, curves);
}
/**
 * Asserts that two matrices have the same shape and are entry-wise equal to
 * within {@code eps}.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
final int rows = m1.getNumberOfRows();
final int cols = m1.getNumberOfColumns();
assertEquals(m2.getNumberOfRows(), rows);
assertEquals(m2.getNumberOfColumns(), cols);
// BUG FIX: the original iterated i over columns and j over rows while calling
// getEntry(i, j) as (row, column); for non-square matrices that reads out of
// bounds or skips entries. Iterate rows in the outer loop, columns inner.
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
}
}
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
<<<<<<< MINE
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
/**
 * Convenience overload: runs the hot-spot benchmark as a single-curve test.
 */
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
final boolean doubleCurve = false;
doHotSpot(rootFinder, name, functor, jacobianFunction, doubleCurve);
}
/**
 * Warms up the JIT with _hotspotWarmupCycles untimed solves, then (when
 * benchmarking is enabled) times _benchmarkCycles further solves.
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
for (int warmup = 0; warmup < _hotspotWarmupCycles; warmup++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
if (_benchmarkCycles <= 0) {
return; // benchmarking disabled
}
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
timer.finished();
}
/**
 * Dispatches one solve to the single- or double-curve test.
 * NOTE(review): doubleCurveTest is a boxed Boolean; a null argument would NPE
 * on unboxing -- callers in this class always pass a non-null value.
 */
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
final Boolean doubleCurveTest) {
if (!doubleCurveTest) {
doTestForSingleCurve(rootFinder, function, jacobianFunction);
} else {
doTestForDoubleCurve(rootFinder, function, jacobianFunction);
}
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
/**
 * Solves the two-curve problem from _startPosition and asserts every
 * model-vs-market difference is zero to within EPS.  The yield-level checks
 * are currently commented out.
 * NOTE(review): _startPosition and _curve1Knots are not declared in the
 * visible part of this class -- presumably subclass fields; confirm before
 * re-enabling the disabled assertions.
 */
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Split the solution vector: the first _curve1Knots.length entries belong to
// the funding curve, the remainder to the libor curve.  Unused until the
// assertions below are re-enabled.
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
/**
 * Disabled check of the double-curve analytic Jacobian against finite
 * differences; retained for reference while the API migration completes.
 */
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Validates the inputs and assembles a YieldCurveFittingTestDataBundle.
 * Curve names, knots, interpolators and sensitivity calculators are keyed per
 * curve (insertion order preserved via LinkedHashMap).  When
 * {@code curveYields} is non-null it supplies the known true yields, enabling
 * the yield-level assertions in doTestForCurveFinding.
 *
 * @throws IllegalArgumentException if any required input is null, the name and
 *         knot lists disagree in size, or the total knot count does not equal
 *         the number of instruments
 */
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
// the fitting problem must be square: one instrument per curve node
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
// no reference yields available: build the bundle without them
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
/**
 * Data-bundle variant of the hot-spot benchmark: warms up the JIT, then (when
 * benchmarking is enabled) times repeated curve-finding solves.
 */
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
for (int warmup = 0; warmup < _hotspotWarmupCycles; warmup++) {
doTestForCurveFinding(rootFinder, data);
}
if (_benchmarkCycles <= 0) {
return; // benchmarking disabled
}
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
doTestForCurveFinding(rootFinder, data);
}
timer.finished();
}
/**
 * Runs one full curve-fitting solve and checks it three ways: (1) the
 * residuals at the root are zero to within EPS; (2) the fitted curves
 * re-price every instrument at its market par rate; (3) when reference
 * yields are supplied, the fitted node yields match them.
 */
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
// choose analytic or finite-difference Jacobian per the bundle's test type
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
// (1) residuals at the root must vanish
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// rebuild the fitted curves from the solution vector
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted yields into one array per curve, sliced in
 * the order the curve names appear in the data bundle.
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
DoubleMatrix1D yieldCurveNodes) {
final HashMap<String, double[]> yieldsByCurve = new HashMap<String, double[]>();
int from = 0;
for (String curveName : data.getCurveNames()) {
final int to = from + data.getCurveNodePointsForCurve(curveName).length;
yieldsByCurve.put(curveName, Arrays.copyOfRange(yieldCurveNodes.getData(), from, to));
from = to;
}
return yieldsByCurve;
}
/**
 * Checks the analytic Jacobian of the curve-finding function against a
 * finite-difference approximation, both evaluated at the start position.
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
final MultipleYieldCurveFinderFunction curveFunction =
new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
final MultipleYieldCurveFinderJacobian analyticJacobian =
new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
final Function1D<DoubleMatrix1D, DoubleMatrix2D> fdJacobian =
new VectorFieldFirstOrderDifferentiator().derivative(curveFunction);
final DoubleMatrix2D exact = analyticJacobian.evaluate(data.getStartPosition());
final DoubleMatrix2D approx = fdJacobian.evaluate(data.getStartPosition());
assertMatrixEquals(exact, approx, 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel arrays of times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
if (yields.length != times.length) {
throw new IllegalArgumentException("rates and times different lengths");
}
return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
>>>>>>> YOURS
/**
 * Builds a fixed-for-floating swap with {@code payments} semi-annual fixed
 * payments against quarterly floating payments.  Every schedule time is
 * jittered by up to +/- sigma/2 so curve node points never coincide exactly
 * across generated instruments.
 */
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// roughly four calendar days expressed in years; jitter width for all times
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
// fixed payments near 0.5, 1.0, 1.5, ... years
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
// every second floating payment time coincides with a fixed payment time
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
// the first fixing is shifted forward only, keeping it non-negative
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Copies {@code old} with its instrument list replaced; all other bundle
 * contents are carried over unchanged.
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(
instruments,
old.getKnownCurves(),
old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(),
old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory for a par-rate instrument of the given type ("cash", "libor", "fra",
 * "future" or "swap") with the given maturity.
 *
 * @throws IllegalArgumentException for an unrecognised type
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
// literal-first equals() keeps each comparison null-safe
if ("cash".equals(type)) {
return makeCash(maturity, fundCurveName, curves);
}
if ("libor".equals(type)) {
return makeLibor(maturity, indexCurveName, curves);
}
if ("fra".equals(type)) {
return makeFRA(maturity, fundCurveName, indexCurveName, curves);
}
if ("future".equals(type)) {
return makeFutrure(maturity, indexCurveName, curves);
}
if ("swap".equals(type)) {
return makeSwap(maturity, fundCurveName, indexCurveName, curves);
}
throw new IllegalArgumentException("unknown IRD type " + type);
}
/**
 * Creates a cash deposit whose rate equals its par rate under {@code curves}.
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
final YieldCurveBundle curves) {
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate = ParRateCalculator.getInstance().getValue(new Cash(time, 0.0, fundCurveName), curves);
return new Cash(time, parRate, fundCurveName);
}
/**
 * Creates a Libor deposit whose rate equals its par rate under {@code curves}.
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate = ParRateCalculator.getInstance().getValue(new Libor(time, 0.0, indexCurveName), curves);
return new Libor(time, parRate, indexCurveName);
}
/**
 * Creates a 3M FRA settling at {@code time} whose rate equals its par rate
 * under {@code curves}.
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
final double start = time - 0.25;
// price a zero-rate instrument to discover the par rate, then rebuild at that rate
final double parRate =
ParRateCalculator.getInstance().getValue(new ForwardRateAgreement(start, time, 0.0, fundCurveName, indexCurveName), curves);
return new ForwardRateAgreement(start, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Creates a 3M interest-rate future starting at {@code time} whose rate is its
 * par rate under {@code curves}.
 * NOTE(review): the name is a typo for "makeFuture", but it is part of the
 * protected interface (called from makeIRD), so it is left unchanged here.
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Convenience overload: converts a maturity in years into the number of
 * semi-annual fixed payments and delegates to the integer version.
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
final YieldCurveBundle curves) {
final int payments = (int) Math.round(2 * time);
return makeSwap(payments, fundCurveName, liborCurveName, curves);
}
/**
 * Asserts that two matrices have the same shape and are entry-wise equal to
 * within {@code eps}.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
final int rows = m1.getNumberOfRows();
final int cols = m1.getNumberOfColumns();
assertEquals(m2.getNumberOfRows(), rows);
assertEquals(m2.getNumberOfColumns(), cols);
// BUG FIX: the original iterated i over columns and j over rows while calling
// getEntry(i, j) as (row, column); for non-square matrices that reads out of
// bounds or skips entries. Iterate rows in the outer loop, columns inner.
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
}
}
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
<<<<<<< MINE
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
/**
 * Hook for exercising a root finder against the single- and double-curve fitting problems.
 * NOTE(review): the entire body is commented out, so this method currently does nothing;
 * the parameters are kept (and marked unused) so existing subclass callers still compile.
 * The commented calls reference SINGLE_/DOUBLE_CURVE_* members that are not declared in
 * this version of the class — confirm before re-enabling.
 */
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
@SuppressWarnings("unused")
// Convenience overload: runs the hot-spot cycle as a single-curve test.
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
  this.doHotSpot(rootFinder, name, functor, jacobianFunction, Boolean.FALSE);
}
// Runs warm-up cycles to let the JIT settle, then (optionally) a timed benchmark run.
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
  for (int cycle = 0; cycle < _hotspotWarmupCycles; cycle++) {
    doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
  }
  if (_benchmarkCycles <= 0) {
    return; // benchmarking disabled
  }
  final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
  for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
    doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
  }
  timer.finished();
}
// Routes one fitting run to the single- or double-curve variant.
// (doubleCurveTest is auto-unboxed, exactly as before.)
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
    final Boolean doubleCurveTest) {
  if (!doubleCurveTest) {
    doTestForSingleCurve(rootFinder, function, jacobianFunction);
  } else {
    doTestForDoubleCurve(rootFinder, function, jacobianFunction);
  }
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
/**
 * Solves the two-curve fitting problem with the given root finder and asserts that the
 * model/market differences at the solution are zero to within EPS.
 * NOTE(review): relies on fields _startPosition and _curve1Knots that are not declared in
 * this version of the class — confirm they exist in the merged result. The yield-curve
 * reconstruction checks at the end are commented out.
 *
 * @param rootFinder the Newton-type vector root finder under test
 * @param f the curve-finding function (market-value differences)
 * @param j its Jacobian
 */
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
// At the root, every instrument's model-vs-market difference should vanish.
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Split the flat solution vector into the funding-curve and libor-curve yields
// (currently unused because the downstream checks are disabled).
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
/**
 * Hook for checking the analytic double-curve Jacobian against finite-difference variants.
 * NOTE(review): the entire body is commented out, so this method currently does nothing;
 * the parameter is kept (marked unused) so existing callers still compile. The commented
 * code references DOUBLE_CURVE_* members not declared in this version of the class.
 */
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Assembles a test data bundle for curve fitting: validates the inputs, maps each curve
 * name to its knots, interpolator and sensitivity calculator (in curve-name order, via
 * LinkedHashMap), and optionally attaches the known "true" yields for each curve.
 *
 * @param instruments the instruments to fit; their count must equal the total knot count
 * @param knownCurves curves held fixed during fitting (may be null)
 * @param curveNames names of the curves to fit, one per knot array
 * @param curvesKnots knot (node time) arrays, parallel to curveNames
 * @param extrapolator interpolator/extrapolator shared by all fitted curves
 * @param extrapolatorWithSense its node-sensitivity counterpart
 * @param marketValueCalculator calculator producing the fitting function values
 * @param marketValueSensitivityCalculator calculator producing the Jacobian entries
 * @param marketRates observed market rates for the instruments
 * @param startPosition root-finder starting vector
 * @param curveYields known true yields per curve, or null when starting from market data
 * @return the populated test data bundle
 * @throws IllegalArgumentException if any required input is null or the sizes disagree
 */
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
// The fitting problem is square: total number of knots must match instrument count.
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
// Without known true yields, build the bundle from market data alone.
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
/**
 * Runs warm-up cycles of the curve-finding test to let the JIT settle, then (when
 * benchmarking is enabled) a timed run of the same test.
 *
 * @param rootFinder the root finder under test
 * @param data the fitting problem definition
 * @param name label used in the timing log message
 */
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
  for (int cycle = 0; cycle < _hotspotWarmupCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  if (_benchmarkCycles <= 0) {
    return; // benchmarking disabled
  }
  final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
  for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  timer.finished();
}
/**
 * Runs one complete curve-fitting test: builds the finder function and Jacobian (analytic
 * or finite-difference, per the bundle's TestType), solves for the curve nodes, then
 * asserts (1) the residuals vanish, (2) the fitted curves reprice every instrument at its
 * market rate, and (3) when true yields are supplied, the fitted yields match them.
 *
 * @param rootFinder the root finder under test
 * @param data the fitting problem definition
 */
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
// Choose the Jacobian: analytic (sensitivity calculator) or finite-difference of func.
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
// At the root, every model-vs-market difference should be zero.
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Rebuild the yield curves from the solution vector and collect them in a bundle.
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat solution vector back into one yield array per curve, slicing in
 * curve-name order with each slice sized by that curve's node count.
 *
 * @param data supplies the curve names and node points
 * @param yieldCurveNodes the concatenated solution vector
 * @return map from curve name to its yields
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
    DoubleMatrix1D yieldCurveNodes) {
  final HashMap<String, double[]> curveToYields = new HashMap<String, double[]>();
  final double[] flat = yieldCurveNodes.getData();
  int offset = 0;
  for (final String curveName : data.getCurveNames()) {
    final int nodeCount = data.getCurveNodePointsForCurve(curveName).length;
    curveToYields.put(curveName, Arrays.copyOfRange(flat, offset, offset + nodeCount));
    offset += nodeCount;
  }
  return curveToYields;
}
/**
 * Checks the analytic Jacobian against a finite-difference approximation of the finder
 * function, evaluated at the bundle's start position, to a 1e-6 tolerance.
 *
 * @param data the fitting problem definition
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
  final MultipleYieldCurveFinderFunction curveFinder =
      new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
  final MultipleYieldCurveFinderJacobian analyticJacobian =
      new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
  final Function1D<DoubleMatrix1D, DoubleMatrix2D> fdJacobian =
      new VectorFieldFirstOrderDifferentiator().derivative(curveFinder);
  final DoubleMatrix1D startPosition = data.getStartPosition();
  assertMatrixEquals(analyticJacobian.evaluate(startPosition), fdJacobian.evaluate(startPosition), 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel node-time and yield arrays.
 *
 * @param yields yields at the nodes
 * @param times node times; must pair one-to-one with yields
 * @param interpolator interpolation scheme between nodes
 * @return the interpolated curve
 * @throws IllegalArgumentException if the arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
  if (yields.length != times.length) {
    throw new IllegalArgumentException("rates and times different lengths");
  }
  return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
>>>>>>> YOURS
/**
 * Builds a fixed-for-floating swap with a semi-annual fixed leg and quarterly floating
 * leg whose payment/fixing/maturity times and year fractions are jittered by uniform
 * noise of magnitude sigma (~4 days) around their nominal quarterly grid.
 * NOTE: the exact sequence of RANDOM.nextDouble() draws is part of the test's
 * reproducibility (RANDOM is seeded deterministically) — do not reorder the loops.
 *
 * @param payments number of fixed-leg payments (floating leg has twice as many)
 * @param fundingCurveName curve name for discounting
 * @param liborCurveName curve name for the floating-rate index
 * @return the swap with a zero fixed coupon
 */
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// sigma is roughly four calendar days expressed in years.
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
// Odd floating payment dates coincide with the fixed payment dates.
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
// First fixing is jittered only forward so it cannot go negative.
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Rebuilds a finder data bundle with a new instrument list, carrying over every other
 * component (known curves, node points, interpolators, sensitivity calculators).
 *
 * @param old the bundle to copy configuration from
 * @param instruments the replacement instrument list
 * @return a new bundle with the given instruments
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
    final List<InterestRateDerivative> instruments) {
  final YieldCurveBundle knownCurves = old.getKnownCurves();
  return new MultipleYieldCurveFinderDataBundle(instruments, knownCurves, old.getUnknownCurveNodePoints(),
      old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory dispatching on an instrument-type label to the matching make* helper.
 * Recognised labels: "cash", "libor", "fra", "future", "swap".
 *
 * @param type instrument-type label
 * @param maturity maturity in years
 * @param fundCurveName funding/discount curve name
 * @param indexCurveName index (forward) curve name
 * @param curves curve bundle used to put each instrument on-market
 * @return the on-market instrument
 * @throws IllegalArgumentException if the label is not recognised
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  if ("cash".equals(type)) {
    return makeCash(maturity, fundCurveName, curves);
  }
  if ("libor".equals(type)) {
    return makeLibor(maturity, indexCurveName, curves);
  }
  if ("fra".equals(type)) {
    return makeFRA(maturity, fundCurveName, indexCurveName, curves);
  }
  if ("future".equals(type)) {
    return makeFutrure(maturity, indexCurveName, curves);
  }
  if ("swap".equals(type)) {
    return makeSwap(maturity, fundCurveName, indexCurveName, curves);
  }
  throw new IllegalArgumentException("unknown IRD type " + type);
}
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
    final YieldCurveBundle curves) {
  // Build a zero-rate deposit, read its par rate off the curves, return the on-market deposit.
  final Cash zeroRateDeposit = new Cash(time, 0.0, fundCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroRateDeposit, curves);
  return new Cash(time, parRate, fundCurveName);
}
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
  // Build a zero-rate Libor deposit, read its par rate off the curves, return it on-market.
  final Libor zeroRateLibor = new Libor(time, 0.0, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroRateLibor, curves);
  return new Libor(time, parRate, indexCurveName);
}
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  // Price a zero-strike 3M FRA, then return the same FRA struck at its par rate.
  final ForwardRateAgreement zeroStrikeFra =
      new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroStrikeFra, curves);
  return new ForwardRateAgreement(time - 0.25, time, parRate, fundCurveName, indexCurveName);
}
// NOTE(review): "makeFutrure" is a typo for "makeFuture"; the name is kept because
// callers reference it.
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
  // Build a 3M future at zero rate, then re-strike it at its par rate.
  final InterestRateFuture zeroRateFuture =
      new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroRateFuture, curves);
  return new InterestRateFuture(time, time + 0.25, 0.25, parRate, indexCurveName);
}
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
    final YieldCurveBundle curves) {
  // A maturity of t years maps to round(2 * t) semi-annual fixed payments.
  final int semiAnnualPayments = (int) Math.round(2 * time);
  return makeSwap(semiAnnualPayments, fundCurveName, liborCurveName, curves);
}
// protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
// VariableAnnuity floatingLeg = swap.getFloatingLeg();
// ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
// ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
// fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
// return new FixedFloatSwap(newLeg, floatingLeg);
// }
/**
 * Builds a fixed-for-floating swap with jittered quarterly/semi-annual schedules (same
 * construction as setupSwap), then re-strikes the fixed leg at the swap's par rate so the
 * returned swap is on-market with respect to the given curves.
 * NOTE: the exact sequence of RANDOM.nextDouble() draws is part of the test's
 * reproducibility — do not reorder the loops.
 *
 * @param payments number of fixed-leg payments (floating leg has twice as many)
 * @param fundingCurveName curve name for discounting
 * @param liborCurveName curve name for the floating-rate index
 * @param curves curve bundle used to compute the par swap rate
 * @return an on-market fixed-float swap
 */
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// sigma is roughly four calendar days expressed in years.
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
// Odd floating payment dates coincide with the fixed payment dates.
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
// First fixing is jittered only forward so it cannot go negative.
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
// Price the zero-coupon swap, then rebuild the fixed leg at the par rate.
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
/**
 * Asserts two matrices have the same dimensions and entry-wise equal values to within eps.
 *
 * @param m1 expected matrix
 * @param m2 actual matrix
 * @param eps absolute tolerance per entry
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  // BUG FIX: the original looped the first index over the COLUMN count and the second over
  // the ROW count while calling getEntry(i, j) as (row, col) — out of bounds for any
  // non-square matrix. Iterate rows with the first index; square-matrix behavior unchanged.
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
// Merge conflict resolved: removed five "MINE" hunks (setupExtrapolator,
// getSingleYieldCurveFinderDataBundle, getDoubleYieldCurveFinderDataBundle,
// setupSingleCurveFinder, setupDoubleCurveFinder). The incoming side of each
// conflict was empty (the methods were deleted there), and the hunks referenced
// members (EXTRAPOLATOR, _curve1Name, SINGLE_CURVE_FINDER, ...) and factory
// classes that are not declared in the visible class body, so retaining them
// would not compile. NOTE(review): confirm against the repository history that
// the incoming deletion is the intended resolution.
/**
 * Entry point intended to exercise a root finder against the single- and
 * double-curve fitting problems.
 * NOTE(review): the entire body is commented out, so this method currently
 * verifies nothing; both parameters are marked unused accordingly.
 */
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
  // Convenience overload: delegates with doubleCurveTest = false (single-curve case).
  doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
/**
 * Runs the fitting test repeatedly: first untimed warm-up cycles, then, when
 * benchmarking is enabled, timed cycles reported through an {@code OperationTimer}.
 *
 * @param rootFinder root finder under test
 * @param name label used in the timer log message
 * @param function the market-value difference function whose root is sought
 * @param jacobianFunction Jacobian of {@code function}
 * @param doubleCurveTest true selects the two-curve problem, false the one-curve
 *   problem; boxed Boolean — a null here would NPE on unboxing
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
// Warm-up cycles: run the test without timing it.
for (int i = 0; i < _hotspotWarmupCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
// Benchmark cycles are only timed when explicitly configured (> 0).
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
timer.finished();
}
}
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
    final Boolean doubleCurveTest) {
  // Dispatch to the variant matching the requested fitting problem.
  if (!doubleCurveTest) {
    doTestForSingleCurve(rootFinder, function, jacobianFunction);
  } else {
    doTestForDoubleCurve(rootFinder, function, jacobianFunction);
  }
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
/**
 * Fits the two-curve problem and asserts the root finder drove every model/market
 * value difference to zero within EPS, then slices the solution vector into the two
 * curves' yields (curve-1 knots first, then the remainder). The downstream yield
 * checks are currently commented out, so the slices are only computed (marked unused).
 */
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Solution layout: first _curve1Knots.length entries belong to curve 1, the rest to curve 2.
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
/**
 * Jacobian consistency test for the double-curve problem (analytic vs finite
 * difference). NOTE(review): the body is fully commented out, so this method
 * currently verifies nothing.
 */
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Assembles a {@code YieldCurveFittingTestDataBundle} from parallel lists of curve
 * names and knot arrays, using the same extrapolator and sensitivity calculator for
 * every curve. Requires the total number of knots to equal the number of
 * instruments. When {@code curveYields} is supplied it must have one entry per
 * curve and is packed into the bundle so fitted yields can later be compared
 * against the known "true" yields.
 *
 * @param instruments instruments to fit against (one per knot in total)
 * @param knownCurves already-known curves, may be null
 * @param curveNames names of the curves being fitted (ordered)
 * @param curvesKnots knot times per curve, parallel to {@code curveNames}
 * @param extrapolator interpolator/extrapolator applied to every curve
 * @param extrapolatorWithSense node-sensitivity calculator applied to every curve
 * @param marketValueCalculator calculator producing the fitted quantity
 * @param marketValueSensitivityCalculator calculator producing its curve sensitivities
 * @param marketRates target market rates
 * @param startPosition root-finder start vector
 * @param curveYields known true yields per curve, or null when starting from market data
 * @return the populated test data bundle
 */
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
// The fitting problem must be square: total knots == number of instruments.
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
// LinkedHashMaps preserve curve ordering, which fixes the layout of the solution vector.
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
/**
 * Runs the curve-finding test for warm-up cycles first (so timing reflects
 * steady-state, JIT-compiled code) and then, if configured, for timed benchmark cycles.
 */
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
  for (int cycle = 0; cycle < _hotspotWarmupCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  if (_benchmarkCycles <= 0) {
    return;
  }
  final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
  for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  timer.finished();
}
/**
 * Core fitting test: builds the finder function and a Jacobian (analytic or finite
 * difference, per the bundle's TestType), solves for the curve node yields, and then
 * asserts (1) the residuals are zero within EPS, (2) the fitted curves reprice the
 * instruments at their market rates, and (3) — when true yields are known — the
 * fitted yields match them.
 */
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
// Select the Jacobian implementation requested by the bundle.
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Rebuild the fitted curves from the solution vector and reprice the instruments.
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Slices the flat root-finder solution vector back into one yield array per curve,
 * consuming entries in the bundle's curve-name order.
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
    DoubleMatrix1D yieldCurveNodes) {
  final HashMap<String, double[]> yieldsByCurve = new HashMap<String, double[]>();
  int offset = 0;
  for (final String curveName : data.getCurveNames()) {
    final int numNodes = data.getCurveNodePointsForCurve(curveName).length;
    yieldsByCurve.put(curveName, Arrays.copyOfRange(yieldCurveNodes.getData(), offset, offset + numNodes));
    offset += numNodes;
  }
  return yieldsByCurve;
}
/**
 * Checks the analytic Jacobian of the curve-finding function against a
 * finite-difference approximation at the bundle's start position (tolerance 1e-6).
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
.getMarketValueSensitivityCalculator());
final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Builds an interpolated yield curve from matching arrays of times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
  if (yields.length != times.length) {
    throw new IllegalArgumentException("rates and times different lengths");
  }
  return new InterpolatedYieldCurve(times, yields, interpolator);
}
// Merge conflict resolved: dropped two "MINE" hunks whose incoming sides were empty.
// (1) upDateInstruments(...) — a typo-named duplicate of updateInstruments(...)
// defined later in this class; (2) setParSwapRate(...) — its logic is inlined in
// makeSwap(int, String, String, YieldCurveBundle) below, and a commented-out copy
// of it also remains further down the file.
/**
 * Builds a fixed-float swap with randomly perturbed payment times: {@code payments}
 * semi-annual fixed payments against {@code 2 * payments} quarterly floating
 * payments. The fixed coupon is left at 0.0.
 * NOTE(review): this duplicates the construction logic of
 * makeSwap(int, String, String, YieldCurveBundle) except for the final par-rate
 * adjustment — consider consolidating.
 *
 * @param payments number of fixed-leg payments
 * @param fundingCurveName name of the funding (discount) curve
 * @param liborCurveName name of the index (forward) curve
 * @return the constructed swap with a zero fixed rate
 */
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// sigma jitters each date by up to +/- 2 days (in years) so knots are not perfectly regular.
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
/**
 * Returns a copy of the data bundle with a new instrument list; every other setting
 * (known curves, node points, interpolators, sensitivity calculators) carries over.
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
    final List<InterestRateDerivative> instruments) {
  return new MultipleYieldCurveFinderDataBundle(instruments,
      old.getKnownCurves(),
      old.getUnknownCurveNodePoints(),
      old.getUnknownCurveInterpolators(),
      old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory dispatching on an instrument type label ("cash", "libor", "fra",
 * "future", "swap") to the corresponding builder.
 *
 * @throws IllegalArgumentException for an unrecognised type label
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  if ("cash".equals(type)) {
    return makeCash(maturity, fundCurveName, curves);
  }
  if ("libor".equals(type)) {
    return makeLibor(maturity, indexCurveName, curves);
  }
  if ("fra".equals(type)) {
    return makeFRA(maturity, fundCurveName, indexCurveName, curves);
  }
  if ("future".equals(type)) {
    return makeFutrure(maturity, indexCurveName, curves);
  }
  if ("swap".equals(type)) {
    return makeSwap(maturity, fundCurveName, indexCurveName, curves);
  }
  throw new IllegalArgumentException("unknown IRD type " + type);
}
/**
 * Builds a cash instrument at its par rate: prices a zero-rate prototype off the
 * curves, then rebuilds the instrument at the implied rate.
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
    final YieldCurveBundle curves) {
  final InterestRateDerivative prototype = new Cash(time, 0.0, fundCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(prototype, curves);
  return new Cash(time, parRate, fundCurveName);
}
/**
 * Builds a Libor instrument at its par rate: prices a zero-rate prototype off the
 * curves, then rebuilds the instrument at the implied rate.
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
  final InterestRateDerivative prototype = new Libor(time, 0.0, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(prototype, curves);
  return new Libor(time, parRate, indexCurveName);
}
/**
 * Builds a 3-month FRA maturing at {@code time} at its par rate: prices a zero-rate
 * prototype off the curves, then rebuilds the agreement at the implied rate.
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  final InterestRateDerivative prototype = new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(prototype, curves);
  return new ForwardRateAgreement(time - 0.25, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Builds a 3-month interest rate future starting at {@code time} at its par rate:
 * prices a zero-rate prototype off the curves, then rebuilds at the implied rate.
 * NOTE(review): the method name is misspelled ("Futrure"); it is kept because this
 * is a protected member and renaming would break subclasses/callers outside this file.
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Builds a par swap for a maturity given in years by converting it to the nearest
 * number of semi-annual fixed payments.
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
    final YieldCurveBundle curves) {
  final int numPayments = (int) Math.round(2 * time);
  return makeSwap(numPayments, fundCurveName, liborCurveName, curves);
}
// protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
// VariableAnnuity floatingLeg = swap.getFloatingLeg();
// ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
// ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
// fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
// return new FixedFloatSwap(newLeg, floatingLeg);
// }
/**
 * Builds a fixed-float swap with randomly perturbed payment times ({@code payments}
 * semi-annual fixed payments against quarterly floating payments), implies its par
 * rate from the supplied curves, and returns the swap rebuilt with that fixed rate.
 *
 * @param payments number of fixed-leg payments (the floating leg has twice as many)
 * @param fundingCurveName name of the funding (discount) curve
 * @param liborCurveName name of the index (forward) curve
 * @param curves curve bundle used to imply the par swap rate
 * @return a fixed-float swap struck at its par rate
 */
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
// sigma jitters each date by up to +/- 2 days (in years) so knots are not perfectly regular.
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
// Build the swap at a zero fixed rate, imply the par rate, then rebuild the fixed leg at par.
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
/**
 * Asserts two matrices have the same dimensions and element-wise equal entries
 * within {@code eps}.
 *
 * BUG FIX: the original loops ran {@code i < n} (columns) and {@code j < m} (rows)
 * but indexed {@code getEntry(i, j)} as (row, column) — correct only for square
 * matrices; for non-square matrices it either skipped rows or read out of bounds.
 * The loops now iterate rows then columns.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}
Safe
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
 * Abstract scaffolding shared by yield-curve-fitting root-finder tests: instrument
 * construction helpers, curve bundling, and hot-spot/benchmark test drivers.
 */
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
// Merge conflict resolved: removed three "MINE" calculator constants
// (PAR_RATE_DIFFERENCE_CALCULATOR, PAR_RATE_SENSITIVITY_CALCULATOR,
// PV_SENSITIVITY_CALCULATOR). Their types (ParRateDifferenceCalculator,
// ParRateCurveSensitivityCalculator, PresentValueSensitivityCalculator) are not
// imported by this file, and each conflict's incoming side was empty.
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
// Merge conflict resolved: removed five "MINE" hunks (setupExtrapolator,
// getSingleYieldCurveFinderDataBundle, getDoubleYieldCurveFinderDataBundle,
// setupSingleCurveFinder, setupDoubleCurveFinder). The incoming side of each
// conflict was empty (the methods were deleted there), and the hunks referenced
// members (_interolatorName, EXTRAPOLATOR, _curve1Name, SINGLE_CURVE_FINDER, ...)
// that are not declared in this class, plus factory classes
// (CombinedInterpolatorExtrapolatorFactory, ...) that are not imported, so keeping
// them would not compile.
/**
 * Entry point intended to exercise a root finder against the single- and
 * double-curve fitting problems.
 * NOTE(review): the entire body is commented out, so this method currently
 * verifies nothing; both parameters are marked unused accordingly.
 */
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
// Merge conflict resolved: took the "MINE" (empty) side, deleting the private
// doHotSpot(rootFinder, name, functor, jacobianFunction) convenience overload the
// incoming side retained. It was annotated @SuppressWarnings("unused") and nothing
// in the visible class calls it (only commented-out code references this pattern).
/**
 * Runs the fitting test repeatedly: first untimed warm-up cycles, then, when
 * benchmarking is enabled, timed cycles reported through an {@code OperationTimer}.
 *
 * @param rootFinder root finder under test
 * @param name label used in the timer log message
 * @param function the market-value difference function whose root is sought
 * @param jacobianFunction Jacobian of {@code function}
 * @param doubleCurveTest true selects the two-curve problem, false the one-curve
 *   problem; boxed Boolean — a null here would NPE on unboxing
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
// Warm-up cycles: run the test without timing it.
for (int i = 0; i < _hotspotWarmupCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
// Benchmark cycles are only timed when explicitly configured (> 0).
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
}
timer.finished();
}
}
/**
 * Dispatches one fitting run to the single- or double-curve test routine.
 *
 * @param doubleCurveTest true selects the double-curve problem
 */
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
    final Boolean doubleCurveTest) {
  if (!doubleCurveTest) {
    doTestForSingleCurve(rootFinder, function, jacobianFunction);
  } else {
    doTestForDoubleCurve(rootFinder, function, jacobianFunction);
  }
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
>>>>>>> YOURS
/**
 * Intended to compare the analytic double-curve Jacobian against finite-difference and
 * FD-interpolator-sensitivity versions at {@code position}. The entire body is commented
 * out, so this method is currently a no-op.
 * NOTE(review): re-enable or delete — confirm intent.
 */
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Assembles a test data bundle describing a multi-curve fitting problem: one interpolator,
 * knot set and sensitivity calculator per named curve, plus the instruments, market rates
 * and solver start position. If {@code curveYields} is supplied (the "true" yields the fit
 * should recover), it is attached to the bundle; otherwise a bundle without known yields is
 * returned.
 *
 * @param curveYields may be null; when non-null must have one entry per curve name
 * @throws IllegalArgumentException if list sizes are inconsistent (curve names vs knots,
 *         total knot count vs instrument count, or yields vs curve count)
 */
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
// The problem must be square: one instrument per curve node across all curves.
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
// LinkedHashMaps preserve curve order, which fixes the layout of the flat yield vector.
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
/**
 * Runs the data-bundle-driven curve-fitting test repeatedly: un-timed JIT warm-up cycles
 * first, then — if _benchmarkCycles is positive — a timed run logged via OperationTimer.
 *
 * @param rootFinder the root finder under test
 * @param data the fitting problem definition
 * @param name label used in benchmark log output
 */
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
  for (int cycle = 0; cycle < _hotspotWarmupCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  if (_benchmarkCycles <= 0) {
    return;
  }
  final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
  for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
    doTestForCurveFinding(rootFinder, data);
  }
  timer.finished();
}
/**
 * Core fitting test: builds the objective function (market-value differences) and a Jacobian
 * (analytic or finite-difference, per the bundle's TestType), solves for the curve node
 * yields, then asserts (1) zero residuals, (2) the fitted curves reprice each instrument at
 * its market par rate, and (3) — when true yields are known — the fitted yields match them.
 *
 * @throws IllegalArgumentException if the bundle's TestType is unrecognised
 */
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
// Finite-difference Jacobian built directly from the objective function.
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
// At the root, every model-vs-market difference should vanish.
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Rebuild the fitted curves from the flat solution vector and reprice the instruments.
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted node yields back into one array per curve, consuming
 * slices in curve-name order (each slice's length is that curve's node-point count).
 *
 * @return map from curve name to its fitted node yields
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
    DoubleMatrix1D yieldCurveNodes) {
  final HashMap<String, double[]> yieldsByCurve = new HashMap<String, double[]>();
  final double[] flat = yieldCurveNodes.getData();
  int offset = 0;
  for (String curveName : data.getCurveNames()) {
    final int nNodes = data.getCurveNodePointsForCurve(curveName).length;
    yieldsByCurve.put(curveName, Arrays.copyOfRange(flat, offset, offset + nNodes));
    offset += nNodes;
  }
  return yieldsByCurve;
}
/**
 * Verifies the analytic multi-curve Jacobian against a finite-difference approximation,
 * both evaluated at the bundle's start position.
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
.getMarketValueSensitivityCalculator());
final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
// Tolerance is looser than EPS — presumably to allow for finite-difference error.
assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel arrays of knot times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
  // Exactly one yield per knot time.
  if (yields.length != times.length) {
    throw new IllegalArgumentException("rates and times different lengths");
  }
  return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
>>>>>>> YOURS
/**
 * Rebuilds a curve-finder data bundle around a new instrument list, carrying over every
 * other component (known curves, node points, interpolators, sensitivity calculators).
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
    final List<InterestRateDerivative> instruments) {
  final YieldCurveBundle knownCurves = old.getKnownCurves();
  return new MultipleYieldCurveFinderDataBundle(instruments, knownCurves, old.getUnknownCurveNodePoints(),
      old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory dispatching on a type keyword to build an interest-rate derivative priced at par.
 * Supported keywords: "cash", "libor", "fra", "future", "swap".
 *
 * @throws IllegalArgumentException for any other keyword (or null)
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  if ("cash".equals(type)) {
    return makeCash(maturity, fundCurveName, curves);
  }
  if ("libor".equals(type)) {
    return makeLibor(maturity, indexCurveName, curves);
  }
  if ("fra".equals(type)) {
    return makeFRA(maturity, fundCurveName, indexCurveName, curves);
  }
  if ("future".equals(type)) {
    // NOTE(review): delegate name is misspelled ("Futrure") at its declaration.
    return makeFutrure(maturity, indexCurveName, curves);
  }
  if ("swap".equals(type)) {
    return makeSwap(maturity, fundCurveName, indexCurveName, curves);
  }
  throw new IllegalArgumentException("unknown IRD type " + type);
}
/**
 * Builds a cash deposit maturing at {@code time}, with its rate set to the par rate implied
 * by the supplied curves.
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
    final YieldCurveBundle curves) {
  // Price a zero-rate instrument, then rebuild it at its implied par rate.
  final double parRate = ParRateCalculator.getInstance().getValue(new Cash(time, 0.0, fundCurveName), curves);
  return new Cash(time, parRate, fundCurveName);
}
/**
 * Builds a Libor deposit maturing at {@code time}, with its rate set to the par rate
 * implied by the supplied curves.
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
  // Price a zero-rate instrument, then rebuild it at its implied par rate.
  final double parRate = ParRateCalculator.getInstance().getValue(new Libor(time, 0.0, indexCurveName), curves);
  return new Libor(time, parRate, indexCurveName);
}
/**
 * Builds a 3-month FRA settling at {@code time - 0.25} and maturing at {@code time}, with
 * its rate set to the par rate implied by the supplied curves.
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  // Price a zero-rate FRA, then rebuild it at its implied par rate.
  final double parRate = ParRateCalculator.getInstance()
      .getValue(new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName), curves);
  return new ForwardRateAgreement(time - 0.25, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Builds a 3-month interest-rate future starting at {@code time}, with its rate set to the
 * par rate implied by the supplied curves.
 * NOTE(review): the method name is a typo ("Futrure"); it is called from makeIRD, so the
 * rename must update both sites in a single change.
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Builds a par fixed-float swap maturing at {@code time} (in years) by delegating to the
 * payment-count overload: semi-annual fixed payments give 2x the maturity in payments.
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
    final YieldCurveBundle curves) {
  final int payments = (int) Math.round(2 * time);
  return makeSwap(payments, fundCurveName, liborCurveName, curves);
}
/**
 * Asserts two matrices have identical dimensions and element-wise equal entries to within
 * {@code eps}.
 *
 * Bug fix: the original iterated the outer index over the COLUMN count but used it as the
 * ROW index in getEntry(i, j) (and vice versa) — correct only for square matrices, and an
 * out-of-range access for rectangular ones. Loops now run rows x columns.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
//CSON
<<<<<<< MINE
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
=======
>>>>>>> YOURS
/**
 * Entry point for exercising a root finder against the single- and double-curve fitting
 * problems. The entire body is currently commented out (all scenarios disabled), so this
 * method is a no-op; parameters are kept (and marked unused) to preserve the call signature.
 * NOTE(review): either re-enable the scenarios or remove this method — confirm intent.
 */
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
<<<<<<< MINE
=======
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
>>>>>>> YOURS
/**
 * Runs the fitting test repeatedly: first un-timed warm-up cycles, then — if benchmarking
 * is enabled via _benchmarkCycles — a timed run logged through OperationTimer.
 *
 * @param rootFinder the root finder under test
 * @param name label used in benchmark log output
 * @param function the market-value difference function whose root is sought
 * @param jacobianFunction the Jacobian of {@code function}
 * @param doubleCurveTest true for the two-curve fitting problem, false for one curve
 */
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
  // Warm the JIT before any timing is taken.
  for (int cycle = 0; cycle < _hotspotWarmupCycles; cycle++) {
    doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
  }
  if (_benchmarkCycles <= 0) {
    return;
  }
  final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
  for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
    doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
  }
  timer.finished();
}
/**
 * Dispatches one fitting run to the single- or double-curve test routine.
 *
 * @param doubleCurveTest true selects the double-curve problem
 */
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
    final Boolean doubleCurveTest) {
  if (!doubleCurveTest) {
    doTestForSingleCurve(rootFinder, function, jacobianFunction);
  } else {
    doTestForDoubleCurve(rootFinder, function, jacobianFunction);
  }
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
// for (int i = 0; i < FUNDING_CURVE_TIMES.length; i++) {
// assertEquals(FUNDING_YIELDS[i], fundingYields[i], EPS);
// }
// for (int i = 0; i < LIBOR_CURVE_TIMES.length; i++) {
// assertEquals(LIBOR_YIELDS[i], liborYields[i], EPS);
// }
// //
// final YieldAndDiscountCurve fundingCurve = makeYieldCurve(fundingYields, _curve1Knots, EXTRAPOLATOR);
// final YieldAndDiscountCurve liborCurve = makeYieldCurve(liborYields, _curve2Knots, EXTRAPOLATOR);
// final YieldCurveBundle bundle = new YieldCurveBundle();
// bundle.setCurve(_curve1Name, liborCurve);
// bundle.setCurve(_curve2Name, fundingCurve);
//
// for (int i = 0; i < _marketRates.length; i++) {
// assertEquals(_marketRates[i], PAR_RATE_CALCULATOR.getValue(DOUBLE_CURVE_INSTRUMENTS.get(i), bundle), EPS);
// }
}
>>>>>>> YOURS
/**
 * Intended to compare the analytic double-curve Jacobian against finite-difference and
 * FD-interpolator-sensitivity versions at {@code position}. The entire body is commented
 * out, so this method is currently a no-op.
 * NOTE(review): re-enable or delete — confirm intent.
 */
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
}
/**
 * Assembles a test data bundle describing a multi-curve fitting problem: one interpolator,
 * knot set and sensitivity calculator per named curve, plus the instruments, market rates
 * and solver start position. If {@code curveYields} is supplied (the "true" yields the fit
 * should recover), it is attached to the bundle; otherwise a bundle without known yields is
 * returned.
 *
 * @param curveYields may be null; when non-null must have one entry per curve name
 * @throws IllegalArgumentException if list sizes are inconsistent (curve names vs knots,
 *         total knot count vs instrument count, or yields vs curve count)
 */
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
// The problem must be square: one instrument per curve node across all curves.
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
// LinkedHashMaps preserve curve order, which fixes the layout of the flat yield vector.
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators = new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
}
/**
 * Runs the curve-finding test repeatedly: first a set of warm-up cycles (so the
 * JIT can compile the hot paths before timing), then, if benchmarking is
 * enabled, a timed batch of cycles reported through the class logger.
 */
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
    for (int warmup = 0; warmup < _hotspotWarmupCycles; warmup++) {
        doTestForCurveFinding(rootFinder, data);
    }
    if (_benchmarkCycles <= 0) {
        return;
    }
    final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
    for (int cycle = 0; cycle < _benchmarkCycles; cycle++) {
        doTestForCurveFinding(rootFinder, data);
    }
    timer.finished();
}
/**
 * Fits the yield curves for the given test data with {@code rootFinder} and
 * asserts the fit: the residual function is zero at the root, the fitted
 * curves reprice every instrument at its market rate and, when true yields
 * are supplied in the bundle, the fitted node yields match them.
 */
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
// Jacobian source is selected by the bundle's TestType: analytic or finite difference.
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
} else {
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
}
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
// At the root every component of the residual should be (numerically) zero.
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
// Rebuild the fitted curves from the flat solution vector.
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted node yields back into one array per curve,
 * consuming the vector in the order the curve names appear in the bundle.
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
    DoubleMatrix1D yieldCurveNodes) {
    final HashMap<String, double[]> curveYields = new HashMap<String, double[]>();
    final double[] flat = yieldCurveNodes.getData();
    int offset = 0;
    for (String curveName : data.getCurveNames()) {
        final int nodeCount = data.getCurveNodePointsForCurve(curveName).length;
        curveYields.put(curveName, Arrays.copyOfRange(flat, offset, offset + nodeCount));
        offset += nodeCount;
    }
    return curveYields;
}
/**
 * Checks the analytic Jacobian of the curve finder against a finite-difference
 * approximation at the bundle's start position, to matrix tolerance 1e-6.
 */
public void testJacobian(YieldCurveFittingTestDataBundle data) {
    final MultipleYieldCurveFinderFunction curveFinder =
        new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
    final MultipleYieldCurveFinderJacobian analyticJacobian =
        new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
    final Function1D<DoubleMatrix1D, DoubleMatrix2D> fdJacobian =
        new VectorFieldFirstOrderDifferentiator().derivative(curveFinder);
    final DoubleMatrix2D exact = analyticJacobian.evaluate(data.getStartPosition());
    final DoubleMatrix2D approx = fdJacobian.evaluate(data.getStartPosition());
    assertMatrixEquals(exact, approx, 1e-6);
}
/**
 * Builds an interpolated yield curve from parallel arrays of knot times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
    if (yields.length != times.length) {
        throw new IllegalArgumentException("rates and times different lengths");
    }
    return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
>>>>>>> YOURS
<<<<<<< MINE
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
}
=======
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
}
>>>>>>> YOURS
/**
 * Builds a new curve-finder data bundle that differs from {@code old} only in
 * its instrument list; known curves, node points, interpolators and
 * sensitivity calculators are carried over unchanged.
 */
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
}
/**
 * Factory for a par-rate instrument of the given type ("cash", "libor", "fra",
 * "future" or "swap") maturing at {@code maturity}, priced off {@code curves}.
 *
 * @throws IllegalArgumentException for an unrecognised (or null) type
 */
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
    // Constant-first equals() keeps this null-safe: a null type falls through
    // to the IllegalArgumentException below.
    if ("cash".equals(type)) {
        return makeCash(maturity, fundCurveName, curves);
    }
    if ("libor".equals(type)) {
        return makeLibor(maturity, indexCurveName, curves);
    }
    if ("fra".equals(type)) {
        return makeFRA(maturity, fundCurveName, indexCurveName, curves);
    }
    if ("future".equals(type)) {
        return makeFutrure(maturity, indexCurveName, curves); // delegate is (mis)named makeFutrure in this class
    }
    if ("swap".equals(type)) {
        return makeSwap(maturity, fundCurveName, liborOrIndex(indexCurveName), curves);
    }
    throw new IllegalArgumentException("unknown IRD type " + type);
}

/** Identity helper documenting that the index curve doubles as the libor curve here. */
private static String liborOrIndex(final String indexCurveName) {
    return indexCurveName;
}
/** Cash deposit maturing at {@code time}, its rate set to the par rate implied by {@code curves}. */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
    final YieldCurveBundle curves) {
    final double parRate =
        ParRateCalculator.getInstance().getValue(new Cash(time, 0.0, fundCurveName), curves);
    return new Cash(time, parRate, fundCurveName);
}
/** Libor deposit maturing at {@code time}, its rate set to the par rate implied by {@code curves}. */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
    final double parRate =
        ParRateCalculator.getInstance().getValue(new Libor(time, 0.0, indexCurveName), curves);
    return new Libor(time, parRate, indexCurveName);
}
/**
 * Forward-rate agreement over [time - 0.25, time], its rate set to the par
 * rate implied by {@code curves}.
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
    final double parRate = ParRateCalculator.getInstance().getValue(
        new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName), curves);
    return new ForwardRateAgreement(time - 0.25, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Interest-rate future spanning [time, time + 0.25] with year fraction 0.25,
 * its rate set to the par rate implied by {@code curves}.
 * NOTE(review): the name is misspelt ("Futrure") but makeIRD calls it under
 * that spelling, so renaming it here alone would break the build.
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Par swap whose fixed-payment count is the maturity in years rounded to the
 * nearest half-year; delegates to {@code makeSwap(int, ...)}.
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
    final YieldCurveBundle curves) {
    final int paymentCount = (int) Math.round(2 * time);
    return makeSwap(paymentCount, fundCurveName, liborCurveName, curves);
}
/**
 * Asserts that two matrices have identical dimensions and agree element-wise
 * to within {@code eps}.
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
    final int rows = m1.getNumberOfRows();
    final int cols = m1.getNumberOfColumns();
    assertEquals(m2.getNumberOfRows(), rows);
    assertEquals(m2.getNumberOfColumns(), cols);
    // BUG FIX: the original iterated i over the COLUMN count and j over the ROW
    // count while indexing getEntry(i, j) as (row, column); for non-square
    // matrices that reads out of range or compares the wrong elements.
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
        }
    }
}
}
Unstructured
/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
<<<<<<< MINE
protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
protected double[] _marketRates;
protected final double[] _knotPoints = null;
protected DoubleMatrix1D _startPosition = null;
MultipleYieldCurveFinderDataBundle _yieldFinderData;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected final String _curve1Name = "Curve 1";
protected final String _curve2Name = "Curve 2";
protected YieldAndDiscountCurve _curve1;
protected YieldAndDiscountCurve _curve2;
protected double[] _curve1Knots = null;
protected double[] _curve2Knots = null;
protected double[] _curve1Yields = null;
protected double[] _curve2Yields = null;
//CSON
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
protected abstract void setupSingleCurveInstruments();
protected abstract void setupDoubleCurveInstruments();
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
=======
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
>>>>>>> YOURS
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
<<<<<<< MINE
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
=======
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
>>>>>>> YOURS
}
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
=======
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
>>>>>>> YOURS
for (int i = 0; i < _hotspotWarmupCycles; i++) {
<<<<<<< MINE
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
=======
doTestForCurveFinding(rootFinder, data);
>>>>>>> YOURS
}
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
<<<<<<< MINE
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
=======
doTestForCurveFinding(rootFinder, data);
>>>>>>> YOURS
}
timer.finished();
}
}
<<<<<<< MINE
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
final Boolean doubleCurveTest) {
if (doubleCurveTest) {
doTestForDoubleCurve(rootFinder, function, jacobianFunction);
=======
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
>>>>>>> YOURS
} else {
<<<<<<< MINE
doTestForSingleCurve(rootFinder, function, jacobianFunction);
=======
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
>>>>>>> YOURS
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
=======
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
>>>>>>> YOURS
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
<<<<<<< MINE
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
=======
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
>>>>>>> YOURS
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted node yields back into one array per curve,
 * consuming the vector in the order the curve names appear in the bundle.
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
DoubleMatrix1D yieldCurveNodes) {
HashMap<String, double[]> res = new HashMap<String, double[]>();
int start = 0;
int end = 0;
for (String name : data.getCurveNames()) {
// each curve consumes as many entries as it has node points
end += data.getCurveNodePointsForCurve(name).length;
double[] temp = Arrays.copyOfRange(yieldCurveNodes.getData(), start, end);
res.put(name, temp);
start = end;
}
return res;
}
public void testJacobian(YieldCurveFittingTestDataBundle data) {
MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
.getMarketValueSensitivityCalculator());
final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
assertMatrixEquals(jacExact, jacFD, 1e-6);
<<<<<<< MINE
}
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
=======
>>>>>>> YOURS
}
/**
 * Builds an interpolated yield curve from parallel arrays of knot times and yields.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
final int n = yields.length;
if (n != times.length) {
throw new IllegalArgumentException("rates and times different lengths");
}
return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
=======
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
>>>>>>> YOURS
}
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
=======
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
if ("cash".equals(type)) {
return makeCash(maturity, fundCurveName, curves);
} else if ("libor".equals(type)) {
return makeLibor(maturity, indexCurveName, curves);
} else if ("fra".equals(type)) {
return makeFRA(maturity, fundCurveName, indexCurveName, curves);
} else if ("future".equals(type)) {
return makeFutrure(maturity, indexCurveName, curves);
} else if ("swap".equals(type)) {
return makeSwap(maturity, fundCurveName, indexCurveName, curves);
}
throw new IllegalArgumentException("unknown IRD type " + type);
>>>>>>> YOURS
}
/**
 * Creates a Cash deposit maturing at {@code time} whose rate is set to the par rate
 * implied by the supplied curve bundle.
 *
 * @param time maturity of the deposit
 * @param fundCurveName name of the funding (discount) curve
 * @param curves yield curves used to compute the par rate
 * @return a Cash instrument priced at par against {@code curves}
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
    final YieldCurveBundle curves) {
  // Build a zero-rate instrument, query its par rate, then rebuild at that rate.
  final InterestRateDerivative zeroRateDeposit = new Cash(time, 0.0, fundCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroRateDeposit, curves);
  return new Cash(time, parRate, fundCurveName);
}
/**
 * Creates a Libor deposit maturing at {@code time} whose rate is set to the par rate
 * implied by the supplied curve bundle.
 *
 * @param time maturity of the deposit
 * @param indexCurveName name of the index curve the rate is projected from
 * @param curves yield curves used to compute the par rate
 * @return a Libor instrument priced at par against {@code curves}
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
    final YieldCurveBundle curves) {
  // Build a zero-rate instrument, query its par rate, then rebuild at that rate.
  final InterestRateDerivative zeroRateDeposit = new Libor(time, 0.0, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroRateDeposit, curves);
  return new Libor(time, parRate, indexCurveName);
}
/**
 * Creates a three-month FRA settling at {@code time - 0.25} and maturing at {@code time},
 * struck at the par rate implied by the supplied curve bundle.
 *
 * @param time maturity of the FRA
 * @param fundCurveName name of the funding (discount) curve
 * @param indexCurveName name of the index (forward) curve
 * @param curves yield curves used to compute the par rate
 * @return a ForwardRateAgreement struck at par against {@code curves}
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
    final String indexCurveName, final YieldCurveBundle curves) {
  final double settlement = time - 0.25;
  // Build with a zero strike, query the par rate, then rebuild at that strike.
  final InterestRateDerivative zeroStrikeFra = new ForwardRateAgreement(settlement, time, 0.0, fundCurveName, indexCurveName);
  final double parRate = ParRateCalculator.getInstance().getValue(zeroStrikeFra, curves);
  return new ForwardRateAgreement(settlement, time, parRate, fundCurveName, indexCurveName);
}
/**
 * Builds a three-month interest-rate future spanning {@code [time, time + 0.25]} whose rate
 * equals the par rate implied by {@code curves}.
 * <p>
 * NOTE(review): the method name contains a typo ("Futrure" instead of "Future"); it is kept
 * unchanged because callers (e.g. the "future" branch of makeIRD) reference this exact name.
 *
 * @param time start time of the future
 * @param indexCurveName name of the index (forward) curve
 * @param curves yield curves used to compute the par rate
 * @return an InterestRateFuture priced at par against {@code curves}
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
// Build a zero-rate instrument, query its par rate, then rebuild at that rate.
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Builds a fixed/float swap with maturity {@code time} years by delegating to the
 * payment-count overload; the fixed leg pays semi-annually, so the payment count is
 * {@code round(2 * time)}.
 *
 * @param time swap maturity in years
 * @param fundCurveName name of the funding (discount) curve
 * @param liborCurveName name of the Libor (index) curve
 * @param curves yield curves used by the delegate to set the par rate
 * @return the constructed swap
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
    final YieldCurveBundle curves) {
  final int payments = (int) Math.round(2 * time);
  return makeSwap(payments, fundCurveName, liborCurveName, curves);
}
// protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
// VariableAnnuity floatingLeg = swap.getFloatingLeg();
// ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
// ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
// fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
// return new FixedFloatSwap(newLeg, floatingLeg);
// }
<<<<<<< MINE
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
=======
/**
*
* @param payments
* @param fundingCurveName
* @param liborCurveName
* @return
*/
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
>>>>>>> YOURS
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
<<<<<<< MINE
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
=======
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
>>>>>>> YOURS
}
/**
 * Asserts that two matrices have the same dimensions and element-wise equal entries to
 * within {@code eps}.
 * <p>
 * Fix: the original loops ran the row index up to the number of columns and the column
 * index up to the number of rows, which is wrong (and can index out of bounds) for
 * non-square matrices; the bounds are now paired with the correct dimension.
 *
 * @param m1 expected matrix
 * @param m2 actual matrix
 * @param eps absolute tolerance per entry
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}/**
* Copyright (C) 2009 - 2010 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.math.rootfinding;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import cern.jet.random.engine.MersenneTwister64;
import cern.jet.random.engine.RandomEngine;
import com.opengamma.financial.interestrate.InterestRateDerivative;
import com.opengamma.financial.interestrate.InterestRateDerivativeVisitor;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderDataBundle;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderFunction;
import com.opengamma.financial.interestrate.MultipleYieldCurveFinderJacobian;
import com.opengamma.financial.interestrate.ParRateCalculator;
import com.opengamma.financial.interestrate.YieldCurveBundle;
import com.opengamma.financial.interestrate.annuity.definition.ConstantCouponAnnuity;
import com.opengamma.financial.interestrate.annuity.definition.VariableAnnuity;
import com.opengamma.financial.interestrate.cash.definition.Cash;
import com.opengamma.financial.interestrate.fra.definition.ForwardRateAgreement;
import com.opengamma.financial.interestrate.future.definition.InterestRateFuture;
import com.opengamma.financial.interestrate.libor.definition.Libor;
import com.opengamma.financial.interestrate.swap.definition.FixedFloatSwap;
import com.opengamma.financial.model.interestrate.curve.InterpolatedYieldCurve;
import com.opengamma.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.math.differentiation.VectorFieldFirstOrderDifferentiator;
import com.opengamma.math.function.Function1D;
import com.opengamma.math.interpolation.Interpolator1D;
import com.opengamma.math.interpolation.data.Interpolator1DDataBundle;
import com.opengamma.math.interpolation.sensitivity.Interpolator1DNodeSensitivityCalculator;
import com.opengamma.math.matrix.DoubleMatrix1D;
import com.opengamma.math.matrix.DoubleMatrix2D;
import com.opengamma.math.rootfinding.YieldCurveFittingTestDataBundle.TestType;
import com.opengamma.math.rootfinding.newton.NewtonVectorRootFinder;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.DoublesPair;
/**
*
*/
public abstract class YieldCurveFittingSetup {
//CSOFF
protected static final RandomEngine RANDOM = new MersenneTwister64(MersenneTwister64.DEFAULT_SEED);
<<<<<<< MINE
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_CALCULATOR = ParRateCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PAR_RATE_DIFFERENCE_CALCULATOR = ParRateDifferenceCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PAR_RATE_SENSITIVITY_CALCULATOR = ParRateCurveSensitivityCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Double> PV_CALCULATOR = PresentValueCalculator.getInstance();
protected static final InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> PV_SENSITIVITY_CALCULATOR = PresentValueSensitivityCalculator.getInstance();
=======
>>>>>>> YOURS
protected static final double EPS = 1e-8;
protected static final int STEPS = 100;
protected Logger _logger = null;
protected int _hotspotWarmupCycles;
protected int _benchmarkCycles;
protected YieldCurveFittingTestDataBundle getYieldCurveFittingTestDataBundle(
List<InterestRateDerivative> instruments, final YieldCurveBundle knownCurves, final List<String> curveNames,
final List<double[]> curvesKnots, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense,
InterestRateDerivativeVisitor<Double> marketValueCalculator,
InterestRateDerivativeVisitor<Map<String, List<DoublesPair>>> marketValueSensitivityCalculator,
double[] marketRates, DoubleMatrix1D startPosition, List<double[]> curveYields) {
Validate.notNull(curveNames);
Validate.notNull(curvesKnots);
Validate.notNull(instruments);
Validate.notNull(extrapolator);
Validate.notNull(extrapolatorWithSense);
<<<<<<< MINE
protected List<InterestRateDerivative> SINGLE_CURVE_INSTRUMENTS;
protected List<InterestRateDerivative> DOUBLE_CURVE_INSTRUMENTS;
protected double[] _marketRates;
protected final double[] _knotPoints = null;
protected DoubleMatrix1D _startPosition = null;
MultipleYieldCurveFinderDataBundle _yieldFinderData;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> SINGLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix1D> DOUBLE_CURVE_FINDER;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected Function1D<DoubleMatrix1D, DoubleMatrix2D> DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY;
protected final String _curve1Name = "Curve 1";
protected final String _curve2Name = "Curve 2";
protected YieldAndDiscountCurve _curve1;
protected YieldAndDiscountCurve _curve2;
protected double[] _curve1Knots = null;
protected double[] _curve2Knots = null;
protected double[] _curve1Yields = null;
protected double[] _curve2Yields = null;
//CSON
protected void setupExtrapolator() {
SINGLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
DOUBLE_CURVE_INSTRUMENTS = new ArrayList<InterestRateDerivative>();
EXTRAPOLATOR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR);
EXTRAPOLATOR_WITH_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, false);
EXTRAPOLATOR_WITH_FD_SENSITIVITY = CombinedInterpolatorExtrapolatorNodeSensitivityCalculatorFactory.getSensitivityCalculator(_interolatorName, LINEAR_EXTRAPOLATOR, FLAT_EXTRAPOLATOR, true);
}
protected abstract void setupSingleCurveInstruments();
protected abstract void setupDoubleCurveInstruments();
protected MultipleYieldCurveFinderDataBundle getSingleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
=======
int n = curveNames.size();
Validate.isTrue(n == curvesKnots.size());
int count = 0;
for (int i = 0; i < n; i++) {
Validate.notNull(curvesKnots.get(i));
count += curvesKnots.get(i).length;
}
Validate.isTrue(count == instruments.size());
>>>>>>> YOURS
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
<<<<<<< MINE
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
=======
for (int i = 0; i < n; i++) {
unknownCurveInterpolators.put(curveNames.get(i), extrapolator);
unknownCurveNodes.put(curveNames.get(i), curvesKnots.get(i));
unknownCurveNodeSensitivityCalculators.put(curveNames.get(i), extrapolatorWithSense);
}
if (curveYields == null) {
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes,
unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators, marketValueCalculator,
marketValueSensitivityCalculator, marketRates, startPosition);
}
Validate.isTrue(curveYields.size() == n, "wrong number of true yields");
HashMap<String, double[]> yields = new HashMap<String, double[]>();
for (int i = 0; i < n; i++) {
yields.put(curveNames.get(i), curveYields.get(i));
}
return new YieldCurveFittingTestDataBundle(instruments, knownCurves, unknownCurveNodes, unknownCurveInterpolators,
unknownCurveNodeSensitivityCalculators, marketValueCalculator, marketValueSensitivityCalculator, marketRates,
startPosition, yields);
>>>>>>> YOURS
}
<<<<<<< MINE
protected MultipleYieldCurveFinderDataBundle getDoubleYieldCurveFinderDataBundle(List<InterestRateDerivative> instruments, final Interpolator1D<? extends Interpolator1DDataBundle> extrapolator,
final Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle> extrapolatorWithSense) {
LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>> unknownCurveInterpolators = new LinkedHashMap<String, Interpolator1D<? extends Interpolator1DDataBundle>>();
LinkedHashMap<String, double[]> unknownCurveNodes = new LinkedHashMap<String, double[]>();
LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>> unknownCurveNodeSensitivityCalculators =
new LinkedHashMap<String, Interpolator1DNodeSensitivityCalculator<? extends Interpolator1DDataBundle>>();
unknownCurveInterpolators.put(_curve1Name, extrapolator);
unknownCurveInterpolators.put(_curve2Name, extrapolator);
unknownCurveNodes.put(_curve1Name, _curve1Knots);
unknownCurveNodes.put(_curve2Name, _curve2Knots);
unknownCurveNodeSensitivityCalculators.put(_curve1Name, extrapolatorWithSense);
unknownCurveNodeSensitivityCalculators.put(_curve2Name, extrapolatorWithSense);
return new MultipleYieldCurveFinderDataBundle(instruments, null, unknownCurveNodes, unknownCurveInterpolators, unknownCurveNodeSensitivityCalculators);
}
protected void setupSingleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
SINGLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
SINGLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getSingleYieldCurveFinderDataBundle(SINGLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
protected void setupDoubleCurveFinder() {
MultipleYieldCurveFinderDataBundle data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_SENSITIVITY);
DOUBLE_CURVE_FINDER = new MultipleYieldCurveFinderFunction(data, _marketValueCalculator);
DOUBLE_CURVE_JACOBIAN = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
data = getDoubleYieldCurveFinderDataBundle(DOUBLE_CURVE_INSTRUMENTS, EXTRAPOLATOR, EXTRAPOLATOR_WITH_FD_SENSITIVITY);
DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY = new MultipleYieldCurveFinderJacobian(data, _marketValueSensitivityCalculator);
}
public void testRootFindingMethods(@SuppressWarnings("unused") NewtonVectorRootFinder rootFinder, @SuppressWarnings("unused") String name) {
//final VectorFieldFirstOrderDifferentiator fd_jac_calculator = new VectorFieldFirstOrderDifferentiator();
// doHotSpot(rootFinder, name + ", single curve", SINGLE_CURVE_FINDER, SINGLE_CURVE_JACOBIAN);
// doHotSpot(rootFinder, name + ", single curve, finite difference", SINGLE_CURVE_FINDER, fd_jac_calculator
// .derivative(SINGLE_CURVE_FINDER));
// doHotSpot(rootFinder, name + ", single curve FD interpolator sensitivity", SINGLE_CURVE_FINDER,
// SINGLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY);
// doHotSpot(rootFinder, name + ", double curve", DOUBLE_CURVE_FINDER, DOUBLE_CURVE_JACOBIAN, true);
// doHotSpot(rootFinder, name + ", double curve, finite difference", DOUBLE_CURVE_FINDER, fd_jac_calculator
// .derivative(DOUBLE_CURVE_FINDER), true);
// doHotSpot(rootFinder, name + ", double curve FD interpolator sensitivity", DOUBLE_CURVE_FINDER,
// DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY, true);
}
@SuppressWarnings("unused")
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> functor,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction) {
doHotSpot(rootFinder, name, functor, jacobianFunction, false);
}
private void doHotSpot(final NewtonVectorRootFinder rootFinder, final String name, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function,
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction, final Boolean doubleCurveTest) {
=======
public void doHotSpot(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data, final String name) {
>>>>>>> YOURS
for (int i = 0; i < _hotspotWarmupCycles; i++) {
<<<<<<< MINE
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
=======
doTestForCurveFinding(rootFinder, data);
>>>>>>> YOURS
}
if (_benchmarkCycles > 0) {
final OperationTimer timer = new OperationTimer(_logger, "processing {} cycles on " + name, _benchmarkCycles);
for (int i = 0; i < _benchmarkCycles; i++) {
<<<<<<< MINE
doTest(rootFinder, function, jacobianFunction, doubleCurveTest);
=======
doTestForCurveFinding(rootFinder, data);
>>>>>>> YOURS
}
timer.finished();
}
}
<<<<<<< MINE
private void doTest(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> function, final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFunction,
final Boolean doubleCurveTest) {
if (doubleCurveTest) {
doTestForDoubleCurve(rootFinder, function, jacobianFunction);
=======
private void doTestForCurveFinding(final NewtonVectorRootFinder rootFinder, YieldCurveFittingTestDataBundle data) {
Function1D<DoubleMatrix1D, DoubleMatrix1D> func = new MultipleYieldCurveFinderFunction(data, data
.getMarketValueCalculator());
Function1D<DoubleMatrix1D, DoubleMatrix2D> jac = null;
if (data.getTestType() == TestType.ANALYTIC_JACOBIAN) {
jac = new MultipleYieldCurveFinderJacobian(data, data.getMarketValueSensitivityCalculator());
} else if (data.getTestType() == TestType.FD_JACOBIAN) {
final VectorFieldFirstOrderDifferentiator fdJacCalculator = new VectorFieldFirstOrderDifferentiator();
jac = fdJacCalculator.derivative(func);
>>>>>>> YOURS
} else {
<<<<<<< MINE
doTestForSingleCurve(rootFinder, function, jacobianFunction);
=======
throw new IllegalArgumentException("unknown TestType " + data.getTestType());
>>>>>>> YOURS
}
<<<<<<< MINE
private void doTestForSingleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
}
private void doTestForDoubleCurve(final NewtonVectorRootFinder rootFinder, final Function1D<DoubleMatrix1D, DoubleMatrix1D> f, final Function1D<DoubleMatrix1D, DoubleMatrix2D> j) {
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(f, j, _startPosition);
final DoubleMatrix1D modelMarketValueDiff = f.evaluate(yieldCurveNodes);
=======
final DoubleMatrix1D yieldCurveNodes = rootFinder.getRoot(func, jac, data.getStartPosition());
final DoubleMatrix1D modelMarketValueDiff = func.evaluate(yieldCurveNodes);
>>>>>>> YOURS
for (int i = 0; i < modelMarketValueDiff.getNumberOfElements(); i++) {
assertEquals(0.0, modelMarketValueDiff.getEntry(i), EPS);
}
<<<<<<< MINE
@SuppressWarnings("unused")
final double[] fundingYields = Arrays.copyOfRange(yieldCurveNodes.getData(), 0, _curve1Knots.length);
@SuppressWarnings("unused")
final double[] liborYields = Arrays.copyOfRange(yieldCurveNodes.getData(), _curve1Knots.length, yieldCurveNodes.getNumberOfElements());
=======
HashMap<String, double[]> yields = unpackYieldVector(data, yieldCurveNodes);
>>>>>>> YOURS
final YieldCurveBundle bundle = new YieldCurveBundle();
for (String name : data.getCurveNames()) {
YieldAndDiscountCurve curve = makeYieldCurve(yields.get(name), data.getCurveNodePointsForCurve(name), data
.getInterpolatorForCurve(name));
bundle.setCurve(name, curve);
}
if (data.getKnownCurves() != null) {
bundle.addAll(data.getKnownCurves());
}
//this is possibly a redundant test, especially if we are working in par-rate space (vs present value) as the very fact that
//the root finder converged (and modelMarketValueDiff are within EPS of 0) means this will also pass
for (int i = 0; i < data.getMarketRates().length; i++) {
assertEquals(data.getMarketRates()[i], ParRateCalculator.getInstance().getValue(data.getDerivative(i), bundle),
EPS);
}
//this test cannot be performed when we don't know what the true yield curves are - i.e. we start from market data
if (data.getCurveYields() != null) {
for (String name : data.getCurveNames()) {
double[] trueYields = data.getCurveYields().get(name);
double[] fittedYields = yields.get(name);
for (int i = 0; i < trueYields.length; i++) {
assertEquals(trueYields[i], fittedYields[i], EPS);
}
}
}
}
/**
 * Splits the flat vector of fitted node yields back into one array per curve, consuming
 * the vector in the same curve order used to build it (one segment per curve, sized by
 * that curve's node count).
 *
 * @param data bundle providing curve names and per-curve node points
 * @param yieldCurveNodes flat vector of fitted yields for all curves
 * @return map from curve name to that curve's slice of the yield vector
 */
private HashMap<String, double[]> unpackYieldVector(YieldCurveFittingTestDataBundle data,
    DoubleMatrix1D yieldCurveNodes) {
  final HashMap<String, double[]> yieldsByCurve = new HashMap<String, double[]>();
  final double[] flat = yieldCurveNodes.getData();
  int offset = 0;
  for (final String curveName : data.getCurveNames()) {
    final int nodeCount = data.getCurveNodePointsForCurve(curveName).length;
    yieldsByCurve.put(curveName, Arrays.copyOfRange(flat, offset, offset + nodeCount));
    offset += nodeCount;
  }
  return yieldsByCurve;
}
public void testJacobian(YieldCurveFittingTestDataBundle data) {
MultipleYieldCurveFinderFunction func = new MultipleYieldCurveFinderFunction(data, data.getMarketValueCalculator());
MultipleYieldCurveFinderJacobian jac = new MultipleYieldCurveFinderJacobian(data, data
.getMarketValueSensitivityCalculator());
final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(func);
final DoubleMatrix2D jacExact = jac.evaluate(data.getStartPosition());
final DoubleMatrix2D jacFD = jacobianFD.evaluate(data.getStartPosition());
assertMatrixEquals(jacExact, jacFD, 1e-6);
<<<<<<< MINE
}
public void testDoubleCurveJacobian(@SuppressWarnings("unused") DoubleMatrix1D position) {
// final VectorFieldFirstOrderDifferentiator fdCal = new VectorFieldFirstOrderDifferentiator();
// final Function1D<DoubleMatrix1D, DoubleMatrix2D> jacobianFD = fdCal.derivative(DOUBLE_CURVE_FINDER);
// final DoubleMatrix2D jacExact = DOUBLE_CURVE_JACOBIAN.evaluate(position);
// final DoubleMatrix2D jacFDSensitivity = DOUBLE_CURVE_JACOBIAN_WITH_FD_INTERPOLATOR_SENSITIVITY.evaluate(position);
// final DoubleMatrix2D jacFD = jacobianFD.evaluate(position);
// assertMatrixEquals(jacExact, jacFDSensitivity, 1e-6);
// assertMatrixEquals(jacExact, jacFD, 1e-6);
=======
>>>>>>> YOURS
}
/**
 * Creates an interpolated yield curve from parallel arrays of knot times and yields.
 *
 * @param yields yield values at each knot
 * @param times knot times; must be the same length as {@code yields}
 * @param interpolator interpolator applied between knots
 * @return the interpolated curve
 * @throws IllegalArgumentException if the two arrays differ in length
 */
protected static YieldAndDiscountCurve makeYieldCurve(final double[] yields, final double[] times, final Interpolator1D<? extends Interpolator1DDataBundle> interpolator) {
final int n = yields.length;
if (n != times.length) {
throw new IllegalArgumentException("rates and times different lengths");
}
return new InterpolatedYieldCurve(times, yields, interpolator);
}
<<<<<<< MINE
protected static MultipleYieldCurveFinderDataBundle upDateInstruments(MultipleYieldCurveFinderDataBundle old, final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(), old.getUnknownCurveInterpolators(), old
.getUnknownCurveNodeSensitivityCalculators());
=======
protected static MultipleYieldCurveFinderDataBundle updateInstruments(MultipleYieldCurveFinderDataBundle old,
final List<InterestRateDerivative> instruments) {
return new MultipleYieldCurveFinderDataBundle(instruments, old.getKnownCurves(), old.getUnknownCurveNodePoints(),
old.getUnknownCurveInterpolators(), old.getUnknownCurveNodeSensitivityCalculators());
>>>>>>> YOURS
}
<<<<<<< MINE
protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
VariableAnnuity floatingLeg = swap.getFloatingLeg();
ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate, fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
=======
protected static InterestRateDerivative makeIRD(String type, final double maturity, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
if ("cash".equals(type)) {
return makeCash(maturity, fundCurveName, curves);
} else if ("libor".equals(type)) {
return makeLibor(maturity, indexCurveName, curves);
} else if ("fra".equals(type)) {
return makeFRA(maturity, fundCurveName, indexCurveName, curves);
} else if ("future".equals(type)) {
return makeFutrure(maturity, indexCurveName, curves);
} else if ("swap".equals(type)) {
return makeSwap(maturity, fundCurveName, indexCurveName, curves);
}
throw new IllegalArgumentException("unknown IRD type " + type);
>>>>>>> YOURS
}
/**
 * Creates a Cash deposit maturing at {@code time} whose rate is the par rate implied by
 * {@code curves}.
 *
 * @param time maturity of the deposit
 * @param fundCurveName name of the funding (discount) curve
 * @param curves yield curves used to compute the par rate
 * @return a Cash instrument priced at par against {@code curves}
 */
protected static InterestRateDerivative makeCash(final double time, final String fundCurveName,
final YieldCurveBundle curves) {
// Build a zero-rate instrument, query its par rate, then rebuild at that rate.
InterestRateDerivative ird = new Cash(time, 0.0, fundCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new Cash(time, rate, fundCurveName);
}
/**
 * Creates a Libor deposit maturing at {@code time} whose rate is the par rate implied by
 * {@code curves}.
 *
 * @param time maturity of the deposit
 * @param indexCurveName name of the index curve the rate is projected from
 * @param curves yield curves used to compute the par rate
 * @return a Libor instrument priced at par against {@code curves}
 */
protected static InterestRateDerivative makeLibor(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
// Build a zero-rate instrument, query its par rate, then rebuild at that rate.
InterestRateDerivative ird = new Libor(time, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new Libor(time, rate, indexCurveName);
}
/**
 * Creates a three-month FRA settling at {@code time - 0.25} and maturing at {@code time},
 * struck at the par rate implied by {@code curves}.
 *
 * @param time maturity of the FRA
 * @param fundCurveName name of the funding (discount) curve
 * @param indexCurveName name of the index (forward) curve
 * @param curves yield curves used to compute the par rate
 * @return a ForwardRateAgreement struck at par against {@code curves}
 */
protected static InterestRateDerivative makeFRA(final double time, final String fundCurveName,
final String indexCurveName, final YieldCurveBundle curves) {
// Build with a zero strike, query the par rate, then rebuild at that strike.
InterestRateDerivative ird = new ForwardRateAgreement(time - 0.25, time, 0.0, fundCurveName, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new ForwardRateAgreement(time - 0.25, time, rate, fundCurveName, indexCurveName);
}
/**
 * Builds a three-month interest-rate future spanning {@code [time, time + 0.25]} whose rate
 * equals the par rate implied by {@code curves}.
 * <p>
 * NOTE(review): the method name contains a typo ("Futrure" instead of "Future"); it is kept
 * unchanged because callers (e.g. the "future" branch of makeIRD) reference this exact name.
 *
 * @param time start time of the future
 * @param indexCurveName name of the index (forward) curve
 * @param curves yield curves used to compute the par rate
 * @return an InterestRateFuture priced at par against {@code curves}
 */
protected static InterestRateDerivative makeFutrure(final double time, final String indexCurveName,
final YieldCurveBundle curves) {
// Build a zero-rate instrument, query its par rate, then rebuild at that rate.
InterestRateDerivative ird = new InterestRateFuture(time, time + 0.25, 0.25, 0.0, indexCurveName);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
return new InterestRateFuture(time, time + 0.25, 0.25, rate, indexCurveName);
}
/**
 * Builds a fixed/float swap with maturity {@code time} years by delegating to the
 * payment-count overload.
 *
 * @param time swap maturity in years
 * @param fundCurveName name of the funding (discount) curve
 * @param liborCurveName name of the Libor (index) curve
 * @param curves yield curves used by the delegate to set the par rate
 * @return the constructed swap
 */
protected static FixedFloatSwap makeSwap(final double time, final String fundCurveName, final String liborCurveName,
final YieldCurveBundle curves) {
// Fixed leg pays semi-annually, so the payment count is round(2 * maturity).
final int index = (int) Math.round(2 * time);
return makeSwap(index, fundCurveName, liborCurveName, curves);
}
// protected static FixedFloatSwap setParSwapRate(FixedFloatSwap swap, double rate) {
// VariableAnnuity floatingLeg = swap.getFloatingLeg();
// ConstantCouponAnnuity fixedLeg = swap.getFixedLeg();
// ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
// fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
// return new FixedFloatSwap(newLeg, floatingLeg);
// }
<<<<<<< MINE
protected static FixedFloatSwap setupSwap(final int payments, final String fundingCurveName, final String liborCurveName) {
=======
/**
*
* @param payments
* @param fundingCurveName
* @param liborCurveName
* @return
*/
protected static FixedFloatSwap makeSwap(final int payments, final String fundingCurveName,
final String liborCurveName, final YieldCurveBundle curves) {
>>>>>>> YOURS
final double[] fixed = new double[payments];
final double[] floating = new double[2 * payments];
final double[] indexFixing = new double[2 * payments];
final double[] indexMaturity = new double[2 * payments];
final double[] yearFrac = new double[2 * payments];
final double sigma = 4.0 / 365.0;
for (int i = 0; i < payments; i++) {
fixed[i] = 0.5 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
floating[2 * i + 1] = fixed[i];
}
for (int i = 0; i < 2 * payments; i++) {
if (i % 2 == 0) {
floating[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
yearFrac[i] = 0.25 + sigma * (RANDOM.nextDouble() - 0.5);
indexFixing[i] = 0.25 * i + sigma * (i == 0 ? RANDOM.nextDouble() / 2 : (RANDOM.nextDouble() - 0.5));
indexMaturity[i] = 0.25 * (1 + i) + sigma * (RANDOM.nextDouble() - 0.5);
}
final ConstantCouponAnnuity fixedLeg = new ConstantCouponAnnuity(fixed, 0.0, fundingCurveName);
<<<<<<< MINE
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0, fundingCurveName, liborCurveName);
return new FixedFloatSwap(fixedLeg, floatingLeg);
=======
final VariableAnnuity floatingLeg = new VariableAnnuity(floating, indexFixing, indexMaturity, yearFrac, 1.0,
fundingCurveName, liborCurveName);
InterestRateDerivative ird = new FixedFloatSwap(fixedLeg, floatingLeg);
double rate = ParRateCalculator.getInstance().getValue(ird, curves);
ConstantCouponAnnuity newLeg = new ConstantCouponAnnuity(fixedLeg.getPaymentTimes(), fixedLeg.getNotional(), rate,
fixedLeg.getYearFractions(), fixedLeg.getFundingCurveName());
return new FixedFloatSwap(newLeg, floatingLeg);
>>>>>>> YOURS
}
/**
 * Asserts that two matrices have the same dimensions and element-wise equal entries to
 * within {@code eps}.
 * <p>
 * Fix: the original loops ran the row index up to the number of columns and the column
 * index up to the number of rows, which is wrong (and can index out of bounds) for
 * non-square matrices; the bounds are now paired with the correct dimension.
 *
 * @param m1 expected matrix
 * @param m2 actual matrix
 * @param eps absolute tolerance per entry
 */
protected void assertMatrixEquals(final DoubleMatrix2D m1, final DoubleMatrix2D m2, final double eps) {
  final int rows = m1.getNumberOfRows();
  final int cols = m1.getNumberOfColumns();
  assertEquals(m2.getNumberOfRows(), rows);
  assertEquals(m2.getNumberOfColumns(), cols);
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      assertEquals(m1.getEntry(i, j), m2.getEntry(i, j), eps);
    }
  }
}
}
Diff Result
No diff
Case 74 - java_ogplatform.rev_c890e_0f69c..InMemoryConfigMaster.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.IdUtils;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.paging.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
// collect the distinct value classes currently held in the store
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
// gather all matching documents, then apply paging to the full match list
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
// versionCorrection is validated but otherwise ignored: this master keeps latest only
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
// empty version string: this master does not support versioning
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// atomic compare-and-replace; fails if another thread changed the entry concurrently
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
// corrections behave identically to updates as versioning is not supported
return update(document);
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
// no versioning, so the history contains at most the latest document
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.ofAll(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.IdUtils;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.paging.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
// collect the distinct value classes currently held in the store
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
// gather all matching documents, then apply paging to the full match list
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument<T>> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
// versionCorrection is validated but otherwise ignored: this master keeps latest only
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
// empty version string: this master does not support versioning
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// atomic compare-and-replace; fails if another thread changed the entry concurrently
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
// corrections behave identically to updates as versioning is not supported
return update(document);
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
// no versioning, so the history contains at most the latest document
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.ofAll(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.IdUtils;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.paging.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
// collect the distinct value classes currently held in the store
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
// gather all matching documents, then sort and page the full match list
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
// sort according to the requested ordering before paging
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
// versionCorrection is validated but otherwise ignored: this master keeps latest only
final ConfigDocument<?> document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getName(), "document.name");
ArgumentChecker.notNull(document.getValue(), "document.value");
final Object value = document.getValue();
final ObjectId objectId = _objectIdSupplier.get();
// empty version string: this master does not support versioning
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
IdUtils.setInto(value, uniqueId);
final ConfigDocument<Object> doc = new ConfigDocument<Object>(document.getType());
doc.setName(document.getName());
doc.setValue(value);
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
return (ConfigDocument<T>) doc;
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> update(final ConfigDocument<T> document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getValue(), "document.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// atomic compare-and-replace; fails if another thread changed the entry concurrently
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(final UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "uniqueId");
if (_store.remove(uniqueId.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
_changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
// corrections behave identically to updates as versioning is not supported
return update(document);
}
//-------------------------------------------------------------------------
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
ArgumentChecker.notNull(request, "request");
ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
ArgumentChecker.notNull(request.getType(), "request.configClazz");
final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
// no versioning, so the history contains at most the latest document
final ConfigDocument<T> doc = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
if (doc != null) {
result.getDocuments().add(doc);
}
result.setPaging(Paging.ofAll(result.getDocuments()));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(uniqueId);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
ArgumentChecker.notNull(clazz, "clazz");
ConfigDocument<?> document = get(objectId, versionCorrection);
if (!clazz.isInstance(document.getValue())) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return (ConfigDocument<T>) document;
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.id.IdUtils;
import com.opengamma.id.ObjectId;
import com.opengamma.id.ObjectIdSupplier;
import com.opengamma.id.ObjectIdentifiable;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.master.config.ConfigDocument;
import com.opengamma.master.config.ConfigHistoryRequest;
import com.opengamma.master.config.ConfigHistoryResult;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.ConfigMetaDataRequest;
import com.opengamma.master.config.ConfigMetaDataResult;
import com.opengamma.master.config.ConfigSearchRequest;
import com.opengamma.master.config.ConfigSearchResult;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.paging.Paging;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument<?>> _store = new ConcurrentHashMap<ObjectId, ConfigDocument<?>>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
/**
 * Searches the store, returning the matching documents sorted and paged
 * according to the request.
 *
 * @param <T> the configuration value type
 * @param request the search request, not null
 * @return the paged search result
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  final List<ConfigDocument<T>> matches = new ArrayList<ConfigDocument<T>>();
  for (final ConfigDocument<?> candidate : _store.values()) {
    if (request.matches(candidate)) {
      matches.add((ConfigDocument<T>) candidate);
    }
  }
  Collections.sort(matches, request.getSortOrder());
  final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier, delegating to the latest version-correction.
 *
 * @param uniqueId the identifier to look up, not null
 * @return the matching document, not null
 */
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
  return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier; the version-correction argument is
 * validated but otherwise unused, as this master keeps only the latest version.
 *
 * @param objectId the object identifier, not null
 * @param versionCorrection the version-correction, not null
 * @return the matching document, not null
 * @throws DataNotFoundException if no document is stored under the identifier
 */
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  final ConfigDocument<?> stored = _store.get(objectId.getObjectId());
  if (stored == null) {
    throw new DataNotFoundException("Config not found: " + objectId);
  }
  return stored;
}
//-------------------------------------------------------------------------
/**
 * Adds a new configuration document, allocating a fresh object identifier,
 * stamping the value with it and firing an ADDED change event.
 *
 * @param <T> the configuration value type
 * @param document the document to add; must carry a name and value, not null
 * @return the stored document, with identifier and version instant set
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> add(final ConfigDocument<T> document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getName(), "document.name");
  ArgumentChecker.notNull(document.getValue(), "document.value");
  final Object storedValue = document.getValue();
  final ObjectId objectId = _objectIdSupplier.get();
  final UniqueId uniqueId = objectId.atVersion("");  // unversioned master: empty version string
  final Instant now = Instant.now();
  IdUtils.setInto(storedValue, uniqueId);  // push the new identifier into the value itself
  final ConfigDocument<Object> stored = new ConfigDocument<Object>(document.getType());
  stored.setName(document.getName());
  stored.setValue(storedValue);
  stored.setUniqueId(uniqueId);
  stored.setVersionFromInstant(now);
  _store.put(objectId, stored);
  _changeManager.entityChanged(ChangeType.ADDED, null, uniqueId, now);
  return (ConfigDocument<T>) stored;
}
//-------------------------------------------------------------------------
/**
 * Updates an existing configuration document in place.
 * <p>
 * The caller's document is mutated: its version/correction instants are reset
 * to "now"/open-ended before it replaces the stored document atomically.
 *
 * @param <T> the configuration value type
 * @param document the replacement document; must carry a uniqueId and value, not null
 * @return the updated document (the same instance passed in)
 * @throws DataNotFoundException if no document is stored under the identifier
 * @throws IllegalArgumentException if another thread replaced the document concurrently
 */
@Override
public <T> ConfigDocument<T> update(final ConfigDocument<T> document) {
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
  ArgumentChecker.notNull(document.getValue(), "document.value");
  final UniqueId uniqueId = document.getUniqueId();
  final Instant now = Instant.now();
  final ConfigDocument<?> storedDocument = _store.get(uniqueId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  document.setVersionFromInstant(now);
  document.setVersionToInstant(null);
  document.setCorrectionFromInstant(now);
  document.setCorrectionToInstant(null);
  // atomic compare-and-swap: fails if the stored document changed since the read above
  if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  _changeManager.entityChanged(ChangeType.UPDATED, uniqueId, document.getUniqueId(), now);
  return document;
}
//-------------------------------------------------------------------------
/**
 * Removes the document for the specified unique identifier and fires a REMOVED event.
 *
 * @param uniqueId the identifier of the document to remove, not null
 * @throws DataNotFoundException if nothing is stored under the identifier
 */
@Override
public void remove(final UniqueId uniqueId) {
  ArgumentChecker.notNull(uniqueId, "uniqueId");
  final ConfigDocument<?> removed = _store.remove(uniqueId.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Config not found: " + uniqueId);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, uniqueId, null, Instant.now());
}
//-------------------------------------------------------------------------
/**
 * Corrects a document; this unversioned master treats a correction exactly
 * like an update.
 *
 * @param <T> the configuration value type
 * @param document the corrected document, not null
 * @return the updated document
 */
@Override
public <T> ConfigDocument<T> correct(ConfigDocument<T> document) {
  return update(document);
}
//-------------------------------------------------------------------------
/**
 * Gets the history of a document; since this master stores only the latest
 * version, the result holds at most that single document.
 *
 * @param <T> the configuration value type
 * @param request the history request carrying objectId and type, not null
 * @return the history result, not null
 */
@Override
public <T> ConfigHistoryResult<T> history(ConfigHistoryRequest<T> request) {
  ArgumentChecker.notNull(request, "request");
  ArgumentChecker.notNull(request.getObjectId(), "request.objectId");
  ArgumentChecker.notNull(request.getType(), "request.configClazz");
  final ConfigHistoryResult<T> result = new ConfigHistoryResult<T>();
  final ConfigDocument<T> latest = get(request.getObjectId(), VersionCorrection.LATEST, request.getType());
  if (latest != null) {  // defensive: get() throws rather than returning null
    result.getDocuments().add(latest);
  }
  result.setPaging(Paging.ofAll(result.getDocuments()));
  return result;
}
//-------------------------------------------------------------------------
/**
 * Gets a document by unique identifier, checking that the stored value is of
 * the requested type.
 *
 * @param <T> the configuration value type
 * @param uniqueId the identifier to look up, not null
 * @param clazz the expected value class, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if absent or of a different type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(UniqueId uniqueId, Class<T> clazz) {
  ArgumentChecker.notNull(clazz, "clazz");
  final ConfigDocument<?> untyped = get(uniqueId);
  if (clazz.isInstance(untyped.getValue())) {
    return (ConfigDocument<T>) untyped;
  }
  throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
//-------------------------------------------------------------------------
/**
 * Gets a document by object identifier and version-correction, checking that
 * the stored value is of the requested type.
 *
 * @param <T> the configuration value type
 * @param objectId the object identifier, not null
 * @param versionCorrection the version-correction, not null
 * @param clazz the expected value class, not null
 * @return the typed document, not null
 * @throws DataNotFoundException if absent or of a different type
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigDocument<T> get(ObjectIdentifiable objectId, VersionCorrection versionCorrection, Class<T> clazz) {
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  ArgumentChecker.notNull(clazz, "clazz");
  final ConfigDocument<?> untyped = get(objectId, versionCorrection);
  if (clazz.isInstance(untyped.getValue())) {
    return (ConfigDocument<T>) untyped;
  }
  throw new DataNotFoundException("Config not found: " + objectId);
}
//-------------------------------------------------------------------------
/**
 * Returns the change manager used to broadcast entity change events.
 *
 * @return the change manager, not null
 */
@Override
public ChangeManager changeManager() {
  return _changeManager;
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configurations by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance using the default object identifier scheme.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
// Gets the latest document; the version-correction is validated but otherwise
// unused, as this master stores only the latest version of each document.
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
// Adds a document: allocates a fresh ObjectId, stamps the contained item with
// the new UniqueId and fires an ADDED change event.
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");  // unversioned master: empty version string
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
// Updates a document in place; mutates the caller's document (version and
// correction instants reset) and swaps it into the store atomically.
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// atomic compare-and-swap: fails if another thread replaced the document since the read above
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
// Removes a document and fires a REMOVED change event.
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
// Corrections are treated as plain updates by this unversioned master.
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
// Returns the change manager used to broadcast entity change events.
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
// Gets a document by unique identifier (only the object id portion is used).
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
// Summarizes the master's content, optionally listing the distinct config types.
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument doc : _store.values()) {
types.add(doc.getObject().getType());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
// Searches the store, returning the matching documents paged per the request.
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Version replacement: all variants funnel into replaceAllVersions, since an
// unversioned master holds only one version per object id.
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
// Replaces the stored document with the last of the adjusted replacement
// documents, or removes it entirely when the replacement list is empty.
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
// only the last document survives: this master keeps a single version
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
// Adds a version by delegating to replaceVersions; returns null when nothing was stored.
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
// Removing a version is a replacement with an empty list, i.e. full removal here.
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
// Validates the parts of a replacement document that the store relies on.
// NOTE(review): the <T> type parameter is unused here.
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// History: filters the whole store by object id and type, sorts by
// version-from instant, and pages the result.
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
// Bulk get: absent identifiers map to null values in the returned map.
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configurations by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance using the default object identifier scheme.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
// Gets the latest document; the version-correction is validated but otherwise
// unused, as this master stores only the latest version of each document.
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
// Adds a document: allocates a fresh ObjectId, stamps the contained item with
// the new UniqueId and fires an ADDED change event.
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");  // unversioned master: empty version string
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
// Updates a document in place; mutates the caller's document (version and
// correction instants reset) and swaps it into the store atomically.
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
// atomic compare-and-swap: fails if another thread replaced the document since the read above
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
// Removes a document and fires a REMOVED change event.
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
// Corrections are treated as plain updates by this unversioned master.
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
// Returns the change manager used to broadcast entity change events.
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
// Gets a document by unique identifier (only the object id portion is used).
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
// Summarizes the master's content, optionally listing the distinct config types.
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument doc : _store.values()) {
types.add(doc.getObject().getType());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
// Searches the store, returning the matching documents paged per the request.
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Version replacement: all variants funnel into replaceAllVersions, since an
// unversioned master holds only one version per object id.
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
// Replaces the stored document with the last of the adjusted replacement
// documents, or removes it entirely when the replacement list is empty.
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
// only the last document survives: this master keeps a single version
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
// Adds a version by delegating to replaceVersions; returns null when nothing was stored.
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
// Removing a version is a replacement with an empty list, i.e. full removal here.
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
// Validates the parts of a replacement document that the store relies on.
// NOTE(review): the <T> type parameter is unused here.
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// History: filters the whole store by object id and type, sorts by
// version-from instant, and pages the result.
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
// Bulk get: absent identifiers map to null values in the returned map.
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configurations by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument doc : _store.values()) {
types.add(doc.getObject().getType());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
 * A simple, in-memory implementation of {@code ConfigMaster}.
 * <p>
 * This master does not support versioning of configuration documents.
 * <p>
 * This implementation does not copy stored elements, making it thread-hostile.
 * As such, this implementation is currently most useful for testing scenarios.
 */
public class InMemoryConfigMaster implements ConfigMaster {

  /**
   * The default scheme used for each {@link ObjectId}.
   */
  public static final String DEFAULT_OID_SCHEME = "MemCfg";

  /**
   * A cache of configuration documents keyed by object identifier.
   */
  private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
  /**
   * The supplier of identifiers.
   */
  private final Supplier<ObjectId> _objectIdSupplier;
  /**
   * The change manager.
   */
  private final ChangeManager _changeManager;

  /**
   * Creates an instance.
   */
  public InMemoryConfigMaster() {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
  }

  /**
   * Creates an instance specifying the change manager.
   *
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final ChangeManager changeManager) {
    this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
  }

  /**
   * Creates an instance specifying the supplier of object identifiers.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
    this(objectIdSupplier, new BasicChangeManager());
  }

  /**
   * Creates an instance specifying the supplier of object identifiers and change manager.
   *
   * @param objectIdSupplier  the supplier of object identifiers, not null
   * @param changeManager  the change manager, not null
   */
  public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
    ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
    ArgumentChecker.notNull(changeManager, "changeManager");
    _objectIdSupplier = objectIdSupplier;
    _changeManager = changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by object identifier.
   * The version-correction is ignored as this master is unversioned.
   *
   * @param objectId  the object identifier, not null
   * @param versionCorrection  the version-correction, not null (unused)
   * @return the stored document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
    ArgumentChecker.notNull(objectId, "objectId");
    ArgumentChecker.notNull(versionCorrection, "versionCorrection");
    final ConfigDocument document = _store.get(objectId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + objectId);
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Adds a document, assigning it a fresh object identifier and firing an ADDED event.
   *
   * @param document  the document, with a non-null, named, valued config object, not null
   * @return the stored document with identifier and version instant set, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public ConfigDocument add(ConfigDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getObject(), "document.object");
    ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
    ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
    final ConfigItem item = document.getObject();
    final ObjectId objectId = _objectIdSupplier.get();
    final UniqueId uniqueId = objectId.atVersion("");
    final Instant now = Instant.now();
    item.setUniqueId(uniqueId);
    final ConfigDocument doc = new ConfigDocument(item);
    doc.setObject(document.getObject());
    doc.setUniqueId(uniqueId);
    doc.setVersionFromInstant(now);
    _store.put(objectId, doc);
    _changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
    return doc;
  }

  //-------------------------------------------------------------------------
  /**
   * Updates an existing document in place, resetting its version/correction instants,
   * and fires a CHANGED event.
   *
   * @param document  the replacement document with a unique identifier, not null
   * @return the updated document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException if the stored document changed concurrently
   */
  @Override
  public ConfigDocument update(ConfigDocument document) {
    ArgumentChecker.notNull(document, "document");
    ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
    ArgumentChecker.notNull(document.getObject(), "document.object");
    ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
    final UniqueId uniqueId = document.getUniqueId();
    final Instant now = Instant.now();
    final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId);
    }
    document.setVersionFromInstant(now);
    document.setVersionToInstant(null);
    document.setCorrectionFromInstant(now);
    document.setCorrectionToInstant(null);
    // Atomic compare-and-swap: fail rather than silently overwrite a concurrent update.
    if (!_store.replace(uniqueId.getObjectId(), storedDocument, document)) {
      throw new IllegalArgumentException("Concurrent modification");
    }
    _changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Removes a document and fires a REMOVED event.
   *
   * @param objectIdentifiable  the identifier of the document to remove, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @Override
  public void remove(ObjectIdentifiable objectIdentifiable) {
    ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
    if (_store.remove(objectIdentifiable.getObjectId()) == null) {
      throw new DataNotFoundException("Config not found: " + objectIdentifiable);
    }
    _changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
  }

  //-------------------------------------------------------------------------
  /**
   * Corrects a document; as this master is unversioned this is identical to {@link #update}.
   *
   * @param document  the corrected document, not null
   * @return the updated document, not null
   */
  @Override
  public ConfigDocument correct(ConfigDocument document) {
    return update(document);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the change manager used to broadcast master events.
   *
   * @return the change manager, not null
   */
  @Override
  public ChangeManager changeManager() {
    return _changeManager;
  }

  //-------------------------------------------------------------------------
  /**
   * Gets a document by unique identifier. The version part is ignored.
   *
   * @param uniqueId  the unique identifier, not null
   * @return the stored document, not null
   * @throws DataNotFoundException if no document exists for the identifier
   */
  @SuppressWarnings("unchecked")
  @Override
  public ConfigDocument get(UniqueId uniqueId) {
    ArgumentChecker.notNull(uniqueId, "document.uniqueId");
    final ConfigDocument document = _store.get(uniqueId.getObjectId());
    if (document == null) {
      throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
    }
    return document;
  }

  //-------------------------------------------------------------------------
  /**
   * Queries meta-data about the master; currently only the set of stored config types.
   *
   * @param request  the meta-data request, not null
   * @return the meta-data result, not null
   */
  @Override
  public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
    ArgumentChecker.notNull(request, "request");
    ConfigMetaDataResult result = new ConfigMetaDataResult();
    if (request.isConfigTypes()) {
      Set<Class<?>> types = Sets.newHashSet();
      for (ConfigDocument doc : _store.values()) {
        types.add(doc.getObject().getType());
      }
      result.getConfigTypes().addAll(types);
    }
    return result;
  }

  //-------------------------------------------------------------------------
  /**
   * Searches for documents matching the request, sorted and paged.
   * <p>
   * FIX: the merged code referenced an undeclared type variable {@code T}
   * ({@code List<ConfigDocument<T>>} inside a non-generic method), which does not
   * compile; the method is now declared generic to match the interface.
   *
   * @param <T>  the configuration element type
   * @param request  the search request, not null
   * @return the matching documents, sorted per {@code request.getSortOrder()}, not null
   */
  @SuppressWarnings("unchecked")
  @Override
  public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
    ArgumentChecker.notNull(request, "request");
    final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
    for (ConfigDocument<?> doc : _store.values()) {
      if (request.matches(doc)) {
        list.add((ConfigDocument<T>) doc);
      }
    }
    Collections.sort(list, request.getSortOrder());
    final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
    result.setPaging(Paging.of(request.getPagingRequest(), list));
    result.getDocuments().addAll(request.getPagingRequest().select(list));
    return result;
  }

  //-------------------------------------------------------------------------
  @Override
  public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
    // Unversioned master: any replace collapses to replacing all versions.
    return replaceAllVersions(objectId, replacementDocuments);
  }

  @Override
  public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
    // Unversioned master: any replace collapses to replacing all versions.
    return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
  }

  /**
   * Replaces all versions of a document. An empty replacement list removes the document;
   * otherwise only the last (latest "version from") replacement is retained, as this
   * master stores a single version per object identifier.
   *
   * @param objectId  the identifier of the document to replace, not null
   * @param replacementDocuments  the replacements, not null, each validated
   * @return the unique identifiers of the retained documents (empty on removal), not null
   * @throws DataNotFoundException if no document exists for the identifier
   * @throws IllegalArgumentException if the stored document changed concurrently
   */
  @Override
  public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
    ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
    ArgumentChecker.notNull(objectId, "objectId");
    for (ConfigDocument replacementDocument : replacementDocuments) {
      validateDocument(replacementDocument);
    }
    final Instant now = Instant.now();
    ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
    final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
    if (storedDocument == null) {
      throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
    }
    if (replacementDocuments.isEmpty()) {
      _store.remove(objectId.getObjectId());
      _changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
      return Collections.emptyList();
    } else {
      Instant storedVersionFrom = storedDocument.getVersionFromInstant();
      Instant storedVersionTo = storedDocument.getVersionToInstant();
      List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
      ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
      if (!_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument)) {
        throw new IllegalArgumentException("Concurrent modification");
      }
      return ImmutableList.of(lastReplacementDocument.getUniqueId());
    }
  }

  /**
   * Adds a version of a document; delegates to {@link #replaceVersions}.
   *
   * @param objectId  the identifier of the document, not null
   * @param documentToAdd  the document to add, not null
   * @return the unique identifier of the added document, null if none was retained
   */
  @Override
  public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
    List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
    if (result.isEmpty()) {
      return null;
    } else {
      return result.get(0);
    }
  }

  /**
   * Removes a version; equivalent to replacing it with an empty list.
   *
   * @param uniqueId  the identifier of the version to remove, not null
   */
  @Override
  public void removeVersion(UniqueId uniqueId) {
    replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
  }

  /**
   * Replaces a single version with the given document.
   *
   * @param replacementDocument  the replacement, carrying the target unique identifier, not null
   * @return the unique identifier of the replacement, null if none was retained
   */
  @Override
  public UniqueId replaceVersion(ConfigDocument replacementDocument) {
    List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
    if (result.isEmpty()) {
      return null;
    } else {
      return result.get(0);
    }
  }

  /**
   * Validates that a replacement document carries a named, valued config object.
   * (Removed the unused {@code <T>} type parameter.)
   *
   * @param document  the document to validate, not null
   */
  private void validateDocument(ConfigDocument document) {
    ArgumentChecker.notNull(document.getObject(), "document.object");
    ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
    ArgumentChecker.notNull(document.getName(), "document.name");
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the history of documents matching the request. As this master is
   * unversioned, at most one version per object identifier is returned,
   * filtered by object id and type, sorted by "version from" instant, then paged.
   *
   * @param request  the history request, not null
   * @return the history result, not null
   */
  @Override
  public ConfigHistoryResult history(ConfigHistoryRequest request) {
    final Class<?> type = request.getType();
    final ObjectId oid = request.getObjectId();
    PagingRequest pagingRequest = request.getPagingRequest();
    return new ConfigHistoryResult(
      pagingRequest.select(
        functional(_store.keySet())
          .map(new Function1<ObjectId, ConfigDocument>() {
            @Override
            public ConfigDocument execute(ObjectId objectId) {
              return _store.get(objectId);
            }
          })
          .filter(new Function1<ConfigDocument, Boolean>() {
            @Override
            public Boolean execute(ConfigDocument configDocument) {
              // Null criteria act as wildcards.
              return
                (oid == null || (configDocument.getObjectId().equals(oid)))
                &&
                (type == null || (type.isAssignableFrom(configDocument.getType())));
            }
          })
          .sortBy(new Comparator<ConfigDocument>() {
            @Override
            public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
              return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
            }
          })
          .asList()));
  }

  /**
   * Bulk gets documents by unique identifier; absent identifiers map to null.
   *
   * @param uniqueIds  the identifiers to look up, not null
   * @return a map from identifier to document (null value when absent), not null
   */
  @Override
  public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
    Map<UniqueId, ConfigDocument> resultMap = newHashMap();
    for (UniqueId uniqueId : uniqueIds) {
      resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
    }
    return resultMap;
  }

}
KeepBothMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets meta-data about this master, currently the set of stored config types.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes()) {
    // Use java.util.HashSet instead of org.testng.collections.Sets: TestNG is a
    // test framework and must not be referenced from production code.
    final Set<Class<?>> types = new HashSet<Class<?>>();
    for (ConfigDocument doc : _store.values()) {
      types.add(doc.getObject().getType());
    }
    result.getConfigTypes().addAll(types);
  }
  return result;
}
//-------------------------------------------------------------------------
/**
 * Searches for configuration documents matching the typed request.
 * <p>
 * Performs a linear scan of the whole store, which is acceptable for an
 * in-memory master intended mainly for testing.
 *
 * @param <T>  the configuration element type
 * @param request  the search request, not null
 * @return the paged search result, not null
 */
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
// unchecked cast: relies on matches() only accepting documents of type T
list.add((ConfigDocument<T>) doc);
}
}
// NOTE(review): assumes request.getSortOrder() is non-null, or that callers accept
// natural ordering (Collections.sort treats a null comparator as natural order,
// which throws if ConfigDocument is not Comparable) -- confirm with callers.
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
  ArgumentChecker.notNull(request, "request");
  // Collect every stored document that the request accepts.
  final List<ConfigDocument> matches = new ArrayList<ConfigDocument>();
  for (ConfigDocument candidate : _store.values()) {
    if (request.matches(candidate)) {
      matches.add(candidate);
    }
  }
  // Page the matches; unlike the typed overload, this variant applies no sort order.
  final ConfigSearchResult result = new ConfigSearchResult();
  result.setPaging(Paging.of(request.getPagingRequest(), matches));
  result.getDocuments().addAll(request.getPagingRequest().select(matches));
  return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Replaces all versions of an object; delegates to {@link #replaceAllVersions}.
 *
 * @param objectId  the object identifier, not null
 * @param replacementDocuments  the replacement documents, not null
 * @return the unique identifiers of the retained documents
 */
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
/**
 * Replaces the version identified by the unique id. Because this master stores a
 * single version per object, this delegates to {@link #replaceAllVersions} using
 * only the object identifier.
 *
 * @param uniqueId  the unique identifier of the version to replace, not null
 * @param replacementDocuments  the replacement documents, not null
 * @return the unique identifiers of the retained documents
 */
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
/**
 * Replaces all versions of a stored object with the supplied documents.
 * <p>
 * Since this master keeps a single version per object, only the last of the
 * instant-adjusted replacement documents is retained. An empty replacement
 * list removes the object entirely.
 *
 * @param objectId  the object identifier, not null
 * @param replacementDocuments  the replacement documents, not null
 * @return the unique identifiers of the retained documents, empty if removed
 * @throws DataNotFoundException if no document is stored under the identifier
 * @throws IllegalArgumentException if the stored document changed concurrently
 */
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
  ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
  ArgumentChecker.notNull(objectId, "objectId");
  for (ConfigDocument replacementDocument : replacementDocuments) {
    validateDocument(replacementDocument);
  }
  final Instant now = Instant.now();
  ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
  final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
  if (storedDocument == null) {
    throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
  }
  if (replacementDocuments.isEmpty()) {
    // Empty replacement list means "delete the object".
    _store.remove(objectId.getObjectId());
    _changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
    return Collections.emptyList();
  }
  final Instant storedVersionFrom = storedDocument.getVersionFromInstant();
  final Instant storedVersionTo = storedDocument.getVersionToInstant();
  final List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
  // Only the last (latest) replacement survives in this single-version master.
  final ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
  // Compare-and-replace so a concurrent writer is detected rather than overwritten.
  // ("== false" replaced with idiomatic negation.)
  if (!_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument)) {
    throw new IllegalArgumentException("Concurrent modification");
  }
  return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
  // Delegate to the bulk replace with a single-element list and unwrap the result.
  final List<UniqueId> ids = replaceVersions(objectId, Collections.singletonList(documentToAdd));
  return ids.isEmpty() ? null : ids.get(0);
}
/**
 * Removes the version identified by the unique id. Replacing a version with an
 * empty list deletes the whole object in this single-version master.
 *
 * @param uniqueId  the unique identifier of the version to remove, not null
 */
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
  // Replace the stored version addressed by the document's own unique id.
  final List<UniqueId> ids =
      replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
  return ids.isEmpty() ? null : ids.get(0);
}
/**
 * Validates that a replacement document carries the fields this master requires.
 * The unused {@code <T>} type parameter on the original declaration has been
 * removed; the method never referenced it.
 *
 * @param document  the document to validate, not null
 * @throws IllegalArgumentException if any required field is missing
 */
private void validateDocument(ConfigDocument document) {
  // Check the document itself first so a null element fails with a clear message
  // instead of a NullPointerException.
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getObject(), "document.object");
  ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
  ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Gets the history of stored documents matching the request.
 * <p>
 * Since this master holds one version per object, the "history" is assembled by
 * scanning the whole store, filtering by object id and type, and sorting by the
 * version-from instant.
 *
 * @param request  the history request, not null
 * @return the paged history result, not null
 */
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
// map each stored key to its current document
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
// keep documents matching the requested object id (if any) and type (if any)
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
// oldest version-from first
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
/**
 * Bulk lookup of documents by unique identifier.
 * <p>
 * NOTE(review): identifiers with no stored document are mapped to {@code null}
 * values rather than omitted from the map -- confirm callers expect this.
 *
 * @param uniqueIds  the identifiers to look up, not null
 * @return a map from each requested identifier to its document (or null), not null
 */
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets meta-data about this master, currently the set of stored config types.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes()) {
    // Use java.util.HashSet instead of org.testng.collections.Sets: TestNG is a
    // test framework and must not be referenced from production code.
    final Set<Class<?>> types = new HashSet<Class<?>>();
    for (ConfigDocument doc : _store.values()) {
      types.add(doc.getObject().getType());
    }
    result.getConfigTypes().addAll(types);
  }
  return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
/**
 * Validates that a replacement document carries the fields this master requires.
 * The unused {@code <T>} type parameter on the original declaration has been
 * removed; the method never referenced it.
 *
 * @param document  the document to validate, not null
 * @throws IllegalArgumentException if any required field is missing
 */
private void validateDocument(ConfigDocument document) {
  // Check the document itself first so a null element fails with a clear message
  // instead of a NullPointerException.
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getObject(), "document.object");
  ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
  ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
// Safe
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplier of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
/**
 * Gets meta-data about this master, currently the set of stored config types.
 *
 * @param request  the meta-data request, not null
 * @return the populated result, not null
 */
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes()) {
    // Use java.util.HashSet instead of org.testng.collections.Sets: TestNG is a
    // test framework and must not be referenced from production code.
    final Set<Class<?>> types = new HashSet<Class<?>>();
    for (ConfigDocument doc : _store.values()) {
      types.add(doc.getObject().getType());
    }
    result.getConfigTypes().addAll(types);
  }
  return result;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
=======
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
>>>>>>> YOURS
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
/**
 * Validates that a replacement document carries the fields this master requires.
 * The unused {@code <T>} type parameter on the original declaration has been
 * removed; the method never referenced it.
 *
 * @param document  the document to validate, not null
 * @throws IllegalArgumentException if any required field is missing
 */
private void validateDocument(ConfigDocument document) {
  // Check the document itself first so a null element fails with a clear message
  // instead of a NullPointerException.
  ArgumentChecker.notNull(document, "document");
  ArgumentChecker.notNull(document.getObject(), "document.object");
  ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
  ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the version history for a config object. Since this in-memory master
// stores at most one document per ObjectId, the "history" is at most one entry
// per matching object; the result is filtered by the request's object id and
// type, sorted by version-from instant, then paged.
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
// NOTE(review): 'functional' appears to be a lazy map/filter/sort pipeline
// over the store's key set — TODO confirm against com.opengamma.util.functional.
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
// Resolve each key to its stored document.
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
// Keep documents matching the requested object id (if any) and
// whose type is assignable to the requested type (if any).
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
// Oldest version first.
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
  // Bulk lookup: one store read per requested identifier. Identifiers with no
  // stored document map to null, mirroring ConcurrentMap.get.
  final Map<UniqueId, ConfigDocument> documents = newHashMap();
  for (final UniqueId requested : uniqueIds) {
    documents.put(requested, _store.get(requested.getObjectId()));
  }
  return documents;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import java.util.*;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configuration by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
  // The version-correction is validated but otherwise ignored: this master
  // keeps only the latest document per object id.
  ArgumentChecker.notNull(objectId, "objectId");
  ArgumentChecker.notNull(versionCorrection, "versionCorrection");
  final ConfigDocument found = _store.get(objectId.getObjectId());
  if (found != null) {
    return found;
  }
  throw new DataNotFoundException("Config not found: " + objectId);
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Adds a new config document: allocates a fresh ObjectId, stamps the item and
// document with a blank-version UniqueId, records the version-from instant,
// stores the document, and fires an ADDED change event.
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
// atVersion("") produces a UniqueId with an empty version string — this
// master does not track real version identifiers.
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
// NOTE(review): setObject with the same item the constructor already received
// looks redundant — presumably harmless; confirm ConfigDocument(ConfigItem)
// stores the item before removing.
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
// Updates an existing document in place. The stored document is atomically
// swapped via ConcurrentMap.replace; a lost compare-and-swap race surfaces as
// IllegalArgumentException("Concurrent modification"). Version/correction
// instants on the incoming document are reset to [now, null).
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
// Restamp the instants before the swap so the stored state is self-consistent.
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
  ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
  // Remove and check for prior existence in one step.
  final ConfigDocument removed = _store.remove(objectIdentifiable.getObjectId());
  if (removed == null) {
    throw new DataNotFoundException("Config not found: " + objectIdentifiable);
  }
  _changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
// This master does not distinguish corrections from updates; a correction is
// handled identically to an update.
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
// Exposes the change manager used to publish ADDED/CHANGED/REMOVED events.
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
  // Version information on the UniqueId is ignored; lookup is by object id only.
  ArgumentChecker.notNull(uniqueId, "document.uniqueId");
  final ConfigDocument found = _store.get(uniqueId.getObjectId());
  if (found != null) {
    return found;
  }
  throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
  ArgumentChecker.notNull(request, "request");
  final ConfigMetaDataResult result = new ConfigMetaDataResult();
  if (request.isConfigTypes()) {
    // Collect the distinct config types currently stored.
    final Set<Class<?>> distinctTypes = Sets.newHashSet();
    for (final ConfigDocument stored : _store.values()) {
      distinctTypes.add(stored.getObject().getType());
    }
    result.getConfigTypes().addAll(distinctTypes);
  }
  return result;
}
//-------------------------------------------------------------------------
<<<<<<< MINE
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
=======
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
>>>>>>> YOURS
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Replacing a specific set of versions degenerates to replacing all versions,
// since this master keeps only one document per object.
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
// Replacing one version likewise replaces everything stored for the object.
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
// Replaces all stored versions of an object. An empty replacement list removes
// the object entirely; otherwise only the last (latest) adjusted replacement
// document is stored, because this master holds a single document per object.
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
// Empty replacement list == remove; fires a REMOVED event.
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
// Normalize the replacement documents' version instants into the stored
// document's version window (delegated to MasterUtils).
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
// Atomic swap; a lost race surfaces as "Concurrent modification".
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configurations by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
<<<<<<< MINE
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
=======
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument doc : _store.values()) {
types.add(doc.getObject().getType());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master.config.impl;
import static com.google.common.collect.Maps.newHashMap;
import static com.opengamma.util.functional.Functional.functional;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.time.Instant;
import org.testng.collections.Sets;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.opengamma.DataNotFoundException;
import com.opengamma.core.change.BasicChangeManager;
import com.opengamma.core.change.ChangeManager;
import com.opengamma.core.change.ChangeType;
import com.opengamma.core.config.impl.ConfigItem;
import com.opengamma.id.*;
import com.opengamma.master.MasterUtils;
import com.opengamma.master.config.*;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.functional.Function1;
import com.opengamma.util.paging.Paging;
import com.opengamma.util.paging.PagingRequest;
/**
* A simple, in-memory implementation of {@code ConfigMaster}.
* <p>
* This master does not support versioning of configuration documents.
* <p>
* This implementation does not copy stored elements, making it thread-hostile.
* As such, this implementation is currently most useful for testing scenarios.
*/
public class InMemoryConfigMaster implements ConfigMaster {
/**
* The default scheme used for each {@link ObjectId}.
*/
public static final String DEFAULT_OID_SCHEME = "MemCfg";
/**
* A cache of configurations by identifier.
*/
private final ConcurrentMap<ObjectId, ConfigDocument> _store = new ConcurrentHashMap<ObjectId, ConfigDocument>();
/**
* The supplied of identifiers.
*/
private final Supplier<ObjectId> _objectIdSupplier;
/**
* The change manager.
*/
private final ChangeManager _changeManager;
/**
* Creates an instance.
*/
public InMemoryConfigMaster() {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME));
}
/**
* Creates an instance specifying the change manager.
*
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final ChangeManager changeManager) {
this(new ObjectIdSupplier(InMemoryConfigMaster.DEFAULT_OID_SCHEME), changeManager);
}
/**
* Creates an instance specifying the supplier of object identifiers.
*
* @param objectIdSupplier the supplier of object identifiers, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier) {
this(objectIdSupplier, new BasicChangeManager());
}
/**
* Creates an instance specifying the supplier of object identifiers and change manager.
*
* @param objectIdSupplier the supplier of object identifiers, not null
* @param changeManager the change manager, not null
*/
public InMemoryConfigMaster(final Supplier<ObjectId> objectIdSupplier, final ChangeManager changeManager) {
ArgumentChecker.notNull(objectIdSupplier, "objectIdSupplier");
ArgumentChecker.notNull(changeManager, "changeManager");
_objectIdSupplier = objectIdSupplier;
_changeManager = changeManager;
}
//-------------------------------------------------------------------------
@Override
<<<<<<< MINE
public ConfigMetaDataResult metaData(final ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument<?> doc : _store.values()) {
types.add(doc.getValue().getClass());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <T> ConfigSearchResult<T> search(final ConfigSearchRequest<T> request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument<T>> list = new ArrayList<ConfigDocument<T>>();
for (ConfigDocument<?> doc : _store.values()) {
if (request.matches(doc)) {
list.add((ConfigDocument<T>) doc);
}
}
Collections.sort(list, request.getSortOrder());
final ConfigSearchResult<T> result = new ConfigSearchResult<T>();
result.setPaging(Paging.of(request.getPagingRequest(), list));
result.getDocuments().addAll(request.getPagingRequest().select(list));
return result;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final UniqueId uniqueId) {
return get(uniqueId, VersionCorrection.LATEST);
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument<?> get(final ObjectIdentifiable objectId, final VersionCorrection versionCorrection) {
=======
public ConfigDocument get(ObjectIdentifiable objectId, VersionCorrection versionCorrection) {
>>>>>>> YOURS
ArgumentChecker.notNull(objectId, "objectId");
ArgumentChecker.notNull(versionCorrection, "versionCorrection");
final ConfigDocument document = _store.get(objectId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + objectId);
}
return document;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument add(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getName(), "document.object.name");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final ConfigItem item = document.getObject();
final ObjectId objectId = _objectIdSupplier.get();
final UniqueId uniqueId = objectId.atVersion("");
final Instant now = Instant.now();
item.setUniqueId(uniqueId);
final ConfigDocument doc = new ConfigDocument(item);
doc.setObject(document.getObject());
doc.setUniqueId(uniqueId);
doc.setVersionFromInstant(now);
_store.put(objectId, doc);
_changeManager.entityChanged(ChangeType.ADDED, objectId, doc.getVersionFromInstant(), doc.getVersionToInstant(), now);
return doc;
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument update(ConfigDocument document) {
ArgumentChecker.notNull(document, "document");
ArgumentChecker.notNull(document.getUniqueId(), "document.uniqueId");
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
final UniqueId uniqueId = document.getUniqueId();
final Instant now = Instant.now();
final ConfigDocument storedDocument = _store.get(uniqueId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Config not found: " + uniqueId);
}
document.setVersionFromInstant(now);
document.setVersionToInstant(null);
document.setCorrectionFromInstant(now);
document.setCorrectionToInstant(null);
if (_store.replace(uniqueId.getObjectId(), storedDocument, document) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
_changeManager.entityChanged(ChangeType.CHANGED, document.getObjectId(), document.getVersionFromInstant(), document.getVersionToInstant(), now);
return document;
}
//-------------------------------------------------------------------------
@Override
public void remove(ObjectIdentifiable objectIdentifiable) {
ArgumentChecker.notNull(objectIdentifiable, "objectIdentifiable");
if (_store.remove(objectIdentifiable.getObjectId()) == null) {
throw new DataNotFoundException("Config not found: " + objectIdentifiable);
}
_changeManager.entityChanged(ChangeType.REMOVED, objectIdentifiable.getObjectId(), null, null, Instant.now());
}
//-------------------------------------------------------------------------
@Override
public ConfigDocument correct(ConfigDocument document) {
return update(document);
}
//-------------------------------------------------------------------------
@Override
public ChangeManager changeManager() {
return _changeManager;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigDocument get(UniqueId uniqueId) {
ArgumentChecker.notNull(uniqueId, "document.uniqueId");
final ConfigDocument document = _store.get(uniqueId.getObjectId());
if (document == null) {
throw new DataNotFoundException("Config not found: " + uniqueId.getObjectId());
}
return document;
}
//-------------------------------------------------------------------------
@Override
public ConfigMetaDataResult metaData(ConfigMetaDataRequest request) {
ArgumentChecker.notNull(request, "request");
ConfigMetaDataResult result = new ConfigMetaDataResult();
if (request.isConfigTypes()) {
Set<Class<?>> types = Sets.newHashSet();
for (ConfigDocument doc : _store.values()) {
types.add(doc.getObject().getType());
}
result.getConfigTypes().addAll(types);
}
return result;
}
//-------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public ConfigSearchResult search(ConfigSearchRequest request) {
ArgumentChecker.notNull(request, "request");
final List<ConfigDocument> list = new ArrayList<ConfigDocument>();
for (ConfigDocument doc : _store.values()) {
if (request.matches(doc)) {
list.add(doc);
}
}
final ConfigSearchResult result = new ConfigSearchResult();
result.setPaging(Paging.of(request.getPagingRequest(), list));
List<ConfigDocument> select = request.getPagingRequest().select(list);
result.getDocuments().addAll(select);
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public List<UniqueId> replaceVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(objectId, replacementDocuments);
}
@Override
public List<UniqueId> replaceVersion(UniqueId uniqueId, List<ConfigDocument> replacementDocuments) {
return replaceAllVersions(uniqueId.getObjectId(), replacementDocuments);
}
@Override
public List<UniqueId> replaceAllVersions(ObjectIdentifiable objectId, List<ConfigDocument> replacementDocuments) {
ArgumentChecker.notNull(replacementDocuments, "replacementDocuments");
ArgumentChecker.notNull(objectId, "objectId");
for (ConfigDocument replacementDocument : replacementDocuments) {
validateDocument(replacementDocument);
}
final Instant now = Instant.now();
ArgumentChecker.isTrue(MasterUtils.checkUniqueVersionsFrom(replacementDocuments), "No two versioned documents may have the same \"version from\" instant");
final ConfigDocument storedDocument = _store.get(objectId.getObjectId());
if (storedDocument == null) {
throw new DataNotFoundException("Document not found: " + objectId.getObjectId());
}
if (replacementDocuments.isEmpty()) {
_store.remove(objectId.getObjectId());
_changeManager.entityChanged(ChangeType.REMOVED, objectId.getObjectId(), null, null, now);
return Collections.emptyList();
} else {
Instant storedVersionFrom = storedDocument.getVersionFromInstant();
Instant storedVersionTo = storedDocument.getVersionToInstant();
List<ConfigDocument> orderedReplacementDocuments = MasterUtils.adjustVersionInstants(now, storedVersionFrom, storedVersionTo, replacementDocuments);
ConfigDocument lastReplacementDocument = orderedReplacementDocuments.get(orderedReplacementDocuments.size() - 1);
if (_store.replace(objectId.getObjectId(), storedDocument, lastReplacementDocument) == false) {
throw new IllegalArgumentException("Concurrent modification");
}
return ImmutableList.of(lastReplacementDocument.getUniqueId());
}
}
@Override
public UniqueId addVersion(ObjectIdentifiable objectId, ConfigDocument documentToAdd) {
List<UniqueId> result = replaceVersions(objectId, Collections.singletonList(documentToAdd));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
@Override
public void removeVersion(UniqueId uniqueId) {
replaceVersion(uniqueId, Collections.<ConfigDocument>emptyList());
}
@Override
public UniqueId replaceVersion(ConfigDocument replacementDocument) {
List<UniqueId> result = replaceVersion(replacementDocument.getUniqueId(), Collections.singletonList(replacementDocument));
if (result.isEmpty()) {
return null;
} else {
return result.get(0);
}
}
private <T> void validateDocument(ConfigDocument document) {
ArgumentChecker.notNull(document.getObject(), "document.object");
ArgumentChecker.notNull(document.getObject().getValue(), "document.object.value");
ArgumentChecker.notNull(document.getName(), "document.name");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@Override
public ConfigHistoryResult history(ConfigHistoryRequest request) {
final Class<?> type = request.getType();
final ObjectId oid = request.getObjectId();
PagingRequest pagingRequest = request.getPagingRequest();
return new ConfigHistoryResult(
pagingRequest.select(
functional(_store.keySet())
.map(new Function1<ObjectId, ConfigDocument>() {
@Override
public ConfigDocument execute(ObjectId objectId) {
return _store.get(objectId);
}
})
.filter(new Function1<ConfigDocument, Boolean>() {
@Override
public Boolean execute(ConfigDocument configDocument) {
return
(oid == null || (configDocument.getObjectId().equals(oid)))
&&
(type == null || (type.isAssignableFrom(configDocument.getType())));
}
})
.sortBy(new Comparator<ConfigDocument>() {
@Override
public int compare(ConfigDocument configDocument, ConfigDocument configDocument1) {
return configDocument.getVersionFromInstant().compareTo(configDocument1.getVersionFromInstant());
}
})
.asList()));
}
@Override
public Map<UniqueId, ConfigDocument> get(Collection<UniqueId> uniqueIds) {
Map<UniqueId, ConfigDocument> resultMap = newHashMap();
for (UniqueId uniqueId : uniqueIds) {
resultMap.put(uniqueId, _store.get(uniqueId.getObjectId()));
}
return resultMap;
}
}
Diff Result
No diff
Case 75 - java_ogplatform.rev_dc917_de8c9..SnapshotDataBundleFormatter.java
Base
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends NoHistoryFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
}
@Override
public String formatForDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<UniqueId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public FormatType getFormatForType() {
return FormatType.LABELLED_MATRIX_1D;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends NoHistoryFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
}
@Override
public String formatForDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<UniqueId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public FormatType getFormatForType() {
return FormatType.LABELLED_MATRIX_1D;
}
}
Left
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<UniqueId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatCell(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.UniqueId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<UniqueId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatCell(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
}
Right
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends NoHistoryFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
}
@Override
public String formatForDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public FormatType getFormatForType() {
return FormatType.LABELLED_MATRIX_1D;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends NoHistoryFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
}
@Override
public String formatForDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public FormatType getFormatForType() {
return FormatType.LABELLED_MATRIX_1D;
}
}
MergeMethods
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatForDisplay(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
}
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
<<<<<<< MINE
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
=======
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
>>>>>>> YOURS
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatCell(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
}/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.web.server.push.analytics.formatting;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.opengamma.core.marketdatasnapshot.SnapshotDataBundle;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.id.ExternalId;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
public class SnapshotDataBundleFormatter extends AbstractFormatter<SnapshotDataBundle> {
private final DoubleFormatter _doubleFormatter;
public SnapshotDataBundleFormatter(DoubleFormatter doubleFormatter) {
super(SnapshotDataBundle.class);
ArgumentChecker.notNull(doubleFormatter, "doubleFormatter");
_doubleFormatter = doubleFormatter;
addFormatter(new Formatter<SnapshotDataBundle>(Format.EXPANDED) {
@Override
List<List<String>> format(SnapshotDataBundle value, ValueSpecification valueSpec) {
return formatExpanded(value, valueSpec);
}
});
}
@Override
public String formatCell(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
return "Data Bundle (" + bundle.getDataPoints().size() + " points)";
}
<<<<<<< MINE
private List<List<String>> formatExpanded(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<UniqueId, Double> dataPoints = bundle.getDataPoints();
=======
@Override
public List<List<String>> formatForExpandedDisplay(SnapshotDataBundle bundle, ValueSpecification valueSpec) {
Map<ExternalId, Double> dataPoints = bundle.getDataPoints();
>>>>>>> YOURS
List<List<String>> results = Lists.newArrayListWithCapacity(dataPoints.size());
for (Map.Entry<ExternalId, Double> entry : dataPoints.entrySet()) {
String idStr = entry.getKey().toString();
String formattedValue = _doubleFormatter.formatCell(entry.getValue(), valueSpec);
results.add(ImmutableList.of(idStr, formattedValue));
}
return results;
}
@Override
public DataType getDataType() {
return DataType.LABELLED_MATRIX_1D;
}
}
Diff Result
No diff
Case 76 - java_ogplatform.rev_e0ab5_a848b..InflationBuildingCurveSimpleTest.java
Base
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorCurveYieldInterpolated;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorYDCurve;
import com.opengamma.analytics.financial.forex.method.FXMatrix;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositON;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIborMaster;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedON;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedONMaster;
import com.opengamma.analytics.financial.instrument.index.IborIndex;
import com.opengamma.analytics.financial.instrument.index.IndexON;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.PresentValueDiscountingCalculator;
import com.opengamma.analytics.financial.provider.curve.multicurve.MulticurveDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderInterface;
import com.opengamma.analytics.financial.provider.sensitivity.multicurve.MulticurveSensitivity;
import com.opengamma.analytics.financial.schedule.ScheduleCalculator;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
 * Build of USD curves (discounting and forward Libor 3M) in several blocks with relevant Jacobian matrices.
 * Note: despite the class name, no inflation curve is calibrated here — only the USD multi-curve setup.
 */
public class InflationBuildingCurveSimpleTest {

  /** Linear interpolator with flat extrapolation on both ends. */
  private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
      Interpolator1DFactory.FLAT_EXTRAPOLATOR);
  /** Calculator returning the last cash-flow time of an instrument; used as node point for the curves. */
  private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
  private static final double TOLERANCE_ROOT = 1.0E-10;
  private static final int STEP_MAX = 100;

  private static final Calendar NYC = new MondayToFridayCalendar("NYC");
  private static final Currency USD = Currency.USD;
  private static final FXMatrix FX_MATRIX = new FXMatrix(USD);
  private static final double NOTIONAL = 1.0;

  // Instrument generators used to build the calibration instruments.
  private static final GeneratorSwapFixedON GENERATOR_OIS_USD = GeneratorSwapFixedONMaster.getInstance().getGenerator("USD1YFEDFUND", NYC);
  private static final IndexON INDEX_ON_USD = GENERATOR_OIS_USD.getIndex();
  private static final GeneratorDepositON GENERATOR_DEPOSIT_ON_USD = new GeneratorDepositON("USD Deposit ON", USD, NYC, INDEX_ON_USD.getDayCount());
  private static final GeneratorSwapFixedIborMaster GENERATOR_SWAP_MASTER = GeneratorSwapFixedIborMaster.getInstance();
  private static final GeneratorSwapFixedIbor USD6MLIBOR3M = GENERATOR_SWAP_MASTER.getGenerator("USD6MLIBOR3M", NYC);
  private static final IborIndex USDLIBOR3M = USD6MLIBOR3M.getIborIndex();
  private static final GeneratorDepositIbor GENERATOR_USDLIBOR3M = new GeneratorDepositIbor("GENERATOR_USDLIBOR3M", USDLIBOR3M);

  /** Valuation date for the whole test. */
  private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);

  // Historical fixing time series used when converting definitions to derivatives.
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  // NOTE(review): this "WITHOUT_TODAY" series is identical to the WITH_TODAY one (it still
  // contains the 2011-09-28 fixing), whereas TS_IBOR_USD3M_WITHOUT_TODAY below drops today's
  // point. Looks like a copy-paste slip in the fixture — confirm before relying on it.
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITHOUT_TODAY };
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.0035, 0.0036 });
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
      new double[] {0.0035 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITH_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITHOUT_TODAY };

  private static final String CURVE_NAME_DSC_USD = "USD Dsc";
  private static final String CURVE_NAME_FWD3_USD = "USD Fwd 3M";

  /** Market values for the dsc USD curve */
  public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
  /** Generators for the dsc USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
      GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
  /** Tenors for the dsc USD curve */
  public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
      DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
      DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
  /** Attributes (one per tenor) for the dsc USD curve instruments. */
  public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < DSC_USD_TENOR.length; loopins++) {
      DSC_USD_ATTR[loopins] = new GeneratorAttributeIR(DSC_USD_TENOR[loopins]);
    }
  }

  /** Market values for the Fwd 3M USD curve */
  public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
  /** Generators for the Fwd 3M USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
      USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
  /** Tenors for the Fwd 3M USD curve */
  public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
      DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
      DateUtils.periodOfYears(10) };
  /** Attributes (one per tenor) for the Fwd 3M USD curve instruments. */
  public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
      FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
    }
  }

  /** Standard USD discounting curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
  /** Standard USD Forward 3M curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;

  /** Units of curves */
  public static final int[] NB_UNITS = new int[] {2 };
  public static final int NB_BLOCKS = NB_UNITS.length;
  // Indexed [block][unit][curve][instrument] / [block][unit][curve].
  public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
  public static final GeneratorYDCurve[][][] GENERATORS_UNITS = new GeneratorYDCurve[NB_BLOCKS][][];
  public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
  public static final MulticurveProviderDiscount KNOWN_DATA = new MulticurveProviderDiscount(FX_MATRIX);
  // Maps from curve name to the currency / indexes the curve is used for.
  public static final LinkedHashMap<String, Currency> DSC_MAP = new LinkedHashMap<String, Currency>();
  public static final LinkedHashMap<String, IndexON[]> FWD_ON_MAP = new LinkedHashMap<String, IndexON[]>();
  public static final LinkedHashMap<String, IborIndex[]> FWD_IBOR_MAP = new LinkedHashMap<String, IborIndex[]>();
  static {
    DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
    DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
      GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
      NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
    }
    // Block 0: unit 0 calibrates the discounting curve, unit 1 the forward 3M curve.
    DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_DSC_USD };
    DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] {DEFINITIONS_FWD3_USD };
    final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
    GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] {genIntLin };
    GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] {genIntLin };
    NAMES_UNITS[0][0] = new String[] {CURVE_NAME_DSC_USD };
    NAMES_UNITS[0][1] = new String[] {CURVE_NAME_FWD3_USD };
    DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
    FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] {INDEX_ON_USD });
    FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] {USDLIBOR3M });
  }

  public static final String NOT_USED = "Not used";
  public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };

  /**
   * Builds one instrument definition per market quote using the matching generator and attribute.
   *
   * @param marketQuotes The market quotes, one per instrument.
   * @param generators The instrument generators, parallel to the quotes.
   * @param attribute The generator attributes (tenors), parallel to the quotes.
   * @return The instrument definitions.
   */
  public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
    final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
    for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
      definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
    }
    return definitions;
  }

  /** Calibrated curve bundles, one entry per block; populated in {@link #initClass()}. */
  private static final List<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>>();

  // Calculators: pricing, calibration target (par spread market quote) and its sensitivity.
  private static final PresentValueDiscountingCalculator PVC = PresentValueDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteDiscountingCalculator PSMQC = ParSpreadMarketQuoteDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator PSMQCSC = ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
  private static final MulticurveDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new MulticurveDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
  private static final double TOLERANCE_CAL = 1.0E-9;

  /** Calibrates all curve blocks once before the suite runs. */
  @BeforeSuite
  static void initClass() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSMQC, PSMQCSC, false));
    }
  }

  /** Checks that every calibration instrument re-prices to zero on its calibrated curves. */
  @Test
  public void curveConstructionGeneratorOtherBlocks() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
    }
  }

  /**
   * Asserts that each instrument used in the calibration has a present value of zero (in USD)
   * on the calibrated curves, within {@link #TOLERANCE_CAL}.
   *
   * @param definitions The instrument definitions, indexed [unit][curve][instrument].
   * @param curves The calibrated curve provider.
   * @param withToday Whether today's fixings are included when converting to derivatives.
   * @param block The block index, used only for the assertion message.
   */
  public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final MulticurveProviderDiscount curves, final boolean withToday, final int block) {
    final int nbBlocks = definitions.length;
    for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
      final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
      final double[][] pv = new double[instruments.length][];
      for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
        pv[loopcurve] = new double[instruments[loopcurve].length];
        for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
          pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVC, curves), USD).getAmount();
          assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
        }
      }
    }
  }

  /**
   * Analyzes the shape of the forward curve.
   * Disabled by default; writes a CSV of 3M forward rates to "fwd-dsc.csv" for manual inspection.
   */
  @Test(enabled = false)
  public void forwardAnalysis() {
    final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
    final int jump = 1;
    final int startIndex = 0;
    final int nbDate = 2750;
    ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
    final double[] rateDsc = new double[nbDate];
    final double[] startTime = new double[nbDate];
    // try-with-resources: the original leaked the FileWriter when an exception was thrown
    // mid-loop; the writer is now closed (and implicitly flushed) on every path.
    try (final FileWriter writer = new FileWriter("fwd-dsc.csv")) {
      for (int loopdate = 0; loopdate < nbDate; loopdate++) {
        startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
        final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
        final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
        final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
        rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
        startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
        writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
      }
      writer.flush();
    } catch (final IOException e) {
      e.printStackTrace();
    }
  }

  /**
   * Calibrates the curves of one block from the instrument definitions.
   *
   * @param definitions The instrument definitions, indexed [unit][curve][instrument].
   * @param curveGenerators The curve generators, indexed [unit][curve].
   * @param curveNames The curve names, indexed [unit][curve].
   * @param knownData The pre-existing market data (FX matrix, known curves).
   * @param calculator The calibration target calculator.
   * @param sensitivityCalculator The calibration sensitivity calculator.
   * @param withToday Whether today's fixings are included when converting to derivatives.
   * @return The calibrated provider and its Jacobian building block bundle.
   */
  private static Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorYDCurve[][] curveGenerators,
      final String[][] curveNames, final MulticurveProviderDiscount knownData, final InstrumentDerivativeVisitor<MulticurveProviderInterface, Double> calculator,
      final InstrumentDerivativeVisitor<MulticurveProviderInterface, MulticurveSensitivity> sensitivityCalculator, final boolean withToday) {
    final int nbUnits = curveGenerators.length;
    final double[][] parametersGuess = new double[nbUnits][];
    final GeneratorYDCurve[][] generatorFinal = new GeneratorYDCurve[nbUnits][];
    final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
    for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
      generatorFinal[loopunit] = new GeneratorYDCurve[curveGenerators[loopunit].length];
      int nbInsUnit = 0;
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        nbInsUnit += definitions[loopunit][loopcurve].length;
      }
      parametersGuess[loopunit] = new double[nbInsUnit];
      int startCurve = 0; // First parameter index of the curve in the unit.
      instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        // Anchor the generator on the curve's own instruments, then seed the solver
        // with per-instrument initial guesses (rates read off the definitions).
        generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
        final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
        System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
        startCurve += instruments[loopunit][loopcurve].length;
      }
    }
    return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, DSC_MAP, FWD_IBOR_MAP, FWD_ON_MAP, calculator,
        sensitivityCalculator);
  }

  /**
   * Converts instrument definitions to derivatives at the valuation date, supplying the
   * fixing series required by OIS and fixed/Ibor swaps.
   *
   * @param definitions The definitions, indexed [curve][instrument].
   * @param unit The unit index, used to select the fixing series.
   * @param withToday Whether today's fixings are included.
   * @return The derivatives, indexed [curve][instrument].
   */
  @SuppressWarnings("unchecked")
  private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
    final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
    for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
      instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
      int loopins = 0;
      for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
        final InstrumentDerivative ird;
        if (instrument instanceof SwapFixedONDefinition) {
          ird = ((SwapFixedONDefinition) instrument).toDerivative(NOW, getTSSwapFixedON(withToday, unit), NOT_USED_2);
        } else if (instrument instanceof SwapFixedIborDefinition) {
          ird = ((SwapFixedIborDefinition) instrument).toDerivative(NOW, getTSSwapFixedIbor(withToday, unit), NOT_USED_2);
        } else {
          // Deposits and other instruments need no fixing series.
          ird = instrument.toDerivative(NOW, NOT_USED_2);
        }
        instruments[loopcurve][loopins++] = ird;
      }
    }
    return instruments;
  }

  /**
   * Returns the overnight fixing series for OIS conversion.
   *
   * @param withToday Whether today's fixing is included.
   * @param unit The unit index; only unit 0 contains OIS.
   * @return The fixing series pair (fixed leg empty, ON leg populated).
   * @throws IllegalArgumentException If the unit has no associated ON series.
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedON(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
        return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Returns the Libor 3M fixing series for fixed/Ibor swap conversion.
   *
   * @param withToday Whether today's fixing is included.
   * @param unit The unit index; units 0 and 1 share the same series.
   * @return The fixing series.
   * @throws IllegalArgumentException If the unit has no associated Ibor series.
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
      case 1: // identical series for both units; arms merged (original duplicated the return)
        return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Computes an initial parameter guess for each instrument of a curve.
   *
   * @param definitions The instrument definitions.
   * @return One guess per instrument.
   */
  private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
    final double[] result = new double[definitions.length];
    int loopr = 0;
    for (final InstrumentDefinition<?> definition : definitions) {
      result[loopr++] = initialGuess(definition);
    }
    return result;
  }

  /**
   * Computes an initial parameter guess for a single instrument: the fixed/quoted rate
   * where available, otherwise a flat 1% default.
   *
   * @param instrument The instrument definition.
   * @return The guess.
   */
  private static double initialGuess(final InstrumentDefinition<?> instrument) {
    if (instrument instanceof SwapFixedONDefinition) {
      return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof SwapFixedIborDefinition) {
      return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof ForwardRateAgreementDefinition) {
      return ((ForwardRateAgreementDefinition) instrument).getRate();
    }
    if (instrument instanceof CashDefinition) {
      return ((CashDefinition) instrument).getRate();
    }
    return 0.01;
  }
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorCurveYieldInterpolated;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorYDCurve;
import com.opengamma.analytics.financial.forex.method.FXMatrix;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositON;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIborMaster;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedON;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedONMaster;
import com.opengamma.analytics.financial.instrument.index.IborIndex;
import com.opengamma.analytics.financial.instrument.index.IndexON;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.PresentValueDiscountingCalculator;
import com.opengamma.analytics.financial.provider.curve.multicurve.MulticurveDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderInterface;
import com.opengamma.analytics.financial.provider.sensitivity.multicurve.MulticurveSensitivity;
import com.opengamma.analytics.financial.schedule.ScheduleCalculator;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
 * Build of USD curves (discounting and forward Libor 3M) in several blocks with relevant Jacobian matrices.
 * Note: despite the class name, no inflation curve is calibrated here — only the USD multi-curve setup.
 */
public class InflationBuildingCurveSimpleTest {
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final Currency USD = Currency.USD;
private static final FXMatrix FX_MATRIX = new FXMatrix(USD);
private static final double NOTIONAL = 1.0;
private static final GeneratorSwapFixedON GENERATOR_OIS_USD = GeneratorSwapFixedONMaster.getInstance().getGenerator("USD1YFEDFUND", NYC);
private static final IndexON INDEX_ON_USD = GENERATOR_OIS_USD.getIndex();
private static final GeneratorDepositON GENERATOR_DEPOSIT_ON_USD = new GeneratorDepositON("USD Deposit ON", USD, NYC, INDEX_ON_USD.getDayCount());
private static final GeneratorSwapFixedIborMaster GENERATOR_SWAP_MASTER = GeneratorSwapFixedIborMaster.getInstance();
private static final GeneratorSwapFixedIbor USD6MLIBOR3M = GENERATOR_SWAP_MASTER.getGenerator("USD6MLIBOR3M", NYC);
private static final IborIndex USDLIBOR3M = USD6MLIBOR3M.getIborIndex();
private static final GeneratorDepositIbor GENERATOR_USDLIBOR3M = new GeneratorDepositIbor("GENERATOR_USDLIBOR3M", USDLIBOR3M);
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITHOUT_TODAY };
private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.0035, 0.0036 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {0.0035 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITH_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITHOUT_TODAY };
private static final String CURVE_NAME_DSC_USD = "USD Dsc";
private static final String CURVE_NAME_FWD3_USD = "USD Fwd 3M";
/** Market values for the dsc USD curve */
public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the dsc USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
/** Tenors for the dsc USD curve */
public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
static {
for (int loopins = 0; loopins < DSC_USD_TENOR.length; loopins++) {
DSC_USD_ATTR[loopins] = new GeneratorAttributeIR(DSC_USD_TENOR[loopins]);
}
}
/** Market values for the Fwd 3M USD curve */
public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
/** Generators for the Fwd 3M USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
/** Tenors for the Fwd 3M USD curve */
public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
static {
for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
}
}
/** Standard USD discounting curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
/** Standard USD Forward 3M curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;
/** Units of curves */
public static final int[] NB_UNITS = new int[] {2 };
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorYDCurve[][][] GENERATORS_UNITS = new GeneratorYDCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
public static final MulticurveProviderDiscount KNOWN_DATA = new MulticurveProviderDiscount(FX_MATRIX);
public static final LinkedHashMap<String, Currency> DSC_MAP = new LinkedHashMap<String, Currency>();
public static final LinkedHashMap<String, IndexON[]> FWD_ON_MAP = new LinkedHashMap<String, IndexON[]>();
public static final LinkedHashMap<String, IborIndex[]> FWD_IBOR_MAP = new LinkedHashMap<String, IborIndex[]>();
static {
DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_DSC_USD };
DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] {DEFINITIONS_FWD3_USD };
final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] {genIntLin };
GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_DSC_USD };
NAMES_UNITS[0][1] = new String[] {CURVE_NAME_FWD3_USD };
DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] {INDEX_ON_USD });
FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] {USDLIBOR3M });
}
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
}
return definitions;
}
private static List<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
private static final PresentValueDiscountingCalculator PVC = PresentValueDiscountingCalculator.getInstance();
private static final ParSpreadMarketQuoteDiscountingCalculator PSMQC = ParSpreadMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator PSMQCSC = ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
private static final MulticurveDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new MulticurveDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
private static final double TOLERANCE_CAL = 1.0E-9;
@BeforeSuite
static void initClass() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSMQC, PSMQCSC, false));
}
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
}
}
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final MulticurveProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
/**
 * Analyzes the shape of the forward curve: dumps the 3M Libor forward rate for
 * ~2750 consecutive business days to "fwd-dsc.csv". Disabled by default; enable
 * manually for ad-hoc curve inspection.
 */
@Test(enabled = false)
public void forwardAnalysis() {
  final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
  final int jump = 1; // business-day step between successive start dates
  final int startIndex = 0;
  final int nbDate = 2750;
  ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
  final double[] rateDsc = new double[nbDate];
  final double[] startTime = new double[nbDate];
  // try-with-resources: the original closed the writer only on the success path,
  // leaking the file handle if any write threw mid-loop.
  try (FileWriter writer = new FileWriter("fwd-dsc.csv")) {
    for (int loopdate = 0; loopdate < nbDate; loopdate++) {
      startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
      final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
      final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
      final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
      rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
      startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
      writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
    }
    writer.flush();
  } catch (final IOException e) {
    // Best-effort diagnostic dump; a failure to write is reported but not fatal.
    e.printStackTrace();
  }
}
/**
 * Calibrates one block of curves: converts the instrument definitions to derivatives,
 * assembles per-unit initial-guess vectors, and delegates the root finding to
 * CURVE_BUILDING_REPOSITORY.
 *
 * @param definitions Instrument definitions, indexed [unit][curve][instrument].
 * @param curveGenerators Curve generators, indexed [unit][curve].
 * @param curveNames Curve names, indexed [unit][curve].
 * @param knownData Previously calibrated curves the new ones are built on top of.
 * @param calculator The calibration target function (par spread).
 * @param sensitivityCalculator Its sensitivity, used for the Jacobian.
 * @param withToday Whether today's fixings are included when building derivatives.
 * @return The calibrated provider and the curve-building block bundle (Jacobians).
 */
private static Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorYDCurve[][] curveGenerators,
final String[][] curveNames, final MulticurveProviderDiscount knownData, final InstrumentDerivativeVisitor<MulticurveProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<MulticurveProviderInterface, MulticurveSensitivity> sensitivityCalculator, final boolean withToday) {
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorYDCurve[][] generatorFinal = new GeneratorYDCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorYDCurve[curveGenerators[loopunit].length];
// The unit's guess vector holds one entry per instrument across all its curves.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
// Bind the generic generator to this curve's instruments, then copy its initial
// guess into the unit-wide vector at the curve's parameter offset.
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, DSC_MAP, FWD_IBOR_MAP, FWD_ON_MAP, calculator,
sensitivityCalculator);
}
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
  // Convert each definition to a derivative, supplying the fixing time series
  // that OIS and fixed-vs-Ibor swaps require.
  final InstrumentDerivative[][] derivatives = new InstrumentDerivative[definitions.length][];
  for (int curve = 0; curve < definitions.length; curve++) {
    derivatives[curve] = new InstrumentDerivative[definitions[curve].length];
    for (int ins = 0; ins < definitions[curve].length; ins++) {
      final InstrumentDefinition<?> definition = definitions[curve][ins];
      final InstrumentDerivative derivative;
      if (definition instanceof SwapFixedONDefinition) {
        derivative = ((SwapFixedONDefinition) definition).toDerivative(NOW, getTSSwapFixedON(withToday, unit), NOT_USED_2);
      } else if (definition instanceof SwapFixedIborDefinition) {
        derivative = ((SwapFixedIborDefinition) definition).toDerivative(NOW, getTSSwapFixedIbor(withToday, unit), NOT_USED_2);
      } else {
        derivative = definition.toDerivative(NOW, NOT_USED_2);
      }
      derivatives[curve][ins] = derivative;
    }
  }
  return derivatives;
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedON(final Boolean withToday, final Integer unit) {
  // Only unit 0 carries OIS instruments; any other unit is a programming error.
  if (unit == 0) {
    return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
  }
  throw new IllegalArgumentException(unit.toString());
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
  switch (unit) {
    case 0: // intentional fall-through: units 0 and 1 share the same fixing series
    case 1:
      return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
    default:
      throw new IllegalArgumentException(unit.toString());
  }
}
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
  // Initial-guess vector for the root-finder: one entry per instrument, in order.
  final double[] guesses = new double[definitions.length];
  for (int i = 0; i < definitions.length; i++) {
    guesses[i] = initialGuess(definitions[i]);
  }
  return guesses;
}
private static double initialGuess(final InstrumentDefinition<?> instrument) {
  // Use the instrument's quoted rate as the root-finder's starting point;
  // fall back to a flat 1% for types without an obvious rate.
  if (instrument instanceof SwapFixedONDefinition) {
    return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
  } else if (instrument instanceof SwapFixedIborDefinition) {
    return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
  } else if (instrument instanceof ForwardRateAgreementDefinition) {
    return ((ForwardRateAgreementDefinition) instrument).getRate();
  } else if (instrument instanceof CashDefinition) {
    return ((CashDefinition) instrument).getRate();
  }
  return 0.01;
}
}
// ==== Left (revision marker from a merge/diff artifact; the class below is the "Left" version) ====
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
 * Builds USD inflation (price-index) curves in several blocks, with the relevant
 * Jacobian matrices, and checks that every calibration instrument re-prices to zero.
 */
public class InflationBuildingCurveSimpleTest {
// Linear interpolator with flat extrapolation on both ends, used for the CPI curve.
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
// Curve node times are taken as the last payment time of each instrument.
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
// Root-finder convergence tolerance and iteration cap.
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Currency USD = Currency.USD;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final double NOTIONAL = 1.0;
// NOTE(review): "INFALTION" is a typo for "INFLATION"; left as-is in this documentation pass.
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
// Valuation date for the whole test.
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
// NOTE(review): TS_EMPTY is never referenced in this class.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
// CPI fixing series including today's publication (index level 200 on both dates).
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
// CPI fixing series excluding today. NOTE(review): level 100 here vs 200 above — confirm the asymmetry is intentional.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve (flat 4% zero-coupon inflation swap quotes). */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve (one zero-coupon inflation swap per quote). */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve (1y..10y annually, then 12y/15y/20y/25y/30y). */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
/** One generator attribute per tenor, populated in the static block below. */
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
public static final int NB_BLOCKS = NB_UNITS.length;
// Block/unit layout: definitions[block][unit][curve][instrument], generators[block][unit][curve].
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
// Known (already calibrated) multicurve data the inflation curves are built on top of.
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
static {
// Wire up the single block/unit: one CPI curve built from zero-coupon inflation swaps.
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
// Placeholder curve names passed where the toDerivative API requires names it does not use.
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
/**
 * Generates one instrument definition per market quote.
 *
 * @param marketQuotes The quoted levels, one per instrument.
 * @param generators The instrument generators (same length as marketQuotes).
 * @param attribute The generator attributes (same length as marketQuotes).
 * @return The instrument definitions.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
}
return definitions;
}
// Calibrated curve bundles (one per block), populated once in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
// PVIC verifies zero PV; PSIMQC is the calibration target; PSIMQCSC supplies the Jacobian.
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
// Tolerance for the zero-present-value assertions in curveConstructionTest.
private static final double TOLERANCE_CAL = 1.0E-9;
// Calibrates all blocks once before the suite runs (excluding today's fixing).
@BeforeSuite
static void initClass() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
}
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
// Re-price every calibration instrument against the curves calibrated for its block.
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
}
}
/**
 * Checks that every calibration instrument of the block has (approximately) zero
 * present value in USD under the calibrated curves.
 */
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
// Convert the (possibly multi-currency) PV into USD before comparing to zero.
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
// Commented-out ad-hoc curve-shape analysis, kept for reference. NOTE(review): it still
// references USDLIBOR3M, which does not exist in this (inflation) class — it would not
// compile if re-enabled without rework.
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
/**
 * Calibrates one block of inflation curves: converts the instrument definitions to
 * derivatives, assembles per-unit initial-guess vectors, and delegates the root
 * finding to CURVE_BUILDING_REPOSITORY.
 *
 * @param definitions Instrument definitions, indexed [unit][curve][instrument].
 * @param curveGenerators Price-index curve generators, indexed [unit][curve].
 * @param curveNames Curve names, indexed [unit][curve].
 * @param knownData Previously calibrated curves the new ones are built on top of.
 * @param calculator The calibration target function (par spread).
 * @param sensitivityCalculator Its sensitivity, used for the Jacobian.
 * @param withToday Whether today's fixings are included when building derivatives.
 * @return The calibrated provider and the curve-building block bundle (Jacobians).
 */
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
// The unit's guess vector holds one entry per instrument across all its curves.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
// Bind the generic generator to this curve's instruments, then copy its initial
// guess into the unit-wide vector at the curve's parameter offset.
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
/**
 * Converts instrument definitions to derivatives at NOW.
 * NOTE(review): the withToday parameter is ignored here — the inflation leg always
 * uses TS_PRICE_INDEX_USD_WITH_TODAY, and getTSSwapFixedInflation is consequently
 * never called. Confirm whether the WITHOUT_TODAY path should be wired through.
 */
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
// The swap is converted leg by leg: the fixed leg needs no fixings, the
// inflation leg needs the CPI fixing series.
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
}
else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
/**
 * Returns the CPI fixing series for the given unit, with or without today's value.
 * NOTE(review): currently unused — see the note on convert above.
 */
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch (unit) {
case 0:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
// Initial-guess vector for the root-finder: one entry per instrument, in order.
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] result = new double[definitions.length];
int loopr = 0;
for (final InstrumentDefinition<?> definition : definitions) {
result[loopr++] = initialGuess(definition);
}
return result;
}
// Starting value for one instrument: the quoted rate where available, otherwise 200
// (a price-index level — the fallback here, unlike the rate-curve variant's 0.01,
// since the calibrated parameters are CPI levels).
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
return 200;
}
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Currency USD = Currency.USD;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final double NOTIONAL = 1.0;
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
// Wires the single calibration block: one unit containing the USD CPI curve,
// built with a linear price-index interpolator. Order matters: the arrays
// declared above are sized here before being filled.
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
/** Placeholder curve name for toDerivative calls where the name is irrelevant. */
public static final String NOT_USED = "Not used";
/** Placeholder curve-name pair for two-leg toDerivative calls. */
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
/**
 * Builds one instrument definition per market quote, pairing each quote with its
 * generator and attribute at the same index.
 *
 * @param marketQuotes The market quotes, one per instrument.
 * @param generators The instrument generators (parallel to the quotes).
 * @param attribute The generator attributes (parallel to the quotes).
 * @return The generated instrument definitions.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
  final int nbInstruments = marketQuotes.length;
  final InstrumentDefinition<?>[] result = new InstrumentDefinition<?>[nbInstruments];
  for (int i = 0; i < nbInstruments; i++) {
    result[i] = generators[i].generateInstrument(NOW, marketQuotes[i], NOTIONAL, attribute[i]);
  }
  return result;
}
// Calibrated curve bundles, one entry per block; filled once in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
// Present value calculator used to check that calibrated instruments reprice to par.
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
// Par-spread market-quote calculator and its sensitivity counterpart, used as the calibration targets.
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
// Root-finding repository driving the calibration.
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
// Tolerance for the present-value-at-par assertions.
private static final double TOLERANCE_CAL = 1.0E-9;
/** Calibrates the curve bundle for every block once, before any test runs. */
@BeforeSuite
static void initClass() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(
        makeCurvesFromDefinitions(DEFINITIONS_UNITS[block], GENERATORS_UNITS[block], NAMES_UNITS[block], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
  }
}
/** Verifies curve construction for every calibration block. */
@Test
public void curveConstructionGeneratorOtherBlocks() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    final InflationProviderDiscount calibrated = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(block).getFirst();
    curveConstructionTest(DEFINITIONS_UNITS[block], calibrated, false, block);
  }
}
/**
 * Checks that every calibration instrument reprices to (approximately) zero present value,
 * i.e. to par, on the calibrated curves.
 *
 * @param definitions The instrument definitions, indexed by [unit][curve][instrument].
 * @param curves The calibrated inflation provider.
 * @param withToday Whether today's fixings are included when converting to derivatives.
 * @param block The block index, used only in assertion messages.
 */
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
  for (int unitIdx = 0; unitIdx < definitions.length; unitIdx++) {
    final InstrumentDerivative[][] instruments = convert(definitions[unitIdx], unitIdx, withToday);
    for (int curveIdx = 0; curveIdx < instruments.length; curveIdx++) {
      for (int insIdx = 0; insIdx < instruments[curveIdx].length; insIdx++) {
        // Present value converted to USD; should be zero at par within tolerance.
        final double pv = curves.getFxRates().convert(instruments[curveIdx][insIdx].accept(PVIC, curves), USD).getAmount();
        assertEquals("Curve construction: block " + block + ", unit " + unitIdx + " - instrument " + insIdx, 0, pv, TOLERANCE_CAL);
      }
    }
  }
}
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
/**
 * Calibrates the inflation curves for one block.
 * <p>
 * For each unit it converts the definitions to derivatives, finalizes the curve
 * generators against those derivatives, packs the per-curve initial guesses into a
 * single flat array per unit, and delegates the root-finding to the repository.
 *
 * @param definitions The instrument definitions, indexed by [unit][curve][instrument].
 * @param curveGenerators The curve generators, indexed by [unit][curve].
 * @param curveNames The curve names, indexed by [unit][curve].
 * @param knownData The already-known (non-calibrated) curves.
 * @param calculator The calibration target calculator (par spread market quote).
 * @param sensitivityCalculator The sensitivity calculator matching the target.
 * @param withToday Whether today's fixings are used in the conversion to derivatives.
 * @return The calibrated inflation provider and its curve building block bundle.
 */
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
// Total instrument count across all curves of this unit sizes the flat guess array.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
// Finalize the generator against the actual derivatives, then copy that curve's
// initial guess into its slice of the unit-wide parameter array.
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
/**
 * Converts the instrument definitions of one unit into derivatives as of NOW.
 * Zero-coupon inflation swaps are converted leg by leg (the inflation leg with the
 * USD price-index fixing series); everything else uses the plain toDerivative call.
 *
 * @param definitions The definitions, indexed by [curve][instrument].
 * @param unit The unit index (currently unused in the conversion itself).
 * @param withToday Whether today's fixings would be included (currently unused here).
 * @return The derivatives, indexed by [curve][instrument].
 */
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
  final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
  for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
    final InstrumentDefinition<?>[] curveDefinitions = definitions[loopcurve];
    instruments[loopcurve] = new InstrumentDerivative[curveDefinitions.length];
    for (int loopins = 0; loopins < curveDefinitions.length; loopins++) {
      final InstrumentDefinition<?> definition = curveDefinitions[loopins];
      final InstrumentDerivative derivative;
      if (definition instanceof SwapFixedInflationZeroCouponDefinition) {
        final SwapFixedInflationZeroCouponDefinition swap = (SwapFixedInflationZeroCouponDefinition) definition;
        final Annuity<? extends Payment> fixedLeg = swap.getFirstLeg().toDerivative(NOW, NOT_USED_2);
        final Annuity<? extends Payment> inflationLeg = swap.getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
        derivative = new Swap(fixedLeg, inflationLeg);
      } else {
        derivative = definition.toDerivative(NOW, NOT_USED_2);
      }
      instruments[loopcurve][loopins] = derivative;
    }
  }
  return instruments;
}
/**
 * Returns the fixing time series for fixed-vs-inflation swaps of the given unit.
 * Units 0 and 1 intentionally share the same USD price-index series, so the cases
 * fall through (the previous version duplicated the identical return statement).
 * NOTE(review): the only call site of this helper appears to be commented out in
 * convert(...) — confirm whether it is still needed.
 *
 * @param withToday Whether today's fixing is included in the series.
 * @param unit The calibration unit index; only 0 and 1 are valid.
 * @return The fixing series for the unit.
 * @throws IllegalArgumentException If the unit is not 0 or 1.
 */
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
  switch (unit) {
    case 0:
    case 1:
      return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
    default:
      throw new IllegalArgumentException(unit.toString());
  }
}
/**
 * Computes the initial calibration guess for each definition in the array.
 *
 * @param definitions The instrument definitions.
 * @return One guess per definition, in the same order.
 */
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
  final double[] guesses = new double[definitions.length];
  for (int i = 0; i < definitions.length; i++) {
    guesses[i] = initialGuess(definitions[i]);
  }
  return guesses;
}
/**
 * Returns a starting value for the root-finder for one instrument: the fixed rate
 * for swaps/FRAs/cash, otherwise a flat default.
 *
 * @param instrument The instrument definition.
 * @return The initial guess.
 */
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
// Default guess for anything else — presumably a price-index level (~200) for the
// inflation zero-coupon swaps this class calibrates; TODO confirm intent.
return 200;
}
}
Right
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorCurveYieldInterpolated;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorYDCurve;
import com.opengamma.analytics.financial.forex.method.FXMatrix;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositON;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIborMaster;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedON;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedONMaster;
import com.opengamma.analytics.financial.instrument.index.IborIndex;
import com.opengamma.analytics.financial.instrument.index.IndexON;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.PresentValueDiscountingCalculator;
import com.opengamma.analytics.financial.provider.curve.multicurve.MulticurveDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderInterface;
import com.opengamma.analytics.financial.provider.sensitivity.multicurve.MulticurveSensitivity;
import com.opengamma.analytics.financial.schedule.ScheduleCalculator;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {

  /** Linear interpolator with flat extrapolation, used for all calibrated curves. */
  private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
      Interpolator1DFactory.FLAT_EXTRAPOLATOR);
  private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
  private static final double TOLERANCE_ROOT = 1.0E-10;
  private static final int STEP_MAX = 100;
  private static final Calendar NYC = new MondayToFridayCalendar("NYC");
  private static final Currency USD = Currency.USD;
  private static final FXMatrix FX_MATRIX = new FXMatrix(USD);
  private static final double NOTIONAL = 1.0;
  private static final GeneratorSwapFixedON GENERATOR_OIS_USD = GeneratorSwapFixedONMaster.getInstance().getGenerator("USD1YFEDFUND", NYC);
  private static final IndexON INDEX_ON_USD = GENERATOR_OIS_USD.getIndex();
  private static final GeneratorDepositON GENERATOR_DEPOSIT_ON_USD = new GeneratorDepositON("USD Deposit ON", USD, NYC, INDEX_ON_USD.getDayCount());
  private static final GeneratorSwapFixedIborMaster GENERATOR_SWAP_MASTER = GeneratorSwapFixedIborMaster.getInstance();
  private static final GeneratorSwapFixedIbor USD6MLIBOR3M = GENERATOR_SWAP_MASTER.getGenerator("USD6MLIBOR3M", NYC);
  private static final IborIndex USDLIBOR3M = USD6MLIBOR3M.getIborIndex();
  private static final GeneratorDepositIbor GENERATOR_USDLIBOR3M = new GeneratorDepositIbor("GENERATOR_USDLIBOR3M", USDLIBOR3M);
  /** Valuation date for the whole test. */
  private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  // NOTE(review): despite the name, this series contains the same two fixings (incl. today)
  // as the WITH_TODAY series — confirm whether today's fixing should be dropped here.
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITHOUT_TODAY };
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.0035, 0.0036 });
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
      new double[] {0.0035 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITH_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITHOUT_TODAY };
  private static final String CURVE_NAME_DSC_USD = "USD Dsc";
  private static final String CURVE_NAME_FWD3_USD = "USD Fwd 3M";
  /** Market values for the dsc USD curve */
  public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
  /** Generators for the dsc USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
      GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
  /** Tenors for the dsc USD curve */
  public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
      DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
      DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
  /** Generator attributes (one per tenor) for the dsc USD curve. */
  public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < DSC_USD_TENOR.length; loopins++) {
      DSC_USD_ATTR[loopins] = new GeneratorAttributeIR(DSC_USD_TENOR[loopins]);
    }
  }
  /** Market values for the Fwd 3M USD curve */
  public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
  /** Generators for the Fwd 3M USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
      USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
  /** Tenors for the Fwd 3M USD curve */
  public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
      DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
      DateUtils.periodOfYears(10) };
  /** Generator attributes (one per tenor) for the Fwd 3M USD curve. */
  public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
      FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
    }
  }
  /** Standard USD discounting curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
  /** Standard USD Forward 3M curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;
  /** Units of curves */
  public static final int[] NB_UNITS = new int[] {2 };
  public static final int NB_BLOCKS = NB_UNITS.length;
  /** Instrument definitions, indexed by [block][unit][curve][instrument]; filled in the static block below. */
  public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
  public static final GeneratorYDCurve[][][] GENERATORS_UNITS = new GeneratorYDCurve[NB_BLOCKS][][];
  public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
  public static final MulticurveProviderDiscount KNOWN_DATA = new MulticurveProviderDiscount(FX_MATRIX);
  /** Maps curve names to the currency / indexes they serve in the calibrated provider. */
  public static final LinkedHashMap<String, Currency> DSC_MAP = new LinkedHashMap<String, Currency>();
  public static final LinkedHashMap<String, IndexON[]> FWD_ON_MAP = new LinkedHashMap<String, IndexON[]>();
  public static final LinkedHashMap<String, IborIndex[]> FWD_IBOR_MAP = new LinkedHashMap<String, IborIndex[]>();
  // Wires the single block: unit 0 calibrates the discounting curve, unit 1 the Libor 3M curve.
  static {
    DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
    DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
      GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
      NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
    }
    DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_DSC_USD };
    DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] {DEFINITIONS_FWD3_USD };
    final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
    GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] {genIntLin };
    GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] {genIntLin };
    NAMES_UNITS[0][0] = new String[] {CURVE_NAME_DSC_USD };
    NAMES_UNITS[0][1] = new String[] {CURVE_NAME_FWD3_USD };
    DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
    FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] {INDEX_ON_USD });
    FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] {USDLIBOR3M });
  }
  /** Placeholder curve name for toDerivative calls where the name is irrelevant. */
  public static final String NOT_USED = "Not used";
  public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };

  /**
   * Builds one instrument definition per market quote, pairing each quote with its
   * generator and attribute at the same index.
   *
   * @param marketQuotes The market quotes, one per instrument.
   * @param generators The instrument generators (parallel to the quotes).
   * @param attribute The generator attributes (parallel to the quotes).
   * @return The generated instrument definitions.
   */
  public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
    final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
    for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
      definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
    }
    return definitions;
  }

  /** Calibrated curve bundles, one entry per block; filled once in initClass(). */
  private static List<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>>();
  // Calculators: PVC checks repricing; PSMQC/PSMQCSC are the calibration targets.
  private static final PresentValueDiscountingCalculator PVC = PresentValueDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteDiscountingCalculator PSMQC = ParSpreadMarketQuoteDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator PSMQCSC = ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
  private static final MulticurveDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new MulticurveDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
  /** Tolerance for the present-value-at-par assertions. */
  private static final double TOLERANCE_CAL = 1.0E-9;

  /** Calibrates the curve bundle for every block once, before any test runs. */
  @BeforeSuite
  static void initClass() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSMQC, PSMQCSC, false));
    }
  }

  /** Verifies curve construction for every calibration block. */
  @Test
  public void curveConstructionGeneratorOtherBlocks() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
    }
  }

  /**
   * Checks that every calibration instrument reprices to (approximately) zero present
   * value, i.e. to par, on the calibrated curves.
   *
   * @param definitions The instrument definitions, indexed by [unit][curve][instrument].
   * @param curves The calibrated multicurve provider.
   * @param withToday Whether today's fixings are included when converting to derivatives.
   * @param block The block index, used only in assertion messages.
   */
  public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final MulticurveProviderDiscount curves, final boolean withToday, final int block) {
    final int nbBlocks = definitions.length;
    for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
      final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
      final double[][] pv = new double[instruments.length][];
      for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
        pv[loopcurve] = new double[instruments[loopcurve].length];
        for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
          pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVC, curves), USD).getAmount();
          assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
        }
      }
    }
  }

  @Test(enabled = false)
  /**
   * Analyzes the shape of the forward curve by dumping forward rates to a CSV file.
   * Disabled by default; enable manually for ad-hoc analysis.
   */
  public void forwardAnalysis() {
    final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
    final int jump = 1;
    final int startIndex = 0;
    final int nbDate = 2750;
    ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
    final double[] rateDsc = new double[nbDate];
    final double[] startTime = new double[nbDate];
    FileWriter writer = null;
    try {
      writer = new FileWriter("fwd-dsc.csv");
      for (int loopdate = 0; loopdate < nbDate; loopdate++) {
        startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
        final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
        final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
        final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
        rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
        startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
        writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
      }
      writer.flush();
    } catch (final IOException e) {
      e.printStackTrace();
    } finally {
      // Single null-safe close; the previous version closed both in the catch and the
      // finally, which was redundant.
      IOUtils.closeQuietly(writer);
    }
  }

  /**
   * Calibrates the curves for one block: converts definitions to derivatives, finalizes
   * the generators, packs the per-curve initial guesses into one flat array per unit,
   * and delegates the root-finding to the repository.
   *
   * @param definitions The instrument definitions, indexed by [unit][curve][instrument].
   * @param curveGenerators The curve generators, indexed by [unit][curve].
   * @param curveNames The curve names, indexed by [unit][curve].
   * @param knownData The already-known (non-calibrated) curves.
   * @param calculator The calibration target calculator.
   * @param sensitivityCalculator The sensitivity calculator matching the target.
   * @param withToday Whether today's fixings are used in the conversion to derivatives.
   * @return The calibrated provider and its curve building block bundle.
   */
  private static Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorYDCurve[][] curveGenerators,
      final String[][] curveNames, final MulticurveProviderDiscount knownData, final InstrumentDerivativeVisitor<MulticurveProviderInterface, Double> calculator,
      final InstrumentDerivativeVisitor<MulticurveProviderInterface, MulticurveSensitivity> sensitivityCalculator, final boolean withToday) {
    final int nbUnits = curveGenerators.length;
    final double[][] parametersGuess = new double[nbUnits][];
    final GeneratorYDCurve[][] generatorFinal = new GeneratorYDCurve[nbUnits][];
    final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
    for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
      generatorFinal[loopunit] = new GeneratorYDCurve[curveGenerators[loopunit].length];
      // Total instrument count across all curves of this unit sizes the flat guess array.
      int nbInsUnit = 0;
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        nbInsUnit += definitions[loopunit][loopcurve].length;
      }
      parametersGuess[loopunit] = new double[nbInsUnit];
      int startCurve = 0; // First parameter index of the curve in the unit.
      instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
        final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
        System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
        startCurve += instruments[loopunit][loopcurve].length;
      }
    }
    return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, DSC_MAP, FWD_IBOR_MAP, FWD_ON_MAP, calculator,
        sensitivityCalculator);
  }

  /**
   * Converts the instrument definitions of one unit into derivatives as of NOW, supplying
   * the relevant fixing series to OIS and fixed-vs-Ibor swaps.
   *
   * @param definitions The definitions, indexed by [curve][instrument].
   * @param unit The calibration unit index, used to select the fixing series.
   * @param withToday Whether today's fixings are included.
   * @return The derivatives, indexed by [curve][instrument].
   */
  @SuppressWarnings("unchecked")
  private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
    final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
    for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
      instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
      int loopins = 0;
      for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
        InstrumentDerivative ird;
        if (instrument instanceof SwapFixedONDefinition) {
          ird = ((SwapFixedONDefinition) instrument).toDerivative(NOW, getTSSwapFixedON(withToday, unit), NOT_USED_2);
        } else {
          if (instrument instanceof SwapFixedIborDefinition) {
            ird = ((SwapFixedIborDefinition) instrument).toDerivative(NOW, getTSSwapFixedIbor(withToday, unit), NOT_USED_2);
          } else {
            ird = instrument.toDerivative(NOW, NOT_USED_2);
          }
        }
        instruments[loopcurve][loopins++] = ird;
      }
    }
    return instruments;
  }

  /**
   * Returns the fixing series for OIS swaps of the given unit.
   *
   * @param withToday Whether today's fixing is included.
   * @param unit The calibration unit index; only 0 (the discounting unit) holds OIS swaps.
   * @return The fixing series.
   * @throws IllegalArgumentException If the unit is not 0.
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedON(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
        return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Returns the fixing series for fixed-vs-Ibor swaps of the given unit. Units 0 and 1
   * intentionally share the same USD Libor 3M series, so the cases fall through (the
   * previous version duplicated the identical return statement).
   *
   * @param withToday Whether today's fixing is included.
   * @param unit The calibration unit index; only 0 and 1 are valid.
   * @return The fixing series.
   * @throws IllegalArgumentException If the unit is not 0 or 1.
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
      case 1:
        return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Computes the initial calibration guess for each definition in the array.
   *
   * @param definitions The instrument definitions.
   * @return One guess per definition, in the same order.
   */
  private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
    final double[] result = new double[definitions.length];
    int loopr = 0;
    for (final InstrumentDefinition<?> definition : definitions) {
      result[loopr++] = initialGuess(definition);
    }
    return result;
  }

  /**
   * Returns a starting value for the root-finder for one instrument: the fixed rate for
   * swaps/FRAs/cash, otherwise a flat 1% default.
   *
   * @param instrument The instrument definition.
   * @return The initial guess.
   */
  private static double initialGuess(final InstrumentDefinition<?> instrument) {
    if (instrument instanceof SwapFixedONDefinition) {
      return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof SwapFixedIborDefinition) {
      return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof ForwardRateAgreementDefinition) {
      return ((ForwardRateAgreementDefinition) instrument).getRate();
    }
    if (instrument instanceof CashDefinition) {
      return ((CashDefinition) instrument).getRate();
    }
    return 0.01;
  }
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorCurveYieldInterpolated;
import com.opengamma.analytics.financial.curve.interestrate.generator.GeneratorYDCurve;
import com.opengamma.analytics.financial.forex.method.FXMatrix;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorDepositON;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIbor;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedIborMaster;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedON;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedONMaster;
import com.opengamma.analytics.financial.instrument.index.IborIndex;
import com.opengamma.analytics.financial.instrument.index.IndexON;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.ParSpreadMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.discounting.PresentValueDiscountingCalculator;
import com.opengamma.analytics.financial.provider.curve.multicurve.MulticurveDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderInterface;
import com.opengamma.analytics.financial.provider.sensitivity.multicurve.MulticurveSensitivity;
import com.opengamma.analytics.financial.schedule.ScheduleCalculator;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.analytics.util.time.TimeCalculator;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
 * Builds a USD discounting (OIS) curve and a USD forward Libor 3M curve in two consecutive
 * calibration units, with the relevant Jacobian matrices, and verifies that every calibration
 * instrument reprices to zero under the calibrated provider.
 * NOTE(review): despite the class name, this version constructs nominal multicurves only; no
 * inflation curve is built here — confirm the intended scope.
 */
public class InflationBuildingCurveSimpleTest {
  /** Linear interpolator with flat extrapolation on both ends. */
  private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
      Interpolator1DFactory.FLAT_EXTRAPOLATOR);
  /** Curve node times are the last time of each calibration instrument. */
  private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
  /** Convergence tolerance of the calibration root finder. */
  private static final double TOLERANCE_ROOT = 1.0E-10;
  /** Maximum number of root-finder steps. */
  private static final int STEP_MAX = 100;
  private static final Calendar NYC = new MondayToFridayCalendar("NYC");
  private static final Currency USD = Currency.USD;
  private static final FXMatrix FX_MATRIX = new FXMatrix(USD);
  private static final double NOTIONAL = 1.0;
  private static final GeneratorSwapFixedON GENERATOR_OIS_USD = GeneratorSwapFixedONMaster.getInstance().getGenerator("USD1YFEDFUND", NYC);
  private static final IndexON INDEX_ON_USD = GENERATOR_OIS_USD.getIndex();
  private static final GeneratorDepositON GENERATOR_DEPOSIT_ON_USD = new GeneratorDepositON("USD Deposit ON", USD, NYC, INDEX_ON_USD.getDayCount());
  private static final GeneratorSwapFixedIborMaster GENERATOR_SWAP_MASTER = GeneratorSwapFixedIborMaster.getInstance();
  private static final GeneratorSwapFixedIbor USD6MLIBOR3M = GENERATOR_SWAP_MASTER.getGenerator("USD6MLIBOR3M", NYC);
  private static final IborIndex USDLIBOR3M = USD6MLIBOR3M.getIborIndex();
  private static final GeneratorDepositIbor GENERATOR_USDLIBOR3M = new GeneratorDepositIbor("GENERATOR_USDLIBOR3M", USDLIBOR3M);
  /** Valuation date for the whole test. */
  private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  // NOTE(review): identical to TS_ON_USD_WITH_TODAY (it still contains the 28 Sep fixing),
  // whereas the Ibor "without today" series below drops that point — confirm this is intended.
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_ON_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.07, 0.08 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_OIS_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_EMPTY, TS_ON_USD_WITHOUT_TODAY };
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
      DateUtils.getUTCDate(2011, 9, 28) }, new double[] {0.0035, 0.0036 });
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_IBOR_USD3M_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
      new double[] {0.0035 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITH_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_IBOR_USD3M_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_IBOR_USD3M_WITHOUT_TODAY };
  private static final String CURVE_NAME_DSC_USD = "USD Dsc";
  private static final String CURVE_NAME_FWD3_USD = "USD Fwd 3M";
  /** Market values for the dsc USD curve */
  public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
  /** Generators for the dsc USD curve: an ON deposit followed by OIS of increasing tenor. */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
      GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
  /** Tenors for the dsc USD curve */
  public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
      DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
      DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
  /** One attribute per dsc tenor, populated in the static block below. */
  public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < DSC_USD_TENOR.length; loopins++) {
      DSC_USD_ATTR[loopins] = new GeneratorAttributeIR(DSC_USD_TENOR[loopins]);
    }
  }
  /** Market values for the Fwd 3M USD curve */
  public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
  /** Generators for the Fwd 3M USD curve: a Libor deposit followed by fixed-vs-3M swaps. */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
      USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
  /** Tenors for the Fwd 3M USD curve */
  public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
      DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
      DateUtils.periodOfYears(10) };
  /** One attribute per Fwd 3M tenor, populated in the static block below. */
  public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
      FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
    }
  }
  /** Standard USD discounting curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
  /** Standard USD Forward 3M curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;
  /** Units of curves: one block containing two units (discounting, then forward 3M). */
  public static final int[] NB_UNITS = new int[] {2 };
  public static final int NB_BLOCKS = NB_UNITS.length;
  public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
  public static final GeneratorYDCurve[][][] GENERATORS_UNITS = new GeneratorYDCurve[NB_BLOCKS][][];
  public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
  public static final MulticurveProviderDiscount KNOWN_DATA = new MulticurveProviderDiscount(FX_MATRIX);
  public static final LinkedHashMap<String, Currency> DSC_MAP = new LinkedHashMap<String, Currency>();
  public static final LinkedHashMap<String, IndexON[]> FWD_ON_MAP = new LinkedHashMap<String, IndexON[]>();
  public static final LinkedHashMap<String, IborIndex[]> FWD_IBOR_MAP = new LinkedHashMap<String, IborIndex[]>();
  static {
    // Unit 0 calibrates the discounting curve, unit 1 the forward 3M curve; both use the same
    // linearly interpolated yield-curve generator on the instruments' last times.
    DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
    DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
      GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
      NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
    }
    DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_DSC_USD };
    DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] {DEFINITIONS_FWD3_USD };
    final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
    GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] {genIntLin };
    GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] {genIntLin };
    NAMES_UNITS[0][0] = new String[] {CURVE_NAME_DSC_USD };
    NAMES_UNITS[0][1] = new String[] {CURVE_NAME_FWD3_USD };
    DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
    FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] {INDEX_ON_USD });
    FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] {USDLIBOR3M });
  }
  /** Placeholder curve-name arguments for the toDerivative calls. */
  public static final String NOT_USED = "Not used";
  public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };

  /**
   * Builds one instrument definition per market quote from the parallel generator/attribute arrays.
   * @param marketQuotes The market quotes, one per instrument.
   * @param generators The instrument generators, parallel to the quotes.
   * @param attribute The generator attributes (tenors), parallel to the quotes.
   * @return The instrument definitions.
   */
  public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
    final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
    for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
      definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
    }
    return definitions;
  }

  /** Calibrated curves and Jacobian blocks, one entry per block, filled in initClass(). */
  private static List<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle>>();
  // Calculators used for pricing and for the par-spread calibration equations.
  private static final PresentValueDiscountingCalculator PVC = PresentValueDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteDiscountingCalculator PSMQC = ParSpreadMarketQuoteDiscountingCalculator.getInstance();
  private static final ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator PSMQCSC = ParSpreadMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
  // Calibration engine.
  private static final MulticurveDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new MulticurveDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
  /** Tolerance on the (zero) present value of the calibration instruments. */
  private static final double TOLERANCE_CAL = 1.0E-9;

  /** Calibrates all blocks once before the suite; the tests reuse the results. */
  @BeforeSuite
  static void initClass() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSMQC, PSMQCSC, false));
    }
  }

  /** Checks that each calibrated block reprices its own calibration instruments to zero. */
  @Test
  public void curveConstructionGeneratorOtherBlocks() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
    }
  }

  /**
   * Asserts that every instrument in every unit has a present value of zero (converted to USD)
   * under the given calibrated provider, within TOLERANCE_CAL.
   * @param definitions The instrument definitions, indexed by unit then curve.
   * @param curves The calibrated provider.
   * @param withToday Whether today's fixings are used when converting to derivatives.
   * @param block The block index, used only in the assertion message.
   */
  public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final MulticurveProviderDiscount curves, final boolean withToday, final int block) {
    final int nbBlocks = definitions.length;
    for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
      final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
      final double[][] pv = new double[instruments.length][];
      for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
        pv[loopcurve] = new double[instruments[loopcurve].length];
        for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
          pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVC, curves), USD).getAmount();
          assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
        }
      }
    }
  }

  /**
   * Analyzes the shape of the forward 3M curve and dumps (time, forward rate) pairs to
   * fwd-dsc.csv. Disabled by default because it writes to the local file system.
   */
  @Test(enabled = false)
  public void forwardAnalysis() {
    final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
    final int jump = 1;
    final int startIndex = 0;
    final int nbDate = 2750;
    ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
    final double[] rateDsc = new double[nbDate];
    final double[] startTime = new double[nbDate];
    FileWriter writer = null;
    try {
      writer = new FileWriter("fwd-dsc.csv");
      for (int loopdate = 0; loopdate < nbDate; loopdate++) {
        startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
        final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
        final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
        final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
        rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
        startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
        writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
      }
      writer.flush();
    } catch (final IOException e) {
      e.printStackTrace();
    } finally {
      // Single close path: the previous version also closed inside the catch block, which
      // attempted to close the writer twice on error.
      if (writer != null) {
        IOUtils.closeQuietly(writer);
      }
    }
  }

  /**
   * Calibrates the curves unit by unit from the instrument definitions.
   * @param definitions The instrument definitions, indexed by unit then curve.
   * @param curveGenerators The curve generators, indexed by unit then curve.
   * @param curveNames The curve names, indexed by unit then curve.
   * @param knownData The data known before calibration (here: the FX matrix only).
   * @param calculator The par-spread calculator providing the calibration equations.
   * @param sensitivityCalculator The associated sensitivity calculator (for the Jacobian).
   * @param withToday Whether today's fixings are used.
   * @return The calibrated provider and the curve building block bundle.
   */
  private static Pair<MulticurveProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorYDCurve[][] curveGenerators,
      final String[][] curveNames, final MulticurveProviderDiscount knownData, final InstrumentDerivativeVisitor<MulticurveProviderInterface, Double> calculator,
      final InstrumentDerivativeVisitor<MulticurveProviderInterface, MulticurveSensitivity> sensitivityCalculator, final boolean withToday) {
    final int nbUnits = curveGenerators.length;
    final double[][] parametersGuess = new double[nbUnits][];
    final GeneratorYDCurve[][] generatorFinal = new GeneratorYDCurve[nbUnits][];
    final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
    for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
      generatorFinal[loopunit] = new GeneratorYDCurve[curveGenerators[loopunit].length];
      int nbInsUnit = 0;
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        nbInsUnit += definitions[loopunit][loopcurve].length;
      }
      parametersGuess[loopunit] = new double[nbInsUnit];
      int startCurve = 0; // First parameter index of the curve in the unit.
      instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
        final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
        System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
        startCurve += instruments[loopunit][loopcurve].length;
      }
    }
    return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, DSC_MAP, FWD_IBOR_MAP, FWD_ON_MAP, calculator,
        sensitivityCalculator);
  }

  /**
   * Converts the definitions of one unit to derivatives, attaching the relevant fixing series.
   * @param definitions The instrument definitions, indexed by curve.
   * @param unit The unit index, used to select the fixing time series.
   * @param withToday Whether today's fixings are included.
   * @return The derivatives, indexed by curve.
   */
  @SuppressWarnings("unchecked")
  private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
    final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
    for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
      instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
      int loopins = 0;
      for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
        final InstrumentDerivative ird;
        if (instrument instanceof SwapFixedONDefinition) {
          ird = ((SwapFixedONDefinition) instrument).toDerivative(NOW, getTSSwapFixedON(withToday, unit), NOT_USED_2);
        } else if (instrument instanceof SwapFixedIborDefinition) {
          ird = ((SwapFixedIborDefinition) instrument).toDerivative(NOW, getTSSwapFixedIbor(withToday, unit), NOT_USED_2);
        } else {
          ird = instrument.toDerivative(NOW, NOT_USED_2);
        }
        instruments[loopcurve][loopins++] = ird;
      }
    }
    return instruments;
  }

  /** Fixing series for fixed-vs-ON swaps; only unit 0 (discounting) contains OIS. */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedON(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
        return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /** Fixing series for fixed-vs-Ibor swaps; units 0 and 1 share the same series. */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
      case 1: // Intentional fall-through: both units use the same series (the previously
              // duplicated branches flagged by the REVIEW comment are merged here).
        return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Initial calibration guesses for a set of instruments, one per instrument.
   * @param definitions The instrument definitions.
   * @return The initial guesses.
   */
  private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
    final double[] result = new double[definitions.length];
    int loopr = 0;
    for (final InstrumentDefinition<?> definition : definitions) {
      result[loopr++] = initialGuess(definition);
    }
    return result;
  }

  /**
   * Initial calibration guess for one instrument: its quoted fixed rate when the definition
   * carries one, otherwise a flat 1% default.
   * @param instrument The instrument definition.
   * @return The initial guess.
   */
  private static double initialGuess(final InstrumentDefinition<?> instrument) {
    if (instrument instanceof SwapFixedONDefinition) {
      return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof SwapFixedIborDefinition) {
      return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof ForwardRateAgreementDefinition) {
      return ((ForwardRateAgreementDefinition) instrument).getRate();
    }
    if (instrument instanceof CashDefinition) {
      return ((CashDefinition) instrument).getRate();
    }
    return 0.01;
  }
}
MergeMethods
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
/** Linear interpolator with flat extrapolation on both ends. */
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR, Interpolator1DFactory.FLAT_EXTRAPOLATOR);
/** Curve node times are the last time of each calibration instrument. */
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
/** Convergence tolerance of the calibration root finder. */
private static final double TOLERANCE_ROOT = 1.0E-10;
/** Maximum number of root-finder steps. */
private static final int STEP_MAX = 100;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final Currency USD = Currency.USD;
private static final double NOTIONAL = 1.0;
// NOTE(review): "INFALTION" is a typo for "INFLATION" in this constant name; renaming would
// touch every use site in the class, so it is only flagged here.
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
/** Valuation date. */
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
// NOTE(review): the "with today" series uses a price-index level of 200 while the
// "without today" series below uses 100 — confirm the inconsistent levels are intentional.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27), DateUtils.getUTCDate(2011, 9, 28) }, new double[] { 200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27) }, new double[] { 100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] { 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] { GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] { DateUtils.periodOfYears(1), DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7), DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20), DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
/** One attribute per CPI tenor, populated in the static block below. */
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
// NOTE(review): merge artifact — this static block references FWD3_USD_TENOR and FWD3_USD_ATTR,
// which are not declared in this version of the class, so it will not compile as-is; it looks
// like a leftover from the multicurve variant of this test. Confirm and remove or restore the
// missing fields.
static {
for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
}
}
// NOTE(review): the next two javadoc comments are dangling merge leftovers from the multicurve
// variant — the fields they documented are not present in this version of the class.
/** Standard USD discounting curve instrument definitions */
/** Standard USD Forward 3M curve instrument definitions */
/** Units of curves */
public static final int[] NB_UNITS = new int[] { 1 };
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
// Exogenous data known before calibration: a nominal multicurve used for discounting.
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
// Build the CPI instrument definitions and wire the single block/unit: one linearly
// interpolated price-index curve calibrated to the zero-coupon inflation swaps.
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] { DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] { genIntLin };
NAMES_UNITS[0][0] = new String[] { CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] { US_CPI });
}
// NOTE(review): merge artifact — this entire static block belongs to the multicurve variant
// of the test. It references DEFINITIONS_DSC_USD, DEFINITIONS_FWD3_USD, GeneratorYDCurve,
// GeneratorCurveYieldInterpolated, DSC_MAP, FWD_ON_MAP, FWD_IBOR_MAP, INDEX_ON_USD and
// USDLIBOR3M, none of which are declared or imported in this version, so the class cannot
// compile with it present. It also re-initializes DEFINITIONS_UNITS/GENERATORS_UNITS/
// NAMES_UNITS[0][1] although NB_UNITS is { 1 } here. Confirm and delete, or restore the
// missing declarations.
static {
DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] { DEFINITIONS_DSC_USD };
DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] { DEFINITIONS_FWD3_USD };
final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] { genIntLin };
GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] { genIntLin };
NAMES_UNITS[0][0] = new String[] { CURVE_NAME_DSC_USD };
NAMES_UNITS[0][1] = new String[] { CURVE_NAME_FWD3_USD };
DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] { INDEX_ON_USD });
FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] { USDLIBOR3M });
}
/** Placeholder curve-name arguments passed to the toDerivative calls. */
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = { NOT_USED, NOT_USED };
/**
 * Builds one instrument definition per market quote from the parallel generator/attribute arrays.
 * @param marketQuotes The market quotes, one per instrument.
 * @param generators The instrument generators, parallel to the quotes.
 * @param attribute The generator attributes (tenors), parallel to the quotes.
 * @return The instrument definitions.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
  final int nbInstruments = marketQuotes.length;
  final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[nbInstruments];
  // Generate each instrument at the valuation date with the standard notional.
  for (int i = 0; i < nbInstruments; i++) {
    definitions[i] = generators[i].generateInstrument(NOW, marketQuotes[i], NOTIONAL, attribute[i]);
  }
  return definitions;
}
// Calibrated curves and Jacobian blocks, one entry per block, filled in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculators used for pricing and for the par-spread calibration equations.
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
// Calibration engine.
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
/** Tolerance on the (zero) present value of the calibration instruments. */
private static final double TOLERANCE_CAL = 1.0E-9;
/** Calibrates every block once before the suite runs; the tests reuse the results. */
@BeforeSuite
static void initClass() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[block], GENERATORS_UNITS[block], NAMES_UNITS[block], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
  }
}
/** Checks that each calibrated block reprices its own calibration instruments to zero. */
@Test
public void curveConstructionGeneratorOtherBlocks() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    final InflationProviderDiscount calibrated = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(block).getFirst();
    curveConstructionTest(DEFINITIONS_UNITS[block], calibrated, false, block);
  }
}
/**
 * Re-prices every calibration instrument against the calibrated curves and asserts a present value
 * of zero (converted to USD), within {@code TOLERANCE_CAL}.
 *
 * @param definitions the instrument definitions, indexed as [unit][curve][instrument]
 * @param curves the calibrated provider to price against
 * @param withToday whether today's fixings are used when converting definitions
 * @param block the block index, used only in the assertion message
 */
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
  for (int loopblock = 0; loopblock < definitions.length; loopblock++) {
    final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
    for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
      for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
        final double pv = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
        assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv, TOLERANCE_CAL);
      }
    }
  }
}
/* @Test(enabled = true)
*/
//**
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
/**
 * Calibrates the inflation curves unit by unit and returns the calibrated provider together with the
 * Jacobian curve-building blocks.
 *
 * @param definitions instrument definitions, indexed as [unit][curve][instrument]
 * @param curveGenerators curve generators per [unit][curve]
 * @param curveNames curve names per [unit][curve]
 * @param knownData previously known (exogenous) curves
 * @param calculator the par-spread calculator used as the calibration target
 * @param sensitivityCalculator the matching sensitivity calculator
 * @param withToday whether today's fixings are used when converting definitions
 * @return the calibrated provider and the curve building block bundle
 */
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorPriceIndexCurve[][] curveGenerators, final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator, final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday) {
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
// Total instrument count across the unit's curves sizes the flat guess vector.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
// First parameter index of the curve in the unit.
int startCurve = 0;
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
// Pack this curve's guesses into the unit-level flat parameter vector, one curve after another.
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator, sensitivityCalculator);
}
/**
 * Analyzes the shape of the forward curve.
 * <p>
 * Disabled by default: samples {@code nbDate} forward rates day by day and writes them to
 * "fwd-dsc.csv" for manual inspection.
 * <p>
 * NOTE(review): this method references MulticurveProviderInterface, USDLIBOR3M, ScheduleCalculator
 * and TimeCalculator, none of which are declared or imported in this class — presumably copied from
 * the multicurve variant of this test; confirm before enabling.
 */
@Test(enabled = false)
public void forwardAnalysis() {
  final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
  final int jump = 1; // sampling step, in business days
  final int startIndex = 0;
  final int nbDate = 2750;
  ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
  final double[] rateDsc = new double[nbDate];
  final double[] startTime = new double[nbDate];
  FileWriter writer = null;
  try {
    writer = new FileWriter("fwd-dsc.csv");
    for (int loopdate = 0; loopdate < nbDate; loopdate++) {
      startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
      final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
      final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
      final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
      rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
      startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
      writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
    }
    writer.flush();
  } catch (final IOException e) {
    // Best-effort diagnostic tool: keep the original swallow-and-report behavior.
    e.printStackTrace();
  } finally {
    // closeQuietly is null-safe; this single close point replaces the original triple
    // close path (explicit close() in the try plus closeQuietly in both catch and finally).
    IOUtils.closeQuietly(writer);
  }
}
/**
 * Converts instrument definitions to time-zero derivatives for calibration/pricing.
 * <p>
 * Zero-coupon inflation swaps are converted leg by leg: the fixed leg needs no fixings, while the
 * inflation leg is always built against TS_PRICE_INDEX_USD_WITH_TODAY.
 * NOTE(review): the {@code unit} and {@code withToday} parameters are currently unused — the original
 * call through getTSSwapFixedInflation is commented out below; confirm this is intentional.
 */
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
// Leg-by-leg conversion: fixed leg without fixings, inflation leg with the CPI series.
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
} else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
/**
 * Returns the fixing time series used when converting swap definitions for a given curve unit.
 * <p>
 * NOTE(review): TS_FIXED_IBOR_USD3M_WITH_TODAY / TS_FIXED_IBOR_USD3M_WITHOUT_TODAY are not declared
 * in this class (only the TS_FIXED_PRICE_INDEX_USD_* series are) — presumably a leftover from the
 * Ibor variant of this test; confirm the intended series. The method is currently referenced only
 * from commented-out code in convert().
 *
 * @param withToday whether today's fixing should be included
 * @param unit the curve unit index; only 0 and 1 are valid
 * @return the fixing series for that unit
 * @throws IllegalArgumentException if {@code unit} is not 0 or 1
 */
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
  switch (unit) {
    case 0:
    case 1:
      // Units 0 and 1 intentionally share the same series (the duplicate branches flagged by the
      // original REVIEW comment are merged via fall-through).
      return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
    default:
      throw new IllegalArgumentException(unit.toString());
  }
}
/** Maps each definition to its calibration starting value via the single-instrument overload. */
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
  final int n = definitions.length;
  final double[] guesses = new double[n];
  for (int i = 0; i < n; i++) {
    guesses[i] = initialGuess(definitions[i]);
  }
  return guesses;
}
/**
 * Root-finder starting point for one instrument: the quoted fixed rate when the instrument type
 * carries one, otherwise a default of 200 (a price-index-level starting value).
 */
private static double initialGuess(final InstrumentDefinition<?> instrument) {
  if (instrument instanceof SwapFixedONDefinition) {
    final SwapFixedONDefinition swap = (SwapFixedONDefinition) instrument;
    return swap.getFixedLeg().getNthPayment(0).getRate();
  } else if (instrument instanceof SwapFixedIborDefinition) {
    final SwapFixedIborDefinition swap = (SwapFixedIborDefinition) instrument;
    return swap.getFixedLeg().getNthPayment(0).getRate();
  } else if (instrument instanceof ForwardRateAgreementDefinition) {
    return ((ForwardRateAgreementDefinition) instrument).getRate();
  } else if (instrument instanceof CashDefinition) {
    return ((CashDefinition) instrument).getRate();
  } else {
    return 200;
  }
}
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {

  /** Linear interpolator with flat extrapolation, used for the price index curve. */
  private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR, Interpolator1DFactory.FLAT_EXTRAPOLATOR);
  /** Curve node abscissas are the last payment time of each calibration instrument. */
  private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
  private static final double TOLERANCE_ROOT = 1.0E-10;
  private static final int STEP_MAX = 100;
  private static final Calendar NYC = new MondayToFridayCalendar("NYC");
  private static final Currency USD = Currency.USD;
  private static final double NOTIONAL = 1.0;
  // Note: "INFALTION" is a pre-existing typo in the field name, kept to limit churn.
  private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
  private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
  /** Valuation date shared by all tests. */
  private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
  /** CPI fixings including the valuation date. */
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27), DateUtils.getUTCDate(2011, 9, 28) }, new double[] { 200, 200 });
  /** CPI fixings excluding the valuation date. */
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27) }, new double[] { 100 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITHOUT_TODAY };
  private static final String CURVE_NAME_CPI_USD = "USD CPI";
  /** Market values for the CPI USD curve */
  public static final double[] CPI_USD_MARKET_QUOTES = new double[] { 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
  /** Generators for the CPI USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] { GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
  /** Tenors for the CPI USD curve */
  public static final Period[] CPI_USD_TENOR = new Period[] { DateUtils.periodOfYears(1), DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7), DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20), DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
  public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
      CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
    }
  }
  /** Standard USD CPI curve instrument definitions */
  public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
  // NOTE(review): a merge-artifact static initializer was removed here. It populated FWD3_USD_ATTR from
  // FWD3_USD_TENOR, neither of which is declared anywhere in this class, so the class could not compile
  // with it present. It belonged to the multicurve (Ibor) variant of this test.
  /** Units of curves */
  public static final int[] NB_UNITS = new int[] { 1 };
  public static final int NB_BLOCKS = NB_UNITS.length;
  public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
  public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
  public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
  public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
  /** Exogenous (already calibrated) discounting curves the inflation curve is built on top of. */
  public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
  public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
  static {
    DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
      GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
      NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
    }
    DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] { DEFINITIONS_CPI_USD };
    final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
    GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] { genIntLin };
    NAMES_UNITS[0][0] = new String[] { CURVE_NAME_CPI_USD };
    US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] { US_CPI });
  }
  // NOTE(review): a second merge-artifact static initializer was removed here. It assigned the undeclared
  // fields DEFINITIONS_DSC_USD / DEFINITIONS_FWD3_USD and used the unimported types GeneratorYDCurve,
  // GeneratorCurveYieldInterpolated, IndexON and IborIndex (again from the multicurve variant); the class
  // cannot compile with it present, and it also re-assigned the final DEFINITIONS_UNITS entries.
  /** Placeholder curve name passed to toDerivative() where a curve name is required but unused. */
  public static final String NOT_USED = "Not used";
  /** Pair of placeholder curve names for two-leg instruments. */
  public static final String[] NOT_USED_2 = { NOT_USED, NOT_USED };

  /**
   * Builds one instrument definition per market quote by delegating to the generator at the same index.
   *
   * @param marketQuotes the quoted levels, one per instrument
   * @param generators the instrument generators, parallel to the quotes
   * @param attribute the generator attributes (tenors), parallel to the quotes
   * @return the generated definitions, in quote order
   */
  public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
    final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
    for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
      definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
    }
    return definitions;
  }

  /** Calibrated curve bundles ("without today" fixings), one entry per block; filled in initClass(). */
  private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
  // Calculators
  private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
  private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
  private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
  /** Root-finding curve calibrator (shared root/step tolerances). */
  private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
  /** Absolute tolerance on re-priced present values in the round-trip tests. */
  private static final double TOLERANCE_CAL = 1.0E-9;

  /** Calibrates every curve block once before the suite runs (market data "without today"). */
  @BeforeSuite
  static void initClass() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
    }
  }

  /** Verifies that each calibrated block re-prices its own calibration instruments to zero PV. */
  @Test
  public void curveConstructionGeneratorOtherBlocks() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
    }
  }

  /**
   * Re-prices every calibration instrument against the calibrated curves and asserts a present value
   * of zero (converted to USD), within {@code TOLERANCE_CAL}.
   *
   * @param definitions the instrument definitions, indexed as [unit][curve][instrument]
   * @param curves the calibrated provider to price against
   * @param withToday whether today's fixings are used when converting definitions
   * @param block the block index, used only in the assertion message
   */
  public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
    final int nbBlocks = definitions.length;
    for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
      final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
      final double[][] pv = new double[instruments.length][];
      for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
        pv[loopcurve] = new double[instruments[loopcurve].length];
        for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
          pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
          assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
        }
      }
    }
  }

  /**
   * Calibrates the inflation curves unit by unit and returns the calibrated provider together with the
   * Jacobian curve-building blocks.
   *
   * @param definitions instrument definitions, indexed as [unit][curve][instrument]
   * @param curveGenerators curve generators per [unit][curve]
   * @param curveNames curve names per [unit][curve]
   * @param knownData previously known (exogenous) curves
   * @param calculator the par-spread calculator used as the calibration target
   * @param sensitivityCalculator the matching sensitivity calculator
   * @param withToday whether today's fixings are used when converting definitions
   * @return the calibrated provider and the curve building block bundle
   */
  private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorPriceIndexCurve[][] curveGenerators, final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator, final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday) {
    final int nbUnits = curveGenerators.length;
    final double[][] parametersGuess = new double[nbUnits][];
    final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
    final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
    for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
      generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
      // Total instrument count across the unit's curves sizes the flat guess vector.
      int nbInsUnit = 0;
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        nbInsUnit += definitions[loopunit][loopcurve].length;
      }
      parametersGuess[loopunit] = new double[nbInsUnit];
      // First parameter index of the curve in the unit.
      int startCurve = 0;
      instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
        final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
        // Pack this curve's guesses into the unit-level flat parameter vector, one curve after another.
        System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
        startCurve += instruments[loopunit][loopcurve].length;
      }
    }
    return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator, sensitivityCalculator);
  }

  /**
   * Analyzes the shape of the forward curve.
   * <p>
   * Disabled by default: samples {@code nbDate} forward rates day by day and writes them to
   * "fwd-dsc.csv" for manual inspection. (The Javadoc was originally wedged between the {@code public}
   * modifier and the return type; it now precedes the declaration. An older fully commented-out copy
   * of this method that previously sat above makeCurvesFromDefinitions was deleted as dead code.)
   * <p>
   * NOTE(review): this method references MulticurveProviderInterface, USDLIBOR3M, ScheduleCalculator
   * and TimeCalculator, none of which are declared or imported in this class — presumably copied from
   * the multicurve variant of this test; confirm before enabling.
   */
  @Test(enabled = false)
  public void forwardAnalysis() {
    final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
    final int jump = 1; // sampling step, in business days
    final int startIndex = 0;
    final int nbDate = 2750;
    ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
    final double[] rateDsc = new double[nbDate];
    final double[] startTime = new double[nbDate];
    FileWriter writer = null;
    try {
      writer = new FileWriter("fwd-dsc.csv");
      for (int loopdate = 0; loopdate < nbDate; loopdate++) {
        startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
        final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
        final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
        final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
        rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
        startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
        writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
      }
      writer.flush();
    } catch (final IOException e) {
      // Best-effort diagnostic tool: keep the original swallow-and-report behavior.
      e.printStackTrace();
    } finally {
      // closeQuietly is null-safe; this single close point replaces the original triple
      // close path (explicit close() in the try plus closeQuietly in both catch and finally).
      IOUtils.closeQuietly(writer);
    }
  }

  /**
   * Converts instrument definitions to time-zero derivatives for calibration/pricing.
   * <p>
   * Zero-coupon inflation swaps are converted leg by leg: the fixed leg needs no fixings, while the
   * inflation leg is always built against TS_PRICE_INDEX_USD_WITH_TODAY.
   * NOTE(review): the {@code unit} and {@code withToday} parameters are currently unused — the original
   * whole-swap conversion through getTSSwapFixedInflation is kept below as a commented reference;
   * confirm this is intentional.
   */
  @SuppressWarnings("unchecked")
  private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
    final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
    for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
      instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
      int loopins = 0;
      for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
        InstrumentDerivative ird;
        if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
          /* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
          final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
          final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
          ird = new Swap(ird1, ird2);
        } else {
          ird = instrument.toDerivative(NOW, NOT_USED_2);
        }
        instruments[loopcurve][loopins++] = ird;
      }
    }
    return instruments;
  }

  /**
   * Returns the fixing time series used when converting swap definitions for a given curve unit.
   * <p>
   * NOTE(review): TS_FIXED_IBOR_USD3M_WITH_TODAY / TS_FIXED_IBOR_USD3M_WITHOUT_TODAY are not declared
   * in this class (only the TS_FIXED_PRICE_INDEX_USD_* series are) — presumably a leftover from the
   * Ibor variant of this test; confirm the intended series. The method is currently referenced only
   * from commented-out code in convert().
   *
   * @param withToday whether today's fixing should be included
   * @param unit the curve unit index; only 0 and 1 are valid
   * @return the fixing series for that unit
   * @throws IllegalArgumentException if {@code unit} is not 0 or 1
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
      case 1:
        // Units 0 and 1 intentionally share the same series (the duplicate branches flagged by the
        // original REVIEW comment are merged via fall-through).
        return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /** Maps each definition to its calibration starting value via the single-instrument overload. */
  private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
    final double[] result = new double[definitions.length];
    int loopr = 0;
    for (final InstrumentDefinition<?> definition : definitions) {
      result[loopr++] = initialGuess(definition);
    }
    return result;
  }

  /**
   * Root-finder starting point for one instrument: the quoted fixed rate when the instrument type
   * carries one, otherwise a default of 200 (a price-index-level starting value).
   */
  private static double initialGuess(final InstrumentDefinition<?> instrument) {
    if (instrument instanceof SwapFixedONDefinition) {
      return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof SwapFixedIborDefinition) {
      return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof ForwardRateAgreementDefinition) {
      return ((ForwardRateAgreementDefinition) instrument).getRate();
    }
    if (instrument instanceof CashDefinition) {
      return ((CashDefinition) instrument).getRate();
    }
    return 200;
  }
}
// KeepBothMethods — merge-resolution marker; kept as a comment so the file parses.
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {

  /** Linear interpolator with flat extrapolation on both ends, used for the price index curve. */
  private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR, Interpolator1DFactory.FLAT_EXTRAPOLATOR);
  /** Visitor returning the last relevant time of an instrument; used to place the curve node times. */
  private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
  /** Convergence tolerance of the calibration root-finder. */
  private static final double TOLERANCE_ROOT = 1.0E-10;
  /** Maximum number of root-finder steps. */
  private static final int STEP_MAX = 100;
  /** Week-day-only calendar used for date adjustments. */
  private static final Calendar NYC = new MondayToFridayCalendar("NYC");
  private static final Currency USD = Currency.USD;
  private static final double NOTIONAL = 1.0;
  // NOTE(review): "INFALTION" is a typo for "INFLATION"; the name is kept to avoid churn.
  private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
  private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
  /** Valuation date shared by all instruments. */
  private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
  /** Empty fixing series (currently unused; kept for symmetry with sibling curve tests). */
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
  /** CPI fixing series that includes a print on the valuation date. */
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27), DateUtils.getUTCDate(2011, 9, 28) }, new double[] { 200, 200 });
  /** CPI fixing series without a print on the valuation date. */
  private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27) }, new double[] { 100 });
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITH_TODAY };
  @SuppressWarnings("rawtypes")
  private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITHOUT_TODAY };
  private static final String CURVE_NAME_CPI_USD = "USD CPI";
  /** Market values for the CPI USD curve */
  public static final double[] CPI_USD_MARKET_QUOTES = new double[] { 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
  /** Generators for the CPI USD curve */
  public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] { GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
  /** Tenors for the CPI USD curve */
  public static final Period[] CPI_USD_TENOR = new Period[] { DateUtils.periodOfYears(1), DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7), DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20), DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
  /** One attribute per CPI swap tenor; populated by the static block below. */
  public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
  static {
    for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
      CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
    }
  }
  /** Standard USD CPI curve instrument definitions; assigned in the static block below. */
  public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
  // NOTE(review): removed a copy-paste leftover static block that populated FWD3_USD_ATTR from
  // FWD3_USD_TENOR; neither symbol is declared in this class, so the block did not compile.
  /** Units of curves */
  public static final int[] NB_UNITS = new int[] { 1 };
  /** Number of calibration blocks. */
  public static final int NB_BLOCKS = NB_UNITS.length;
  public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
  public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
  public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
  /** Exogenous multi-curve data the inflation curve is calibrated on top of. */
  public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
  public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
  /** Map from curve name to the price indexes priced by that curve. */
  public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
  static {
    DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
      GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
      NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
    }
    DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] { DEFINITIONS_CPI_USD };
    final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
    GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] { genIntLin };
    NAMES_UNITS[0][0] = new String[] { CURVE_NAME_CPI_USD };
    US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] { US_CPI });
  }
  // NOTE(review): removed a second copy-paste leftover static block (USD discounting / forward-3M
  // setup) that referenced undeclared symbols (DEFINITIONS_DSC_USD, DSC_USD_MARKET_QUOTES,
  // GeneratorYDCurve, DSC_MAP, FWD_ON_MAP, INDEX_ON_USD, USDLIBOR3M, ...), used the wrong element
  // type for GENERATORS_UNITS, and would have indexed DEFINITIONS_UNITS[0][1] out of bounds
  // (NB_UNITS[0] == 1). It did not compile.
  /** Placeholder curve name passed where the conversion does not need one. */
  public static final String NOT_USED = "Not used";
  public static final String[] NOT_USED_2 = { NOT_USED, NOT_USED };

  /**
   * Builds one instrument definition per market quote.
   * @param marketQuotes The market quotes.
   * @param generators The instrument generators, one per quote.
   * @param attribute The generator attributes, one per quote.
   * @return The instrument definitions.
   */
  public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
    final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
    for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
      definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
    }
    return definitions;
  }

  /** Calibrated curve bundles (without today's fixing), one entry per block; filled in initClass(). */
  private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
  // Calculators
  private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
  private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
  private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
  // Calibration engine
  private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
  /** Tolerance used when checking that calibration instruments re-price to zero. */
  private static final double TOLERANCE_CAL = 1.0E-9;

  /** Calibrates every block of curves once, without today's fixing, before the suite runs. */
  @BeforeSuite
  static void initClass() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
    }
  }

  /** Checks that every calibration instrument re-prices to (close to) zero on the calibrated curves. */
  @Test
  public void curveConstructionGeneratorOtherBlocks() {
    for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
      curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
    }
  }

  /**
   * Re-prices every instrument of every unit on the given curves and asserts a zero present value (in USD).
   * @param definitions The instrument definitions, indexed by unit then curve.
   * @param curves The calibrated inflation provider.
   * @param withToday Whether today's fixing is used in the conversion to derivatives.
   * @param block The block index, used only in the assertion message.
   */
  public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
    final int nbBlocks = definitions.length;
    for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
      final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
      final double[][] pv = new double[instruments.length][];
      for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
        pv[loopcurve] = new double[instruments[loopcurve].length];
        for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
          pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
          assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
        }
      }
    }
  }

  // Kept for reference: the inflation-specific forward-analysis export, disabled until the
  // CPI forward accessors it needs are finalized.
  /*
  public void forwardAnalysis() {
  final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
  final int jump = 1;
  final int startIndex = 0;
  final int nbDate = 2750;
  ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
  final double[] rateDsc = new double[nbDate];
  final double[] startTime = new double[nbDate];
  try {
  final FileWriter writer = new FileWriter("fwd-dsc.csv");
  for (int loopdate = 0; loopdate < nbDate; loopdate++) {
  startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
  final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
  final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
  final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
  rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
  startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
  writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
  }
  writer.flush();
  writer.close();
  } catch (final IOException e) {
  e.printStackTrace();
  }
  }*/

  /**
   * Calibrates the curves for one block of units.
   * @param definitions The instrument definitions, indexed by unit then curve.
   * @param curveGenerators The curve generators, indexed by unit then curve.
   * @param curveNames The curve names, indexed by unit then curve.
   * @param knownData The exogenous (already known) curves.
   * @param calculator The target value calculator (par spread market quote).
   * @param sensitivityCalculator The sensitivity calculator matching the target calculator.
   * @param withToday Whether today's fixing is used in the conversion to derivatives.
   * @return The calibrated inflation provider and the Jacobian block bundle.
   */
  private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorPriceIndexCurve[][] curveGenerators, final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator, final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday) {
    final int nbUnits = curveGenerators.length;
    final double[][] parametersGuess = new double[nbUnits][];
    final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
    final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
    for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
      generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
      int nbInsUnit = 0;
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        nbInsUnit += definitions[loopunit][loopcurve].length;
      }
      parametersGuess[loopunit] = new double[nbInsUnit];
      // First parameter index of the curve in the unit.
      int startCurve = 0;
      instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
      for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
        generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
        final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
        System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
        startCurve += instruments[loopunit][loopcurve].length;
      }
    }
    return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator, sensitivityCalculator);
  }

  // NOTE(review): removed a live @Test(enabled = false) forwardAnalysis() method and the private
  // getTSSwapFixedIbor(...) helper, both copy-paste leftovers from the multicurve test: they
  // referenced symbols never declared here (MulticurveProviderInterface, USDLIBOR3M,
  // TS_FIXED_IBOR_USD3M_*, FileWriter was not imported) and did not compile. forwardAnalysis also
  // closed its writer twice (IOUtils.closeQuietly in both the catch and finally blocks).

  /**
   * Converts the instrument definitions of one unit to derivatives as seen from NOW.
   * @param definitions The instrument definitions, indexed by curve.
   * @param unit The unit index (currently unused, see note below).
   * @param withToday Whether today's fixing should be used (currently unused, see note below).
   * @return The derivatives, indexed by curve.
   */
  @SuppressWarnings("unchecked")
  private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
    final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
    for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
      instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
      int loopins = 0;
      for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
        InstrumentDerivative ird;
        if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
          // NOTE(review): the inflation leg is always converted with TS_PRICE_INDEX_USD_WITH_TODAY,
          // so the 'withToday' and 'unit' parameters are ignored here and getTSSwapFixedInflation
          // is never called — confirm this is intentional.
          final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
          final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
          ird = new Swap(ird1, ird2);
        } else {
          ird = instrument.toDerivative(NOW, NOT_USED_2);
        }
        instruments[loopcurve][loopins++] = ird;
      }
    }
    return instruments;
  }

  /**
   * Returns the CPI fixing series to use for a given unit.
   * NOTE(review): units 0 and 1 intentionally(?) return the same series — confirm; only unit 0
   * exists in this test (NB_UNITS = { 1 }).
   * @param withToday Whether the series including today's fixing is wanted.
   * @param unit The unit index (0 or 1).
   * @return The fixing series.
   */
  @SuppressWarnings("rawtypes")
  private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
    switch (unit) {
      case 0:
        return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
      case 1:
        return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
      default:
        throw new IllegalArgumentException(unit.toString());
    }
  }

  /**
   * Builds the initial parameter guess for a list of instrument definitions.
   * @param definitions The instrument definitions.
   * @return One guess per definition.
   */
  private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
    final double[] result = new double[definitions.length];
    int loopr = 0;
    for (final InstrumentDefinition<?> definition : definitions) {
      result[loopr++] = initialGuess(definition);
    }
    return result;
  }

  /**
   * Returns the initial calibration guess for one instrument.
   * @param instrument The instrument definition.
   * @return The fixed rate for swaps/FRAs/cash; otherwise 200 — presumably an index-level guess
   *         for zero-coupon inflation swaps (matches the 200 fixings above) — TODO confirm.
   */
  private static double initialGuess(final InstrumentDefinition<?> instrument) {
    if (instrument instanceof SwapFixedONDefinition) {
      return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof SwapFixedIborDefinition) {
      return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
    }
    if (instrument instanceof ForwardRateAgreementDefinition) {
      return ((ForwardRateAgreementDefinition) instrument).getRate();
    }
    if (instrument instanceof CashDefinition) {
      return ((CashDefinition) instrument).getRate();
    }
    return 200;
  }
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
// Linear interpolator with flat extrapolation on both ends, used for the price index curve.
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR, Interpolator1DFactory.FLAT_EXTRAPOLATOR);
// Visitor returning the last relevant time of an instrument; used to place curve node times.
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
// Root-finder convergence tolerance and iteration cap for the calibration.
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
// Week-day-only calendar used for date adjustments.
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final Currency USD = Currency.USD;
private static final double NOTIONAL = 1.0;
// NOTE(review): "INFALTION" is a typo for "INFLATION"; kept as-is to avoid churn.
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
// Valuation date shared by all instruments.
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
// Empty fixing series (unused in the visible code — presumably kept for symmetry with sibling tests).
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
// CPI fixing series with / without a print on the valuation date.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27), DateUtils.getUTCDate(2011, 9, 28) }, new double[] { 200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] { DateUtils.getUTCDate(2011, 9, 27) }, new double[] { 100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] { TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] { 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] { GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] { DateUtils.periodOfYears(1), DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7), DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20), DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
// One attribute per CPI swap tenor; populated by the static block below.
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
  // Build one generator attribute per CPI swap tenor.
  int i = 0;
  for (final Period tenor : CPI_USD_TENOR) {
    CPI_USD_ATTR[i++] = new GeneratorAttributeIR(tenor);
  }
}
/** Standard USD CPI curve instrument definitions; blank final, assigned in a later static block. */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
// NOTE(review): removed a copy-paste leftover static block that populated FWD3_USD_ATTR from
// FWD3_USD_TENOR; neither symbol is declared anywhere in this class, so the block did not compile.
// NOTE(review): the next two doc comments are copy-paste leftovers; no discounting or Forward 3M
// definitions are declared in this class.
/** Standard USD discounting curve instrument definitions */
/** Standard USD Forward 3M curve instrument definitions */
/** Units of curves */
public static final int[] NB_UNITS = new int[] { 1 };
// Number of calibration blocks (one unit each).
public static final int NB_BLOCKS = NB_UNITS.length;
// Definitions / generators / names indexed by block, then unit, then curve.
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
// Exogenous multi-curve data the inflation curve is calibrated on top of.
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
// Map from curve name to the price indexes priced by that curve.
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
static {
// Instantiate the CPI instruments, then wire up block 0 / unit 0 with a single
// linearly-interpolated price index curve. Order matters: DEFINITIONS_CPI_USD is a blank
// final assigned here before being referenced below.
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] { DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] { genIntLin };
NAMES_UNITS[0][0] = new String[] { CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] { US_CPI });
}
// NOTE(review): removed a copy-paste leftover static block (USD discounting / forward-3M curve
// setup from the multicurve test). It referenced symbols never declared in this class
// (DEFINITIONS_DSC_USD, DSC_USD_MARKET_QUOTES, GeneratorYDCurve, GeneratorCurveYieldInterpolated,
// DSC_MAP, FWD_ON_MAP, FWD_IBOR_MAP, INDEX_ON_USD, USDLIBOR3M, ...), assigned GENERATORS_UNITS
// elements with the wrong type, and would have indexed DEFINITIONS_UNITS[0][1] out of bounds
// since NB_UNITS[0] == 1. It did not compile.
// Placeholder curve names passed to toDerivative(...) where the names are not actually used.
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = { NOT_USED, NOT_USED };
/**
 * Builds one instrument definition per market quote.
 * @param marketQuotes The market quotes.
 * @param generators The instrument generators, one per quote.
 * @param attribute The generator attributes, one per quote.
 * @return The instrument definitions.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
  final int nbInstruments = marketQuotes.length;
  final InstrumentDefinition<?>[] result = new InstrumentDefinition<?>[nbInstruments];
  for (int i = 0; i < nbInstruments; i++) {
    result[i] = generators[i].generateInstrument(NOW, marketQuotes[i], NOTIONAL, attribute[i]);
  }
  return result;
}
// Calibrated curve bundles (without today's fixing), one entry per block; filled in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculators
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
// Calibration engine
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
// Tolerance used when checking that calibration instruments re-price to zero.
private static final double TOLERANCE_CAL = 1.0E-9;
/** Calibrates every block of curves once, without today's fixing, before the suite runs. */
@BeforeSuite
static void initClass() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[block], GENERATORS_UNITS[block], NAMES_UNITS[block], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
  }
}
/** Checks that every calibration instrument re-prices to (close to) zero on the calibrated curves. */
@Test
public void curveConstructionGeneratorOtherBlocks() {
  for (int block = 0; block < NB_BLOCKS; block++) {
    curveConstructionTest(DEFINITIONS_UNITS[block], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(block).getFirst(), false, block);
  }
}
/**
 * Re-prices every instrument of every unit on the given curves and asserts a zero present value (in USD).
 * @param definitions The instrument definitions, indexed by unit then curve.
 * @param curves The calibrated inflation provider.
 * @param withToday Whether today's fixing is used in the conversion to derivatives.
 * @param block The block index, used only in the assertion message.
 */
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
  for (int loopblock = 0; loopblock < definitions.length; loopblock++) {
    final InstrumentDerivative[][] derivatives = convert(definitions[loopblock], loopblock, withToday);
    final double[][] pv = new double[derivatives.length][];
    for (int loopcurve = 0; loopcurve < derivatives.length; loopcurve++) {
      final InstrumentDerivative[] curveInstruments = derivatives[loopcurve];
      pv[loopcurve] = new double[curveInstruments.length];
      for (int loopins = 0; loopins < curveInstruments.length; loopins++) {
        // Present value converted to USD; should be zero for a calibration instrument.
        final double pvUsd = curves.getFxRates().convert(curveInstruments[loopins].accept(PVIC, curves), USD).getAmount();
        pv[loopcurve][loopins] = pvUsd;
        assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pvUsd, TOLERANCE_CAL);
      }
    }
  }
}
/* @Test(enabled = true)
*/
//**
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
/**
 * Calibrates one set of inflation curves from instrument definitions.
 * Converts definitions to derivatives, builds initial guesses per unit, and delegates the
 * root-finding to {@code CURVE_BUILDING_REPOSITORY}.
 *
 * @param definitions instrument definitions indexed by unit / curve / instrument
 * @param curveGenerators one generator per curve, per unit
 * @param curveNames curve names, parallel to the generators
 * @param knownData previously known (exogenous) curves
 * @param calculator par-spread calculator used as the calibration target
 * @param sensitivityCalculator its sensitivity counterpart, for the Jacobian
 * @param withToday whether today's fixing is used when converting to derivatives
 * @return the calibrated provider and its curve-building block bundle (Jacobians)
 */
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions, final GeneratorPriceIndexCurve[][] curveGenerators, final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator, final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday) {
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
// Total number of instruments in this unit = total number of parameters to solve for.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
// First parameter index of the curve in the unit.
int startCurve = 0;
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
// Copy this curve's initial guesses into the unit-wide parameter vector.
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator, sensitivityCalculator);
}
/**
 * Analyzes the shape of the forward curve by exporting daily forward rates to fwd-dsc.csv.
 * Disabled by default: it writes a file and is an analysis tool, not a regression test.
 * NOTE(review): marketDsc is typed MulticurveProviderInterface but the cache holds
 * InflationProviderDiscount pairs — confirm this method still compiles/belongs here.
 */
@Test(enabled = false)
public void forwardAnalysis() {
final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
FileWriter writer = null;
try {
writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
} catch (final IOException e) {
// Analysis helper: report and fall through to the single close below.
e.printStackTrace();
} finally {
// Single close path; the previous version also called closeQuietly in the catch block,
// which was redundant with this finally.
if (writer != null) {
IOUtils.closeQuietly(writer);
}
}
}
/**
 * Converts instrument definitions to pricing derivatives.
 * Inflation zero-coupon swaps are converted leg by leg, fixing the price-index leg with the
 * "with today" fixing series.
 * NOTE(review): the withToday parameter is ignored here — the second leg always uses
 * TS_PRICE_INDEX_USD_WITH_TODAY; confirm this is intentional (see commented-out line below).
 *
 * @param definitions definitions indexed by curve / instrument
 * @param unit unit index (currently unused except by the commented-out code)
 * @param withToday whether today's fixing should be included (currently unused)
 * @return derivatives with the same curve/instrument indexing
 */
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
// Fixed leg needs no fixings; index leg is fixed against the CPI time series.
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
} else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
/**
 * Returns the fixed/price-index fixing series for the given unit.
 * Units 0 and 1 intentionally share the same series, so the cases fall through
 * (the previous duplicated branches are merged).
 *
 * @param withToday whether the series including today's fixing is wanted
 * @param unit curve unit index; only 0 and 1 are valid
 * @return the matching fixing time series array
 * @throws IllegalArgumentException for any other unit
 */
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch(unit) {
case 0: // fall through: units 0 and 1 use the same fixing series
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
/**
 * Returns the fixed/Ibor fixing series for the given unit.
 * Resolves the in-code REVIEW question: units 0 and 1 deliberately use the same series,
 * so the duplicated branches are merged into a fall-through.
 *
 * @param withToday whether the series including today's fixing is wanted
 * @param unit curve unit index; only 0 and 1 are valid
 * @return the matching fixing time series array
 * @throws IllegalArgumentException for any other unit
 */
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
switch(unit) {
case 0: // fall through: units 0 and 1 use the same fixing series
case 1:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
/**
 * Builds the root-finder starting values, one per instrument definition.
 */
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] guesses = new double[definitions.length];
for (int i = 0; i < definitions.length; i++) {
guesses[i] = initialGuess(definitions[i]);
}
return guesses;
}
/**
 * Returns a calibration starting value for one instrument.
 * Rate-like instruments start from their quoted/contract rate; anything else falls back to
 * 200 — presumably an initial price-index level for inflation swaps (TODO confirm; matches
 * the 200 levels in TS_PRICE_INDEX_USD_WITH_TODAY).
 */
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
// Fallback guess for instruments without an explicit rate (e.g. inflation zero-coupons).
return 200;
}
}
Safe
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
// Linear interpolation with flat extrapolation on both ends, used for all curves in this test.
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
// Root-finder convergence tolerance and iteration cap for the curve calibration.
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final Currency USD = Currency.USD;
private static final double NOTIONAL = 1.0;
// NOTE(review): "INFALTION" is a typo for "INFLATION"; kept to avoid touching other references.
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
// Valuation date for the whole test.
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
// NOTE(review): TS_EMPTY appears unused in this class.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
// CPI fixing series including today's publication (index level 200).
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
// NOTE(review): the without-today series uses level 100 while the with-today series uses 200
// for the same date — confirm this discrepancy is intentional.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
// One attribute per tenor, filled in the static block below.
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
// NOTE(review): the dsc-USD and Fwd-3M-USD market data these stale comments described was
// removed from this test; only the CPI curve inputs remain.
// NOTE(review): removed a stale static initializer that populated FWD3_USD_ATTR from
// FWD3_USD_TENOR — neither field is declared in this class (leftover copy-paste from a
// multicurve test), so the block could not compile and carried no behavior.
// NOTE(review): the discounting and Forward-3M definitions these stale comments described
// were removed from this test.
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
public static final int NB_BLOCKS = NB_UNITS.length;
// Definitions / generators / names indexed by block, then unit, then curve.
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
// Exogenous multicurve data the inflation curves are built on top of.
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
// Wires the single CPI curve into the block/unit structure used by the calibration.
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
// Price-index curve interpolated on the node times given by the last instrument times.
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
// NOTE(review): removed a stale static initializer copied from a multicurve (IRS) curve test.
// It referenced symbols declared nowhere in this class or its imports (DEFINITIONS_DSC_USD,
// DEFINITIONS_FWD3_USD, GeneratorYDCurve, GeneratorCurveYieldInterpolated, CURVE_NAME_DSC_USD,
// CURVE_NAME_FWD3_USD, DSC_MAP, FWD_ON_MAP, FWD_IBOR_MAP, IndexON, IborIndex, USDLIBOR3M),
// assigned GeneratorYDCurve[] into GENERATORS_UNITS (declared GeneratorPriceIndexCurve[][][]),
// wrote DEFINITIONS_UNITS[0][1] although NB_UNITS = {1} allocates only index 0, and clobbered
// the CPI wiring done by the previous initializer. It could not compile, so removing it
// preserves no behavior.
// Placeholder curve names passed to toDerivative(...) where no curve name is actually needed.
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
/**
 * Turns each (quote, generator, attribute) triple into an instrument definition.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
final InstrumentDefinition<?>[] result = new InstrumentDefinition<?>[marketQuotes.length];
for (int i = 0; i < result.length; i++) {
result[i] = generators[i].generateInstrument(NOW, marketQuotes[i], NOTIONAL, attribute[i]);
}
return result;
}
// Cache of calibrated (provider, Jacobian bundle) pairs, one per block; filled in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
// Present-value calculator used by the re-pricing assertions.
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
// Par-spread calculator (calibration target) and its sensitivity counterpart (Jacobian).
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
// Calculator
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
// Absolute tolerance used when asserting calibrated instruments re-price to zero.
private static final double TOLERANCE_CAL = 1.0E-9;
@BeforeSuite
static void initClass() {
// Calibrate every curve block once up front; the tests read the cached results.
for (int block = 0; block < NB_BLOCKS; block++) {
CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(
makeCurvesFromDefinitions(DEFINITIONS_UNITS[block], GENERATORS_UNITS[block], NAMES_UNITS[block], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
}
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
// Re-price each block's instruments on the curves calibrated in initClass().
int block = 0;
while (block < NB_BLOCKS) {
curveConstructionTest(DEFINITIONS_UNITS[block], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(block).getFirst(), false, block);
block++;
}
}
/**
 * Checks that every calibrated instrument re-prices to (approximately) zero on the
 * calibrated curves, i.e. the calibration reproduced the market quotes.
 *
 * @param definitions instrument definitions indexed by unit / curve / instrument
 * @param curves the calibrated inflation provider to price against
 * @param withToday whether today's fixing is used when converting to derivatives
 * @param block block index, used only in the assertion message
 */
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
// Present value converted to USD; par instruments used in calibration should price to ~0.
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
/**
 * Calibrates one set of inflation curves from instrument definitions.
 * Converts definitions to derivatives, builds initial guesses per unit, and delegates the
 * root-finding to {@code CURVE_BUILDING_REPOSITORY}.
 *
 * @param definitions instrument definitions indexed by unit / curve / instrument
 * @param curveGenerators one generator per curve, per unit
 * @param curveNames curve names, parallel to the generators
 * @param knownData previously known (exogenous) curves
 * @param calculator par-spread calculator used as the calibration target
 * @param sensitivityCalculator its sensitivity counterpart, for the Jacobian
 * @param withToday whether today's fixing is used when converting to derivatives
 * @return the calibrated provider and its curve-building block bundle (Jacobians)
 */
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
// Total number of instruments in this unit = total number of parameters to solve for.
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
// Copy this curve's initial guesses into the unit-wide parameter vector.
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
/**
 * Analyzes the shape of the forward curve by exporting daily forward rates to fwd-dsc.csv.
 * Disabled by default: it writes a file and is an analysis tool, not a regression test.
 * NOTE(review): marketDsc is typed MulticurveProviderInterface but the cache holds
 * InflationProviderDiscount pairs — confirm this method still compiles/belongs here.
 */
@Test(enabled = false)
public void forwardAnalysis() {
final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
FileWriter writer = null;
try {
writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
} catch (final IOException e) {
// Analysis helper: report and fall through to the single close below.
e.printStackTrace();
} finally {
// Single close path; the previous version also called closeQuietly in the catch block,
// which was redundant with this finally.
if (writer != null) {
IOUtils.closeQuietly(writer);
}
}
}
/**
 * Converts instrument definitions to pricing derivatives.
 * Inflation zero-coupon swaps are converted leg by leg, fixing the price-index leg with the
 * "with today" fixing series.
 * NOTE(review): the withToday parameter is ignored here — the second leg always uses
 * TS_PRICE_INDEX_USD_WITH_TODAY; confirm this is intentional (see commented-out line below).
 *
 * @param definitions definitions indexed by curve / instrument
 * @param unit unit index (currently unused except by the commented-out code)
 * @param withToday whether today's fixing should be included (currently unused)
 * @return derivatives with the same curve/instrument indexing
 */
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
// Fixed leg needs no fixings; index leg is fixed against the CPI time series.
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
}
else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
<<<<<<< MINE
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
//REVIEW is it intended that the first two branches of the switch statement do the same thing
switch (unit) {
case 0:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
case 1:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
=======
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch (unit) {
case 0:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
>>>>>>> YOURS
/**
 * Builds the root-finder starting values, one per instrument definition.
 */
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] guesses = new double[definitions.length];
for (int i = 0; i < definitions.length; i++) {
guesses[i] = initialGuess(definitions[i]);
}
return guesses;
}
/**
 * Returns a calibration starting value for one instrument.
 * Rate-like instruments start from their quoted/contract rate; anything else falls back to
 * 200 — presumably an initial price-index level for inflation swaps (TODO confirm; matches
 * the 200 levels in TS_PRICE_INDEX_USD_WITH_TODAY).
 */
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
// Fallback guess for instruments without an explicit rate (e.g. inflation zero-coupons).
return 200;
}
}
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final Currency USD = Currency.USD;
private static final double NOTIONAL = 1.0;
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
/** Market values for the dsc USD curve */
/** Generators for the dsc USD curve */
/** Tenors for the dsc USD curve */
/** Market values for the Fwd 3M USD curve */
/** Generators for the Fwd 3M USD curve */
/** Tenors for the Fwd 3M USD curve */
static {
for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
}
}
/** Standard USD discounting curve instrument definitions */
/** Standard USD Forward 3M curve instrument definitions */
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
static {
DEFINITIONS_DSC_USD = getDefinitions(DSC_USD_MARKET_QUOTES, DSC_USD_GENERATORS, DSC_USD_ATTR);
DEFINITIONS_FWD3_USD = getDefinitions(FWD3_USD_MARKET_QUOTES, FWD3_USD_GENERATORS, FWD3_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorYDCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_DSC_USD };
DEFINITIONS_UNITS[0][1] = new InstrumentDefinition<?>[][] {DEFINITIONS_FWD3_USD };
final GeneratorYDCurve genIntLin = new GeneratorCurveYieldInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorYDCurve[] {genIntLin };
GENERATORS_UNITS[0][1] = new GeneratorYDCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_DSC_USD };
NAMES_UNITS[0][1] = new String[] {CURVE_NAME_FWD3_USD };
DSC_MAP.put(CURVE_NAME_DSC_USD, USD);
FWD_ON_MAP.put(CURVE_NAME_DSC_USD, new IndexON[] {INDEX_ON_USD });
FWD_IBOR_MAP.put(CURVE_NAME_FWD3_USD, new IborIndex[] {USDLIBOR3M });
}
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
}
return definitions;
}
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
// Calculator
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
private static final double TOLERANCE_CAL = 1.0E-9;
@BeforeSuite
static void initClass() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
}
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
}
}
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
@Test(enabled = false)
/**
* Analyzes the shape of the forward curve.
*/
public void forwardAnalysis() {
final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
FileWriter writer = null;
try {
writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
if (writer != null) {
IOUtils.closeQuietly(writer);
}
} finally {
if (writer != null) {
IOUtils.closeQuietly(writer);
}
}
}
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
}
else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
<<<<<<< MINE
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
//REVIEW is it intended that the first two branches of the switch statement do the same thing
switch (unit) {
case 0:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
case 1:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
=======
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch (unit) {
case 0:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
>>>>>>> YOURS
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] result = new double[definitions.length];
int loopr = 0;
for (final InstrumentDefinition<?> definition : definitions) {
result[loopr++] = initialGuess(definition);
}
return result;
}
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
return 200;
}
}
Unstructured
/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
// Linear interpolator with flat extrapolation, used for the price index curve.
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
// Root-finding tolerance (absolute and relative) for the curve calibration.
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Currency USD = Currency.USD;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final double NOTIONAL = 1.0;
// NOTE(review): "INFALTION" is a pre-existing typo of "INFLATION"; kept to avoid churn at call sites.
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
// Valuation date for all instruments.
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
// USD price index fixing series including / excluding today's fixing.
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
<<<<<<< MINE
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
=======
/** Market values for the dsc USD curve */
public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the dsc USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
/** Tenors for the dsc USD curve */
public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
>>>>>>> YOURS
// Populate one tenor attribute per CPI swap node (parallel to CPI_USD_TENOR).
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
<<<<<<< MINE
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
=======
/** Market values for the Fwd 3M USD curve */
public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
/** Generators for the Fwd 3M USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
/** Tenors for the Fwd 3M USD curve */
public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
static {
for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
}
}
/** Standard USD discounting curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
/** Standard USD Forward 3M curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;
>>>>>>> YOURS
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
/** Number of independently calibrated blocks (one per entry of NB_UNITS). */
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
// Already-calibrated multicurve data the inflation curve is built on top of.
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
// Build the CPI instrument definitions and wire the single-unit calibration setup:
// one block, one unit, one linearly interpolated price index curve named CURVE_NAME_CPI_USD.
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
// Placeholder curve names passed to toDerivative where the names are not actually used.
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
/**
 * Generates one instrument definition per market quote.
 * @param marketQuotes The market quotes, one per instrument.
 * @param generators The instrument generators, parallel to the quotes.
 * @param attribute The generator attributes (tenors), parallel to the quotes.
 * @return The instrument definitions.
 */
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
  final int nbInstruments = marketQuotes.length;
  final InstrumentDefinition<?>[] result = new InstrumentDefinition<?>[nbInstruments];
  for (int i = 0; i < nbInstruments; i++) {
    result[i] = generators[i].generateInstrument(NOW, marketQuotes[i], NOTIONAL, attribute[i]);
  }
  return result;
}
// Calibrated (curve bundle, Jacobian) pairs, one per block; filled in initClass().
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
// Tolerance for the zero-PV check on calibration instruments.
private static final double TOLERANCE_CAL = 1.0E-9;
@BeforeSuite
static void initClass() {
  // Calibrate one curve bundle per block, without today's fixing.
  for (int block = 0; block < NB_BLOCKS; block++) {
    CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(
        makeCurvesFromDefinitions(DEFINITIONS_UNITS[block], GENERATORS_UNITS[block], NAMES_UNITS[block], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
  }
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
  // Re-price every block's calibration instruments on its calibrated curves.
  for (int block = 0; block < NB_BLOCKS; block++) {
    final InflationProviderDiscount calibrated = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(block).getFirst();
    curveConstructionTest(DEFINITIONS_UNITS[block], calibrated, false, block);
  }
}
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
<<<<<<< MINE
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
=======
final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
FileWriter writer = null;
try {
writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
if (writer != null) {
IOUtils.closeQuietly(writer);
}
} finally {
if (writer != null) {
IOUtils.closeQuietly(writer);
}
}
>>>>>>> YOURS
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
}
else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch (unit) {
case 0:
<<<<<<< MINE
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
=======
return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
//REVIEW is it intended that the first two branches of the switch statement do the same thing
switch (unit) {
case 0:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
>>>>>>> YOURS
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] result = new double[definitions.length];
int loopr = 0;
for (final InstrumentDefinition<?> definition : definitions) {
result[loopr++] = initialGuess(definition);
}
return result;
}
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
return 200;
}
}/**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.provider.curve;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import org.threeten.bp.Period;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurve;
import com.opengamma.analytics.financial.curve.inflation.generator.GeneratorPriceIndexCurveInterpolated;
import com.opengamma.analytics.financial.instrument.InstrumentDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttribute;
import com.opengamma.analytics.financial.instrument.index.GeneratorAttributeIR;
import com.opengamma.analytics.financial.instrument.index.GeneratorInstrument;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflation;
import com.opengamma.analytics.financial.instrument.index.GeneratorSwapFixedInflationMaster;
import com.opengamma.analytics.financial.instrument.index.IndexPrice;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedInflationZeroCouponDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedONDefinition;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivativeVisitor;
import com.opengamma.analytics.financial.interestrate.LastTimeCalculator;
import com.opengamma.analytics.financial.interestrate.annuity.derivative.Annuity;
import com.opengamma.analytics.financial.interestrate.payments.derivative.Payment;
import com.opengamma.analytics.financial.interestrate.swap.derivative.Swap;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.ParSpreadInflationMarketQuoteDiscountingCalculator;
import com.opengamma.analytics.financial.provider.calculator.inflation.PresentValueDiscountingInflationCalculator;
import com.opengamma.analytics.financial.provider.curve.inflation.InflationDiscountBuildingRepository;
import com.opengamma.analytics.financial.provider.description.MulticurveProviderDiscountDataSets;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderDiscount;
import com.opengamma.analytics.financial.provider.description.inflation.InflationProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderDiscount;
import com.opengamma.analytics.financial.provider.sensitivity.inflation.InflationSensitivity;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolatorFactory;
import com.opengamma.analytics.math.interpolation.Interpolator1D;
import com.opengamma.analytics.math.interpolation.Interpolator1DFactory;
import com.opengamma.financial.convention.calendar.Calendar;
import com.opengamma.financial.convention.calendar.MondayToFridayCalendar;
import com.opengamma.util.money.Currency;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.timeseries.DoubleTimeSeries;
import com.opengamma.util.timeseries.zoneddatetime.ArrayZonedDateTimeDoubleTimeSeries;
import com.opengamma.util.tuple.Pair;
/**
* Build of inflation curves in several blocks with relevant Jacobian matrices.
*/
public class InflationBuildingCurveSimpleTest {
private static final Interpolator1D INTERPOLATOR_LINEAR = CombinedInterpolatorExtrapolatorFactory.getInterpolator(Interpolator1DFactory.LINEAR, Interpolator1DFactory.FLAT_EXTRAPOLATOR,
Interpolator1DFactory.FLAT_EXTRAPOLATOR);
private static final LastTimeCalculator MATURITY_CALCULATOR = LastTimeCalculator.getInstance();
private static final double TOLERANCE_ROOT = 1.0E-10;
private static final int STEP_MAX = 100;
private static final Currency USD = Currency.USD;
private static final Calendar NYC = new MondayToFridayCalendar("NYC");
private static final double NOTIONAL = 1.0;
private static final GeneratorSwapFixedInflation GENERATOR_INFALTION_SWAP = GeneratorSwapFixedInflationMaster.getInstance().getGenerator("USCPI");
private static final IndexPrice US_CPI = GENERATOR_INFALTION_SWAP.getIndexPrice();
private static final ZonedDateTime NOW = DateUtils.getUTCDate(2011, 9, 28);
private static final ArrayZonedDateTimeDoubleTimeSeries TS_EMPTY = new ArrayZonedDateTimeDoubleTimeSeries();
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITH_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27),
DateUtils.getUTCDate(2011, 9, 28) }, new double[] {200, 200 });
private static final ArrayZonedDateTimeDoubleTimeSeries TS_PRICE_INDEX_USD_WITHOUT_TODAY = new ArrayZonedDateTimeDoubleTimeSeries(new ZonedDateTime[] {DateUtils.getUTCDate(2011, 9, 27) },
new double[] {100 });
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITH_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITH_TODAY };
@SuppressWarnings("rawtypes")
private static final DoubleTimeSeries[] TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY = new DoubleTimeSeries[] {TS_PRICE_INDEX_USD_WITHOUT_TODAY };
private static final String CURVE_NAME_CPI_USD = "USD CPI";
<<<<<<< MINE
/** Market values for the CPI USD curve */
public static final double[] CPI_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the CPI USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] CPI_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP,
GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP, GENERATOR_INFALTION_SWAP };
/** Tenors for the CPI USD curve */
public static final Period[] CPI_USD_TENOR = new Period[] {DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(6), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(8), DateUtils.periodOfYears(9), DateUtils.periodOfYears(10), DateUtils.periodOfYears(12), DateUtils.periodOfYears(15), DateUtils.periodOfYears(20),
DateUtils.periodOfYears(25), DateUtils.periodOfYears(30) };
public static final GeneratorAttributeIR[] CPI_USD_ATTR = new GeneratorAttributeIR[CPI_USD_TENOR.length];
=======
/** Market values for the dsc USD curve */
public static final double[] DSC_USD_MARKET_QUOTES = new double[] {0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400, 0.0400 };
/** Generators for the dsc USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] DSC_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_DEPOSIT_ON_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD,
GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD, GENERATOR_OIS_USD };
/** Tenors for the dsc USD curve */
public static final Period[] DSC_USD_TENOR = new Period[] {DateUtils.periodOfDays(0), DateUtils.periodOfMonths(1), DateUtils.periodOfMonths(2), DateUtils.periodOfMonths(3),
DateUtils.periodOfMonths(6), DateUtils.periodOfMonths(9), DateUtils.periodOfYears(1),
DateUtils.periodOfYears(2), DateUtils.periodOfYears(3), DateUtils.periodOfYears(4), DateUtils.periodOfYears(5), DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] DSC_USD_ATTR = new GeneratorAttributeIR[DSC_USD_TENOR.length];
>>>>>>> YOURS
static {
for (int loopins = 0; loopins < CPI_USD_TENOR.length; loopins++) {
CPI_USD_ATTR[loopins] = new GeneratorAttributeIR(CPI_USD_TENOR[loopins]);
}
}
<<<<<<< MINE
/** Standard USD CPI curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_CPI_USD;
=======
/** Market values for the Fwd 3M USD curve */
public static final double[] FWD3_USD_MARKET_QUOTES = new double[] {0.0420, 0.0420, 0.0420, 0.0430, 0.0470, 0.0540, 0.0570, 0.0600 };
/** Generators for the Fwd 3M USD curve */
public static final GeneratorInstrument<? extends GeneratorAttribute>[] FWD3_USD_GENERATORS = new GeneratorInstrument<?>[] {GENERATOR_USDLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M,
USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M, USD6MLIBOR3M };
/** Tenors for the Fwd 3M USD curve */
public static final Period[] FWD3_USD_TENOR = new Period[] {DateUtils.periodOfMonths(0), DateUtils.periodOfMonths(6), DateUtils.periodOfYears(1), DateUtils.periodOfYears(2),
DateUtils.periodOfYears(3), DateUtils.periodOfYears(5), DateUtils.periodOfYears(7),
DateUtils.periodOfYears(10) };
public static final GeneratorAttributeIR[] FWD3_USD_ATTR = new GeneratorAttributeIR[FWD3_USD_TENOR.length];
static {
for (int loopins = 0; loopins < FWD3_USD_TENOR.length; loopins++) {
FWD3_USD_ATTR[loopins] = new GeneratorAttributeIR(FWD3_USD_TENOR[loopins]);
}
}
/** Standard USD discounting curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_DSC_USD;
/** Standard USD Forward 3M curve instrument definitions */
public static final InstrumentDefinition<?>[] DEFINITIONS_FWD3_USD;
>>>>>>> YOURS
/** Units of curves */
public static final int[] NB_UNITS = new int[] {1 };
public static final int NB_BLOCKS = NB_UNITS.length;
public static final InstrumentDefinition<?>[][][][] DEFINITIONS_UNITS = new InstrumentDefinition<?>[NB_BLOCKS][][][];
public static final GeneratorPriceIndexCurve[][][] GENERATORS_UNITS = new GeneratorPriceIndexCurve[NB_BLOCKS][][];
public static final String[][][] NAMES_UNITS = new String[NB_BLOCKS][][];
public static final MulticurveProviderDiscount usMulticurveProviderDiscount = MulticurveProviderDiscountDataSets.createMulticurveEurUsd().copy();
public static final InflationProviderDiscount KNOWN_DATA = new InflationProviderDiscount(usMulticurveProviderDiscount);
public static final LinkedHashMap<String, IndexPrice[]> US_CPI_MAP = new LinkedHashMap<String, IndexPrice[]>();
static {
DEFINITIONS_CPI_USD = getDefinitions(CPI_USD_MARKET_QUOTES, CPI_USD_GENERATORS, CPI_USD_ATTR);
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
DEFINITIONS_UNITS[loopblock] = new InstrumentDefinition<?>[NB_UNITS[loopblock]][][];
GENERATORS_UNITS[loopblock] = new GeneratorPriceIndexCurve[NB_UNITS[loopblock]][];
NAMES_UNITS[loopblock] = new String[NB_UNITS[loopblock]][];
}
DEFINITIONS_UNITS[0][0] = new InstrumentDefinition<?>[][] {DEFINITIONS_CPI_USD };
final GeneratorPriceIndexCurve genIntLin = new GeneratorPriceIndexCurveInterpolated(MATURITY_CALCULATOR, INTERPOLATOR_LINEAR);
GENERATORS_UNITS[0][0] = new GeneratorPriceIndexCurve[] {genIntLin };
NAMES_UNITS[0][0] = new String[] {CURVE_NAME_CPI_USD };
US_CPI_MAP.put(CURVE_NAME_CPI_USD, new IndexPrice[] {US_CPI });
}
public static final String NOT_USED = "Not used";
public static final String[] NOT_USED_2 = {NOT_USED, NOT_USED };
public static InstrumentDefinition<?>[] getDefinitions(final double[] marketQuotes, final GeneratorInstrument[] generators, final GeneratorAttribute[] attribute) {
final InstrumentDefinition<?>[] definitions = new InstrumentDefinition<?>[marketQuotes.length];
for (int loopmv = 0; loopmv < marketQuotes.length; loopmv++) {
definitions[loopmv] = generators[loopmv].generateInstrument(NOW, marketQuotes[loopmv], NOTIONAL, attribute[loopmv]);
}
return definitions;
}
private static List<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>> CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK = new ArrayList<Pair<InflationProviderDiscount, CurveBuildingBlockBundle>>();
// Calculator
private static final PresentValueDiscountingInflationCalculator PVIC = PresentValueDiscountingInflationCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteDiscountingCalculator PSIMQC = ParSpreadInflationMarketQuoteDiscountingCalculator.getInstance();
private static final ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator PSIMQCSC = ParSpreadInflationMarketQuoteCurveSensitivityDiscountingCalculator.getInstance();
private static final InflationDiscountBuildingRepository CURVE_BUILDING_REPOSITORY = new InflationDiscountBuildingRepository(TOLERANCE_ROOT, TOLERANCE_ROOT, STEP_MAX);
private static final double TOLERANCE_CAL = 1.0E-9;
@BeforeSuite
static void initClass() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.add(makeCurvesFromDefinitions(DEFINITIONS_UNITS[loopblock], GENERATORS_UNITS[loopblock], NAMES_UNITS[loopblock], KNOWN_DATA, PSIMQC, PSIMQCSC, false));
}
}
@Test
public void curveConstructionGeneratorOtherBlocks() {
for (int loopblock = 0; loopblock < NB_BLOCKS; loopblock++) {
curveConstructionTest(DEFINITIONS_UNITS[loopblock], CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(loopblock).getFirst(), false, loopblock);
}
}
public void curveConstructionTest(final InstrumentDefinition<?>[][][] definitions, final InflationProviderDiscount curves, final boolean withToday, final int block) {
final int nbBlocks = definitions.length;
for (int loopblock = 0; loopblock < nbBlocks; loopblock++) {
final InstrumentDerivative[][] instruments = convert(definitions[loopblock], loopblock, withToday);
final double[][] pv = new double[instruments.length][];
for (int loopcurve = 0; loopcurve < instruments.length; loopcurve++) {
pv[loopcurve] = new double[instruments[loopcurve].length];
for (int loopins = 0; loopins < instruments[loopcurve].length; loopins++) {
pv[loopcurve][loopins] = curves.getFxRates().convert(instruments[loopcurve][loopins].accept(PVIC, curves), USD).getAmount();
assertEquals("Curve construction: block " + block + ", unit " + loopblock + " - instrument " + loopins, 0, pv[loopcurve][loopins], TOLERANCE_CAL);
}
}
}
}
/* @Test(enabled = true)
*//**
* Analyzes the shape of the curve.
*/
/*
public void forwardAnalysis() {
<<<<<<< MINE
final InflationProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW,2, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
try {
final FileWriter writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, US_CPI);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
=======
final MulticurveProviderInterface marketDsc = CURVES_PAR_SPREAD_MQ_WITHOUT_TODAY_BLOCK.get(0).getFirst();
final int jump = 1;
final int startIndex = 0;
final int nbDate = 2750;
ZonedDateTime startDate = ScheduleCalculator.getAdjustedDate(NOW, USDLIBOR3M.getSpotLag() + startIndex * jump, NYC);
final double[] rateDsc = new double[nbDate];
final double[] startTime = new double[nbDate];
FileWriter writer = null;
try {
writer = new FileWriter("fwd-dsc.csv");
for (int loopdate = 0; loopdate < nbDate; loopdate++) {
startTime[loopdate] = TimeCalculator.getTimeBetween(NOW, startDate);
final ZonedDateTime endDate = ScheduleCalculator.getAdjustedDate(startDate, USDLIBOR3M);
final double endTime = TimeCalculator.getTimeBetween(NOW, endDate);
final double accrualFactor = USDLIBOR3M.getDayCount().getDayCountFraction(startDate, endDate);
rateDsc[loopdate] = marketDsc.getForwardRate(USDLIBOR3M, startTime[loopdate], endTime, accrualFactor);
startDate = ScheduleCalculator.getAdjustedDate(startDate, jump, NYC);
writer.append(0.0 + "," + startTime[loopdate] + "," + rateDsc[loopdate] + "\n");
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
if (writer != null) {
IOUtils.closeQuietly(writer);
}
} finally {
if (writer != null) {
IOUtils.closeQuietly(writer);
}
}
>>>>>>> YOURS
}
writer.flush();
writer.close();
} catch (final IOException e) {
e.printStackTrace();
}
}*/
private static Pair<InflationProviderDiscount, CurveBuildingBlockBundle> makeCurvesFromDefinitions(final InstrumentDefinition<?>[][][] definitions,
final GeneratorPriceIndexCurve[][] curveGenerators,
final String[][] curveNames, final InflationProviderDiscount knownData, final InstrumentDerivativeVisitor<InflationProviderInterface, Double> calculator,
final InstrumentDerivativeVisitor<InflationProviderInterface, InflationSensitivity> sensitivityCalculator, final boolean withToday)
{
final int nbUnits = curveGenerators.length;
final double[][] parametersGuess = new double[nbUnits][];
final GeneratorPriceIndexCurve[][] generatorFinal = new GeneratorPriceIndexCurve[nbUnits][];
final InstrumentDerivative[][][] instruments = new InstrumentDerivative[nbUnits][][];
for (int loopunit = 0; loopunit < nbUnits; loopunit++) {
generatorFinal[loopunit] = new GeneratorPriceIndexCurve[curveGenerators[loopunit].length];
int nbInsUnit = 0;
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
nbInsUnit += definitions[loopunit][loopcurve].length;
}
parametersGuess[loopunit] = new double[nbInsUnit];
int startCurve = 0; // First parameter index of the curve in the unit.
instruments[loopunit] = convert(definitions[loopunit], loopunit, withToday);
for (int loopcurve = 0; loopcurve < curveGenerators[loopunit].length; loopcurve++) {
generatorFinal[loopunit][loopcurve] = curveGenerators[loopunit][loopcurve].finalGenerator(instruments[loopunit][loopcurve]);
final double[] guessCurve = generatorFinal[loopunit][loopcurve].initialGuess(initialGuess(definitions[loopunit][loopcurve]));
System.arraycopy(guessCurve, 0, parametersGuess[loopunit], startCurve, instruments[loopunit][loopcurve].length);
startCurve += instruments[loopunit][loopcurve].length;
}
}
return CURVE_BUILDING_REPOSITORY.makeCurvesFromDerivatives(instruments, generatorFinal, curveNames, parametersGuess, knownData, US_CPI_MAP, calculator,
sensitivityCalculator);
}
@SuppressWarnings("unchecked")
private static InstrumentDerivative[][] convert(final InstrumentDefinition<?>[][] definitions, final int unit, final boolean withToday) {
// int nbDef = 0;
// for (final InstrumentDefinition<?>[] definition : definitions) {
// nbDef += definition.length;
// }
final InstrumentDerivative[][] instruments = new InstrumentDerivative[definitions.length][];
for (int loopcurve = 0; loopcurve < definitions.length; loopcurve++) {
instruments[loopcurve] = new InstrumentDerivative[definitions[loopcurve].length];
int loopins = 0;
for (final InstrumentDefinition<?> instrument : definitions[loopcurve]) {
InstrumentDerivative ird;
if (instrument instanceof SwapFixedInflationZeroCouponDefinition) {
/* ird = ((SwapFixedInflationZeroCouponDefinition) instrument).toDerivative(NOW, getTSSwapFixedInflation(withToday, unit), NOT_USED_2);*/
final Annuity<? extends Payment> ird1 = ((SwapFixedInflationZeroCouponDefinition) instrument).getFirstLeg().toDerivative(NOW, NOT_USED_2);
final Annuity<? extends Payment> ird2 = ((SwapFixedInflationZeroCouponDefinition) instrument).getSecondLeg().toDerivative(NOW, TS_PRICE_INDEX_USD_WITH_TODAY, NOT_USED_2);
ird = new Swap(ird1, ird2);
}
else {
ird = instrument.toDerivative(NOW, NOT_USED_2);
}
instruments[loopcurve][loopins++] = ird;
}
}
return instruments;
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedInflation(final Boolean withToday, final Integer unit) {
switch (unit) {
case 0:
<<<<<<< MINE
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
=======
return withToday ? TS_FIXED_OIS_USD_WITH_TODAY : TS_FIXED_OIS_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
@SuppressWarnings("rawtypes")
private static DoubleTimeSeries[] getTSSwapFixedIbor(final Boolean withToday, final Integer unit) {
//REVIEW is it intended that the first two branches of the switch statement do the same thing
switch (unit) {
case 0:
return withToday ? TS_FIXED_IBOR_USD3M_WITH_TODAY : TS_FIXED_IBOR_USD3M_WITHOUT_TODAY;
>>>>>>> YOURS
case 1:
return withToday ? TS_FIXED_PRICE_INDEX_USD_WITH_TODAY : TS_FIXED_PRICE_INDEX_USD_WITHOUT_TODAY;
default:
throw new IllegalArgumentException(unit.toString());
}
}
private static double[] initialGuess(final InstrumentDefinition<?>[] definitions) {
final double[] result = new double[definitions.length];
int loopr = 0;
for (final InstrumentDefinition<?> definition : definitions) {
result[loopr++] = initialGuess(definition);
}
return result;
}
private static double initialGuess(final InstrumentDefinition<?> instrument) {
if (instrument instanceof SwapFixedONDefinition) {
return ((SwapFixedONDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof SwapFixedIborDefinition) {
return ((SwapFixedIborDefinition) instrument).getFixedLeg().getNthPayment(0).getRate();
}
if (instrument instanceof ForwardRateAgreementDefinition) {
return ((ForwardRateAgreementDefinition) instrument).getRate();
}
if (instrument instanceof CashDefinition) {
return ((CashDefinition) instrument).getRate();
}
return 200;
}
}
Diff Result
No diff
Case 77 - java_ogplatform.rev_ec428_07d48..RateLimitingMergingViewProcessListenerTest.java
Base
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValue("value" + value, value));
return deltaResult;
}
private ComputedValue getComputedValue(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValue(new ValueSpecification(valueRequirement, "FunctionId"), value);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValue("value" + value, value));
return deltaResult;
}
private ComputedValue getComputedValue(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValue(new ValueSpecification(valueRequirement, "FunctionId"), value);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
Left
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener for ~1 second, then asserts the shortest observed delay between
// forwarded results lies within [period - 10%, period * 2].
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
/** Feeds {@code count} synthetic computation cycles (no delta models) into the listener. */
private void addResults(ViewResultListener listener, int count) {
    int remaining = count;
    while (remaining-- > 0) {
        listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
    }
}
/** Signals a mock view-definition compilation to the listener. */
private void addCompile(ViewResultListener listener) {
    final CompiledViewDefinitionWithGraphsImpl compiled = mock(CompiledViewDefinitionWithGraphsImpl.class);
    listener.viewDefinitionCompiled(compiled, true);
}
}
Right
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
// While paused, incoming results must be held back rather than forwarded.
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
// Unpausing releases the held results as a single merged cycle.
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Unpausing when already unpaused is a no-op; results pass straight through again.
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
// A second termination must not be forwarded again (assertNoCalls above still holds).
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// With a 500ms minimum update period, 1000 rapid results should collapse into one forwarded cycle.
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// The minimum update period can be changed on the fly; verify the observed rate tracks each new setting.
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
// Two delta-bearing cycles: their deltas should be merged into one result on release.
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
// On release the calls must replay in order: compilations, one merged cycle, compilation, completion, termination.
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
// The merged delta should contain exactly the union of the two per-cycle deltas.
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
/** Builds a single-entry delta model whose value name and payload both encode {@code value}. */
private ViewDeltaResultModel getDeltaResult(int value) {
    final InMemoryViewDeltaResultModel delta = new InMemoryViewDeltaResultModel();
    delta.addValue("DEFAULT", getComputedValue("value" + value, value));
    return delta;
}
/** Wraps {@code value} in a ComputedValue whose specification targets a primitive named after the value. */
private ComputedValue getComputedValue(String valueName, Object value) {
    final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
    final ValueProperties properties = ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get();
    return new ComputedValue(new ValueSpecification(valueName, target, properties), value);
}
// Sets the listener's minimum update period, checks the observed rate honours it,
// then verifies that pausing holds all updates back and unpausing restores the rate.
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
// A single merged cycle should be released shortly after unpausing.
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener for ~1 second, then asserts the shortest observed delay between
// forwarded results lies within [period - 10%, period * 2].
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
/** Feeds {@code count} synthetic computation cycles (no delta models) into the listener. */
private void addResults(ViewResultListener listener, int count) {
    int remaining = count;
    while (remaining-- > 0) {
        listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
    }
}
/** Signals a mock view-definition compilation to the listener. */
private void addCompile(ViewResultListener listener) {
    final CompiledViewDefinitionWithGraphsImpl compiled = mock(CompiledViewDefinitionWithGraphsImpl.class);
    listener.viewDefinitionCompiled(compiled, true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
// While paused, incoming results must be held back rather than forwarded.
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
// Unpausing releases the held results as a single merged cycle.
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Unpausing when already unpaused is a no-op; results pass straight through again.
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
// A second termination must not be forwarded again (assertNoCalls above still holds).
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// With a 500ms minimum update period, 1000 rapid results should collapse into one forwarded cycle.
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// The minimum update period can be changed on the fly; verify the observed rate tracks each new setting.
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
// Two delta-bearing cycles: their deltas should be merged into one result on release.
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
// On release the calls must replay in order: compilations, one merged cycle, compilation, completion, termination.
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
// The merged delta should contain exactly the union of the two per-cycle deltas.
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
/** Builds a single-entry delta model whose value name and payload both encode {@code value}. */
private ViewDeltaResultModel getDeltaResult(int value) {
    final InMemoryViewDeltaResultModel delta = new InMemoryViewDeltaResultModel();
    delta.addValue("DEFAULT", getComputedValue("value" + value, value));
    return delta;
}
/** Wraps {@code value} in a ComputedValue whose specification targets a primitive named after the value. */
private ComputedValue getComputedValue(String valueName, Object value) {
    final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
    final ValueProperties properties = ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get();
    return new ComputedValue(new ValueSpecification(valueName, target, properties), value);
}
// Sets the listener's minimum update period, checks the observed rate honours it,
// then verifies that pausing holds all updates back and unpausing restores the rate.
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
// A single merged cycle should be released shortly after unpausing.
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener for ~1 second, then asserts the shortest observed delay between
// forwarded results lies within [period - 10%, period * 2].
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
/** Feeds {@code count} synthetic computation cycles (no delta models) into the listener. */
private void addResults(ViewResultListener listener, int count) {
    int remaining = count;
    while (remaining-- > 0) {
        listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
    }
}
/** Signals a mock view-definition compilation to the listener. */
private void addCompile(ViewResultListener listener) {
    final CompiledViewDefinitionWithGraphsImpl compiled = mock(CompiledViewDefinitionWithGraphsImpl.class);
    listener.viewDefinitionCompiled(compiled, true);
}
}
MergeMethods
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
// While paused, incoming results must be held back rather than forwarded.
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
// Unpausing releases the held results as a single merged cycle.
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Unpausing when already unpaused is a no-op; results pass straight through again.
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
// A second termination must not be forwarded again (assertNoCalls above still holds).
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// With a 500ms minimum update period, 1000 rapid results should collapse into one forwarded cycle.
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// The minimum update period can be changed on the fly; verify the observed rate tracks each new setting.
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
// Two delta-bearing cycles: their deltas should be merged into one result on release.
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
// On release the calls must replay in order: compilations, one merged cycle, compilation, completion, termination.
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
// The merged delta should contain exactly the union of the two per-cycle deltas.
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
/** Builds a single-entry delta model whose value name and payload both encode {@code value}. */
private ViewDeltaResultModel getDeltaResult(int value) {
    final InMemoryViewDeltaResultModel delta = new InMemoryViewDeltaResultModel();
    delta.addValue("DEFAULT", getComputedValueResult("value" + value, value));
    return delta;
}
/**
 * Wraps the given value in a {@link ComputedValueResult} whose specification targets a
 * primitive identified by the value name, with an empty execution log.
 *
 * @param valueName the value name, also used to build the target's unique id
 * @param value the computed value payload
 * @return the computed value result
 */
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
    final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
    // Bug fix: the original returned `new ComputedValue(...)`, which neither matches the
    // declared ComputedValueResult return type nor is imported in this file; construct
    // a ComputedValueResult (as the callers and imports expect) with ExecutionLog.EMPTY.
    return new ComputedValueResult(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value, ExecutionLog.EMPTY);
}
// Sets the listener's minimum update period, checks the observed rate honours it,
// then verifies that pausing holds all updates back and unpausing restores the rate.
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
// A single merged cycle should be released shortly after unpausing.
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener for ~1 second, then asserts the shortest observed delay between
// forwarded results lies within [period - 10%, period * 2].
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep(2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
/** Feeds {@code count} synthetic computation cycles (no delta models) into the listener. */
private void addResults(ViewResultListener listener, int count) {
    int remaining = count;
    while (remaining-- > 0) {
        listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
    }
}
/** Signals a mock view-definition compilation to the listener. */
private void addCompile(ViewResultListener listener) {
    final CompiledViewDefinitionWithGraphsImpl compiled = mock(CompiledViewDefinitionWithGraphsImpl.class);
    listener.viewDefinitionCompiled(compiled, true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
// While paused, incoming results must be held back rather than forwarded.
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
// Unpausing releases the held results as a single merged cycle.
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Unpausing when already unpaused is a no-op; results pass straight through again.
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
// A second termination must not be forwarded again (assertNoCalls above still holds).
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// With a 500ms minimum update period, 1000 rapid results should collapse into one forwarded cycle.
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// The minimum update period can be changed on the fly; verify the observed rate tracks each new setting.
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
// Two delta-bearing cycles: their deltas should be merged into one result on release.
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
// On release the calls must replay in order: compilations, one merged cycle, compilation, completion, termination.
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
// The merged delta should contain exactly the union of the two per-cycle deltas.
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
    // Builds a single-entry delta model whose value name ("value<N>") and value
    // both derive from the supplied integer.
    final InMemoryViewDeltaResultModel delta = new InMemoryViewDeltaResultModel();
    final ComputedValueResult entry = getComputedValueResult("value" + value, value);
    delta.addValue("DEFAULT", entry);
    return delta;
}
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
    // Bug fix: this method is declared to return ComputedValueResult but previously
    // returned a plain ComputedValue, which does not satisfy the declared return type.
    // Wrap the specification/value pair in a ComputedValueResult with an empty
    // execution log, consistent with the other resolutions of this method in this file.
    final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
    final ValueSpecification spec = new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get());
    return new ComputedValueResult(spec, value, ExecutionLog.EMPTY);
}
// Asserts that the listener delivers results at the requested rate, that pausing
// suppresses and merges all updates, and that the rate is honoured again after
// resuming. Timing-sensitive: relies on real Thread.sleep timing.
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
// Allow up to two periods for the merged update to flush through after resuming.
Thread.sleep(2 * period);
// All 30 results merged while paused should arrive as a single cycle-completed call.
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener with results for ~1 second and checks the observed delivery
// rate: never faster than the configured period (minus 10% tolerance), never slower
// than twice the period. Timing-sensitive.
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep(2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
// Drain any remaining queued calls so the next assertion phase starts clean.
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
    // Fires 'count' cycle-completed notifications, each with a fresh mocked full
    // result and no delta.
    int remaining = count;
    while (remaining-- > 0) {
        listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
    }
}
private void addCompile(ViewResultListener listener) {
    // Signals a (mocked) successful view definition compilation to the listener.
    final CompiledViewDefinitionWithGraphsImpl compiled = mock(CompiledViewDefinitionWithGraphsImpl.class);
    listener.viewDefinitionCompiled(compiled, true);
}
}
// NOTE(review): merge-tool resolution marker "KeepBothMethods" was left behind here as a
// bare token, which is not valid Java; commented out. This file also contains duplicated
// copies of the same class, which must be deduplicated before it can compile.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.ComputationTargetType;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep(2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep(2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
// NOTE(review): merge-tool resolution marker "Safe" left behind as a bare token; commented
// out — not valid Java.
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
<<<<<<< MINE
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
}
=======
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
>>>>>>> YOURS
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
// Floods the listener with results for ~1 second and checks the observed delivery rate:
// never faster than the configured period (minus 10% tolerance), never slower than twice
// the period. Fix: normalised the `Thread.sleep (...)` / `assertTrue (...)` spacing to
// match the `call(...)` convention used by every other copy of this method in the file.
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep(2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
<<<<<<< MINE
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
}
=======
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
}
>>>>>>> YOURS
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
Unstructured
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
<<<<<<< MINE
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueRequirement;
=======
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
>>>>>>> YOURS
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
<<<<<<< MINE
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
=======
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
>>>>>>> YOURS
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.client.merging;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.test.TestViewResultListener;
<<<<<<< MINE
import com.opengamma.engine.value.ComputedValueResult;
import com.opengamma.engine.value.ValueRequirement;
=======
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValuePropertyNames;
>>>>>>> YOURS
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ExecutionLog;
import com.opengamma.engine.view.InMemoryViewDeltaResultModel;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDeltaResultModel;
import com.opengamma.engine.view.ViewResultEntry;
import com.opengamma.engine.view.calc.EngineResourceManagerImpl;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.listener.ViewResultListener;
import com.opengamma.id.UniqueId;
import com.opengamma.util.test.Timeout;
import com.opengamma.util.tuple.Pair;
/**
* Tests RateLimitingMergingUpdateProvider
*/
@Test
public class RateLimitingMergingViewProcessListenerTest {
private static final Logger s_logger = LoggerFactory.getLogger(RateLimitingMergingViewProcessListenerTest.class);
@Test
public void testPassThrough() {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
// OK, it doesn't really test the 'synchronous' bit, but it at least checks that no merging has happened.
addCompile(mergingListener);
addResults(mergingListener, 1000);
testListener.assertViewDefinitionCompiled();
testListener.assertMultipleCycleCompleted(1000);
testListener.assertNoCalls();
mergingListener.setPaused(true);
addResults(mergingListener, 1000);
testListener.assertNoCalls();
mergingListener.setPaused(false);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.setPaused(false);
addResults(mergingListener, 1000);
testListener.assertMultipleCycleCompleted(1000);
mergingListener.processTerminated(false);
testListener.assertProcessTerminated();
testListener.assertNoCalls();
mergingListener.processTerminated(false);
}
@Test
public void testMergingWhenRateLimiting() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setMinimumUpdatePeriodMillis(500);
addResults(mergingListener, 1000);
Thread.sleep(500);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
mergingListener.terminate();
}
@Test
public void testModifiableUpdatePeriod() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
assertCorrectUpdateRate(mergingListener, testListener, 100);
assertCorrectUpdateRate(mergingListener, testListener, 400);
assertCorrectUpdateRate(mergingListener, testListener, 50);
mergingListener.terminate();
}
@Test
public void testCallOrderingAndCollapsing() throws InterruptedException {
TestViewResultListener testListener = new TestViewResultListener();
RateLimitingMergingViewProcessListener mergingListener = new RateLimitingMergingViewProcessListener(testListener, mock(EngineResourceManagerImpl.class), new Timer("Custom timer"));
mergingListener.setPaused(true);
testListener.assertNoCalls();
// Begin sequence while paused
addCompile(mergingListener);
addResults(mergingListener, 10);
CompiledViewDefinitionWithGraphsImpl preCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(preCompilation, true);
addResults(mergingListener, 10);
mergingListener.cycleCompleted(mock(ViewComputationResultModel.class), getDeltaResult(1));
ViewComputationResultModel latestResult = mock(ViewComputationResultModel.class);
mergingListener.cycleCompleted(latestResult, getDeltaResult(2));
CompiledViewDefinitionWithGraphsImpl postCompilation = mock(CompiledViewDefinitionWithGraphsImpl.class);
mergingListener.viewDefinitionCompiled(postCompilation, true);
mergingListener.processCompleted();
mergingListener.processTerminated(false);
// End of sequence while paused
mergingListener.setPaused(false);
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis());
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), preCompilation);
ViewDeltaResultModel mergedDelta = testListener.getCycleCompleted(Timeout.standardTimeoutMillis()).getDeltaResult();
assertEquals(2, mergedDelta.getAllResults().size());
Set<Pair<String, Integer>> results = new HashSet<Pair<String, Integer>>();
for (ViewResultEntry deltaItem : mergedDelta.getAllResults()) {
results.add(Pair.of(deltaItem.getComputedValue().getSpecification().getValueName(), (Integer) deltaItem.getComputedValue().getValue()));
}
assertTrue(results.contains(Pair.of("value1", 1)));
assertTrue(results.contains(Pair.of("value2", 2)));
testListener.assertViewDefinitionCompiled(Timeout.standardTimeoutMillis(), postCompilation);
testListener.assertProcessCompleted();
testListener.assertProcessTerminated();
testListener.assertNoCalls();
}
private ViewDeltaResultModel getDeltaResult(int value) {
InMemoryViewDeltaResultModel deltaResult = new InMemoryViewDeltaResultModel();
deltaResult.addValue("DEFAULT", getComputedValueResult("value" + value, value));
return deltaResult;
}
<<<<<<< MINE
private ComputedValueResult getComputedValueResult(String valueName, Object value) {
UniqueId uniqueId = UniqueId.of("Scheme", valueName);
ValueRequirement valueRequirement = new ValueRequirement(valueName, ComputationTargetType.PRIMITIVE, uniqueId);
return new ComputedValueResult(new ValueSpecification(valueRequirement, "FunctionId"), value, ExecutionLog.EMPTY);
=======
private ComputedValue getComputedValue(String valueName, Object value) {
final ComputationTargetSpecification target = ComputationTargetSpecification.of(UniqueId.of("Scheme", valueName));
return new ComputedValue(new ValueSpecification(valueName, target, ValueProperties.with(ValuePropertyNames.FUNCTION, "FunctionId").get()), value);
>>>>>>> YOURS
}
private void assertCorrectUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
mergingListener.setMinimumUpdatePeriodMillis(period);
assertUpdateRate(mergingListener, testListener, period);
// If the provider is paused then all updates should be merged regardless of the time elapsed or the rate
mergingListener.setPaused(true);
for (int i = 0; i < 3; i++) {
addResults(mergingListener, 10);
Thread.sleep(period);
}
testListener.assertNoCalls();
mergingListener.setPaused(false);
Thread.sleep(2 * period);
testListener.assertCycleCompleted();
testListener.assertNoCalls();
// Once unpaused, everything should be back to normal
assertUpdateRate(mergingListener, testListener, period);
}
private void assertUpdateRate(RateLimitingMergingViewProcessListener mergingListener, TestViewResultListener testListener, int period) throws InterruptedException {
testListener.resetShortestDelay();
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
addResults(mergingListener, 10);
}
// Wait a couple of periods for any stragglers
Thread.sleep (2 * period);
// Check that the results didn't come any faster than we asked for (give or take 10%), and not too slowly (allow up to twice)
assertTrue ("Expecting results no faster than " + period + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() >= (period - period / 10));
assertTrue ("Expecting results no slower than " + (period * 2) + " ms, but got a result after " + testListener.getShortestDelay() + " ms", testListener.getShortestDelay() <= (period * 2));
s_logger.info("Size = {}", testListener.getQueueSize());
testListener.clear();
}
private void addResults(ViewResultListener listener, int count) {
for (int i = 0; i < count; i++) {
listener.cycleCompleted(mock(ViewComputationResultModel.class), null);
}
}
private void addCompile(ViewResultListener listener) {
listener.viewDefinitionCompiled(mock(CompiledViewDefinitionWithGraphsImpl.class), true);
}
}
Diff Result
No diff
Case 78 - java_ogplatform.rev_fd0a3_f9806..View.java
Base
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
checkIsEntitledToResults(resultListener.getUser());
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
checkIsEntitledToResults(deltaListener.getUser());
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions(getViewEvaluationModel().getAllLiveDataRequirements());
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions(Set<ValueRequirement> liveDataRequirements) {
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement requirement) {
Set<ValueRequirement> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
if (recalcJob != null && liveDataRequirements.contains(requirement)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
/**
 * Returns the portfolio this view operates on, or null if the view has not
 * been initialized yet.
 *
 * @return the portfolio, or null before initialization
 */
public Portfolio getPortfolio() {
  final ViewEvaluationModel model = getViewEvaluationModel();
  return (model == null) ? null : model.getPortfolio();
}
/**
 * Returns the root node of the view's portfolio, or null if the view has not
 * been initialized yet.
 *
 * @return the portfolio root node, or null before initialization
 */
public PortfolioNode getPositionRoot() {
  final ViewEvaluationModel model = getViewEvaluationModel();
  if (model == null) {
    return null;
  }
  return model.getPortfolio().getRootNode();
}
/**
 * Invoked after each computation cycle completes. Stores the new result as the
 * most recent one, notifies all full-result listeners, and — if any delta
 * listeners are registered and a previous result exists — computes and
 * publishes a delta model.
 * <p>
 * Synchronized: runs on (and blocks) the recalc thread; see REVIEW note below.
 *
 * @param result the result of the cycle that just finished, not null
 */
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
  // REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
  // of some kind. It's synchronized and blocks the recalc thread, so a slow
  // callback implementation (or just the cost of computing the delta model) will
  // be an unnecessary burden. Have to factor in some type of win there.
  s_logger.debug("Recalculation Performed called.");
  // We swap these first so that in the callback the view is consistent.
  ViewComputationResultModelImpl previousResult = _mostRecentResult;
  _mostRecentResult = result;
  for (ComputationResultListener resultListener : _resultListeners) {
    resultListener.computationResultAvailable(result);
  }
  // Delta computation is skipped entirely when nobody is listening for it.
  if (!_deltaListeners.isEmpty() && (previousResult != null)) {
    ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
    for (DeltaComputationResultListener deltaListener : _deltaListeners) {
      deltaListener.deltaResultAvailable(deltaModel);
    }
  }
}
/**
 * Builds a delta model describing how {@code result} differs from
 * {@code previousResult}, delegating per-target comparison to the overload.
 *
 * @param previousResult the previous cycle's result, not null
 * @param result the latest cycle's result, not null
 * @return the populated delta model
 */
private ViewDeltaResultModel computeDeltaModel(
    ViewComputationResultModelImpl previousResult,
    ViewComputationResultModelImpl result) {
  final ViewDeltaResultModelImpl delta = new ViewDeltaResultModelImpl();
  delta.setValuationTime(result.getValuationTime());
  delta.setResultTimestamp(result.getResultTimestamp());
  delta.setPreviousResultTimestamp(previousResult.getResultTimestamp());
  delta.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
  for (final ComputationTargetSpecification target : result.getAllTargets()) {
    computeDeltaModel(delta, target, previousResult, result);
  }
  return delta;
}
/**
 * Computes delta entries for a single target across every calculation
 * configuration present in the latest result.
 *
 * @param deltaModel the model being populated, not null
 * @param targetSpec the target to compare, not null
 * @param previousResult the previous cycle's result, not null
 * @param result the latest cycle's result, not null
 */
private void computeDeltaModel(
    ViewDeltaResultModelImpl deltaModel,
    ComputationTargetSpecification targetSpec,
    ViewComputationResultModelImpl previousResult,
    ViewComputationResultModelImpl result) {
  for (final String configName : result.getCalculationConfigurationNames()) {
    final ViewCalculationResultModel latest = result.getCalculationResult(configName);
    final ViewCalculationResultModel previous = previousResult.getCalculationResult(configName);
    computeDeltaModel(deltaModel, targetSpec, configName, previous, latest);
  }
}
/**
 * Computes delta entries for a single target under a single calculation
 * configuration. If there is no previous state to compare against (new
 * configuration or new target), every latest value is reported as a delta;
 * otherwise each value is compared using the configuration's DeltaDefinition.
 *
 * @param deltaModel the model being populated, not null
 * @param targetSpec the target to compare, not null
 * @param calcConfigName the calculation configuration name, not null
 * @param previousCalcModel the previous per-configuration results, may be null
 * @param resultCalcModel the latest per-configuration results, not null
 */
private void computeDeltaModel(
    ViewDeltaResultModelImpl deltaModel,
    ComputationTargetSpecification targetSpec,
    String calcConfigName,
    ViewCalculationResultModel previousCalcModel,
    ViewCalculationResultModel resultCalcModel) {
  Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
  if (previousCalcModel == null) {
    // Everything is new/delta because this is a new calculation context.
    addAllAsDelta(deltaModel, calcConfigName, resultValues);
    return;
  }
  Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
  if (previousValues == null) {
    // Everything is new/delta because this is a new target.
    addAllAsDelta(deltaModel, calcConfigName, resultValues);
    return;
  }
  // Have to delta each value individually.
  DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
  for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
    ComputedValue resultValue = resultEntry.getValue();
    ComputedValue previousValue = previousValues.get(resultEntry.getKey());
    // REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
    // computation cycle, not the value that we last emitted as a delta. It is therefore important that the
    // DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
    // values after truncation to the required decimal place, rather than testing whether the difference of the
    // full values is greater than some threshold; this way, there will always be a point beyond which a change
    // is detected, even in the event of gradual creep.
    if (deltaDefinition.isDelta(previousValue, resultValue)) {
      deltaModel.addValue(calcConfigName, resultValue);
    }
  }
}

/**
 * Adds every value to the delta model unconditionally; used when there is no
 * previous state to compare against. Extracted from two identical loops.
 */
private static void addAllAsDelta(ViewDeltaResultModelImpl deltaModel, String calcConfigName, Map<String, ComputedValue> values) {
  for (ComputedValue value : values.values()) {
    deltaModel.addValue(calcConfigName, value);
  }
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/**
 * Spring {@code Lifecycle} hook: reports whether the recalculation loop is
 * currently running.
 *
 * @return true if the calculation state is RUNNING
 */
@Override
public synchronized boolean isRunning() {
  return getCalculationState() == ViewCalculationState.RUNNING;
}
/**
 * Reports whether any result or delta listeners are registered.
 *
 * @return true if at least one listener of either kind is registered
 */
public boolean hasListeners() {
  if (!_resultListeners.isEmpty()) {
    return true;
  }
  return !_deltaListeners.isEmpty();
}
/**
 * Reports whether computation cycles should populate a result model.
 *
 * @return true if the result model is populated after each cycle
 */
public boolean isPopulateResultModel() {
  return _populateResultModel;
}
/**
 * Controls whether computation cycles populate a result model.
 *
 * @param populateResultModel true to populate the result model each cycle
 */
public void setPopulateResultModel(boolean populateResultModel) {
  _populateResultModel = populateResultModel;
}
/**
 * Runs a single computation cycle using a fresh live data snapshot taken now.
 */
public synchronized void runOneCycle() {
  final LiveDataSnapshotProvider snapshots = getProcessingContext().getLiveDataSnapshotProvider();
  final long snapshotTime = snapshots.snapshot();
  runOneCycle(snapshotTime);
}
/**
 * Runs a single computation cycle for the given valuation time: prepares
 * inputs, executes the plans and, if enabled, populates and publishes the
 * result model.
 *
 * @param valuationTime the valuation time for the cycle
 */
public synchronized void runOneCycle(long valuationTime) {
  SingleComputationCycle cycle = createCycle(valuationTime);
  try {
    cycle.prepareInputs();
    cycle.executePlans();
    if (isPopulateResultModel()) {
      cycle.populateResultModel();
      recalculationPerformed(cycle.getResultModel());
    }
  } finally {
    // Previously resources leaked if any step above threw.
    cycle.releaseResources();
  }
}
/**
 * Spring {@code Lifecycle} hook: starts the background recalculation job.
 * Legal only from NOT_STARTED or TERMINATED; every other state throws an
 * {@link IllegalStateException} describing why starting is impossible.
 * The recalc thread is started only after the state is set to RUNNING.
 */
@Override
public synchronized void start() {
  s_logger.info("Starting...");
  switch(getCalculationState()) {
    case NOT_STARTED:
    case TERMINATED:
      // Normal state of play. Continue as normal.
      break;
    case TERMINATING:
      // In the middle of termination. This is really bad, as we're now holding the lock
      // that will allow termination to complete successfully. Therefore, we have to throw
      // an exception rather than just waiting or something.
      throw new IllegalStateException("Instructed to start while still terminating.");
    case INITIALIZING:
      // Must have thrown an exception in initialization. Can't start.
      // NOTE(review): message contains a typo ("didn't completely successfully").
      throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
    case NOT_INITIALIZED:
      throw new IllegalStateException("Must call init() before starting.");
    case STARTING:
      // Must have thrown an exception when start() called previously.
      throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
    case RUNNING:
      throw new IllegalStateException("Already running.");
  }
  setCalculationState(ViewCalculationState.STARTING);
  ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
  Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
  setRecalcJob(recalcJob);
  setRecalculationThread(recalcThread);
  setCalculationState(ViewCalculationState.RUNNING);
  recalcThread.start();
  s_logger.info("Started.");
}
/**
 * Spring {@code Lifecycle} hook: stops the background recalculation job.
 * Validates the state, marks the view TERMINATING, terminates the recalc job,
 * interrupts its thread if it is sleeping between cycles, then joins it with a
 * timeout before marking the view TERMINATED.
 * <p>
 * NOTE(review): the state check, the TERMINATING transition and the final
 * TERMINATED transition are three separate synchronized blocks, so another
 * thread can change state between them — confirm this check-then-act window
 * is acceptable.
 */
@Override
public void stop() {
  s_logger.info("Stopping.....");
  synchronized (this) {
    switch(getCalculationState()) {
      case STARTING:
        // Something went horribly wrong during start, and it must have thrown an exception.
        s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
        break;
      case RUNNING:
        // This is the normal state of play. Do nothing.
        break;
      default:
        throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
    }
  }
  assert getRecalcJob() != null;
  assert getRecalculationThread() != null;
  synchronized (this) {
    // Idempotence guard: a concurrent or repeated stop() becomes a no-op.
    if ((getCalculationState() == ViewCalculationState.TERMINATED)
        || (getCalculationState() == ViewCalculationState.TERMINATING)) {
      s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
      return;
    }
    setCalculationState(ViewCalculationState.TERMINATING);
  }
  getRecalcJob().terminate();
  if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
    // In this case it might be waiting on a recalculation pass. Interrupt it.
    getRecalculationThread().interrupt();
  }
  // TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
  // how long the job is currently taking to cycle.
  long timeout = 100 * 1000L;
  boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
  if (!successful) {
    s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
  }
  synchronized (this) {
    setCalculationState(ViewCalculationState.TERMINATED);
    setRecalcJob(null);
    setRecalculationThread(null);
  }
  s_logger.info("Stopped.");
}
/**
 * Reading the static contents of a view, modifying the view, etc., can
 * sometimes be performed even by users who are not entitled to view the
 * results of the view. This is the non-throwing form of
 * {@code checkIsEntitledToAccess}.
 *
 * @param user User who is requesting access
 * @return true if the user should be able to view the static contents of the
 *         view, false otherwise
 */
public boolean isEntitledToAccess(UserPrincipal user) {
  try {
    checkIsEntitledToAccess(user);
  } catch (ViewAccessException e) {
    return false;
  }
  return true;
}
/**
 * Reading the static contents of a view, modifying the view,
 * etc., can sometimes be performed even by users
 * who are not entitled to view the results of the view.
 * <p>
 * NOTE(review): unimplemented — as written, no exception is ever thrown, so
 * every user is granted access.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
public void checkIsEntitledToAccess(UserPrincipal user) {
  // not done yet
}
/**
 * A user is entitled to view the computation results produced by a view only
 * if they are entitled to every market data line required to compute the
 * results of the view. This is the non-throwing form of
 * {@code checkIsEntitledToResults}.
 *
 * @param user User who is requesting access
 * @return true if the user should be able to view the computation results
 *         produced by the view, false otherwise
 */
public boolean isEntitledToResults(UserPrincipal user) {
  try {
    checkIsEntitledToResults(user);
  } catch (ViewAccessException e) {
    return false;
  }
  return true;
}
/**
 * A user is entitled to view the computation results produced
 * by a view only if they are entitled to every market data
 * line required to compute the results of the view.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled; the message names
 *         the first market data line the user lacks permission for
 */
public void checkIsEntitledToResults(UserPrincipal user) {
  Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
  Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
      requiredValues,
      getProcessingContext().getSecuritySource());
  s_logger.info("Checking that {} is entitled to the results of {}", user, this);
  Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
  ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
  for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
    // Idiomatic boolean test instead of "== false".
    if (!entry.getValue().booleanValue()) {
      failures.add(entry.getKey());
    }
  }
  if (!failures.isEmpty()) {
    throw new ViewAccessException(user + " is not entitled to " + this +
        " because they do not have permissions to " + failures.get(0));
  }
}
/**
 * Returns a short human-readable identifier containing the view's name.
 */
@Override
public String toString() {
  final StringBuilder buf = new StringBuilder("View[");
  buf.append(getDefinition().getName()).append(']');
  return buf.toString();
}
/**
 * Creates a new single computation cycle over this view.
 *
 * @param valuationTime the valuation time for the cycle
 * @return a freshly constructed cycle, never null
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
 * The base implementation of the {@link View} interface.
 * <p>
 * A view binds a {@link ViewDefinition} to a {@link ViewProcessingContext},
 * manages the computation lifecycle (init / start / stop / single cycles) and
 * fans full and delta computation results out to registered listeners.
 */
public class View implements Lifecycle, LiveDataSnapshotListener {
  private static final Logger s_logger = LoggerFactory.getLogger(View.class);
  // Injected dependencies:
  private final ViewDefinition _definition;
  private final ViewProcessingContext _processingContext;
  // Internal State:
  private ViewEvaluationModel _viewEvaluationModel;
  private Thread _recalculationThread;
  private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
  private ViewRecalculationJob _recalcJob;
  private ViewComputationResultModelImpl _mostRecentResult;
  private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
  private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
  private volatile boolean _populateResultModel = true;

  /**
   * Creates a view.
   *
   * @param definition the view definition, not null
   * @param processingContext the processing context, not null
   */
  public View(ViewDefinition definition, ViewProcessingContext processingContext) {
    if (definition == null) {
      throw new NullPointerException("Must provide a definition.");
    }
    if (processingContext == null) {
      throw new NullPointerException("Must provide a processing context.");
    }
    _definition = definition;
    _processingContext = processingContext;
  }

  /**
   * @return the definition
   */
  public ViewDefinition getDefinition() {
    return _definition;
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return _processingContext;
  }

  /**
   * @return the recalculationThread
   */
  public Thread getRecalculationThread() {
    return _recalculationThread;
  }

  /**
   * @param recalculationThread the recalculationThread to set
   */
  protected void setRecalculationThread(Thread recalculationThread) {
    _recalculationThread = recalculationThread;
  }

  /**
   * @return the calculationState
   */
  public ViewCalculationState getCalculationState() {
    return _calculationState;
  }

  /**
   * @param calculationState the calculationState to set
   */
  protected void setCalculationState(ViewCalculationState calculationState) {
    _calculationState = calculationState;
  }

  /**
   * @return the recalcJob
   */
  public ViewRecalculationJob getRecalcJob() {
    return _recalcJob;
  }

  /**
   * @param recalcJob the recalcJob to set
   */
  protected void setRecalcJob(ViewRecalculationJob recalcJob) {
    _recalcJob = recalcJob;
  }

  /**
   * @return the latest view evaluation model
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    return _viewEvaluationModel;
  }

  /**
   * Registers a listener for full computation results, first checking that
   * its user is entitled to see them.
   *
   * @param resultListener the listener to add, not null
   */
  public void addResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    checkIsEntitledToResults(resultListener.getUser());
    _resultListeners.add(resultListener);
  }

  /**
   * Removes a previously registered result listener.
   *
   * @param resultListener the listener to remove, not null
   */
  public void removeResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    _resultListeners.remove(resultListener);
  }

  /**
   * Registers a listener for delta results, first checking that its user is
   * entitled to see them.
   *
   * @param deltaListener the listener to add, not null
   */
  public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    checkIsEntitledToResults(deltaListener.getUser());
    _deltaListeners.add(deltaListener);
  }

  /**
   * Removes a previously registered delta listener.
   *
   * @param deltaListener the listener to remove, not null
   * @deprecated the method name is misspelled ("Lister"); use
   *             {@link #removeDeltaResultListener} instead. Retained for callers.
   */
  @Deprecated
  public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
    removeDeltaResultListener(deltaListener);
  }

  /**
   * Removes a previously registered delta listener. Correctly-spelled
   * replacement for {@link #removeDeltaResultLister}.
   *
   * @param deltaListener the listener to remove, not null
   */
  public void removeDeltaResultListener(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    _deltaListeners.remove(deltaListener);
  }

  /**
   * Returns the name of this view, as declared by its definition.
   *
   * @return the view name
   */
  public String getName() {
    return getDefinition().getName();
  }

  /**
   * Returns every computation target of the view, as reported by the current
   * evaluation model. Throws NullPointerException before init().
   *
   * @return the set of computation target specifications
   */
  public Set<ComputationTargetSpecification> getAllComputationTargets() {
    return getViewEvaluationModel().getAllComputationTargets();
  }

  /**
   * Initializes the view: compiles the view definition into an evaluation
   * model and subscribes to all live data the model requires. Moves state
   * from NOT_INITIALIZED through INITIALIZING to NOT_STARTED. Must be called
   * before {@link #start()}.
   */
  public synchronized void init() {
    OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
    setCalculationState(ViewCalculationState.INITIALIZING);
    _viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
    addLiveDataSubscriptions(getViewEvaluationModel().getAllLiveDataRequirements());
    setCalculationState(ViewCalculationState.NOT_STARTED);
    timer.finished();
  }

  /**
   * Adds live data subscriptions to the view: registers this view as a
   * snapshot listener and subscribes, on behalf of the definition's live data
   * user, to every given requirement.
   *
   * @param liveDataRequirements the value requirements to subscribe to, not null
   */
  private void addLiveDataSubscriptions(Set<ValueRequirement> liveDataRequirements) {
    OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
    LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
    snapshotProvider.addListener(this);
    snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
    timer.finished();
  }

  @Override
  public void subscriptionFailed(ValueRequirement requirement, String msg) {
    // Previously an empty body: the failure was silently swallowed.
    s_logger.warn("Live data subscription failed for {}: {}", requirement, msg);
  }

  @Override
  public void subscriptionStopped(ValueRequirement requirement) {
    // No action required when a subscription stops.
  }

  @Override
  public void subscriptionSucceeded(ValueRequirement requirement) {
    // No action required on successful subscription.
  }

  /**
   * Live data tick callback: if a recalc job is running and the changed value
   * is one this view requires, nudge the job to recalculate.
   */
  @Override
  public void valueChanged(ValueRequirement requirement) {
    Set<ValueRequirement> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    ViewRecalculationJob recalcJob = getRecalcJob();
    if (recalcJob != null && liveDataRequirements.contains(requirement)) {
      recalcJob.liveDataChanged();
    }
  }

  /**
   * @return the result of the most recently completed cycle, or null if none
   */
  public synchronized ViewComputationResultModel getMostRecentResult() {
    return _mostRecentResult;
  }

  /**
   * @return the portfolio this view operates on, or null before init()
   */
  public Portfolio getPortfolio() {
    if (getViewEvaluationModel() == null) {
      return null;
    }
    return getViewEvaluationModel().getPortfolio();
  }

  /**
   * @return the root node of the view's portfolio, or null before init()
   */
  public PortfolioNode getPositionRoot() {
    if (getViewEvaluationModel() == null) {
      return null;
    }
    return getViewEvaluationModel().getPortfolio().getRootNode();
  }

  /**
   * Invoked after each computation cycle: stores the new result, notifies
   * full-result listeners and, when delta listeners exist and a previous
   * result is available, computes and publishes a delta model.
   *
   * @param result the result of the cycle that just finished, not null
   */
  public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
    // REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
    // of some kind. It's synchronized and blocks the recalc thread, so a slow
    // callback implementation (or just the cost of computing the delta model) will
    // be an unnecessary burden. Have to factor in some type of win there.
    s_logger.debug("Recalculation Performed called.");
    // We swap these first so that in the callback the view is consistent.
    ViewComputationResultModelImpl previousResult = _mostRecentResult;
    _mostRecentResult = result;
    for (ComputationResultListener resultListener : _resultListeners) {
      resultListener.computationResultAvailable(result);
    }
    if (!_deltaListeners.isEmpty() && (previousResult != null)) {
      ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
      for (DeltaComputationResultListener deltaListener : _deltaListeners) {
        deltaListener.deltaResultAvailable(deltaModel);
      }
    }
  }

  /**
   * Builds a delta model describing how {@code result} differs from
   * {@code previousResult}.
   *
   * @param previousResult the previous cycle's result, not null
   * @param result the latest cycle's result, not null
   * @return the populated delta model
   */
  private ViewDeltaResultModel computeDeltaModel(
      ViewComputationResultModelImpl previousResult,
      ViewComputationResultModelImpl result) {
    ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
    deltaModel.setValuationTime(result.getValuationTime());
    deltaModel.setResultTimestamp(result.getResultTimestamp());
    deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
    deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
    for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
      computeDeltaModel(deltaModel, targetSpec, previousResult, result);
    }
    return deltaModel;
  }

  /**
   * Computes delta entries for a single target across every calculation
   * configuration in the latest result.
   */
  private void computeDeltaModel(
      ViewDeltaResultModelImpl deltaModel,
      ComputationTargetSpecification targetSpec,
      ViewComputationResultModelImpl previousResult,
      ViewComputationResultModelImpl result) {
    for (String calcConfigName : result.getCalculationConfigurationNames()) {
      ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
      ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
      computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
    }
  }

  /**
   * Computes delta entries for a single target under a single calculation
   * configuration. With no previous state (new configuration or new target),
   * every latest value is a delta; otherwise the configuration's
   * DeltaDefinition decides per value.
   *
   * @param previousCalcModel the previous per-configuration results, may be null
   */
  private void computeDeltaModel(
      ViewDeltaResultModelImpl deltaModel,
      ComputationTargetSpecification targetSpec,
      String calcConfigName,
      ViewCalculationResultModel previousCalcModel,
      ViewCalculationResultModel resultCalcModel) {
    Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
    if (previousCalcModel == null) {
      // Everything is new/delta because this is a new calculation context.
      addAllAsDelta(deltaModel, calcConfigName, resultValues);
      return;
    }
    Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
    if (previousValues == null) {
      // Everything is new/delta because this is a new target.
      addAllAsDelta(deltaModel, calcConfigName, resultValues);
      return;
    }
    // Have to delta each value individually.
    DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
    for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
      ComputedValue resultValue = resultEntry.getValue();
      ComputedValue previousValue = previousValues.get(resultEntry.getKey());
      // REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
      // computation cycle, not the value that we last emitted as a delta. It is therefore important that the
      // DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
      // values after truncation to the required decimal place, rather than testing whether the difference of the
      // full values is greater than some threshold; this way, there will always be a point beyond which a change
      // is detected, even in the event of gradual creep.
      if (deltaDefinition.isDelta(previousValue, resultValue)) {
        deltaModel.addValue(calcConfigName, resultValue);
      }
    }
  }

  /**
   * Adds every value to the delta model unconditionally; used when there is
   * no previous state to compare against. Extracted from two identical loops.
   */
  private static void addAllAsDelta(ViewDeltaResultModelImpl deltaModel, String calcConfigName, Map<String, ComputedValue> values) {
    for (ComputedValue value : values.values()) {
      deltaModel.addValue(calcConfigName, value);
    }
  }

  // REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
  // methods.
  @Override
  public synchronized boolean isRunning() {
    return getCalculationState() == ViewCalculationState.RUNNING;
  }

  /**
   * @return true if at least one result or delta listener is registered
   */
  public boolean hasListeners() {
    return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
  }

  /**
   * @return true if computation cycles populate a result model
   */
  public boolean isPopulateResultModel() {
    return _populateResultModel;
  }

  /**
   * @param populateResultModel true to populate the result model each cycle
   */
  public void setPopulateResultModel(boolean populateResultModel) {
    _populateResultModel = populateResultModel;
  }

  /**
   * Runs a single computation cycle using a fresh live data snapshot.
   */
  public synchronized void runOneCycle() {
    long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
    runOneCycle(snapshotTime);
  }

  /**
   * Runs a single computation cycle for the given valuation time.
   *
   * @param valuationTime the valuation time for the cycle
   */
  public synchronized void runOneCycle(long valuationTime) {
    SingleComputationCycle cycle = createCycle(valuationTime);
    try {
      cycle.prepareInputs();
      cycle.executePlans();
      if (isPopulateResultModel()) {
        cycle.populateResultModel();
        recalculationPerformed(cycle.getResultModel());
      }
    } finally {
      // Previously resources leaked if any step above threw.
      cycle.releaseResources();
    }
  }

  /**
   * Spring {@code Lifecycle} hook: starts the background recalculation job.
   * Legal only from NOT_STARTED or TERMINATED; all other states throw.
   */
  @Override
  public synchronized void start() {
    s_logger.info("Starting...");
    switch(getCalculationState()) {
      case NOT_STARTED:
      case TERMINATED:
        // Normal state of play. Continue as normal.
        break;
      case TERMINATING:
        // In the middle of termination. This is really bad, as we're now holding the lock
        // that will allow termination to complete successfully. Therefore, we have to throw
        // an exception rather than just waiting or something.
        throw new IllegalStateException("Instructed to start while still terminating.");
      case INITIALIZING:
        // Must have thrown an exception in initialization. Can't start.
        throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
      case NOT_INITIALIZED:
        throw new IllegalStateException("Must call init() before starting.");
      case STARTING:
        // Must have thrown an exception when start() called previously.
        throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
      case RUNNING:
        throw new IllegalStateException("Already running.");
    }
    setCalculationState(ViewCalculationState.STARTING);
    ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
    Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
    setRecalcJob(recalcJob);
    setRecalculationThread(recalcThread);
    setCalculationState(ViewCalculationState.RUNNING);
    recalcThread.start();
    s_logger.info("Started.");
  }

  /**
   * Spring {@code Lifecycle} hook: stops the background recalculation job,
   * joining the recalc thread with a timeout before marking TERMINATED.
   */
  @Override
  public void stop() {
    s_logger.info("Stopping.....");
    synchronized (this) {
      switch(getCalculationState()) {
        case STARTING:
          // Something went horribly wrong during start, and it must have thrown an exception.
          s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
          break;
        case RUNNING:
          // This is the normal state of play. Do nothing.
          break;
        default:
          throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
      }
    }
    assert getRecalcJob() != null;
    assert getRecalculationThread() != null;
    synchronized (this) {
      // Idempotence guard: a concurrent or repeated stop() becomes a no-op.
      if ((getCalculationState() == ViewCalculationState.TERMINATED)
          || (getCalculationState() == ViewCalculationState.TERMINATING)) {
        s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
        return;
      }
      setCalculationState(ViewCalculationState.TERMINATING);
    }
    getRecalcJob().terminate();
    if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
      // In this case it might be waiting on a recalculation pass. Interrupt it.
      getRecalculationThread().interrupt();
    }
    // TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
    // how long the job is currently taking to cycle.
    long timeout = 100 * 1000L;
    boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
    if (!successful) {
      s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
    }
    synchronized (this) {
      setCalculationState(ViewCalculationState.TERMINATED);
      setRecalcJob(null);
      setRecalculationThread(null);
    }
    s_logger.info("Stopped.");
  }

  /**
   * Reading the static contents of a view, modifying the view,
   * etc., can sometimes be performed even by users
   * who are not entitled to view the results of the view.
   *
   * @param user User who is requesting access
   * @return true if the user should be able to view the
   *         static contents of the view. false otherwise.
   */
  public boolean isEntitledToAccess(UserPrincipal user) {
    try {
      checkIsEntitledToAccess(user);
      return true;
    } catch (ViewAccessException e) {
      return false;
    }
  }

  /**
   * Reading the static contents of a view, modifying the view,
   * etc., can sometimes be performed even by users
   * who are not entitled to view the results of the view.
   * Currently unimplemented: every user is granted access.
   *
   * @param user User who is requesting access
   * @throws ViewAccessException If the user is not entitled
   */
  public void checkIsEntitledToAccess(UserPrincipal user) {
    // not done yet
  }

  /**
   * A user is entitled to view the computation results produced
   * by a view only if they are entitled to every market data
   * line required to compute the results of the view.
   *
   * @param user User who is requesting access
   * @return true if the user should be able to view the
   *         computation results produced by the view. false otherwise.
   */
  public boolean isEntitledToResults(UserPrincipal user) {
    try {
      checkIsEntitledToResults(user);
      return true;
    } catch (ViewAccessException e) {
      return false;
    }
  }

  /**
   * A user is entitled to view the computation results produced
   * by a view only if they are entitled to every market data
   * line required to compute the results of the view.
   *
   * @param user User who is requesting access
   * @throws ViewAccessException If the user is not entitled
   */
  public void checkIsEntitledToResults(UserPrincipal user) {
    Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
    Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
        requiredValues,
        getProcessingContext().getSecuritySource());
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
      if (!entry.getValue().booleanValue()) {
        failures.add(entry.getKey());
      }
    }
    if (!failures.isEmpty()) {
      throw new ViewAccessException(user + " is not entitled to " + this +
          " because they do not have permissions to " + failures.get(0));
    }
  }

  @Override
  public String toString() {
    return "View[" + getDefinition().getName() + "]";
  }

  /**
   * Creates a new single computation cycle over this view.
   *
   * @param valuationTime the valuation time for the cycle
   * @return a freshly constructed cycle, never null
   */
  public SingleComputationCycle createCycle(long valuationTime) {
    SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
    return cycle;
  }
}
Left
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition; // the definition this view evaluates
private final ViewProcessingContext _processingContext; // services (live data, permissions, compilation)
// Internal State:
private ViewEvaluationModel _viewEvaluationModel; // compiled model; null until init()
private Thread _recalculationThread; // background recalc thread; null unless running
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED; // lifecycle state machine
private ViewRecalculationJob _recalcJob; // job executed by _recalculationThread; null unless running
private ViewComputationResultModelImpl _mostRecentResult; // result of the latest completed cycle; null until first cycle
// Copy-on-write sets: listeners are iterated during result publication while
// add/remove may happen concurrently.
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true; // whether cycles build and publish a result model
/**
 * Creates a view.
 *
 * @param definition the view definition, not null
 * @param processingContext the processing context supplying live data,
 *        permission and compilation services, not null
 * @throws NullPointerException if either argument is null
 */
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
  if (definition == null) {
    throw new NullPointerException("Must provide a definition.");
  }
  if (processingContext == null) {
    throw new NullPointerException("Must provide a processing context.");
  }
  _definition = definition;
  _processingContext = processingContext;
}
/**
 * Returns the view definition this view was constructed with.
 *
 * @return the view definition, never null
 */
public ViewDefinition getDefinition() {
  return _definition;
}
/**
 * Returns the processing context this view was constructed with.
 *
 * @return the processing context, never null
 */
public ViewProcessingContext getProcessingContext() {
  return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions(getViewEvaluationModel().getAllLiveDataRequirements());
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions(Set<ValueRequirement> liveDataRequirements) {
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement requirement) {
Set<ValueRequirement> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
if (recalcJob != null && liveDataRequirements.contains(requirement)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
@Override
public synchronized void start() {
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// In the middle of termination. This is really bad, as we're now holding the lock
// that will allow termination to complete successfully. Therefore, we have to throw
// an exception rather than just waiting or something.
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Checks that the given user has access to every market data line required to compute the results of the view, and
* throws an exception if this is not the case.
*
* @param user the user
* @throws ViewPermissionException if any entitlement problems are found
*/
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
requiredValues,
getProcessingContext().getSecuritySource());
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewPermissionException(user + " is not entitled to the output of " + this +
" because they do not have permissions to " + failures.get(0));
}
}
@Override
public String toString() {
return "View[" + getDefinition().getName() + "]";
}
public SingleComputationCycle createCycle(long valuationTime) {
SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
return cycle;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
  private static final Logger s_logger = LoggerFactory.getLogger(View.class);

  /**
   * Maximum time, in milliseconds, that {@link #stop()} waits for the recalculation thread to
   * terminate before giving up with a warning.
   */
  // TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
  // how long the job is currently taking to cycle.
  private static final long SHUTDOWN_TIMEOUT_MILLIS = 100 * 1000L;

  // Injected dependencies:
  private final ViewDefinition _definition;
  private final ViewProcessingContext _processingContext;

  // Internal State:
  private ViewEvaluationModel _viewEvaluationModel;
  private Thread _recalculationThread;
  private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
  private ViewRecalculationJob _recalcJob;
  private ViewComputationResultModelImpl _mostRecentResult;
  private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
  private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
  private volatile boolean _populateResultModel = true;

  /**
   * Constructs a view for the given definition, backed by the given processing context.
   *
   * @param definition the view definition, not null
   * @param processingContext the context supplying engine services, not null
   * @throws NullPointerException if either argument is null
   */
  public View(ViewDefinition definition, ViewProcessingContext processingContext) {
    if (definition == null) {
      throw new NullPointerException("Must provide a definition.");
    }
    if (processingContext == null) {
      throw new NullPointerException("Must provide a processing context.");
    }
    _definition = definition;
    _processingContext = processingContext;
  }

  /**
   * @return the view definition supplied at construction, not null
   */
  public ViewDefinition getDefinition() {
    return _definition;
  }

  /**
   * @return the processing context supplied at construction, not null
   */
  public ViewProcessingContext getProcessingContext() {
    return _processingContext;
  }

  /**
   * @return the recalculation thread, or null if none has been started
   */
  public Thread getRecalculationThread() {
    return _recalculationThread;
  }

  /**
   * @param recalculationThread the recalculation thread to set; may be null to clear
   */
  protected void setRecalculationThread(Thread recalculationThread) {
    _recalculationThread = recalculationThread;
  }

  /**
   * @return the current lifecycle state of the view's calculation machinery
   */
  public ViewCalculationState getCalculationState() {
    return _calculationState;
  }

  /**
   * @param calculationState the calculation state to set
   */
  protected void setCalculationState(ViewCalculationState calculationState) {
    _calculationState = calculationState;
  }

  /**
   * @return the recalculation job, or null if none is running
   */
  public ViewRecalculationJob getRecalcJob() {
    return _recalcJob;
  }

  /**
   * @param recalcJob the recalculation job to set; may be null to clear
   */
  protected void setRecalcJob(ViewRecalculationJob recalcJob) {
    _recalcJob = recalcJob;
  }

  /**
   * @return the latest view evaluation model, or null before {@link #init()} has run
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    return _viewEvaluationModel;
  }

  /**
   * @return the provider used to check listener permissions, not null
   */
  public ViewPermissionProvider getPermissionProvider() {
    return getProcessingContext().getPermissionProvider();
  }

  /**
   * Adds a listener that is notified of every full computation result. The listener's user must
   * have permission to read the results of this view.
   *
   * @param resultListener the listener to add, not null
   */
  public void addResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
    _resultListeners.add(resultListener);
  }

  /**
   * Removes a full-result listener. Does nothing if the listener was not registered.
   *
   * @param resultListener the listener to remove, not null
   */
  public void removeResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    _resultListeners.remove(resultListener);
  }

  /**
   * Adds a listener that is notified of delta results. The listener's user must have permission
   * to read the results of this view.
   *
   * @param deltaListener the listener to add, not null
   */
  public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
    _deltaListeners.add(deltaListener);
  }

  /**
   * Removes a delta-result listener. Does nothing if the listener was not registered.
   *
   * @param deltaListener the listener to remove, not null
   */
  public void removeDeltaResultListener(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    _deltaListeners.remove(deltaListener);
  }

  /**
   * Removes a delta-result listener.
   *
   * @param deltaListener the listener to remove, not null
   * @deprecated the method name contains a typo; use {@link #removeDeltaResultListener} instead
   */
  @Deprecated
  public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
    removeDeltaResultListener(deltaListener);
  }

  /**
   * @return the name of the underlying view definition
   */
  public String getName() {
    return getDefinition().getName();
  }

  /**
   * @return all computation targets of the compiled view
   */
  public Set<ComputationTargetSpecification> getAllComputationTargets() {
    return getViewEvaluationModel().getAllComputationTargets();
  }

  /**
   * Compiles the view definition and subscribes to all required live data. Must be called before
   * {@link #start()}.
   */
  public synchronized void init() {
    OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
    setCalculationState(ViewCalculationState.INITIALIZING);
    _viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
    addLiveDataSubscriptions(getViewEvaluationModel().getAllLiveDataRequirements());
    setCalculationState(ViewCalculationState.NOT_STARTED);
    timer.finished();
  }

  /**
   * Adds live data subscriptions to the view.
   *
   * @param liveDataRequirements the requirements to subscribe to, not null
   */
  private void addLiveDataSubscriptions(Set<ValueRequirement> liveDataRequirements) {
    OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
    LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
    snapshotProvider.addListener(this);
    snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
    timer.finished();
  }

  @Override
  public void subscriptionFailed(ValueRequirement requirement, String msg) {
    // Intentional no-op.
  }

  @Override
  public void subscriptionStopped(ValueRequirement requirement) {
    // Intentional no-op.
  }

  @Override
  public void subscriptionSucceeded(ValueRequirement requirement) {
    // Intentional no-op.
  }

  @Override
  public void valueChanged(ValueRequirement requirement) {
    Set<ValueRequirement> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    ViewRecalculationJob recalcJob = getRecalcJob();
    // Only notify the recalculation job for values this view actually depends on.
    if (recalcJob != null && liveDataRequirements.contains(requirement)) {
      recalcJob.liveDataChanged();
    }
  }

  /**
   * @return the most recent full computation result, or null if no cycle has completed yet
   */
  public synchronized ViewComputationResultModel getMostRecentResult() {
    return _mostRecentResult;
  }

  /**
   * @return the compiled portfolio, or null if the view has not been initialized
   */
  public Portfolio getPortfolio() {
    if (getViewEvaluationModel() == null) {
      return null;
    }
    return getViewEvaluationModel().getPortfolio();
  }

  /**
   * @return the root node of the compiled portfolio, or null if the view has not been initialized
   *         or the compiled model has no portfolio
   */
  public PortfolioNode getPositionRoot() {
    Portfolio portfolio = getPortfolio();
    if (portfolio == null) {
      // Also guards the case where the evaluation model exists but carries no portfolio,
      // which previously caused a NullPointerException.
      return null;
    }
    return portfolio.getRootNode();
  }

  /**
   * Called on completion of a computation cycle. Publishes the full result to all result
   * listeners and, when a previous result exists, a delta model to all delta listeners.
   *
   * @param result the newly computed result, not null
   */
  public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
    // REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
    // of some kind. It's synchronized and blocks the recalc thread, so a slow
    // callback implementation (or just the cost of computing the delta model) will
    // be an unnecessary burden. Have to factor in some type of win there.
    s_logger.debug("Recalculation Performed called.");
    // We swap these first so that in the callback the view is consistent.
    ViewComputationResultModelImpl previousResult = _mostRecentResult;
    _mostRecentResult = result;
    for (ComputationResultListener resultListener : _resultListeners) {
      resultListener.computationResultAvailable(result);
    }
    if (!_deltaListeners.isEmpty() && (previousResult != null)) {
      ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
      for (DeltaComputationResultListener deltaListener : _deltaListeners) {
        deltaListener.deltaResultAvailable(deltaModel);
      }
    }
  }

  /**
   * Computes the delta between two consecutive computation results.
   *
   * @param previousResult the result of the previous cycle, not null
   * @param result the result of the current cycle, not null
   * @return a model containing only the values considered to have changed
   */
  private ViewDeltaResultModel computeDeltaModel(
      ViewComputationResultModelImpl previousResult,
      ViewComputationResultModelImpl result) {
    ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
    deltaModel.setValuationTime(result.getValuationTime());
    deltaModel.setResultTimestamp(result.getResultTimestamp());
    deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
    deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
    for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
      computeDeltaModel(deltaModel, targetSpec, previousResult, result);
    }
    return deltaModel;
  }

  /**
   * Accumulates deltas for a single computation target across every calculation configuration.
   */
  private void computeDeltaModel(
      ViewDeltaResultModelImpl deltaModel,
      ComputationTargetSpecification targetSpec,
      ViewComputationResultModelImpl previousResult,
      ViewComputationResultModelImpl result) {
    for (String calcConfigName : result.getCalculationConfigurationNames()) {
      ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
      ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
      computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
    }
  }

  /**
   * Accumulates deltas for a single target within a single calculation configuration.
   */
  private void computeDeltaModel(
      ViewDeltaResultModelImpl deltaModel,
      ComputationTargetSpecification targetSpec,
      String calcConfigName,
      ViewCalculationResultModel previousCalcModel,
      ViewCalculationResultModel resultCalcModel) {
    if (previousCalcModel == null) {
      // Everything is new/delta because this is a new calculation context.
      Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
      for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
        deltaModel.addValue(calcConfigName, resultEntry.getValue());
      }
    } else {
      Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
      Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
      if (previousValues == null) {
        // Everything is new/delta because this is a new target.
        for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
          deltaModel.addValue(calcConfigName, resultEntry.getValue());
        }
      } else {
        // Have to compute individual deltas.
        DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
        for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
          ComputedValue resultValue = resultEntry.getValue();
          ComputedValue previousValue = previousValues.get(resultEntry.getKey());
          // REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
          // computation cycle, not the value that we last emitted as a delta. It is therefore important that the
          // DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
          // values after truncation to the required decimal place, rather than testing whether the difference of the
          // full values is greater than some threshold; this way, there will always be a point beyond which a change
          // is detected, even in the event of gradual creep.
          if (deltaDefinition.isDelta(previousValue, resultValue)) {
            deltaModel.addValue(calcConfigName, resultEntry.getValue());
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
  // methods.
  @Override
  public synchronized boolean isRunning() {
    return getCalculationState() == ViewCalculationState.RUNNING;
  }

  /**
   * @return true if at least one result or delta listener is registered
   */
  public boolean hasListeners() {
    return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
  }

  /**
   * @return whether each cycle populates the full result model
   */
  public boolean isPopulateResultModel() {
    return _populateResultModel;
  }

  /**
   * @param populateResultModel whether each cycle should populate the full result model
   */
  public void setPopulateResultModel(boolean populateResultModel) {
    _populateResultModel = populateResultModel;
  }

  /**
   * Runs a single computation cycle using a fresh live data snapshot.
   */
  public synchronized void runOneCycle() {
    long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
    runOneCycle(snapshotTime);
  }

  /**
   * Runs a single computation cycle for the given valuation time, publishing the result to
   * listeners when result-model population is enabled.
   *
   * @param valuationTime the valuation time for the cycle
   */
  public synchronized void runOneCycle(long valuationTime) {
    SingleComputationCycle cycle = createCycle(valuationTime);
    cycle.prepareInputs();
    cycle.executePlans();
    if (isPopulateResultModel()) {
      cycle.populateResultModel();
      recalculationPerformed(cycle.getResultModel());
    }
    cycle.releaseResources();
  }

  @Override
  public synchronized void start() {
    s_logger.info("Starting...");
    switch (getCalculationState()) {
      case NOT_STARTED:
      case TERMINATED:
        // Normal state of play. Continue as normal.
        break;
      case TERMINATING:
        // In the middle of termination. This is really bad, as we're now holding the lock
        // that will allow termination to complete successfully. Therefore, we have to throw
        // an exception rather than just waiting or something.
        throw new IllegalStateException("Instructed to start while still terminating.");
      case INITIALIZING:
        // Must have thrown an exception in initialization. Can't start.
        throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
      case NOT_INITIALIZED:
        throw new IllegalStateException("Must call init() before starting.");
      case STARTING:
        // Must have thrown an exception when start() called previously.
        throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
      case RUNNING:
        throw new IllegalStateException("Already running.");
    }
    setCalculationState(ViewCalculationState.STARTING);
    ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
    Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
    setRecalcJob(recalcJob);
    setRecalculationThread(recalcThread);
    setCalculationState(ViewCalculationState.RUNNING);
    recalcThread.start();
    s_logger.info("Started.");
  }

  @Override
  public void stop() {
    s_logger.info("Stopping.....");
    synchronized (this) {
      switch (getCalculationState()) {
        case STARTING:
          // Something went horribly wrong during start, and it must have thrown an exception.
          // NOTE(review): despite "Doing nothing", this break falls through to the termination
          // logic below, where the asserts on the recalc job/thread may then fail - confirm
          // whether an early return was intended here.
          s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
          break;
        case RUNNING:
          // This is the normal state of play. Do nothing.
          break;
        default:
          throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
      }
    }
    assert getRecalcJob() != null;
    assert getRecalculationThread() != null;
    synchronized (this) {
      if ((getCalculationState() == ViewCalculationState.TERMINATED)
          || (getCalculationState() == ViewCalculationState.TERMINATING)) {
        s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
        return;
      }
      setCalculationState(ViewCalculationState.TERMINATING);
    }
    getRecalcJob().terminate();
    if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
      // In this case it might be waiting on a recalculation pass. Interrupt it.
      getRecalculationThread().interrupt();
    }
    boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), SHUTDOWN_TIMEOUT_MILLIS);
    if (!successful) {
      s_logger.warn("Unable to shut down recalc thread in {}ms", SHUTDOWN_TIMEOUT_MILLIS);
    }
    synchronized (this) {
      setCalculationState(ViewCalculationState.TERMINATED);
      setRecalcJob(null);
      setRecalculationThread(null);
    }
    s_logger.info("Stopped.");
  }

  /**
   * Checks that the given user has access to every market data line required to compute the results of the view, and
   * throws an exception if this is not the case.
   *
   * @param user the user
   * @throws ViewPermissionException if any entitlement problems are found
   */
  public void assertAccessToLiveDataRequirements(UserPrincipal user) {
    Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
    Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
        requiredValues,
        getProcessingContext().getSecuritySource());
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
      if (!entry.getValue().booleanValue()) {
        failures.add(entry.getKey());
      }
    }
    if (!failures.isEmpty()) {
      throw new ViewPermissionException(user + " is not entitled to the output of " + this +
          " because they do not have permissions to " + failures.get(0));
    }
  }

  @Override
  public String toString() {
    return "View[" + getDefinition().getName() + "]";
  }

  /**
   * Creates a single computation cycle for the given valuation time.
   *
   * @param valuationTime the valuation time for the cycle
   * @return a new cycle, not null
   */
  public SingleComputationCycle createCycle(long valuationTime) {
    SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
    return cycle;
  }
}
Right
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface: compiles a
* {@link ViewDefinition} into a {@link ViewEvaluationModel}, subscribes to the
* live data the compiled model requires, and runs (or is driven through)
* computation cycles whose results are pushed to registered listeners.
* <p>
* Lifecycle: {@link #init()} must complete before {@link #start()}; state
* transitions are guarded by synchronizing on this instance.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
// Compiled form of the definition; assigned once, inside synchronized init().
private ViewEvaluationModel _viewEvaluationModel;
// Thread running the recalculation job while the view is RUNNING; null otherwise.
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
// Guarded by 'this' — see getMostRecentResult() and recalculationPerformed().
private ViewComputationResultModelImpl _mostRecentResult;
// Copy-on-write sets so result fan-out can iterate without holding a lock.
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
/**
* Constructs a view from its definition and the shared processing context.
*
* @param definition the view definition, not null
* @param processingContext the processing services, not null
* @throws NullPointerException if either argument is null
*/
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
/**
* Registers a listener for full computation results, first verifying that its
* user is entitled to see them.
*
* @param resultListener the listener to add, not null
* @throws ViewAccessException if the listener's user is not entitled to the results
*/
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
checkIsEntitledToResults(resultListener.getUser());
_resultListeners.add(resultListener);
}
/**
* Deregisters a full-result listener; a no-op if it was never registered.
*
* @param resultListener the listener to remove, not null
*/
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
/**
* Registers a listener for delta results, first verifying that its user is
* entitled to see them.
*
* @param deltaListener the listener to add, not null
* @throws ViewAccessException if the listener's user is not entitled to the results
*/
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
checkIsEntitledToResults(deltaListener.getUser());
_deltaListeners.add(deltaListener);
}
/**
* Deregisters a delta-result listener; a no-op if it was never registered.
* <p>
* NOTE(review): the method name is missing "ne" — should read
* {@code removeDeltaResultListener}. Renaming would break existing callers, so
* it is flagged here rather than changed.
*
* @param deltaListener the listener to remove, not null
*/
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
/**
* @return the name from the underlying view definition
*/
public String getName() {
return getDefinition().getName();
}
/**
* @return all computation targets of the compiled model; requires init() to have run
*/
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
/**
* Compiles the view definition and subscribes to the live data it requires.
* Must be called before {@link #start()}; leaves the state at NOT_STARTED.
*/
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
// NOTE(review): the three subscription callbacks below are silent no-ops; a
// failed subscription in particular is swallowed without even a log line —
// confirm this is intentional.
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
/**
* Live data tick callback: nudges the recalculation job if the changed value
* is one this view depends on.
*/
@Override
public void valueChanged(ValueRequirement value) {
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
// NOTE(review): this asks a Set<ValueSpecification> whether it contains a
// ValueRequirement — that can only match if equals() crosses the two types;
// verify, otherwise ticks may never trigger recalculation.
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
/**
* @return the most recent full result, or null if no cycle has completed yet
*/
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
/**
* @return the compiled portfolio, or null if the view is not yet initialized
*/
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
/**
* @return the root node of the compiled portfolio, or null if not yet initialized
*/
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
/**
* Callback invoked when a computation cycle completes: records the result,
* notifies full-result listeners, then (if anyone is listening and a prior
* result exists) computes and publishes a delta against the previous result.
*
* @param result the newly computed result model
*/
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* Builds a delta model covering every target in the new result, comparing it
* against the previous cycle's result.
*
* @param previousResult the prior cycle's result
* @param result the current cycle's result
* @return the populated delta model
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
// Fans one target out across every calculation configuration in the new result.
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
// Compares one (target, calc-config) pair; when there is nothing to compare
// against (new config or new target) every value counts as a delta, otherwise
// the config's DeltaDefinition decides value-by-value.
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to compute the delta value-by-value.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
/**
* @return true if any full-result or delta listener is registered
*/
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
/**
* @return whether cycles populate a result model (see {@link #runOneCycle(long)})
*/
public boolean isPopulateResultModel() {
return _populateResultModel;
}
/**
* @param populateResultModel whether cycles should populate a result model
*/
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
/**
* Runs a single cycle against a fresh live data snapshot.
*/
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
/**
* Runs a single cycle at the given valuation time: prepare inputs, execute
* plans, optionally populate and publish the result, then release resources.
*
* @param valuationTime the valuation time for the cycle
*/
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
/**
* Starts the background recalculation thread. Only legal from NOT_STARTED or
* TERMINATED; every other state throws.
*
* @throws IllegalStateException if the view is not in a startable state
*/
@Override
public synchronized void start() {
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// In the middle of termination. This is really bad, as we're now holding the lock
// that will allow termination to complete successfully. Therefore, we have to throw
// an exception rather than just waiting or something.
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
// State is flipped to RUNNING before the thread actually starts so that the
// job observes a consistent state when it begins.
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
/**
* Stops the recalculation thread and waits (bounded) for it to terminate.
* Safe to call concurrently: a second caller that finds the state already
* TERMINATING/TERMINATED returns without doing anything.
* <p>
* NOTE(review): the state check and the TERMINATING transition are two
* separate synchronized blocks, so the monitor is released in between —
* another thread could change state in that window; confirm this is acceptable.
*/
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* static contents of the view. false otherwise.
*/
public boolean isEntitledToAccess(UserPrincipal user) {
try {
checkIsEntitledToAccess(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToAccess(UserPrincipal user) {
// not done yet -- currently grants access to everyone by never throwing.
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
public boolean isEntitledToResults(UserPrincipal user) {
try {
checkIsEntitledToResults(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToResults(UserPrincipal user) {
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
// Only the first failing specification is reported in the exception message.
if (!failures.isEmpty()) {
throw new ViewAccessException(user + " is not entitled to " + this +
" because they do not have permissions to " + failures.get(0));
}
}
// Maps the compiled model's live data specifications back to their originating
// value requirements.
private Set<ValueRequirement> getRequiredLiveData() {
Set<ValueSpecification> requiredSpecs = getViewEvaluationModel().getAllLiveDataRequirements();
Set<ValueRequirement> returnValue = new HashSet<ValueRequirement>();
for (ValueSpecification requiredSpec : requiredSpecs) {
returnValue.add(requiredSpec.getRequirementSpecification());
}
return returnValue;
}
// Resolves each required value requirement to the live data specification
// used for entitlement checking.
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
Set<LiveDataSpecification> returnValue = new HashSet<LiveDataSpecification>();
for (ValueRequirement requirement : getRequiredLiveData()) {
LiveDataSpecification liveDataSpec = requirement.getRequiredLiveData(getProcessingContext().getSecuritySource());
returnValue.add(liveDataSpec);
}
return returnValue;
}
@Override
public String toString() {
return "View[" + getDefinition().getName() + "]";
}
/**
* Creates a single computation cycle over this view for the given valuation time.
*
* @param valuationTime the valuation time the cycle will compute at
* @return a new cycle bound to this view
*/
public SingleComputationCycle createCycle(long valuationTime) {
SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
return cycle;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
checkIsEntitledToResults(resultListener.getUser());
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
checkIsEntitledToResults(deltaListener.getUser());
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement value) {
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
@Override
public synchronized void start() {
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// In the middle of termination. This is really bad, as we're now holding the lock
// that will allow termination to complete successfully. Therefore, we have to throw
// an exception rather than just waiting or something.
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
@Override
public void stop() {
// Lifecycle shutdown of the recalculation thread.
// NOTE(review): the state validation below and the TERMINATING transition
// happen in two separate synchronized blocks, so two concurrent stop()
// calls can both pass the first check; the second block re-checks under
// the lock and returns early to compensate.
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
// Set by start(); asserts are no-ops unless the JVM runs with -ea.
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
// Re-check under the lock: another thread may already be stopping us.
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
// Ask the recalc job to finish, then nudge the thread if it is parked
// between recalculation passes.
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L; // 100 seconds, in milliseconds
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
 * Tests whether a user may access the static contents of this view
 * (reading the definition, modifying the view, etc.). This can be
 * permitted even for users who are not entitled to view the results.
 *
 * @param user User who is requesting access
 * @return true if the user should be able to view the
 *         static contents of the view; false otherwise
 */
public boolean isEntitledToAccess(UserPrincipal user) {
  boolean entitled;
  try {
    checkIsEntitledToAccess(user);
    entitled = true;
  } catch (ViewAccessException e) {
    entitled = false;
  }
  return entitled;
}
/**
 * Reading the static contents of a view, modifying the view,
 * etc., can sometimes be performed even by users
 * who are not entitled to view the results of the view.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
public void checkIsEntitledToAccess(UserPrincipal user) {
// not done yet
// NOTE(review): intentionally a no-op stub -- access checking is not yet
// implemented, so every caller is currently treated as entitled.
}
/**
 * A user is entitled to view the computation results produced by a view
 * only if they are entitled to every market data line required to
 * compute the results of the view.
 *
 * @param user User who is requesting access
 * @return true if the user should be able to view the
 *         computation results produced by the view; false otherwise
 */
public boolean isEntitledToResults(UserPrincipal user) {
  try {
    checkIsEntitledToResults(user);
  } catch (ViewAccessException e) {
    return false;
  }
  return true;
}
/**
 * A user is entitled to view the computation results produced by a view
 * only if they are entitled to every market data line required to
 * compute the results of the view.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
public void checkIsEntitledToResults(UserPrincipal user) {
  s_logger.info("Checking that {} is entitled to the results of {}", user, this);
  Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
  Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
  // Collect every specification the user lacks; the first one is reported.
  ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
  for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
    // Boolean.TRUE.equals also treats a null entitlement value as a denial;
    // the previous booleanValue() dereference would have thrown NPE instead.
    if (!Boolean.TRUE.equals(entry.getValue())) {
      failures.add(entry.getKey());
    }
  }
  if (!failures.isEmpty()) {
    throw new ViewAccessException(user + " is not entitled to " + this +
        " because they do not have permissions to " + failures.get(0));
  }
}
/**
 * Derives the set of live data value requirements from the current
 * evaluation model's live data specifications.
 *
 * @return the requirement form of every live data specification needed
 */
private Set<ValueRequirement> getRequiredLiveData() {
  Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  for (ValueSpecification spec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    requirements.add(spec.getRequirementSpecification());
  }
  return requirements;
}
/**
 * Resolves each required live data value requirement into a concrete
 * live data specification via the security source.
 *
 * @return the live data specifications backing this view's requirements
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
  Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
  for (ValueRequirement requirement : getRequiredLiveData()) {
    specifications.add(requirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
  }
  return specifications;
}
@Override
public String toString() {
  // Same rendering as before: View[<definition name>]
  StringBuilder buf = new StringBuilder("View[");
  buf.append(getDefinition().getName());
  return buf.append(']').toString();
}
/**
 * Creates a single, unexecuted computation cycle for this view at the
 * given valuation time.
 *
 * @param valuationTime the valuation time for the cycle
 * @return a new computation cycle bound to this view
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}
// ===== MergeMethods: merge-tool artifact separating two versions of View.java =====
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
/**
 * Constructs a view around the given definition and shared engine services.
 *
 * @param definition the view definition, not null
 * @param processingContext the engine processing context, not null
 */
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
/**
 * Unregisters a delta result listener. Does nothing if the listener was
 * not registered.
 *
 * @param deltaListener the listener to remove, not null
 */
public void removeDeltaResultListener(DeltaComputationResultListener deltaListener) {
  ArgumentChecker.notNull(deltaListener, "Delta listener");
  _deltaListeners.remove(deltaListener);
}

/**
 * @param deltaListener the listener to remove, not null
 * @deprecated misspelled name retained for backward compatibility;
 *             use {@link #removeDeltaResultListener} instead
 */
@Deprecated
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
  removeDeltaResultListener(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement value) {
// Fired by the live data snapshot provider whenever a subscribed value ticks.
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
// Only poke the recalc job while the view is running and the tick is one
// of our inputs.
// NOTE(review): liveDataRequirements is a Set<ValueSpecification> but
// 'value' is a ValueRequirement -- contains() can only match if those two
// types define cross-type equality. Confirm this is intended.
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
/**
 * @return the portfolio of the current evaluation model, or null if the
 *         view has not been initialized yet
 */
public Portfolio getPortfolio() {
  ViewEvaluationModel model = getViewEvaluationModel();
  return (model == null) ? null : model.getPortfolio();
}
/**
 * @return the root node of the evaluation model's portfolio, or null if
 *         the view has not been initialized yet
 */
public PortfolioNode getPositionRoot() {
  ViewEvaluationModel model = getViewEvaluationModel();
  return (model == null) ? null : model.getPortfolio().getRootNode();
}
/**
 * Callback invoked when a recalculation cycle completes. Publishes the
 * full result to all result listeners and, when a previous result exists,
 * computes and publishes a delta model to the delta listeners.
 *
 * @param result the freshly computed result model
 */
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
// Delta computation is skipped entirely when nobody listens for deltas,
// and on the very first result (no baseline to diff against).
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
/**
 * Computes the delta for a single (target, calculation configuration)
 * pair, adding any new or changed values to {@code deltaModel}.
 *
 * @param deltaModel accumulator for delta values
 * @param targetSpec the computation target being diffed
 * @param calcConfigName the calculation configuration being diffed
 * @param previousCalcModel prior results for this configuration, or null if new
 * @param resultCalcModel current results for this configuration
 */
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, String calcConfigName, ViewCalculationResultModel previousCalcModel, ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// The configuration's DeltaDefinition decides whether this change counts
// as a delta, so a genuine change is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/** @return true if and only if the lifecycle state is RUNNING */
@Override
public synchronized boolean isRunning() {
  return ViewCalculationState.RUNNING == getCalculationState();
}
/** @return true if at least one full-result or delta listener is registered */
public boolean hasListeners() {
  return !(_resultListeners.isEmpty() && _deltaListeners.isEmpty());
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
@Override
public synchronized void start() {
// Lifecycle start: spins up the recalculation thread. Only legal from
// NOT_STARTED (after init()) or TERMINATED (after a previous stop());
// every other state indicates a lifecycle error and throws.
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// NOTE(review): the comment here was truncated in the original; the intent
// appears to be "could throw an exception rather than just waiting or something."
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
// Transition to RUNNING before the thread launches so the recalc job
// observes a running view as soon as it begins executing.
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
@Override
public void stop() {
// Lifecycle shutdown of the recalculation thread.
// NOTE(review): the state validation below and the TERMINATING transition
// happen in two separate synchronized blocks, so two concurrent stop()
// calls can both pass the first check; the second block re-checks under
// the lock and returns early to compensate.
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
// Set by start(); asserts are no-ops unless the JVM runs with -ea.
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
// Re-check under the lock: another thread may already be stopping us.
if ((getCalculationState() == ViewCalculationState.TERMINATED) || (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
// Ask the recalc job to finish, then nudge the thread if it is parked
// between recalculation passes.
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L; // 100 seconds, in milliseconds
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* static contents of the view. false otherwise.
*/
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
/**
 * Asserts that the user is entitled to every live data line required to
 * compute the results of this view.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user lacks any required entitlement
 */
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
  s_logger.info("Checking that {} is entitled to the results of {}", user, this);
  Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
  Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
  // Collect every specification the user lacks; the first one is reported.
  ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
  for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
    // Boolean.TRUE.equals also treats a null entitlement value as a denial;
    // the previous booleanValue() dereference would have thrown NPE instead.
    if (!Boolean.TRUE.equals(entry.getValue())) {
      failures.add(entry.getKey());
    }
  }
  if (!failures.isEmpty()) {
    throw new ViewAccessException(user + " is not entitled to " + this + " because they do not have permissions to " + failures.get(0));
  }
}
private Set<ValueRequirement> getRequiredLiveData() {
Set<ValueSpecification> requiredSpecs = getViewEvaluationModel().getAllLiveDataRequirements();
Set<ValueRequirement> returnValue = new HashSet<ValueRequirement>();
for (ValueSpecification requiredSpec : requiredSpecs) {
returnValue.add(requiredSpec.getRequirementSpecification());
}
return returnValue;
}
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
Set<LiveDataSpecification> returnValue = new HashSet<LiveDataSpecification>();
for (ValueRequirement requirement : getRequiredLiveData()) {
LiveDataSpecification liveDataSpec = requirement.getRequiredLiveData(getProcessingContext().getSecuritySource());
returnValue.add(liveDataSpec);
}
return returnValue;
}
@Override
public String toString() {
return "View[" + getDefinition().getName() + "]";
}
public SingleComputationCycle createCycle(long valuationTime) {
SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
return cycle;
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
/**
 * Unregisters a delta result listener. Does nothing if the listener was
 * not registered.
 *
 * @param deltaListener the listener to remove, not null
 */
public void removeDeltaResultListener(DeltaComputationResultListener deltaListener) {
  ArgumentChecker.notNull(deltaListener, "Delta listener");
  _deltaListeners.remove(deltaListener);
}

/**
 * @param deltaListener the listener to remove, not null
 * @deprecated misspelled name retained for backward compatibility;
 *             use {@link #removeDeltaResultListener} instead
 */
@Deprecated
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
  removeDeltaResultListener(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement value) {
// Fired by the live data snapshot provider whenever a subscribed value ticks.
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
// Only poke the recalc job while the view is running and the tick is one
// of our inputs.
// NOTE(review): liveDataRequirements is a Set<ValueSpecification> but
// 'value' is a ValueRequirement -- contains() can only match if those two
// types define cross-type equality. Confirm this is intended.
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
/**
 * Computes the delta for a single (target, calculation configuration)
 * pair, adding any new or changed values to {@code deltaModel}.
 *
 * @param deltaModel accumulator for delta values
 * @param targetSpec the computation target being diffed
 * @param calcConfigName the calculation configuration being diffed
 * @param previousCalcModel prior results for this configuration, or null if new
 * @param resultCalcModel current results for this configuration
 */
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, String calcConfigName, ViewCalculationResultModel previousCalcModel, ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// The configuration's DeltaDefinition decides whether this change counts
// as a delta, so a genuine change is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/**
 * Checks whether the view's recalculation loop is currently active.
 *
 * @return true if the view is in the RUNNING state
 */
@Override
public synchronized boolean isRunning() {
  ViewCalculationState state = getCalculationState();
  return state == ViewCalculationState.RUNNING;
}
/**
 * Tests whether any result or delta listeners are attached to this view.
 *
 * @return true if at least one listener of either kind is registered
 */
public boolean hasListeners() {
  if (!_resultListeners.isEmpty()) {
    return true;
  }
  return !_deltaListeners.isEmpty();
}
/**
 * Indicates whether each computation cycle populates a full result model.
 *
 * @return true if cycles populate (and publish) a result model
 */
public boolean isPopulateResultModel() {
  return _populateResultModel;
}
/**
 * Controls whether each computation cycle populates a full result model. Disabling this
 * skips result construction and the listener callbacks that depend on it.
 *
 * @param populateResultModel true to populate a result model on each cycle
 */
public void setPopulateResultModel(boolean populateResultModel) {
  _populateResultModel = populateResultModel;
}
/**
 * Runs a single computation cycle, valuing at the time of a fresh live data snapshot.
 */
public synchronized void runOneCycle() {
  long valuationTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
  runOneCycle(valuationTime);
}
/**
 * Runs a single computation cycle at the given valuation time, publishing the result to
 * listeners when result-model population is enabled.
 *
 * @param valuationTime the valuation time for the cycle
 */
public synchronized void runOneCycle(long valuationTime) {
  SingleComputationCycle cycle = createCycle(valuationTime);
  try {
    cycle.prepareInputs();
    cycle.executePlans();
    if (isPopulateResultModel()) {
      cycle.populateResultModel();
      recalculationPerformed(cycle.getResultModel());
    }
  } finally {
    // Release the cycle's resources even if a stage above throws; previously an exception
    // in prepareInputs/executePlans leaked them.
    cycle.releaseResources();
  }
}
/**
 * Starts the background recalculation thread. The view must have been initialized (see
 * init()) and must not already be starting, running or terminating.
 *
 * @throws IllegalStateException if the view is in any state other than NOT_STARTED or TERMINATED
 */
@Override
public synchronized void start() {
  s_logger.info("Starting...");
  switch(getCalculationState()) {
    case NOT_STARTED:
    case TERMINATED:
      // Normal state of play. Continue as normal.
      break;
    case TERMINATING:
      // It might be friendlier to wait for termination to complete, but for now throw
      // an exception rather than just waiting or something.
      throw new IllegalStateException("Instructed to start while still terminating.");
    case INITIALIZING:
      // Must have thrown an exception in initialization. Can't start.
      throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
    case NOT_INITIALIZED:
      throw new IllegalStateException("Must call init() before starting.");
    case STARTING:
      // Must have thrown an exception when start() called previously.
      throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
    case RUNNING:
      throw new IllegalStateException("Already running.");
  }
  setCalculationState(ViewCalculationState.STARTING);
  ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
  Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
  setRecalcJob(recalcJob);
  setRecalculationThread(recalcThread);
  setCalculationState(ViewCalculationState.RUNNING);
  recalcThread.start();
  s_logger.info("Started.");
}
/**
 * Stops the background recalculation thread, waiting a bounded time for it to exit.
 * A call that observes the view already terminating or terminated is a no-op.
 *
 * @throws IllegalStateException if the view is neither starting nor running
 */
@Override
public void stop() {
  s_logger.info("Stopping.....");
  synchronized (this) {
    switch(getCalculationState()) {
      case STARTING:
        // Something went horribly wrong during start, and it must have thrown an exception.
        // NOTE(review): despite "Doing nothing", execution falls through to the termination
        // logic below where getRecalcJob() may be null -- confirm intended.
        s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
        break;
      case RUNNING:
        // This is the normal state of play. Do nothing.
        break;
      default:
        throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
    }
  }
  assert getRecalcJob() != null;
  assert getRecalculationThread() != null;
  // NOTE(review): the state check above and the transition below are in separate
  // synchronized blocks, so two concurrent stop() calls can both pass the first check;
  // the TERMINATED/TERMINATING re-check here makes the duplicate call a no-op.
  synchronized (this) {
    if ((getCalculationState() == ViewCalculationState.TERMINATED) || (getCalculationState() == ViewCalculationState.TERMINATING)) {
      s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
      return;
    }
    setCalculationState(ViewCalculationState.TERMINATING);
  }
  getRecalcJob().terminate();
  if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
    // In this case it might be waiting on a recalculation pass. Interrupt it.
    getRecalculationThread().interrupt();
  }
  // TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
  // how long the job is currently taking to cycle.
  long timeout = 100 * 1000L;
  boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
  if (!successful) {
    s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
  }
  synchronized (this) {
    setCalculationState(ViewCalculationState.TERMINATED);
    setRecalcJob(null);
    setRecalculationThread(null);
  }
  s_logger.info("Stopped.");
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* static contents of the view. false otherwise.
*/
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
/**
 * Asserts that the given user is entitled to every item of live market data required to
 * compute this view's results.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
  s_logger.info("Checking that {} is entitled to the results of {}", user, this);
  Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
  Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
  List<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
  for (Map.Entry<LiveDataSpecification, Boolean> entitlement : entitlements.entrySet()) {
    if (!entitlement.getValue().booleanValue()) {
      failures.add(entitlement.getKey());
    }
  }
  if (!failures.isEmpty()) {
    throw new ViewAccessException(user + " is not entitled to " + this + " because they do not have permissions to " + failures.get(0));
  }
}
/**
 * Derives the requirement form of every live data specification the view depends on.
 *
 * @return the set of live data value requirements
 */
private Set<ValueRequirement> getRequiredLiveData() {
  Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  for (ValueSpecification requiredSpec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    requirements.add(requiredSpec.getRequirementSpecification());
  }
  return requirements;
}
/**
 * Resolves each live data requirement to its concrete live data specification.
 *
 * @return the live data specifications required by the view
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
  Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
  for (ValueRequirement requirement : getRequiredLiveData()) {
    specifications.add(requirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
  }
  return specifications;
}
/**
 * @return a short description of the view, e.g. {@code View[MyView]}
 */
@Override
public String toString() {
  StringBuilder description = new StringBuilder("View[");
  description.append(getDefinition().getName()).append(']');
  return description.toString();
}
/**
 * Creates a single computation cycle bound to this view at the given valuation time.
 *
 * @param valuationTime the valuation time for the cycle
 * @return the new cycle
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}
KeepBothMethods
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
 * The base implementation of the {@link View} interface.
 * <p>
 * A view pairs a compiled {@link ViewDefinition} with the background recalculation job that
 * repeatedly computes results and publishes them to registered listeners.
 */
public class View implements Lifecycle, LiveDataSnapshotListener {
  private static final Logger s_logger = LoggerFactory.getLogger(View.class);
  // Injected dependencies:
  private final ViewDefinition _definition;
  private final ViewProcessingContext _processingContext;
  // Internal State:
  private ViewEvaluationModel _viewEvaluationModel;
  private Thread _recalculationThread;
  private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
  private ViewRecalculationJob _recalcJob;
  private ViewComputationResultModelImpl _mostRecentResult;
  private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
  private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
  private volatile boolean _populateResultModel = true;

  /**
   * Creates a view for the given definition.
   *
   * @param definition the view definition to compile and compute, not null
   * @param processingContext the shared engine services, not null
   */
  public View(ViewDefinition definition, ViewProcessingContext processingContext) {
    if (definition == null) {
      throw new NullPointerException("Must provide a definition.");
    }
    if (processingContext == null) {
      throw new NullPointerException("Must provide a processing context.");
    }
    _definition = definition;
    _processingContext = processingContext;
  }

  /**
   * @return the definition
   */
  public ViewDefinition getDefinition() {
    return _definition;
  }

  /**
   * @return the processingContext
   */
  public ViewProcessingContext getProcessingContext() {
    return _processingContext;
  }

  /**
   * @return the recalculationThread
   */
  public Thread getRecalculationThread() {
    return _recalculationThread;
  }

  /**
   * @param recalculationThread the recalculationThread to set
   */
  protected void setRecalculationThread(Thread recalculationThread) {
    _recalculationThread = recalculationThread;
  }

  /**
   * @return the calculationState
   */
  public ViewCalculationState getCalculationState() {
    return _calculationState;
  }

  /**
   * @param calculationState the calculationState to set
   */
  protected void setCalculationState(ViewCalculationState calculationState) {
    _calculationState = calculationState;
  }

  /**
   * @return the recalcJob
   */
  public ViewRecalculationJob getRecalcJob() {
    return _recalcJob;
  }

  /**
   * @param recalcJob the recalcJob to set
   */
  protected void setRecalcJob(ViewRecalculationJob recalcJob) {
    _recalcJob = recalcJob;
  }

  /**
   * @return the latest view evaluation model
   */
  public ViewEvaluationModel getViewEvaluationModel() {
    return _viewEvaluationModel;
  }

  /**
   * @return the permission provider supplied by the processing context
   */
  public ViewPermissionProvider getPermissionProvider() {
    return getProcessingContext().getPermissionProvider();
  }

  /**
   * Registers a listener for full computation results, first asserting that the listener's
   * user is permitted to read results.
   *
   * @param resultListener the listener to add, not null
   */
  public void addResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
    _resultListeners.add(resultListener);
  }

  /**
   * Removes a previously registered result listener.
   *
   * @param resultListener the listener to remove, not null
   */
  public void removeResultListener(ComputationResultListener resultListener) {
    ArgumentChecker.notNull(resultListener, "Result listener");
    _resultListeners.remove(resultListener);
  }

  /**
   * Registers a listener for delta results, first asserting that the listener's user is
   * permitted to read results.
   *
   * @param deltaListener the listener to add, not null
   */
  public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
    _deltaListeners.add(deltaListener);
  }

  // NOTE(review): "Lister" is a typo for "Listener", but the name is part of the public
  // interface so it is left unchanged here.
  public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
    ArgumentChecker.notNull(deltaListener, "Delta listener");
    _deltaListeners.remove(deltaListener);
  }

  /**
   * @return the name of the underlying view definition
   */
  public String getName() {
    return getDefinition().getName();
  }

  /**
   * @return every computation target of the compiled view
   */
  public Set<ComputationTargetSpecification> getAllComputationTargets() {
    return getViewEvaluationModel().getAllComputationTargets();
  }

  /**
   * Compiles the view definition and subscribes to the live data it requires. Must be
   * called before {@link #start()}.
   */
  public synchronized void init() {
    OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
    setCalculationState(ViewCalculationState.INITIALIZING);
    _viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
    addLiveDataSubscriptions();
    setCalculationState(ViewCalculationState.NOT_STARTED);
    timer.finished();
  }

  /**
   * Adds live data subscriptions to the view.
   */
  private void addLiveDataSubscriptions() {
    Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
    OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
    LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
    snapshotProvider.addListener(this);
    snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
    timer.finished();
  }

  @Override
  public void subscriptionFailed(ValueRequirement requirement, String msg) {
    // No action required; a failed subscription simply never delivers values.
  }

  @Override
  public void subscriptionStopped(ValueRequirement requirement) {
    // No action required.
  }

  @Override
  public void subscriptionSucceeded(ValueRequirement requirement) {
    // No action required.
  }

  @Override
  public void valueChanged(ValueRequirement value) {
    // NOTE(review): the set holds ValueSpecification but is probed with a ValueRequirement;
    // unless their equals() implementations bridge the two types this test is always
    // false -- confirm against the ValueSpecification implementation.
    Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
    ViewRecalculationJob recalcJob = getRecalcJob();
    if (recalcJob != null && liveDataRequirements.contains(value)) {
      recalcJob.liveDataChanged();
    }
  }

  /**
   * @return the most recently computed result, or null if no cycle has completed yet
   */
  public synchronized ViewComputationResultModel getMostRecentResult() {
    return _mostRecentResult;
  }

  /**
   * @return the portfolio of the compiled view, or null if not yet initialized
   */
  public Portfolio getPortfolio() {
    if (getViewEvaluationModel() == null) {
      return null;
    }
    return getViewEvaluationModel().getPortfolio();
  }

  /**
   * @return the root node of the portfolio, or null if not yet initialized
   */
  public PortfolioNode getPositionRoot() {
    if (getViewEvaluationModel() == null) {
      return null;
    }
    return getViewEvaluationModel().getPortfolio().getRootNode();
  }

  /**
   * Publishes a freshly computed result to all listeners, computing a delta model against
   * the previous result when delta listeners are attached.
   *
   * @param result the result of the cycle just completed
   */
  public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
    // REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
    // of some kind. It's synchronized and blocks the recalc thread, so a slow
    // callback implementation (or just the cost of computing the delta model) will
    // be an unnecessary burden. Have to factor in some type of win there.
    s_logger.debug("Recalculation Performed called.");
    // We swap these first so that in the callback the view is consistent.
    ViewComputationResultModelImpl previousResult = _mostRecentResult;
    _mostRecentResult = result;
    for (ComputationResultListener resultListener : _resultListeners) {
      resultListener.computationResultAvailable(result);
    }
    if (!_deltaListeners.isEmpty() && (previousResult != null)) {
      ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
      for (DeltaComputationResultListener deltaListener : _deltaListeners) {
        deltaListener.deltaResultAvailable(deltaModel);
      }
    }
  }

  /**
   * Builds a delta model describing how the latest result differs from the previous one.
   *
   * @param previousResult the previous cycle's result, not null
   * @param result the latest cycle's result, not null
   * @return the populated delta model
   */
  private ViewDeltaResultModel computeDeltaModel(ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
    ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
    deltaModel.setValuationTime(result.getValuationTime());
    deltaModel.setResultTimestamp(result.getResultTimestamp());
    deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
    deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
    for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
      computeDeltaModel(deltaModel, targetSpec, previousResult, result);
    }
    return deltaModel;
  }

  // Computes the delta entries for one target across every calculation configuration.
  private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
    for (String calcConfigName : result.getCalculationConfigurationNames()) {
      ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
      ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
      computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
    }
  }

  // Computes the delta entries for one (target, calculation configuration) pair.
  private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, String calcConfigName, ViewCalculationResultModel previousCalcModel, ViewCalculationResultModel resultCalcModel) {
    if (previousCalcModel == null) {
      // Everything is new/delta because this is a new calculation context.
      Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
      for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
        deltaModel.addValue(calcConfigName, resultEntry.getValue());
      }
    } else {
      Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
      Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
      if (previousValues == null) {
        // Everything is new/delta because this is a new target.
        for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
          deltaModel.addValue(calcConfigName, resultEntry.getValue());
        }
      } else {
        // Have to individual delta. The comparison is always against the previous cycle's
        // value, so a genuine change is detected, even in the event of gradual creep.
        DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
        for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
          ComputedValue resultValue = resultEntry.getValue();
          ComputedValue previousValue = previousValues.get(resultEntry.getKey());
          if (deltaDefinition.isDelta(previousValue, resultValue)) {
            deltaModel.addValue(calcConfigName, resultEntry.getValue());
          }
        }
      }
    }
  }

  // REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
  // methods.
  @Override
  public synchronized boolean isRunning() {
    return getCalculationState() == ViewCalculationState.RUNNING;
  }

  /**
   * @return true if at least one result or delta listener is registered
   */
  public boolean hasListeners() {
    return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
  }

  /**
   * @return true if each cycle populates (and publishes) a full result model
   */
  public boolean isPopulateResultModel() {
    return _populateResultModel;
  }

  /**
   * @param populateResultModel whether each cycle should populate a full result model
   */
  public void setPopulateResultModel(boolean populateResultModel) {
    _populateResultModel = populateResultModel;
  }

  /**
   * Runs a single computation cycle, valuing at the time of a fresh live data snapshot.
   */
  public synchronized void runOneCycle() {
    long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
    runOneCycle(snapshotTime);
  }

  /**
   * Runs a single computation cycle at the given valuation time, publishing the result to
   * listeners when result-model population is enabled.
   *
   * @param valuationTime the valuation time for the cycle
   */
  public synchronized void runOneCycle(long valuationTime) {
    SingleComputationCycle cycle = createCycle(valuationTime);
    try {
      cycle.prepareInputs();
      cycle.executePlans();
      if (isPopulateResultModel()) {
        cycle.populateResultModel();
        recalculationPerformed(cycle.getResultModel());
      }
    } finally {
      // Release the cycle's resources even if a stage above throws; previously an
      // exception in prepareInputs/executePlans leaked them.
      cycle.releaseResources();
    }
  }

  /**
   * Starts the background recalculation thread. The view must have been initialized and
   * must not already be starting, running or terminating.
   *
   * @throws IllegalStateException if the view is in any state other than NOT_STARTED or TERMINATED
   */
  @Override
  public synchronized void start() {
    s_logger.info("Starting...");
    switch(getCalculationState()) {
      case NOT_STARTED:
      case TERMINATED:
        // Normal state of play. Continue as normal.
        break;
      case TERMINATING:
        // It might be friendlier to wait for termination to complete, but for now throw
        // an exception rather than just waiting or something.
        throw new IllegalStateException("Instructed to start while still terminating.");
      case INITIALIZING:
        // Must have thrown an exception in initialization. Can't start.
        throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
      case NOT_INITIALIZED:
        throw new IllegalStateException("Must call init() before starting.");
      case STARTING:
        // Must have thrown an exception when start() called previously.
        throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
      case RUNNING:
        throw new IllegalStateException("Already running.");
    }
    setCalculationState(ViewCalculationState.STARTING);
    ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
    Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
    setRecalcJob(recalcJob);
    setRecalculationThread(recalcThread);
    setCalculationState(ViewCalculationState.RUNNING);
    recalcThread.start();
    s_logger.info("Started.");
  }

  /**
   * Stops the background recalculation thread, waiting a bounded time for it to exit.
   * A call that observes the view already terminating or terminated is a no-op.
   *
   * @throws IllegalStateException if the view is neither starting nor running
   */
  @Override
  public void stop() {
    s_logger.info("Stopping.....");
    synchronized (this) {
      switch(getCalculationState()) {
        case STARTING:
          // Something went horribly wrong during start, and it must have thrown an exception.
          s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
          break;
        case RUNNING:
          // This is the normal state of play. Do nothing.
          break;
        default:
          throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
      }
    }
    assert getRecalcJob() != null;
    assert getRecalculationThread() != null;
    // The TERMINATED/TERMINATING re-check below makes a concurrent duplicate stop() a no-op.
    synchronized (this) {
      if ((getCalculationState() == ViewCalculationState.TERMINATED) || (getCalculationState() == ViewCalculationState.TERMINATING)) {
        s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
        return;
      }
      setCalculationState(ViewCalculationState.TERMINATING);
    }
    getRecalcJob().terminate();
    if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
      // In this case it might be waiting on a recalculation pass. Interrupt it.
      getRecalculationThread().interrupt();
    }
    // TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
    // how long the job is currently taking to cycle.
    long timeout = 100 * 1000L;
    boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
    if (!successful) {
      s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
    }
    synchronized (this) {
      setCalculationState(ViewCalculationState.TERMINATED);
      setRecalcJob(null);
      setRecalculationThread(null);
    }
    s_logger.info("Stopped.");
  }

  /**
   * Checks that the given user has access to every market data line required to compute the results of the view, and
   * throws an exception if this is not the case.
   *
   * @param user the user
   * @throws ViewPermissionException if any entitlement problems are found
   */
  public void assertAccessToLiveDataRequirements(UserPrincipal user) {
    // NOTE(review): getAllLiveDataRequirements() is assigned to Set<ValueSpecification> in
    // valueChanged() but to Set<ValueRequirement> here -- one of the two cannot be right;
    // confirm against ViewEvaluationModel.
    Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
    Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(requiredValues, getProcessingContext().getSecuritySource());
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
      if (entry.getValue().booleanValue() == false) {
        failures.add(entry.getKey());
      }
    }
    if (!failures.isEmpty()) {
      throw new ViewPermissionException(user + " is not entitled to the output of " + this + " because they do not have permissions to " + failures.get(0));
    }
  }

  /**
   * A user is entitled to view the computation results produced
   * by a view only if they are entitled to every market data
   * line required to compute the results of the view.
   *
   * @param user User who is requesting access
   * @throws ViewAccessException If the user is not entitled
   */
  public void checkIsEntitledToResults(UserPrincipal user) {
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
      if (entry.getValue().booleanValue() == false) {
        failures.add(entry.getKey());
      }
    }
    if (!failures.isEmpty()) {
      throw new ViewAccessException(user + " is not entitled to " + this + " because they do not have permissions to " + failures.get(0));
    }
  }

  /**
   * @return the requirement form of every live data specification the view depends on
   */
  private Set<ValueRequirement> getRequiredLiveData() {
    Set<ValueSpecification> requiredSpecs = getViewEvaluationModel().getAllLiveDataRequirements();
    Set<ValueRequirement> returnValue = new HashSet<ValueRequirement>();
    for (ValueSpecification requiredSpec : requiredSpecs) {
      returnValue.add(requiredSpec.getRequirementSpecification());
    }
    return returnValue;
  }

  /**
   * @return the live data specifications for every requirement of the view
   */
  private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
    Set<LiveDataSpecification> returnValue = new HashSet<LiveDataSpecification>();
    for (ValueRequirement requirement : getRequiredLiveData()) {
      LiveDataSpecification liveDataSpec = requirement.getRequiredLiveData(getProcessingContext().getSecuritySource());
      returnValue.add(liveDataSpec);
    }
    return returnValue;
  }

  @Override
  public String toString() {
    return "View[" + getDefinition().getName() + "]";
  }

  /**
   * Creates a single computation cycle bound to this view at the given valuation time.
   *
   * @param valuationTime the valuation time for the cycle
   * @return the new cycle
   */
  public SingleComputationCycle createCycle(long valuationTime) {
    SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
    return cycle;
  }
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
/**
 * Creates a view for the given definition.
 *
 * @param definition the view definition to compile and compute, not null
 * @param processingContext the shared engine services, not null
 */
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
  if (definition == null) {
    throw new NullPointerException("Must provide a definition.");
  }
  if (processingContext == null) {
    throw new NullPointerException("Must provide a processing context.");
  }
  _definition = definition;
  _processingContext = processingContext;
}
/**
 * @return the definition
 */
public ViewDefinition getDefinition() {
  return _definition;
}
/**
 * @return the processingContext
 */
public ViewProcessingContext getProcessingContext() {
  return _processingContext;
}
/**
 * @return the recalculationThread
 */
public Thread getRecalculationThread() {
  return _recalculationThread;
}
/**
 * @param recalculationThread the recalculationThread to set
 */
protected void setRecalculationThread(Thread recalculationThread) {
  _recalculationThread = recalculationThread;
}
/**
 * @return the calculationState
 */
public ViewCalculationState getCalculationState() {
  return _calculationState;
}
/**
 * @param calculationState the calculationState to set
 */
protected void setCalculationState(ViewCalculationState calculationState) {
  _calculationState = calculationState;
}
/**
 * @return the recalcJob
 */
public ViewRecalculationJob getRecalcJob() {
  return _recalcJob;
}
/**
 * @param recalcJob the recalcJob to set
 */
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
  _recalcJob = recalcJob;
}
/**
 * @return the latest view evaluation model
 */
public ViewEvaluationModel getViewEvaluationModel() {
  return _viewEvaluationModel;
}
/**
 * @return the permission provider supplied by the processing context
 */
public ViewPermissionProvider getPermissionProvider() {
  return getProcessingContext().getPermissionProvider();
}
/**
 * Registers a listener for full computation results; the listener's user must have
 * READ_RESULTS permission on this view.
 *
 * @param resultListener the listener to add, not null
 */
public void addResultListener(ComputationResultListener resultListener) {
  ArgumentChecker.notNull(resultListener, "Result listener");
  getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
  _resultListeners.add(resultListener);
}
/**
 * Removes a previously registered result listener.
 *
 * @param resultListener the listener to remove, not null
 */
public void removeResultListener(ComputationResultListener resultListener) {
  ArgumentChecker.notNull(resultListener, "Result listener");
  _resultListeners.remove(resultListener);
}
/**
 * Registers a listener for delta results; the listener's user must have READ_RESULTS
 * permission on this view.
 *
 * @param deltaListener the listener to add, not null
 */
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
  ArgumentChecker.notNull(deltaListener, "Delta listener");
  getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
  _deltaListeners.add(deltaListener);
}
// NOTE(review): "Lister" is a typo for "Listener", but the name is part of the public
// interface so it is left unchanged.
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
  ArgumentChecker.notNull(deltaListener, "Delta listener");
  _deltaListeners.remove(deltaListener);
}
/**
 * @return the name of the underlying view definition
 */
public String getName() {
  return getDefinition().getName();
}
/**
 * @return every computation target of the compiled view
 */
public Set<ComputationTargetSpecification> getAllComputationTargets() {
  return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
 * Adds live data subscriptions to the view: registers this view as a snapshot listener
 * and subscribes, as the definition's live data user, to every required live data line.
 */
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
/**
 * Callback for a failed live data subscription. Previously a silent no-op; a failed
 * subscription means this view will never receive ticks for the requirement, so at
 * minimum the failure is now recorded in the log.
 *
 * @param requirement the requirement whose subscription failed
 * @param msg the provider's failure message
 */
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
    s_logger.warn("Live data subscription failed for {}: {}", requirement, msg);
}
/**
 * Callback for a stopped live data subscription. Intentionally empty: the view takes
 * no action when a subscription stops.
 */
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
/**
 * Callback for a successful live data subscription. Intentionally empty: the view only
 * reacts to value changes, not to subscription establishment.
 */
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
/**
 * Live data tick callback. Wakes the recalculation job (if running) when the changed
 * value is one of this view's live data requirements.
 */
@Override
public void valueChanged(ValueRequirement value) {
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
// NOTE(review): the set holds ValueSpecifications while 'value' is a ValueRequirement;
// this contains() check only matches if equals() is defined across the two types -- verify.
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
/**
 * @return the most recently computed result model, or null if no cycle has completed yet
 */
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
/**
 * Returns the portfolio from the current evaluation model.
 *
 * @return the portfolio, or null if the view has not been initialized
 */
public Portfolio getPortfolio() {
    final ViewEvaluationModel evaluationModel = getViewEvaluationModel();
    return (evaluationModel == null) ? null : evaluationModel.getPortfolio();
}
/**
 * Returns the root node of the portfolio from the current evaluation model.
 *
 * @return the root portfolio node, or null if the view has not been initialized
 */
public PortfolioNode getPositionRoot() {
    final ViewEvaluationModel evaluationModel = getViewEvaluationModel();
    return (evaluationModel == null) ? null : evaluationModel.getPortfolio().getRootNode();
}
/**
 * Callback invoked when a computation cycle's results are ready. Publishes the full
 * result to all result listeners and, if any delta listeners are registered and a
 * previous result exists, computes and publishes a delta model as well.
 *
 * @param result the newly computed result model, not null
 */
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
 * Builds a delta model describing how {@code result} differs from {@code previousResult},
 * walking every target in the new result.
 *
 * @param previousResult the previous cycle's results, not null
 * @param result the latest cycle's results, not null
 * @return the populated delta model
 */
private ViewDeltaResultModel computeDeltaModel(ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
/**
 * Computes the delta contribution of a single target across every calculation
 * configuration present in the latest result.
 */
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, ViewComputationResultModelImpl previousResult, ViewComputationResultModelImpl result) {
    for (String configName : result.getCalculationConfigurationNames()) {
        ViewCalculationResultModel currentModel = result.getCalculationResult(configName);
        ViewCalculationResultModel priorModel = previousResult.getCalculationResult(configName);
        computeDeltaModel(deltaModel, targetSpec, configName, priorModel, currentModel);
    }
}
/**
 * Computes the delta contribution of a single target within one calculation configuration,
 * adding any new or changed values to the delta model. A missing previous configuration or
 * missing previous target means everything in the new result counts as a delta.
 *
 * @param deltaModel the model being populated
 * @param targetSpec the computation target being compared
 * @param calcConfigName the calculation configuration name
 * @param previousCalcModel the previous cycle's results for the configuration, may be null
 * @param resultCalcModel the latest cycle's results for the configuration
 */
private void computeDeltaModel(ViewDeltaResultModelImpl deltaModel, ComputationTargetSpecification targetSpec, String calcConfigName, ViewCalculationResultModel previousCalcModel, ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to compute individual deltas.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/**
 * @return true if the view's calculation state is RUNNING
 */
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
/**
 * Checks whether anyone is listening for results of any kind.
 *
 * @return true if at least one full-result or delta listener is registered
 */
public boolean hasListeners() {
    return !(_resultListeners.isEmpty() && _deltaListeners.isEmpty());
}
/**
 * @return true if computation cycles should populate (and publish) a result model
 */
public boolean isPopulateResultModel() {
return _populateResultModel;
}
/**
 * Controls whether computation cycles populate (and publish) a result model.
 *
 * @param populateResultModel true to populate results on each cycle
 */
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
/**
 * Takes a fresh live data snapshot and runs a single computation cycle at that
 * snapshot time.
 */
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
/**
 * Runs a single computation cycle at the given valuation time, publishing results to
 * listeners if result model population is enabled.
 * <p>
 * The cycle's resources are released in a finally block so a failure during input
 * preparation or plan execution cannot leak them (previously releaseResources() was
 * skipped on any exception).
 *
 * @param valuationTime the valuation time for the cycle
 */
public synchronized void runOneCycle(long valuationTime) {
    SingleComputationCycle cycle = createCycle(valuationTime);
    try {
        cycle.prepareInputs();
        cycle.executePlans();
        if (isPopulateResultModel()) {
            cycle.populateResultModel();
            recalculationPerformed(cycle.getResultModel());
        }
    } finally {
        cycle.releaseResources();
    }
}
/**
 * Starts the background recalculation job. Only legal from the NOT_STARTED or
 * TERMINATED states; every other state throws to report a misuse or an earlier
 * lifecycle failure.
 *
 * @throws IllegalStateException if the view cannot be started from its current state
 */
@Override
public synchronized void start() {
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// In the middle of termination. This is really bad, as we're now holding the lock
// that will allow termination to complete successfully. Therefore, we have to throw
// an exception rather than just waiting or something.
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
/**
 * Stops the background recalculation job: terminates the job, interrupts the recalc
 * thread if it is sleeping between cycles, and waits (bounded) for it to die.
 * Duplicate stop() requests are detected and ignored.
 *
 * @throws IllegalStateException if the view is neither starting nor running
 */
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
// NOTE(review): state can change between the two synchronized blocks; the re-check
// below handles a concurrent stop() that got there first.
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED) || (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
 * Checks that the given user has access to every market data line required to compute the
 * results of the view, and throws an exception if this is not the case.
 *
 * @param user the user, not null
 * @throws ViewPermissionException if any entitlement problems are found; the message lists
 *         every market data line the user lacks, not just the first
 */
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
    Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
    Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(requiredValues, getProcessingContext().getSecuritySource());
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
        if (!entry.getValue().booleanValue()) {
            failures.add(entry.getKey());
        }
    }
    if (!failures.isEmpty()) {
        // Report every missing permission, not just the first, so one round trip is
        // enough to diagnose all entitlement problems.
        throw new ViewPermissionException(user + " is not entitled to the output of " + this + " because they do not have permissions to " + failures);
    }
}
/**
 * Checks that the given user is entitled to the computation results produced by this view.
 * A user is entitled to the results only if they are entitled to every market data line
 * required to compute them.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
public void checkIsEntitledToResults(UserPrincipal user) {
    s_logger.info("Checking that {} is entitled to the results of {}", user, this);
    Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
    Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
    ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
    for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
        if (!entry.getValue().booleanValue()) {
            failures.add(entry.getKey());
        }
    }
    if (!failures.isEmpty()) {
        // Report every missing permission, not just the first, consistent with
        // assertAccessToLiveDataRequirements.
        throw new ViewAccessException(user + " is not entitled to " + this + " because they do not have permissions to " + failures);
    }
}
/**
 * Derives the set of live data requirements from the evaluation model's specifications.
 *
 * @return the requirement behind each required live data specification
 */
private Set<ValueRequirement> getRequiredLiveData() {
    Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
    for (ValueSpecification specification : getViewEvaluationModel().getAllLiveDataRequirements()) {
        requirements.add(specification.getRequirementSpecification());
    }
    return requirements;
}
/**
 * Resolves each required live data requirement into its live data specification via the
 * processing context's security source.
 *
 * @return the distinct set of required live data specifications
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
    Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
    for (ValueRequirement requirement : getRequiredLiveData()) {
        specifications.add(requirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
    }
    return specifications;
}
/**
 * @return a short description of this view, keyed by its definition name
 */
@Override
public String toString() {
    StringBuilder buffer = new StringBuilder("View[");
    buffer.append(getDefinition().getName());
    buffer.append(']');
    return buffer.toString();
}
/**
 * Creates (but does not run) a computation cycle bound to this view.
 *
 * @param valuationTime the valuation time for the cycle
 * @return the new cycle
 */
public SingleComputationCycle createCycle(long valuationTime) {
    return new SingleComputationCycle(this, valuationTime);
}
}
Safe
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
/**
 * Creates a view for the given definition, backed by the given processing services.
 *
 * @param definition the view definition, not null
 * @param processingContext the processing services the view runs against, not null
 */
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
@Override
public void valueChanged(ValueRequirement value) {
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
@Override
public synchronized void start() {
s_logger.info("Starting...");
switch(getCalculationState()) {
case NOT_STARTED:
case TERMINATED:
// Normal state of play. Continue as normal.
break;
case TERMINATING:
// In the middle of termination. This is really bad, as we're now holding the lock
// that will allow termination to complete successfully. Therefore, we have to throw
// an exception rather than just waiting or something.
throw new IllegalStateException("Instructed to start while still terminating.");
case INITIALIZING:
// Must have thrown an exception in initialization. Can't start.
throw new IllegalStateException("Initialization didn't completely successfully. Can't start.");
case NOT_INITIALIZED:
throw new IllegalStateException("Must call init() before starting.");
case STARTING:
// Must have thrown an exception when start() called previously.
throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
case RUNNING:
throw new IllegalStateException("Already running.");
}
setCalculationState(ViewCalculationState.STARTING);
ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
setRecalcJob(recalcJob);
setRecalculationThread(recalcThread);
setCalculationState(ViewCalculationState.RUNNING);
recalcThread.start();
s_logger.info("Started.");
}
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
 * Checks that the given user is entitled to the computation results produced by this view.
 * A user is entitled to the results only if they are entitled to every market data line
 * required to compute them.
 *
 * @param user User who is requesting access
 * @throws ViewAccessException If the user is not entitled
 */
<<<<<<< MINE
public void checkIsEntitledToResults(UserPrincipal user) {
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewAccessException(user + " is not entitled to " + this +
" because they do not have permissions to " + failures.get(0));
}
}
=======
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
requiredValues,
getProcessingContext().getSecuritySource());
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewPermissionException(user + " is not entitled to the output of " + this +
" because they do not have permissions to " + failures.get(0));
}
}
>>>>>>> YOURS
/**
 * Collects the value requirement behind every live data specification of the
 * current evaluation model.
 */
private Set<ValueRequirement> getRequiredLiveData() {
  Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  for (ValueSpecification spec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    requirements.add(spec.getRequirementSpecification());
  }
  return requirements;
}
/**
 * Maps each required value requirement to its live data specification, using
 * the security source from the processing context for resolution.
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
  Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
  for (ValueRequirement requirement : getRequiredLiveData()) {
    specifications.add(requirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
  }
  return specifications;
}
/**
 * @return a short identifier of the form {@code View[<definition name>]}
 */
@Override
public String toString() {
  StringBuilder buffer = new StringBuilder("View[");
  buffer.append(getDefinition().getName()).append(']');
  return buffer.toString();
}
/**
 * Creates a single computation cycle for this view at the given valuation time.
 *
 * @param valuationTime the valuation time for the cycle
 * @return a freshly constructed cycle bound to this view
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
/**
 * Creates a view from its static definition and the shared processing context.
 * Both arguments are required.
 *
 * @param definition the static view definition, not null
 * @param processingContext shared engine services used by this view, not null
 */
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
 * @return the static view definition supplied at construction
 */
public ViewDefinition getDefinition() {
return _definition;
}
/**
 * @return the shared processing context supplied at construction
 */
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
 * @return the thread running the recalculation job, or null when not running
 */
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
 * @param recalculationThread the recalculationThread to set
 */
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
 * @return the current lifecycle state of the view's calculation
 */
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
 * @param calculationState the calculationState to set
 */
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
 * @return the active recalculation job, or null when the view is not running
 */
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
 * @param recalcJob the recalcJob to set
 */
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
 * @return the latest compiled evaluation model, set by {@link #init()};
 *         null before initialization
 */
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
/**
 * @return the permission provider taken from the processing context
 */
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
/**
 * Registers a listener for full computation results. The listener's user must
 * pass the READ_RESULTS permission check before being added.
 *
 * @param resultListener the listener to add, not null
 */
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
/**
 * Removes a previously registered result listener; no permission check needed.
 *
 * @param resultListener the listener to remove, not null
 */
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
/**
 * Registers a listener for delta (changed-values-only) results, subject to the
 * same READ_RESULTS permission check as full results.
 *
 * @param deltaListener the listener to add, not null
 */
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
/**
 * Removes a previously registered delta listener.
 * NOTE(review): the method name is missing the trailing 'n' in "Listener";
 * renaming would break callers, so it is documented rather than changed here.
 *
 * @param deltaListener the listener to remove, not null
 */
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
/**
 * @return the name taken from the view definition
 */
public String getName() {
return getDefinition().getName();
}
/**
 * @return all computation targets of the current evaluation model
 */
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
/**
 * Compiles the view definition into an evaluation model and subscribes to the
 * live data it requires. Must complete before {@link #start()} may be called.
 */
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
// Compilation produces the evaluation model that drives all later cycles.
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
 * Registers this view as a live data listener and subscribes to every live
 * data requirement of the compiled evaluation model.
 */
private void addLiveDataSubscriptions() {
  Set<ValueRequirement> requirements = getRequiredLiveData();
  OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", requirements.size(), getDefinition().getPortfolioId());
  LiveDataSnapshotProvider provider = getProcessingContext().getLiveDataSnapshotProvider();
  provider.addListener(this);
  provider.addSubscription(getDefinition().getLiveDataUser(), requirements);
  timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
// No-op: subscription failures are silently ignored here.
// NOTE(review): consider at least logging the failure — confirm intent.
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
// No-op: nothing to clean up when a subscription stops.
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
// No-op: successful subscriptions require no bookkeeping in this view.
}
/**
 * Live data tick callback: triggers a recalculation when the changed value is
 * one of this view's live data requirements and the view is running.
 *
 * @param value the requirement whose underlying market data changed
 */
@Override
public void valueChanged(ValueRequirement value) {
  ViewRecalculationJob recalcJob = getRecalcJob();
  if (recalcJob == null) {
    // Not running; nothing to notify.
    return;
  }
  // BUG FIX: the previous code asked a Set<ValueSpecification> whether it
  // contained a ValueRequirement; that cross-type contains() could not match
  // (see getRequiredLiveData(), which converts specs to requirements via
  // getRequirementSpecification()). Compare against each specification's
  // requirement instead.
  for (ValueSpecification requiredSpec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    if (requiredSpec.getRequirementSpecification().equals(value)) {
      recalcJob.liveDataChanged();
      return;
    }
  }
}
/**
 * @return the most recent full computation result, or null before the first cycle
 */
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
/**
 * @return the portfolio of the current evaluation model, or null before init()
 */
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
/**
 * @return the root node of the portfolio, or null before init()
 */
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
/**
 * Callback invoked after each computation cycle. Stores the result, publishes
 * it to all result listeners and, when delta listeners are registered and a
 * previous result exists, computes and publishes a delta model.
 *
 * @param result the newly computed result model
 */
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
// Deltas are only computed when someone listens and a baseline result exists.
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
 * Builds a delta result model containing only the values that changed between
 * two consecutive computation results, covering every target of the latest
 * result across all calculation configurations.
 *
 * @param previousResult the result of the previous cycle, not null
 * @param result the result of the latest cycle, not null
 * @return the populated delta model
 */
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
/**
 * Fans the delta computation for one target out to every calculation
 * configuration present in the latest result.
 */
private void computeDeltaModel(
    ViewDeltaResultModelImpl deltaModel,
    ComputationTargetSpecification targetSpec,
    ViewComputationResultModelImpl previousResult,
    ViewComputationResultModelImpl result) {
  for (String configName : result.getCalculationConfigurationNames()) {
    ViewCalculationResultModel current = result.getCalculationResult(configName);
    ViewCalculationResultModel previous = previousResult.getCalculationResult(configName);
    computeDeltaModel(deltaModel, targetSpec, configName, previous, current);
  }
}
/**
 * Computes the delta for a single (target, calculation configuration) pair and
 * adds the changed values to the delta model. When no previous results exist
 * for the configuration or target, every value counts as changed.
 *
 * @param deltaModel collector for changed values
 * @param targetSpec the computation target being compared
 * @param calcConfigName the calculation configuration name
 * @param previousCalcModel previous cycle's results for the configuration, may be null
 * @param resultCalcModel latest cycle's results for the configuration
 */
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Both cycles have values for this target: compare them individually.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/**
 * @return true when the background recalculation job is running
 */
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
/**
 * @return true when at least one result or delta listener is registered
 */
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
/**
 * @return whether computation cycles populate a result model for listeners
 */
public boolean isPopulateResultModel() {
return _populateResultModel;
}
/**
 * @param populateResultModel whether cycles should populate a result model
 */
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
/**
 * Takes a fresh live data snapshot and runs one computation cycle at that time.
 */
public synchronized void runOneCycle() {
  runOneCycle(getProcessingContext().getLiveDataSnapshotProvider().snapshot());
}
/**
 * Runs a single computation cycle at the given valuation time, populating the
 * result model and notifying listeners when enabled.
 *
 * @param valuationTime the valuation time for the cycle
 */
public synchronized void runOneCycle(long valuationTime) {
  SingleComputationCycle cycle = createCycle(valuationTime);
  try {
    cycle.prepareInputs();
    cycle.executePlans();
    if (isPopulateResultModel()) {
      cycle.populateResultModel();
      recalculationPerformed(cycle.getResultModel());
    }
  } finally {
    // FIX: previously releaseResources() was skipped when any of the calls
    // above threw, leaking the cycle's resources. Release unconditionally.
    cycle.releaseResources();
  }
}
/**
 * Starts the background recalculation job. {@link #init()} must have completed
 * first, and the view must not already be running or terminating.
 *
 * @throws IllegalStateException if the view is in any state other than
 *         NOT_STARTED or TERMINATED
 */
@Override
public synchronized void start() {
  s_logger.info("Starting...");
  switch (getCalculationState()) {
    case NOT_STARTED:
    case TERMINATED:
      // Normal state of play. Continue as normal.
      break;
    case TERMINATING:
      // In the middle of termination. This is really bad, as we're now holding the lock
      // that will allow termination to complete successfully. Therefore, we have to throw
      // an exception rather than just waiting or something.
      throw new IllegalStateException("Instructed to start while still terminating.");
    case INITIALIZING:
      // Must have thrown an exception in initialization. Can't start.
      // FIX: corrected the garbled message ("didn't completely successfully").
      throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
    case NOT_INITIALIZED:
      throw new IllegalStateException("Must call init() before starting.");
    case STARTING:
      // Must have thrown an exception when start() called previously.
      throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
    case RUNNING:
      throw new IllegalStateException("Already running.");
  }
  setCalculationState(ViewCalculationState.STARTING);
  ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
  Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
  setRecalcJob(recalcJob);
  setRecalculationThread(recalcThread);
  // Mark RUNNING before the thread starts so the job observes a consistent state.
  setCalculationState(ViewCalculationState.RUNNING);
  recalcThread.start();
  s_logger.info("Started.");
}
/**
 * Stops the recalculation job and waits (with a bounded timeout) for its
 * thread to terminate. Safe to call concurrently or repeatedly; redundant
 * invocations return without effect.
 */
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
// NOTE(review): the lock is released between the state check above and the
// transition below, so two concurrent stop() calls can both pass the check;
// the TERMINATING/TERMINATED re-check below is what resolves that race.
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* static contents of the view. false otherwise.
*/
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
<<<<<<< MINE
public void checkIsEntitledToResults(UserPrincipal user) {
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewAccessException(user + " is not entitled to " + this +
" because they do not have permissions to " + failures.get(0));
}
}
=======
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
requiredValues,
getProcessingContext().getSecuritySource());
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewPermissionException(user + " is not entitled to the output of " + this +
" because they do not have permissions to " + failures.get(0));
}
}
>>>>>>> YOURS
/**
 * Collects the value requirement behind every live data specification of the
 * current evaluation model.
 */
private Set<ValueRequirement> getRequiredLiveData() {
  Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  for (ValueSpecification spec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    requirements.add(spec.getRequirementSpecification());
  }
  return requirements;
}
/**
 * Maps each required value requirement to its live data specification, using
 * the security source from the processing context for resolution.
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
  Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
  for (ValueRequirement requirement : getRequiredLiveData()) {
    specifications.add(requirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
  }
  return specifications;
}
/**
 * @return a short identifier of the form {@code View[<definition name>]}
 */
@Override
public String toString() {
  StringBuilder buffer = new StringBuilder("View[");
  buffer.append(getDefinition().getName()).append(']');
  return buffer.toString();
}
/**
 * Creates a single computation cycle for this view at the given valuation time.
 *
 * @param valuationTime the valuation time for the cycle
 * @return a freshly constructed cycle bound to this view
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}
/* Unstructured */
/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
/**
 * Live data tick callback: triggers a recalculation when the changed value is
 * one of this view's live data requirements and the view is running.
 *
 * @param value the requirement whose underlying market data changed
 */
@Override
public void valueChanged(ValueRequirement value) {
  ViewRecalculationJob recalcJob = getRecalcJob();
  if (recalcJob == null) {
    // Not running; nothing to notify.
    return;
  }
  // BUG FIX: the previous code asked a Set<ValueSpecification> whether it
  // contained a ValueRequirement; that cross-type contains() could not match
  // (see getRequiredLiveData(), which converts specs to requirements via
  // getRequirementSpecification()). Compare against each specification's
  // requirement instead.
  for (ValueSpecification requiredSpec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    if (requiredSpec.getRequirementSpecification().equals(value)) {
      recalcJob.liveDataChanged();
      return;
    }
  }
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
/**
 * Reports whether this view's recalculation loop is currently active.
 *
 * @return true if and only if the calculation state is {@code RUNNING}
 */
@Override
public synchronized boolean isRunning() {
  final ViewCalculationState currentState = getCalculationState();
  return ViewCalculationState.RUNNING == currentState;
}
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
/**
 * Runs a single computation cycle using a fresh live data snapshot taken now.
 * Delegates to {@link #runOneCycle(long)} with the snapshot's valuation time.
 */
public synchronized void runOneCycle() {
  runOneCycle(getProcessingContext().getLiveDataSnapshotProvider().snapshot());
}
/**
 * Runs a single computation cycle at the given valuation time, publishing results to
 * listeners when result-model population is enabled.
 *
 * @param valuationTime the valuation time for the cycle
 */
public synchronized void runOneCycle(long valuationTime) {
  SingleComputationCycle cycle = createCycle(valuationTime);
  cycle.prepareInputs();
  try {
    cycle.executePlans();
    if (isPopulateResultModel()) {
      cycle.populateResultModel();
      recalculationPerformed(cycle.getResultModel());
    }
  } finally {
    // Previously a failure in executePlans()/populateResultModel() skipped this call,
    // leaking whatever the cycle had acquired. Release unconditionally.
    cycle.releaseResources();
  }
}
/**
 * Starts the view's background recalculation thread.
 * <p>
 * Only legal from the NOT_STARTED or TERMINATED states; every other state throws
 * {@link IllegalStateException} describing why a start is impossible.
 *
 * @throws IllegalStateException if the view is not in a startable state
 */
@Override
public synchronized void start() {
  s_logger.info("Starting...");
  switch (getCalculationState()) {
    case NOT_STARTED:
    case TERMINATED:
      // Normal state of play. Continue as normal.
      break;
    case TERMINATING:
      // In the middle of termination. This is really bad, as we're now holding the lock
      // that will allow termination to complete successfully. Therefore, we have to throw
      // an exception rather than just waiting or something.
      throw new IllegalStateException("Instructed to start while still terminating.");
    case INITIALIZING:
      // Must have thrown an exception in initialization. Can't start.
      throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
    case NOT_INITIALIZED:
      throw new IllegalStateException("Must call init() before starting.");
    case STARTING:
      // Must have thrown an exception when start() called previously.
      throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
    case RUNNING:
      throw new IllegalStateException("Already running.");
  }
  setCalculationState(ViewCalculationState.STARTING);
  ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
  Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
  setRecalcJob(recalcJob);
  setRecalculationThread(recalcThread);
  // Flip to RUNNING before the thread starts so the job sees a consistent state.
  setCalculationState(ViewCalculationState.RUNNING);
  recalcThread.start();
  s_logger.info("Started.");
}
/**
 * Stops the view's background recalculation thread and waits (bounded) for it to exit.
 * <p>
 * Locking is deliberately split into three phases: a state validation under the monitor,
 * a transition to TERMINATING under the monitor, and an unlocked join so the recalc
 * thread can finish without deadlocking on this view's lock.
 * <p>
 * NOTE(review): between the first and second synchronized blocks the state can change
 * (check-then-act gap) — presumably tolerated because the second block re-checks for
 * TERMINATED/TERMINATING; confirm no other transitions are possible here.
 *
 * @throws IllegalStateException if the view is neither STARTING nor RUNNING
 */
@Override
public void stop() {
s_logger.info("Stopping.....");
// Phase 1: validate that stopping makes sense in the current state.
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
// Phase 2: claim the termination; a concurrent stop() loses the race and returns early.
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
// Phase 3: outside the lock, tell the job to finish and wake it if it is sleeping
// between recalculation passes.
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
// Final bookkeeping: mark terminated and drop references so a later start() can rebuild.
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Checks that the given user has access to every market data line required to compute the results of the view, and
* throws an exception if this is not the case.
*
* @param user the user
* @throws ViewPermissionException if any entitlement problems are found
*/
<<<<<<< MINE
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
requiredValues,
getProcessingContext().getSecuritySource());
=======
public boolean isEntitledToAccess(UserPrincipal user) {
try {
checkIsEntitledToAccess(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToAccess(UserPrincipal user) {
// not done yet
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
public boolean isEntitledToResults(UserPrincipal user) {
try {
checkIsEntitledToResults(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToResults(UserPrincipal user) {
>>>>>>> YOURS
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewPermissionException(user + " is not entitled to the output of " + this +
" because they do not have permissions to " + failures.get(0));
}
}
/**
 * Collects the value requirements behind every live data specification the
 * evaluation model declares.
 *
 * @return the set of live data value requirements (possibly empty)
 */
private Set<ValueRequirement> getRequiredLiveData() {
  final Set<ValueRequirement> requirements = new HashSet<ValueRequirement>();
  for (final ValueSpecification liveDataSpec : getViewEvaluationModel().getAllLiveDataRequirements()) {
    requirements.add(liveDataSpec.getRequirementSpecification());
  }
  return requirements;
}
/**
 * Resolves every required live data value requirement to its concrete
 * live data specification via the security source.
 *
 * @return the distinct live data specifications the view depends on
 */
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
  final Set<LiveDataSpecification> specifications = new HashSet<LiveDataSpecification>();
  for (final ValueRequirement valueRequirement : getRequiredLiveData()) {
    specifications.add(valueRequirement.getRequiredLiveData(getProcessingContext().getSecuritySource()));
  }
  return specifications;
}
/**
 * Returns a short human-readable identifier of the form {@code View[<definition name>]}.
 */
@Override
public String toString() {
  final StringBuilder text = new StringBuilder("View[");
  text.append(getDefinition().getName());
  return text.append(']').toString();
}
/**
 * Creates a fresh single computation cycle for this view at the given valuation time.
 *
 * @param valuationTime the valuation time for the new cycle
 * @return a new, unexecuted cycle bound to this view
 */
public SingleComputationCycle createCycle(long valuationTime) {
  return new SingleComputationCycle(this, valuationTime);
}
}/**
* Copyright (C) 2009 - 2009 by OpenGamma Inc.
*
* Please see distribution for license.
*/
package com.opengamma.engine.view;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.Lifecycle;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.livedata.LiveDataSnapshotListener;
import com.opengamma.engine.livedata.LiveDataSnapshotProvider;
import com.opengamma.engine.position.Portfolio;
import com.opengamma.engine.position.PortfolioNode;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.calc.SingleComputationCycle;
import com.opengamma.engine.view.calc.ViewRecalculationJob;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.compilation.ViewEvaluationModel;
import com.opengamma.engine.view.permission.ViewPermission;
import com.opengamma.engine.view.permission.ViewPermissionException;
import com.opengamma.engine.view.permission.ViewPermissionProvider;
import com.opengamma.livedata.LiveDataSpecification;
import com.opengamma.livedata.msg.UserPrincipal;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.ThreadUtil;
import com.opengamma.util.monitor.OperationTimer;
/**
* The base implementation of the {@link View} interface.
*/
public class View implements Lifecycle, LiveDataSnapshotListener {
private static final Logger s_logger = LoggerFactory.getLogger(View.class);
// Injected dependencies:
private final ViewDefinition _definition;
private final ViewProcessingContext _processingContext;
// Internal State:
private ViewEvaluationModel _viewEvaluationModel;
private Thread _recalculationThread;
private ViewCalculationState _calculationState = ViewCalculationState.NOT_INITIALIZED;
private ViewRecalculationJob _recalcJob;
private ViewComputationResultModelImpl _mostRecentResult;
private final Set<ComputationResultListener> _resultListeners = new CopyOnWriteArraySet<ComputationResultListener>();
private final Set<DeltaComputationResultListener> _deltaListeners = new CopyOnWriteArraySet<DeltaComputationResultListener>();
private volatile boolean _populateResultModel = true;
public View(ViewDefinition definition, ViewProcessingContext processingContext) {
if (definition == null) {
throw new NullPointerException("Must provide a definition.");
}
if (processingContext == null) {
throw new NullPointerException("Must provide a processing context.");
}
_definition = definition;
_processingContext = processingContext;
}
/**
* @return the definition
*/
public ViewDefinition getDefinition() {
return _definition;
}
/**
* @return the processingContext
*/
public ViewProcessingContext getProcessingContext() {
return _processingContext;
}
/**
* @return the recalculationThread
*/
public Thread getRecalculationThread() {
return _recalculationThread;
}
/**
* @param recalculationThread the recalculationThread to set
*/
protected void setRecalculationThread(Thread recalculationThread) {
_recalculationThread = recalculationThread;
}
/**
* @return the calculationState
*/
public ViewCalculationState getCalculationState() {
return _calculationState;
}
/**
* @param calculationState the calculationState to set
*/
protected void setCalculationState(ViewCalculationState calculationState) {
_calculationState = calculationState;
}
/**
* @return the recalcJob
*/
public ViewRecalculationJob getRecalcJob() {
return _recalcJob;
}
/**
* @param recalcJob the recalcJob to set
*/
protected void setRecalcJob(ViewRecalculationJob recalcJob) {
_recalcJob = recalcJob;
}
/**
* @return the latest view evaluation model
*/
public ViewEvaluationModel getViewEvaluationModel() {
return _viewEvaluationModel;
}
public ViewPermissionProvider getPermissionProvider() {
return getProcessingContext().getPermissionProvider();
}
public void addResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, resultListener.getUser(), this);
_resultListeners.add(resultListener);
}
public void removeResultListener(ComputationResultListener resultListener) {
ArgumentChecker.notNull(resultListener, "Result listener");
_resultListeners.remove(resultListener);
}
public void addDeltaResultListener(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
getPermissionProvider().assertPermission(ViewPermission.READ_RESULTS, deltaListener.getUser(), this);
_deltaListeners.add(deltaListener);
}
public void removeDeltaResultLister(DeltaComputationResultListener deltaListener) {
ArgumentChecker.notNull(deltaListener, "Delta listener");
_deltaListeners.remove(deltaListener);
}
public String getName() {
return getDefinition().getName();
}
public Set<ComputationTargetSpecification> getAllComputationTargets() {
return getViewEvaluationModel().getAllComputationTargets();
}
public synchronized void init() {
OperationTimer timer = new OperationTimer(s_logger, "Initializing view {}", getDefinition().getName());
setCalculationState(ViewCalculationState.INITIALIZING);
_viewEvaluationModel = ViewDefinitionCompiler.compile(getDefinition(), getProcessingContext().asCompilationServices());
addLiveDataSubscriptions();
setCalculationState(ViewCalculationState.NOT_STARTED);
timer.finished();
}
/**
* Adds live data subscriptions to the view.
*/
private void addLiveDataSubscriptions() {
Set<ValueRequirement> liveDataRequirements = getRequiredLiveData();
OperationTimer timer = new OperationTimer(s_logger, "Adding {} live data subscriptions for portfolio {}", liveDataRequirements.size(), getDefinition().getPortfolioId());
LiveDataSnapshotProvider snapshotProvider = getProcessingContext().getLiveDataSnapshotProvider();
snapshotProvider.addListener(this);
snapshotProvider.addSubscription(getDefinition().getLiveDataUser(), liveDataRequirements);
timer.finished();
}
@Override
public void subscriptionFailed(ValueRequirement requirement, String msg) {
}
@Override
public void subscriptionStopped(ValueRequirement requirement) {
}
@Override
public void subscriptionSucceeded(ValueRequirement requirement) {
}
/**
 * Live data callback: signals the recalculation job when a required input value changes.
 * <p>
 * NOTE(review): {@code liveDataRequirements} is a {@code Set<ValueSpecification>} but
 * {@code contains} is called with a {@code ValueRequirement}. Unless
 * {@code ValueSpecification.equals} accepts a {@code ValueRequirement}, the check is
 * always false and no recalculation is ever triggered — confirm against those classes.
 *
 * @param value the requirement whose underlying live data value changed
 */
@Override
public void valueChanged(ValueRequirement value) {
Set<ValueSpecification> liveDataRequirements = getViewEvaluationModel().getAllLiveDataRequirements();
ViewRecalculationJob recalcJob = getRecalcJob();
// Only nudge the job when it exists (view running) and the change is relevant.
if (recalcJob != null && liveDataRequirements.contains(value)) {
recalcJob.liveDataChanged();
}
}
public synchronized ViewComputationResultModel getMostRecentResult() {
return _mostRecentResult;
}
public Portfolio getPortfolio() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio();
}
public PortfolioNode getPositionRoot() {
if (getViewEvaluationModel() == null) {
return null;
}
return getViewEvaluationModel().getPortfolio().getRootNode();
}
public synchronized void recalculationPerformed(ViewComputationResultModelImpl result) {
// REVIEW kirk 2009-09-24 -- We need to consider this method for background execution
// of some kind. It's synchronized and blocks the recalc thread, so a slow
// callback implementation (or just the cost of computing the delta model) will
// be an unnecessary burden. Have to factor in some type of win there.
s_logger.debug("Recalculation Performed called.");
// We swap these first so that in the callback the view is consistent.
ViewComputationResultModelImpl previousResult = _mostRecentResult;
_mostRecentResult = result;
for (ComputationResultListener resultListener : _resultListeners) {
resultListener.computationResultAvailable(result);
}
if (!_deltaListeners.isEmpty() && (previousResult != null)) {
ViewDeltaResultModel deltaModel = computeDeltaModel(previousResult, result);
for (DeltaComputationResultListener deltaListener : _deltaListeners) {
deltaListener.deltaResultAvailable(deltaModel);
}
}
}
/**
* @param previousResult
* @param result
* @return
*/
private ViewDeltaResultModel computeDeltaModel(
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
ViewDeltaResultModelImpl deltaModel = new ViewDeltaResultModelImpl();
deltaModel.setValuationTime(result.getValuationTime());
deltaModel.setResultTimestamp(result.getResultTimestamp());
deltaModel.setPreviousResultTimestamp(previousResult.getResultTimestamp());
deltaModel.setCalculationConfigurationNames(result.getCalculationConfigurationNames());
for (ComputationTargetSpecification targetSpec : result.getAllTargets()) {
computeDeltaModel(deltaModel, targetSpec, previousResult, result);
}
return deltaModel;
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
ViewComputationResultModelImpl previousResult,
ViewComputationResultModelImpl result) {
for (String calcConfigName : result.getCalculationConfigurationNames()) {
ViewCalculationResultModel resultCalcModel = result.getCalculationResult(calcConfigName);
ViewCalculationResultModel previousCalcModel = previousResult.getCalculationResult(calcConfigName);
computeDeltaModel(deltaModel, targetSpec, calcConfigName, previousCalcModel, resultCalcModel);
}
}
private void computeDeltaModel(
ViewDeltaResultModelImpl deltaModel,
ComputationTargetSpecification targetSpec,
String calcConfigName,
ViewCalculationResultModel previousCalcModel,
ViewCalculationResultModel resultCalcModel) {
if (previousCalcModel == null) {
// Everything is new/delta because this is a new calculation context.
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
Map<String, ComputedValue> resultValues = resultCalcModel.getValues(targetSpec);
Map<String, ComputedValue> previousValues = previousCalcModel.getValues(targetSpec);
if (previousValues == null) {
// Everything is new/delta because this is a new target.
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
} else {
// Have to individual delta.
DeltaDefinition deltaDefinition = getDefinition().getCalculationConfiguration(calcConfigName).getDeltaDefinition();
for (Map.Entry<String, ComputedValue> resultEntry : resultValues.entrySet()) {
ComputedValue resultValue = resultEntry.getValue();
ComputedValue previousValue = previousValues.get(resultEntry.getKey());
// REVIEW jonathan 2010-05-07 -- The previous value that we're comparing with is the value from the last
// computation cycle, not the value that we last emitted as a delta. It is therefore important that the
// DeltaComparers take this into account in their implementation of isDelta. E.g. they should compare the
// values after truncation to the required decimal place, rather than testing whether the difference of the
// full values is greater than some threshold; this way, there will always be a point beyond which a change
// is detected, even in the event of gradual creep.
if (deltaDefinition.isDelta(previousValue, resultValue)) {
deltaModel.addValue(calcConfigName, resultEntry.getValue());
}
}
}
}
}
// REVIEW kirk 2009-09-11 -- Need to resolve the synchronization on the lifecycle
// methods.
@Override
public synchronized boolean isRunning() {
return getCalculationState() == ViewCalculationState.RUNNING;
}
public boolean hasListeners() {
return !_resultListeners.isEmpty() || !_deltaListeners.isEmpty();
}
public boolean isPopulateResultModel() {
return _populateResultModel;
}
public void setPopulateResultModel(boolean populateResultModel) {
_populateResultModel = populateResultModel;
}
public synchronized void runOneCycle() {
long snapshotTime = getProcessingContext().getLiveDataSnapshotProvider().snapshot();
runOneCycle(snapshotTime);
}
public synchronized void runOneCycle(long valuationTime) {
SingleComputationCycle cycle = createCycle(valuationTime);
cycle.prepareInputs();
cycle.executePlans();
if (isPopulateResultModel()) {
cycle.populateResultModel();
recalculationPerformed(cycle.getResultModel());
}
cycle.releaseResources();
}
/**
 * Starts the view's background recalculation thread.
 * <p>
 * Only legal from the NOT_STARTED or TERMINATED states; every other state throws
 * {@link IllegalStateException} describing why a start is impossible.
 *
 * @throws IllegalStateException if the view is not in a startable state
 */
@Override
public synchronized void start() {
  s_logger.info("Starting...");
  switch (getCalculationState()) {
    case NOT_STARTED:
    case TERMINATED:
      // Normal state of play. Continue as normal.
      break;
    case TERMINATING:
      // In the middle of termination. This is really bad, as we're now holding the lock
      // that will allow termination to complete successfully. Therefore, we have to throw
      // an exception rather than just waiting or something.
      throw new IllegalStateException("Instructed to start while still terminating.");
    case INITIALIZING:
      // Must have thrown an exception in initialization. Can't start.
      throw new IllegalStateException("Initialization didn't complete successfully. Can't start.");
    case NOT_INITIALIZED:
      throw new IllegalStateException("Must call init() before starting.");
    case STARTING:
      // Must have thrown an exception when start() called previously.
      throw new IllegalStateException("start() already called, but failed to start. Cannot start again.");
    case RUNNING:
      throw new IllegalStateException("Already running.");
  }
  setCalculationState(ViewCalculationState.STARTING);
  ViewRecalculationJob recalcJob = new ViewRecalculationJob(this);
  Thread recalcThread = new Thread(recalcJob, "Recalc Thread for " + getDefinition().getName());
  setRecalcJob(recalcJob);
  setRecalculationThread(recalcThread);
  // Flip to RUNNING before the thread starts so the job sees a consistent state.
  setCalculationState(ViewCalculationState.RUNNING);
  recalcThread.start();
  s_logger.info("Started.");
}
@Override
public void stop() {
s_logger.info("Stopping.....");
synchronized (this) {
switch(getCalculationState()) {
case STARTING:
// Something went horribly wrong during start, and it must have thrown an exception.
s_logger.warn("Instructed to stop the ViewImpl, but still starting. Starting must have failed. Doing nothing.");
break;
case RUNNING:
// This is the normal state of play. Do nothing.
break;
default:
throw new IllegalStateException("Cannot stop a ViewImpl that isn't running. State: " + getCalculationState());
}
}
assert getRecalcJob() != null;
assert getRecalculationThread() != null;
synchronized (this) {
if ((getCalculationState() == ViewCalculationState.TERMINATED)
|| (getCalculationState() == ViewCalculationState.TERMINATING)) {
s_logger.info("Multiple requests to stop() made, this invocation will do nothing.");
return;
}
setCalculationState(ViewCalculationState.TERMINATING);
}
getRecalcJob().terminate();
if (getRecalculationThread().getState() == Thread.State.TIMED_WAITING) {
// In this case it might be waiting on a recalculation pass. Interrupt it.
getRecalculationThread().interrupt();
}
// TODO kirk 2009-09-11 -- Have a heuristic on when to set the timeout based on
// how long the job is currently taking to cycle.
long timeout = 100 * 1000L;
boolean successful = ThreadUtil.safeJoin(getRecalculationThread(), timeout);
if (!successful) {
s_logger.warn("Unable to shut down recalc thread in {}ms", timeout);
}
synchronized (this) {
setCalculationState(ViewCalculationState.TERMINATED);
setRecalcJob(null);
setRecalculationThread(null);
}
s_logger.info("Stopped.");
}
/**
* Checks that the given user has access to every market data line required to compute the results of the view, and
* throws an exception if this is not the case.
*
* @param user the user
* @throws ViewPermissionException if any entitlement problems are found
*/
<<<<<<< MINE
public void assertAccessToLiveDataRequirements(UserPrincipal user) {
Set<ValueRequirement> requiredValues = getViewEvaluationModel().getAllLiveDataRequirements();
Collection<LiveDataSpecification> requiredLiveData = ValueRequirement.getRequiredLiveData(
requiredValues,
getProcessingContext().getSecuritySource());
=======
public boolean isEntitledToAccess(UserPrincipal user) {
try {
checkIsEntitledToAccess(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* Reading the static contents of a view, modifying the view,
* etc., can sometimes be performed even by users
* who are not entitled to view the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToAccess(UserPrincipal user) {
// not done yet
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @return true if the user should be able to view the
* computation results produced by the view. false otherwise.
*/
public boolean isEntitledToResults(UserPrincipal user) {
try {
checkIsEntitledToResults(user);
return true;
} catch (ViewAccessException e) {
return false;
}
}
/**
* A user is entitled to view the computation results produced
* by a view only if they are entitled to every market data
* line required to compute the results of the view.
*
* @param user User who is requesting access
* @throws ViewAccessException If the user is not entitled
*/
public void checkIsEntitledToResults(UserPrincipal user) {
>>>>>>> YOURS
s_logger.info("Checking that {} is entitled to the results of {}", user, this);
Collection<LiveDataSpecification> requiredLiveData = getRequiredLiveDataSpecifications();
Map<LiveDataSpecification, Boolean> entitlements = getProcessingContext().getLiveDataEntitlementChecker().isEntitled(user, requiredLiveData);
ArrayList<LiveDataSpecification> failures = new ArrayList<LiveDataSpecification>();
for (Map.Entry<LiveDataSpecification, Boolean> entry : entitlements.entrySet()) {
if (entry.getValue().booleanValue() == false) {
failures.add(entry.getKey());
}
}
if (!failures.isEmpty()) {
throw new ViewPermissionException(user + " is not entitled to the output of " + this +
" because they do not have permissions to " + failures.get(0));
}
}
private Set<ValueRequirement> getRequiredLiveData() {
Set<ValueSpecification> requiredSpecs = getViewEvaluationModel().getAllLiveDataRequirements();
Set<ValueRequirement> returnValue = new HashSet<ValueRequirement>();
for (ValueSpecification requiredSpec : requiredSpecs) {
returnValue.add(requiredSpec.getRequirementSpecification());
}
return returnValue;
}
private Collection<LiveDataSpecification> getRequiredLiveDataSpecifications() {
Set<LiveDataSpecification> returnValue = new HashSet<LiveDataSpecification>();
for (ValueRequirement requirement : getRequiredLiveData()) {
LiveDataSpecification liveDataSpec = requirement.getRequiredLiveData(getProcessingContext().getSecuritySource());
returnValue.add(liveDataSpec);
}
return returnValue;
}
@Override
public String toString() {
return "View[" + getDefinition().getName() + "]";
}
public SingleComputationCycle createCycle(long valuationTime) {
SingleComputationCycle cycle = new SingleComputationCycle(this, valuationTime);
return cycle;
}
}
Diff Result
No diff
Case 79 - java_orientdb.rev_366a1_819cc..ODirectMemoryPointer.java
Base
package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  // Size in bytes of the allocated area this pointer owns.
  private final long pageSize;
  // Base address of the allocation inside the underlying direct memory.
  private final long dataPointer;

  /**
   * Allocates {@code pageSize} bytes of direct memory.
   *
   * @param pageSize number of bytes to allocate; must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");
    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates direct memory initialized with a copy of {@code data}.
   *
   * @param data source bytes; must be non-empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    rangeCheck(offset, length);
    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    rangeCheck(offset, length);
    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes from {@code content} at {@code arrayOffset} to {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    rangeCheck(offset, length);
    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  public int getInt(long offset) {
    rangeCheck(offset, OIntegerSerializer.INT_SIZE);
    return directMemory.getInt(dataPointer + offset);
  }

  public void setInt(long offset, int value) {
    rangeCheck(offset, OIntegerSerializer.INT_SIZE);
    directMemory.setInt(dataPointer + offset, value);
  }

  public void setShort(long offset, short value) {
    rangeCheck(offset, OShortSerializer.SHORT_SIZE);
    directMemory.setShort(dataPointer + offset, value);
  }

  public short getShort(long offset) {
    rangeCheck(offset, OShortSerializer.SHORT_SIZE);
    return directMemory.getShort(dataPointer + offset);
  }

  public long getLong(long offset) {
    rangeCheck(offset, OLongSerializer.LONG_SIZE);
    return directMemory.getLong(dataPointer + offset);
  }

  public void setLong(long offset, long value) {
    rangeCheck(offset, OLongSerializer.LONG_SIZE);
    directMemory.setLong(dataPointer + offset, value);
  }

  public byte getByte(long offset) {
    rangeCheck(offset, OByteSerializer.BYTE_SIZE);
    return directMemory.getByte(dataPointer + offset);
  }

  public void setByte(long offset, byte value) {
    rangeCheck(offset, OByteSerializer.BYTE_SIZE);
    directMemory.setByte(dataPointer + offset, value);
  }

  public void setChar(long offset, char value) {
    rangeCheck(offset, OCharSerializer.CHAR_SIZE);
    directMemory.setChar(dataPointer + offset, value);
  }

  public char getChar(long offset) {
    rangeCheck(offset, OCharSerializer.CHAR_SIZE);
    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Copies {@code len} bytes from this pointer at {@code srcOffset} to {@code destPointer}
   * at {@code destOffset}.
   *
   * @throws ODirectMemoryViolationException if either interval falls outside its pointer's area
   */
  public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    rangeCheck(srcOffset, len);
    // BUGFIX: the destination interval must be validated against the DESTINATION pointer's
    // page size; the previous code checked it against this pointer's pageSize, allowing
    // out-of-bounds writes into a smaller destination allocation.
    destPointer.rangeCheck(destOffset, len);
    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  // Validates that [offset, offset + size) lies within this pointer's allocated area.
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");
    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");
    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");
    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw base address of this allocation */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying direct memory. The pointer must not be used afterwards. */
  public void free() {
    directMemory.free(dataPointer);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    if (dataPointer != that.dataPointer)
      return false;
    if (pageSize != that.pageSize)
      return false;
    return true;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory. Every access is
 * bounds-checked against the area's page size.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    rangeCheck(offset, length);
    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    rangeCheck(offset, length);
    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    rangeCheck(offset, length);
    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    rangeCheck(offset, OIntegerSerializer.INT_SIZE);
    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    rangeCheck(offset, OIntegerSerializer.INT_SIZE);
    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    rangeCheck(offset, OShortSerializer.SHORT_SIZE);
    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    rangeCheck(offset, OShortSerializer.SHORT_SIZE);
    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    rangeCheck(offset, OLongSerializer.LONG_SIZE);
    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    rangeCheck(offset, OLongSerializer.LONG_SIZE);
    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    rangeCheck(offset, OByteSerializer.BYTE_SIZE);
    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    rangeCheck(offset, OByteSerializer.BYTE_SIZE);
    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    rangeCheck(offset, OCharSerializer.CHAR_SIZE);
    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    rangeCheck(offset, OCharSerializer.CHAR_SIZE);
    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Copies {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid copies into a larger one.
   */
  public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    rangeCheck(srcOffset, len);
    destPointer.rangeCheck(destOffset, len);

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
Left
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Copies {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid copies into a larger one.
   */
  public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Copies {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid copies into a larger one.
   */
  public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
Right
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Moves {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid moves into a larger one.
   */
  public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
          + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Moves {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid moves into a larger one.
   */
  public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
          + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
          + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
MergeMethods
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Moves {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid moves into a larger one.
   *
   * <p>NOTE(review): this delegates to {@code directMemory.copyData}; if source
   * and destination ranges may overlap, confirm that implementation is
   * overlap-safe (memmove-like) for this use.
   */
  public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area [" + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size) + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size area of direct (off-heap) memory, with optional
 * bounds checking. Checks are enabled unless the system property
 * {@code memory.directMemory.unsafeMode} is set to {@code true}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Property is read per instance at construction time. NOTE(review): could be a
  // static constant; kept per-instance to preserve existing semantics.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));

  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
  private final long          pageSize;
  private final long          dataPointer;

  /**
   * Allocates a new direct-memory area.
   *
   * @param pageSize size of the area in bytes, must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize <= 0}
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new direct-memory area initialized with a copy of {@code data}.
   *
   * @param data initial content; must not be empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset} into a new array. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes starting at {@code offset} into {@code array} at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads a 4-byte int at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes a 4-byte int at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a 2-byte short at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a 2-byte short at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads an 8-byte long at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes an 8-byte long at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a 2-byte char at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a 2-byte char at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Moves {@code len} bytes from this area (starting at {@code srcOffset})
   * into {@code destPointer}'s area (starting at {@code destOffset}).
   *
   * <p>Fix: the destination interval is now validated against the destination
   * pointer's own page size. Previously it was checked against this pointer's
   * page size, which could allow out-of-bounds writes into a smaller
   * destination area and spuriously reject valid moves into a larger one.
   *
   * <p>NOTE(review): this delegates to {@code directMemory.copyData}; if source
   * and destination ranges may overlap, confirm that implementation is
   * overlap-safe (memmove-like) for this use.
   */
  public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      destPointer.rangeCheck(destOffset, len);
    }

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /** Validates that the interval [offset, offset + size) lies inside this area. */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area [" + pageSize + "]");

    if (offset + size > pageSize)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size) + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return the raw address of the underlying memory area. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory area back to the allocator. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Equal when raw address and page size both match. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;
    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
KeepBothMethods
package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
private final boolean SAFE_MODE = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
private final long pageSize;
private final long dataPointer;
/** Allocates a direct-memory area of {@code pageSize} bytes; {@code pageSize} must be positive. */
public ODirectMemoryPointer(long pageSize) {
  if (pageSize <= 0) {
    throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize + " was provided.");
  }
  this.dataPointer = directMemory.allocate(pageSize);
  this.pageSize = pageSize;
}
/** Allocates a direct-memory area holding a copy of {@code data}; the array must not be empty. */
public ODirectMemoryPointer(byte[] data) {
  if (data.length == 0) {
    throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
  }
  this.dataPointer = directMemory.allocate(data);
  this.pageSize = data.length;
}
/** Reads {@code length} bytes starting at {@code offset} into a fresh array. */
public byte[] get(long offset, int length) {
  if (SAFE_MODE) {
    rangeCheck(offset, length);
  }
  final long position = dataPointer + offset;
  return directMemory.get(position, length);
}
public void get(long offset, byte[] array, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.get(dataPointer + offset, array, arrayOffset, length);
}
public void set(long offset, byte[] content, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.set(dataPointer + offset, content, arrayOffset, length);
}
public int getInt(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
return directMemory.getInt(dataPointer + offset);
}
public void setInt(long offset, int value) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
directMemory.setInt(dataPointer + offset, value);
}
public void setShort(long offset, short value) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
directMemory.setShort(dataPointer + offset, value);
}
public short getShort(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
return directMemory.getShort(dataPointer + offset);
}
public long getLong(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
return directMemory.getLong(dataPointer + offset);
}
public void setLong(long offset, long value) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
directMemory.setLong(dataPointer + offset, value);
}
public byte getByte(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
return directMemory.getByte(dataPointer + offset);
}
public void setByte(long offset, byte value) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
directMemory.setByte(dataPointer + offset, value);
}
public void setChar(long offset, char value) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
directMemory.setChar(dataPointer + offset, value);
}
public char getChar(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
return directMemory.getChar(dataPointer + offset);
}
public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
private void rangeCheck(long offset, long size) {
if (offset < 0)
throw new ODirectMemoryViolationException("Negative offset was provided");
if (size < 0)
throw new ODirectMemoryViolationException("Negative size was provided");
if (offset > pageSize)
throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area [" + pageSize + "]");
if (offset + size > pageSize)
throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size) + "] is more than size of allocated area [" + pageSize + "]");
}
public long getDataPointer() {
return dataPointer;
}
public void free() {
directMemory.free(dataPointer);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ODirectMemoryPointer that = (ODirectMemoryPointer) o;
if (dataPointer != that.dataPointer)
return false;
if (pageSize != that.pageSize)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (pageSize ^ (pageSize >>> 32));
result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
return result;
}
}
package com.orientechnologies.common.directmemory;

import com.orientechnologies.common.serialization.types.*;

/**
 * Pointer to a fixed-size block of direct (off-heap) memory with typed,
 * optionally bounds-checked accessors.
 *
 * <p>All offsets are relative to the start of the block. Unless the system
 * property {@code memory.directMemory.unsafeMode} is set to {@code true},
 * every access is validated against the block size before delegating to the
 * underlying {@link ODirectMemory}. The block is not garbage collected;
 * callers must release it explicitly via {@link #free()}.
 *
 * @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
 * @since 10/19/13
 */
public class ODirectMemoryPointer {
  // Boolean.valueOf(null) is false, so an absent property leaves safe mode ON.
  private final boolean       SAFE_MODE    = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
  private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();

  /** Size of the allocated block, in bytes. */
  private final long          pageSize;
  /** Absolute base address of the allocated block. */
  private final long          dataPointer;

  /**
   * Allocates a new block of the given size.
   *
   * @param pageSize block size in bytes; must be positive
   * @throws ODirectMemoryViolationException if {@code pageSize} is not positive
   */
  public ODirectMemoryPointer(long pageSize) {
    if (pageSize <= 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize + " was provided.");

    this.dataPointer = directMemory.allocate(pageSize);
    this.pageSize = pageSize;
  }

  /**
   * Allocates a new block initialized with a copy of {@code data}.
   *
   * @param data initial content; must be non-empty
   * @throws ODirectMemoryViolationException if {@code data} is empty
   */
  public ODirectMemoryPointer(byte[] data) {
    if (data.length == 0)
      throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");

    this.dataPointer = directMemory.allocate(data);
    this.pageSize = data.length;
  }

  /** Reads {@code length} bytes starting at {@code offset}. */
  public byte[] get(long offset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    return directMemory.get(dataPointer + offset, length);
  }

  /** Reads {@code length} bytes at {@code offset} into {@code array} starting at {@code arrayOffset}. */
  public void get(long offset, byte[] array, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.get(dataPointer + offset, array, arrayOffset, length);
  }

  /** Writes {@code length} bytes of {@code content} (from {@code arrayOffset}) at {@code offset}. */
  public void set(long offset, byte[] content, int arrayOffset, int length) {
    if (SAFE_MODE)
      rangeCheck(offset, length);

    directMemory.set(dataPointer + offset, content, arrayOffset, length);
  }

  /** Reads an {@code int} at {@code offset}. */
  public int getInt(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    return directMemory.getInt(dataPointer + offset);
  }

  /** Writes an {@code int} at {@code offset}. */
  public void setInt(long offset, int value) {
    if (SAFE_MODE)
      rangeCheck(offset, OIntegerSerializer.INT_SIZE);

    directMemory.setInt(dataPointer + offset, value);
  }

  /** Writes a {@code short} at {@code offset}. */
  public void setShort(long offset, short value) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    directMemory.setShort(dataPointer + offset, value);
  }

  /** Reads a {@code short} at {@code offset}. */
  public short getShort(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OShortSerializer.SHORT_SIZE);

    return directMemory.getShort(dataPointer + offset);
  }

  /** Reads a {@code long} at {@code offset}. */
  public long getLong(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    return directMemory.getLong(dataPointer + offset);
  }

  /** Writes a {@code long} at {@code offset}. */
  public void setLong(long offset, long value) {
    if (SAFE_MODE)
      rangeCheck(offset, OLongSerializer.LONG_SIZE);

    directMemory.setLong(dataPointer + offset, value);
  }

  /** Reads a single byte at {@code offset}. */
  public byte getByte(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    return directMemory.getByte(dataPointer + offset);
  }

  /** Writes a single byte at {@code offset}. */
  public void setByte(long offset, byte value) {
    if (SAFE_MODE)
      rangeCheck(offset, OByteSerializer.BYTE_SIZE);

    directMemory.setByte(dataPointer + offset, value);
  }

  /** Writes a {@code char} at {@code offset}. */
  public void setChar(long offset, char value) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    directMemory.setChar(dataPointer + offset, value);
  }

  /** Reads a {@code char} at {@code offset}. */
  public char getChar(long offset) {
    if (SAFE_MODE)
      rangeCheck(offset, OCharSerializer.CHAR_SIZE);

    return directMemory.getChar(dataPointer + offset);
  }

  /**
   * Copies {@code len} bytes from this block into {@code destPointer}.
   * Overlap handling depends on {@link ODirectMemory#copyData} — presumably
   * memcpy-like; prefer {@link #moveData} when regions may overlap.
   */
  public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      rangeCheck(destOffset, len);
    }

    directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /**
   * Moves {@code len} bytes from this block into {@code destPointer}
   * (memmove-style; overlap semantics defined by {@link ODirectMemory#moveData}).
   */
  public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
    if (SAFE_MODE) {
      rangeCheck(srcOffset, len);
      rangeCheck(destOffset, len);
    }

    directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
  }

  /**
   * Validates that the interval {@code [offset, offset + size)} lies inside the block.
   *
   * @throws ODirectMemoryViolationException if either bound is negative or out of range
   */
  private void rangeCheck(long offset, long size) {
    if (offset < 0)
      throw new ODirectMemoryViolationException("Negative offset was provided");

    if (size < 0)
      throw new ODirectMemoryViolationException("Negative size was provided");

    if (offset > pageSize)
      throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area [" + pageSize + "]");

    // Compare via subtraction: (offset + size) can overflow long for huge
    // sizes, wrapping negative and silently passing the original check.
    // pageSize - offset is non-negative here, so this form cannot overflow.
    if (size > pageSize - offset)
      throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size) + "] is more than size of allocated area [" + pageSize + "]");
  }

  /** @return absolute base address of the block. */
  public long getDataPointer() {
    return dataPointer;
  }

  /** Releases the underlying memory; the pointer must not be used afterwards. */
  public void free() {
    directMemory.free(dataPointer);
  }

  /** Two pointers are equal iff they reference the same address and size. */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ODirectMemoryPointer that = (ODirectMemoryPointer) o;

    return dataPointer == that.dataPointer && pageSize == that.pageSize;
  }

  @Override
  public int hashCode() {
    int result = (int) (pageSize ^ (pageSize >>> 32));
    result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
    return result;
  }
}
Safe
package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
private final boolean SAFE_MODE = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
private final long pageSize;
private final long dataPointer;
public ODirectMemoryPointer(long pageSize) {
if (pageSize <= 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
+ " was provided.");
this.dataPointer = directMemory.allocate(pageSize);
this.pageSize = pageSize;
}
public ODirectMemoryPointer(byte[] data) {
if (data.length == 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
this.dataPointer = directMemory.allocate(data);
this.pageSize = data.length;
}
public byte[] get(long offset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
return directMemory.get(dataPointer + offset, length);
}
public void get(long offset, byte[] array, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.get(dataPointer + offset, array, arrayOffset, length);
}
public void set(long offset, byte[] content, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.set(dataPointer + offset, content, arrayOffset, length);
}
public int getInt(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
return directMemory.getInt(dataPointer + offset);
}
public void setInt(long offset, int value) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
directMemory.setInt(dataPointer + offset, value);
}
public void setShort(long offset, short value) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
directMemory.setShort(dataPointer + offset, value);
}
public short getShort(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
return directMemory.getShort(dataPointer + offset);
}
public long getLong(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
return directMemory.getLong(dataPointer + offset);
}
public void setLong(long offset, long value) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
directMemory.setLong(dataPointer + offset, value);
}
public byte getByte(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
return directMemory.getByte(dataPointer + offset);
}
public void setByte(long offset, byte value) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
directMemory.setByte(dataPointer + offset, value);
}
public void setChar(long offset, char value) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
directMemory.setChar(dataPointer + offset, value);
}
public char getChar(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
return directMemory.getChar(dataPointer + offset);
}
<<<<<<< MINE
public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
=======
public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
>>>>>>> YOURS
private void rangeCheck(long offset, long size) {
if (offset < 0)
throw new ODirectMemoryViolationException("Negative offset was provided");
if (size < 0)
throw new ODirectMemoryViolationException("Negative size was provided");
if (offset > pageSize)
throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
+ pageSize + "]");
if (offset + size > pageSize)
throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
+ "] is more than size of allocated area [" + pageSize + "]");
}
public long getDataPointer() {
return dataPointer;
}
public void free() {
directMemory.free(dataPointer);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ODirectMemoryPointer that = (ODirectMemoryPointer) o;
if (dataPointer != that.dataPointer)
return false;
if (pageSize != that.pageSize)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (pageSize ^ (pageSize >>> 32));
result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
return result;
}
}
package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
private final boolean SAFE_MODE = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
private final long pageSize;
private final long dataPointer;
public ODirectMemoryPointer(long pageSize) {
if (pageSize <= 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
+ " was provided.");
this.dataPointer = directMemory.allocate(pageSize);
this.pageSize = pageSize;
}
public ODirectMemoryPointer(byte[] data) {
if (data.length == 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
this.dataPointer = directMemory.allocate(data);
this.pageSize = data.length;
}
public byte[] get(long offset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
return directMemory.get(dataPointer + offset, length);
}
public void get(long offset, byte[] array, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.get(dataPointer + offset, array, arrayOffset, length);
}
public void set(long offset, byte[] content, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.set(dataPointer + offset, content, arrayOffset, length);
}
public int getInt(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
return directMemory.getInt(dataPointer + offset);
}
public void setInt(long offset, int value) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
directMemory.setInt(dataPointer + offset, value);
}
public void setShort(long offset, short value) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
directMemory.setShort(dataPointer + offset, value);
}
public short getShort(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
return directMemory.getShort(dataPointer + offset);
}
public long getLong(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
return directMemory.getLong(dataPointer + offset);
}
public void setLong(long offset, long value) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
directMemory.setLong(dataPointer + offset, value);
}
public byte getByte(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
return directMemory.getByte(dataPointer + offset);
}
public void setByte(long offset, byte value) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
directMemory.setByte(dataPointer + offset, value);
}
public void setChar(long offset, char value) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
directMemory.setChar(dataPointer + offset, value);
}
public char getChar(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
return directMemory.getChar(dataPointer + offset);
}
<<<<<<< MINE
public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
=======
public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.copyData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
>>>>>>> YOURS
private void rangeCheck(long offset, long size) {
if (offset < 0)
throw new ODirectMemoryViolationException("Negative offset was provided");
if (size < 0)
throw new ODirectMemoryViolationException("Negative size was provided");
if (offset > pageSize)
throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
+ pageSize + "]");
if (offset + size > pageSize)
throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
+ "] is more than size of allocated area [" + pageSize + "]");
}
public long getDataPointer() {
return dataPointer;
}
public void free() {
directMemory.free(dataPointer);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ODirectMemoryPointer that = (ODirectMemoryPointer) o;
if (dataPointer != that.dataPointer)
return false;
if (pageSize != that.pageSize)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (pageSize ^ (pageSize >>> 32));
result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
return result;
}
}
Unstructured
package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
private final boolean SAFE_MODE = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
private final long pageSize;
private final long dataPointer;
public ODirectMemoryPointer(long pageSize) {
if (pageSize <= 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
+ " was provided.");
this.dataPointer = directMemory.allocate(pageSize);
this.pageSize = pageSize;
}
public ODirectMemoryPointer(byte[] data) {
if (data.length == 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
this.dataPointer = directMemory.allocate(data);
this.pageSize = data.length;
}
public byte[] get(long offset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
return directMemory.get(dataPointer + offset, length);
}
public void get(long offset, byte[] array, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.get(dataPointer + offset, array, arrayOffset, length);
}
public void set(long offset, byte[] content, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.set(dataPointer + offset, content, arrayOffset, length);
}
public int getInt(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
return directMemory.getInt(dataPointer + offset);
}
public void setInt(long offset, int value) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
directMemory.setInt(dataPointer + offset, value);
}
public void setShort(long offset, short value) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
directMemory.setShort(dataPointer + offset, value);
}
public short getShort(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
return directMemory.getShort(dataPointer + offset);
}
public long getLong(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
return directMemory.getLong(dataPointer + offset);
}
public void setLong(long offset, long value) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
directMemory.setLong(dataPointer + offset, value);
}
public byte getByte(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
return directMemory.getByte(dataPointer + offset);
}
public void setByte(long offset, byte value) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
directMemory.setByte(dataPointer + offset, value);
}
public void setChar(long offset, char value) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
directMemory.setChar(dataPointer + offset, value);
}
public char getChar(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
return directMemory.getChar(dataPointer + offset);
}
<<<<<<< MINE
public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
=======
public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
>>>>>>> YOURS
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
private void rangeCheck(long offset, long size) {
if (offset < 0)
throw new ODirectMemoryViolationException("Negative offset was provided");
if (size < 0)
throw new ODirectMemoryViolationException("Negative size was provided");
if (offset > pageSize)
throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
+ pageSize + "]");
if (offset + size > pageSize)
throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
+ "] is more than size of allocated area [" + pageSize + "]");
}
public long getDataPointer() {
return dataPointer;
}
public void free() {
directMemory.free(dataPointer);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ODirectMemoryPointer that = (ODirectMemoryPointer) o;
if (dataPointer != that.dataPointer)
return false;
if (pageSize != that.pageSize)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (pageSize ^ (pageSize >>> 32));
result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
return result;
}
}package com.orientechnologies.common.directmemory;
import com.orientechnologies.common.serialization.types.*;
/**
* @author Andrey Lomakin <a href="mailto:lomakin.andrey@gmail.com">Andrey Lomakin</a>
* @since 10/19/13
*/
public class ODirectMemoryPointer {
private final boolean SAFE_MODE = !Boolean.valueOf(System.getProperty("memory.directMemory.unsafeMode"));
private final ODirectMemory directMemory = ODirectMemoryFactory.INSTANCE.directMemory();
private final long pageSize;
private final long dataPointer;
public ODirectMemoryPointer(long pageSize) {
if (pageSize <= 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but " + pageSize
+ " was provided.");
this.dataPointer = directMemory.allocate(pageSize);
this.pageSize = pageSize;
}
public ODirectMemoryPointer(byte[] data) {
if (data.length == 0)
throw new ODirectMemoryViolationException("Size of allocated area should be more than zero but 0 was provided.");
this.dataPointer = directMemory.allocate(data);
this.pageSize = data.length;
}
public byte[] get(long offset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
return directMemory.get(dataPointer + offset, length);
}
public void get(long offset, byte[] array, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.get(dataPointer + offset, array, arrayOffset, length);
}
public void set(long offset, byte[] content, int arrayOffset, int length) {
if (SAFE_MODE)
rangeCheck(offset, length);
directMemory.set(dataPointer + offset, content, arrayOffset, length);
}
public int getInt(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
return directMemory.getInt(dataPointer + offset);
}
public void setInt(long offset, int value) {
if (SAFE_MODE)
rangeCheck(offset, OIntegerSerializer.INT_SIZE);
directMemory.setInt(dataPointer + offset, value);
}
public void setShort(long offset, short value) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
directMemory.setShort(dataPointer + offset, value);
}
public short getShort(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OShortSerializer.SHORT_SIZE);
return directMemory.getShort(dataPointer + offset);
}
public long getLong(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
return directMemory.getLong(dataPointer + offset);
}
public void setLong(long offset, long value) {
if (SAFE_MODE)
rangeCheck(offset, OLongSerializer.LONG_SIZE);
directMemory.setLong(dataPointer + offset, value);
}
public byte getByte(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
return directMemory.getByte(dataPointer + offset);
}
public void setByte(long offset, byte value) {
if (SAFE_MODE)
rangeCheck(offset, OByteSerializer.BYTE_SIZE);
directMemory.setByte(dataPointer + offset, value);
}
public void setChar(long offset, char value) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
directMemory.setChar(dataPointer + offset, value);
}
public char getChar(long offset) {
if (SAFE_MODE)
rangeCheck(offset, OCharSerializer.CHAR_SIZE);
return directMemory.getChar(dataPointer + offset);
}
<<<<<<< MINE
public void copyData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
=======
public void moveData(long srcOffset, ODirectMemoryPointer destPointer, long destOffset, long len) {
>>>>>>> YOURS
if (SAFE_MODE) {
rangeCheck(srcOffset, len);
rangeCheck(destOffset, len);
}
directMemory.moveData(dataPointer + srcOffset, destPointer.getDataPointer() + destOffset, len);
}
private void rangeCheck(long offset, long size) {
if (offset < 0)
throw new ODirectMemoryViolationException("Negative offset was provided");
if (size < 0)
throw new ODirectMemoryViolationException("Negative size was provided");
if (offset > pageSize)
throw new ODirectMemoryViolationException("Provided offset [" + offset + "] is more than size of allocated area ["
+ pageSize + "]");
if (offset + size > pageSize)
throw new ODirectMemoryViolationException("Last position of provided data interval [" + (offset + size)
+ "] is more than size of allocated area [" + pageSize + "]");
}
public long getDataPointer() {
return dataPointer;
}
public void free() {
directMemory.free(dataPointer);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ODirectMemoryPointer that = (ODirectMemoryPointer) o;
if (dataPointer != that.dataPointer)
return false;
if (pageSize != that.pageSize)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (pageSize ^ (pageSize >>> 32));
result = 31 * result + (int) (dataPointer ^ (dataPointer >>> 32));
return result;
}
}
Diff Result
No diff
Case 80 - java_orientdb.rev_368c0_ff746..OIndexMultiValues.java
Base
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
/**
 * Creates a multi-value index of the given type backed by the supplied engine.
 */
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
}
/**
 * Returns the record ids stored under {@code key}, or an empty set when the
 * key is absent. The result is a defensive copy made while the shared lock is
 * held, so callers may use it freely after the lock is released.
 */
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
if (values == null)
return Collections.emptySet();
// Copy out of the engine-owned set before releasing the lock.
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
/**
 * Returns the number of record ids stored under {@code key}; 0 if the key is
 * absent.
 */
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
/**
 * Adds {@code iSingleValue}'s identity to the set stored under {@code key},
 * creating the set on first use. A record without a valid (persistent)
 * identity is saved first so that a durable rid is indexed.
 *
 * <p>Note the lock order: the modification lock is taken outside the
 * exclusive lock, mirroring the other mutators of this class.
 *
 * @return this index, for call chaining
 */
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
// Store raw rids only; do not auto-resolve them to records inside the index.
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
/**
 * Removes every occurrence of {@code iRecord} across all keys of the index.
 *
 * @return the removal count as reported by the engine — presumably the number
 *         of entries removed; confirm against the engine implementation
 */
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
/**
 * Removes {@code value} from the set stored under {@code key}. When the set
 * becomes empty the whole key is dropped from the engine; otherwise the
 * shrunk set is written back.
 *
 * @return {@code true} if the value was present and removed
 */
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
/**
 * Creates the index on the given clusters, delegating to the base
 * implementation with a list-of-rids stream serializer for the multi-values.
 */
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
OStreamSerializerListRID.INSTANCE);
}
/**
 * Returns at most maxValuesToFetch RIDs whose keys fall in the given range;
 * the engine does the range scan, flattening per-key sets via the transformer.
 */
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
    final boolean toInclusive, final int maxValuesToFetch) {
  checkForRebuild(); // before the shared lock, as in every read path here
  acquireSharedLock();
  try {
    return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
        MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** RIDs for keys greater than (or equal to, when inclusive) fromKey, capped at maxValuesToFetch. */
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** RIDs for keys less than (or equal to, when inclusive) toKey, capped at maxValuesToFetch. */
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Collects the RIDs stored under each of the given keys (keys are probed in
 * sorted order), stopping once maxValuesToFetch results have been gathered;
 * a negative max means "no limit". Missing keys are skipped silently.
 */
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
  checkForRebuild();
  final List<Object> orderedKeys = new ArrayList<Object>(iKeys);
  Collections.sort(orderedKeys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<OIdentifiable> collected = new HashSet<OIdentifiable>();
    for (final Object key : orderedKeys) {
      final OMVRBTreeRIDSet bucket = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (bucket == null || bucket.isEmpty())
        continue;
      for (final OIdentifiable rid : bucket) {
        // cap check happens before each add, exactly like the limit contract requires
        if (maxValuesToFetch > -1 && collected.size() == maxValuesToFetch)
          return collected;
        collected.add(rid);
      }
    }
    return collected;
  } finally {
    releaseSharedLock();
  }
}
/** Key/RID entry documents for keys greater than (or equal to, when inclusive) fromKey. */
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Key/RID entry documents for keys less than (or equal to, when inclusive) toKey. */
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Key/RID entry documents for keys inside the given range. For single-field
 * indexes both bounds are first coerced to that field's Java type so the
 * engine compares like with like.
 */
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
  checkForRebuild();
  final OType[] types = getDefinition().getTypes();
  if (types.length == 1) {
    rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
    rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
  }
  acquireSharedLock();
  try {
    return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Counts entries whose keys fall in the given range. Single-field indexes
 * coerce both bounds to the field's Java type first; bounds of differing
 * runtime classes are rejected to avoid undefined comparisons.
 */
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
    final int maxValuesToFetch) {
  checkForRebuild();
  final OType[] types = getDefinition().getTypes();
  if (types.length == 1) {
    rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
    rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
  }
  if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
    throw new IllegalArgumentException("Range from-to parameters are of different types");
  acquireSharedLock();
  try {
    return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds one {key, rid} document per stored RID for each of the given keys
 * (keys probed in sorted order), stopping at maxEntriesToFetch results;
 * a negative max means "no limit". Documents are marked not-dirty so they
 * are treated as read-only projections.
 */
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
  checkForRebuild();
  final List<Object> orderedKeys = new ArrayList<Object>(iKeys);
  Collections.sort(orderedKeys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<ODocument> entries = new ODocumentFieldsHashSet();
    for (final Object key : orderedKeys) {
      final OMVRBTreeRIDSet bucket = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (bucket == null || bucket.isEmpty())
        continue;
      for (final OIdentifiable rid : bucket) {
        if (maxEntriesToFetch > -1 && entries.size() == maxEntriesToFetch)
          return entries;
        final ODocument entry = new ODocument();
        entry.field("key", key);
        entry.field("rid", rid.getIdentity());
        entry.unsetDirty();
        entries.add(entry);
      }
    }
    return entries;
  } finally {
    releaseSharedLock();
  }
}
/** Total number of indexed values (RIDs) — per-key sets are expanded via the transformer. */
public long getSize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.size(MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Number of distinct keys — null transformer makes the engine count entries, not values. */
public long getKeySize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.size(null);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Iterator over all stored RIDs. Wrapped in OSharedResourceIterator so the
 * shared lock is re-taken around each traversal step after this method returns.
 */
public Iterator<OIdentifiable> valuesIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
        indexEngine.valuesIterator()));
  } finally {
    releaseSharedLock();
  }
}
/** Same as valuesIterator() but walks the engine's values in reverse key order. */
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
  } finally {
    releaseSharedLock();
  }
}
/** Adapter letting the engine treat each stored Set of RIDs as a plain Collection. */
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
  private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

  @Override
  public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
    return value; // a Set already is a Collection; no copy made
  }

  @Override
  public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
    // unchecked cast: assumes the engine only hands back the Sets it was given — TODO confirm
    return (Set<OIdentifiable>) collection;
  }
}
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {

  // Each key maps to a Set of RIDs; storage is delegated to the supplied engine.
  public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
    super(type, indexEngine);
  }

  /** Defensive copy of the RIDs under key, or an empty set when the key is absent. */
  public Set<OIdentifiable> get(final Object key) {
    checkForRebuild(); // every public operation checks for a pending rebuild before locking
    acquireSharedLock();
    try {
      final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (values == null)
        return Collections.emptySet();
      // copy so callers cannot mutate the index's internal set
      return new HashSet<OIdentifiable>(values);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of RIDs stored under key; 0 when absent. */
  public long count(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (values == null)
        return 0;
      return values.size();
    } finally {
      releaseSharedLock();
    }
  }

  /** Adds one RID under key, creating the per-key RID set lazily; returns this. */
  public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        checkForKeyType(key);
        Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null) {
          values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
          ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
        }
        // new records are saved first so a valid identity gets indexed
        if (!iSingleValue.getIdentity().isValid())
          ((ORecord<?>) iSingleValue).save();
        values.add(iSingleValue.getIdentity());
        indexEngine.put(key, values);
        return this;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /** Removes the record's entries via the engine's removeValue; returns the engine's count. */
  public int remove(final OIdentifiable iRecord) {
    checkForRebuild();
    acquireExclusiveLock();
    try {
      return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseExclusiveLock();
    }
  }

  /**
   * Removes one RID from the set under key; drops the key when the set becomes
   * empty, otherwise writes the shrunken set back. True only when value was present.
   */
  @Override
  public boolean remove(final Object key, final OIdentifiable value) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        Set<OIdentifiable> recs = indexEngine.get(key);
        if (recs == null)
          return false;
        if (recs.remove(value)) {
          if (recs.isEmpty())
            indexEngine.remove(key);
          else
            indexEngine.put(key, recs);
          return true;
        }
        return false;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /** Creates the index, fixing the value serializer to the list-of-RIDs stream serializer. */
  public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
      final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
    return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
        OStreamSerializerListRID.INSTANCE);
  }

  /** RIDs whose keys fall in the given range, capped at maxValuesToFetch. */
  public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
      final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
          MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** RIDs for keys greater than (or equal to, when inclusive) fromKey. */
  public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** RIDs for keys less than (or equal to, when inclusive) toKey. */
  public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * RIDs stored under each of the given keys (probed in sorted order), stopping
   * once maxValuesToFetch results are gathered; negative max means no limit.
   */
  public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      for (final Object key : sortedKeys) {
        final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // cap checked before each add
            if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
              return result;
            result.add(value);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  /** Key/RID entry documents for keys greater than (or equal to, when inclusive) fromKey. */
  public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Key/RID entry documents for keys less than (or equal to, when inclusive) toKey. */
  public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Entry documents in range; single-field indexes coerce both bounds to the field type first. */
  public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    acquireSharedLock();
    try {
      return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Counts entries in range; rejects bounds of differing runtime classes. */
  public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
      final int maxValuesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
      throw new IllegalArgumentException("Range from-to parameters are of different types");
    acquireSharedLock();
    try {
      return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * One {key, rid} document per stored RID for each given key (sorted order),
   * capped at maxEntriesToFetch; documents are marked not-dirty (read-only projections).
   */
  public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      for (final Object key : sortedKeys) {
        final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
              return result;
            final ODocument document = new ODocument();
            document.field("key", key);
            document.field("rid", value.getIdentity());
            document.unsetDirty();
            result.add(document);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  /** Total number of indexed values (per-key sets expanded via the transformer). */
  public long getSize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of distinct keys (null transformer: count entries, not values). */
  public long getKeySize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(null);
    } finally {
      releaseSharedLock();
    }
  }

  /** Iterator over all RIDs; OSharedResourceIterator re-takes the lock per step. */
  public Iterator<OIdentifiable> valuesIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
          indexEngine.valuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /** Same as valuesIterator() but in reverse key order. */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public Iterator<OIdentifiable> valuesInverseIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /** Adapter letting the engine treat each stored Set of RIDs as a plain Collection. */
  private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
    private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

    @Override
    public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
      return value;
    }

    @Override
    public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
      // unchecked cast: assumes the engine only hands back the Sets it was given — TODO confirm
      return (Set<OIdentifiable>) collection;
    }
  }
}
Left
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {

  // Each key maps to a Set of RIDs; this variant also carries an engine algorithm name.
  public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
    super(type, algorithm, indexEngine);
  }

  /** Defensive copy of the RIDs under key, or an empty set when the key is absent. */
  public Set<OIdentifiable> get(final Object key) {
    checkForRebuild(); // every public operation checks for a pending rebuild before locking
    acquireSharedLock();
    try {
      final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (values == null)
        return Collections.emptySet();
      // copy so callers cannot mutate the index's internal set
      return new HashSet<OIdentifiable>(values);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of RIDs stored under key; 0 when absent. */
  public long count(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
      if (values == null)
        return 0;
      return values.size();
    } finally {
      releaseSharedLock();
    }
  }

  /** Adds one RID under key, creating the per-key RID set lazily; returns this. */
  public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        checkForKeyType(key);
        Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null) {
          values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
          ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
        }
        // new records are saved first so a valid identity gets indexed
        if (!iSingleValue.getIdentity().isValid())
          ((ORecord<?>) iSingleValue).save();
        values.add(iSingleValue.getIdentity());
        indexEngine.put(key, values);
        return this;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /** Removes the record's entries via the engine's removeValue; returns the engine's count. */
  public int remove(final OIdentifiable iRecord) {
    checkForRebuild();
    acquireExclusiveLock();
    try {
      return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseExclusiveLock();
    }
  }

  /**
   * Removes one RID from the set under key; drops the key when the set becomes
   * empty, otherwise writes the shrunken set back. True only when value was present.
   */
  @Override
  public boolean remove(final Object key, final OIdentifiable value) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        Set<OIdentifiable> recs = indexEngine.get(key);
        if (recs == null)
          return false;
        if (recs.remove(value)) {
          if (recs.isEmpty())
            indexEngine.remove(key);
          else
            indexEngine.put(key, recs);
          return true;
        }
        return false;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /** Creates the index, fixing the value serializer to the list-of-RIDs stream serializer. */
  public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
      final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
    return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
        OStreamSerializerListRID.INSTANCE);
  }

  /** RIDs whose keys fall in the given range, capped at maxValuesToFetch. */
  public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
      final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
          MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** RIDs for keys greater than (or equal to, when inclusive) fromKey. */
  public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** RIDs for keys less than (or equal to, when inclusive) toKey. */
  public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * RIDs stored under each of the given keys (probed in sorted order), stopping
   * once maxValuesToFetch results are gathered; negative max means no limit.
   */
  public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      for (final Object key : sortedKeys) {
        final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // cap checked before each add
            if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
              return result;
            result.add(value);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  /** Key/RID entry documents for keys greater than (or equal to, when inclusive) fromKey. */
  public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Key/RID entry documents for keys less than (or equal to, when inclusive) toKey. */
  public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Entry documents in range; single-field indexes coerce both bounds to the field type first. */
  public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    acquireSharedLock();
    try {
      return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Counts entries in range; rejects bounds of differing runtime classes. */
  public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
      final int maxValuesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
      throw new IllegalArgumentException("Range from-to parameters are of different types");
    acquireSharedLock();
    try {
      return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * One {key, rid} document per stored RID for each given key (sorted order),
   * capped at maxEntriesToFetch; documents are marked not-dirty (read-only projections).
   */
  public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      for (final Object key : sortedKeys) {
        final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
              return result;
            final ODocument document = new ODocument();
            document.field("key", key);
            document.field("rid", value.getIdentity());
            document.unsetDirty();
            result.add(document);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  /** Total number of indexed values (per-key sets expanded via the transformer). */
  public long getSize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of distinct keys (null transformer: count entries, not values). */
  public long getKeySize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(null);
    } finally {
      releaseSharedLock();
    }
  }

  /** Iterator over all RIDs; OSharedResourceIterator re-takes the lock per step. */
  public Iterator<OIdentifiable> valuesIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
          indexEngine.valuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /** Same as valuesIterator() but in reverse key order. */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public Iterator<OIdentifiable> valuesInverseIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /** Adapter letting the engine treat each stored Set of RIDs as a plain Collection. */
  private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
    private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

    @Override
    public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
      return value;
    }

    @Override
    public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
      // unchecked cast: assumes the engine only hands back the Sets it was given — TODO confirm
      return (Set<OIdentifiable>) collection;
    }
  }
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, algorithm, indexEngine);
}
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
OStreamSerializerListRID.INSTANCE);
}
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final OMVRBTreeRIDSet values = (OMVRBTreeRIDSet) indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
/**
 * Returns key/RID entry documents for keys within [rangeFrom, rangeTo]
 * ({@code inclusive} applies to both bounds), limited to {@code maxEntriesToFetch}.
 * When the index has a single key type, both bounds are first coerced to it.
 */
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
  checkForRebuild();
  final OType[] keyTypes = getDefinition().getTypes();
  if (keyTypes.length == 1) {
    final Class<?> javaType = keyTypes[0].getDefaultJavaType();
    rangeFrom = OType.convert(rangeFrom, javaType);
    rangeTo = OType.convert(rangeTo, javaType);
  }
  acquireSharedLock();
  try {
    return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch,
        MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Counts the values whose keys fall within [rangeFrom, rangeTo], honoring the
 * per-bound inclusiveness flags and the optional {@code maxValuesToFetch} cap.
 * Single-typed indexes coerce both bounds to the key type first.
 *
 * @throws IllegalArgumentException when both bounds are non-null but of different classes
 */
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
    final int maxValuesToFetch) {
  checkForRebuild();
  final OType[] keyTypes = getDefinition().getTypes();
  if (keyTypes.length == 1) {
    final Class<?> javaType = keyTypes[0].getDefaultJavaType();
    rangeFrom = OType.convert(rangeFrom, javaType);
    rangeTo = OType.convert(rangeTo, javaType);
  }
  final boolean typeMismatch = rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass();
  if (typeMismatch)
    throw new IllegalArgumentException("Range from-to parameters are of different types");
  acquireSharedLock();
  try {
    return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
        MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds one {key, rid} document per stored (key, value) pair for the given keys.
 * Keys are visited in {@link ODefaultComparator} order; building stops once
 * {@code maxEntriesToFetch} documents exist (when that limit is > -1).
 */
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
  checkForRebuild();
  final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
  Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    for (final Object key : sortedKeys) {
      // FIX: cast to the Set interface rather than the concrete OMVRBTreeRIDSet class.
      // Only Set operations are used below, and a narrowing class cast would throw
      // ClassCastException if the engine stores a different Set implementation.
      final Set<OIdentifiable> values = (Set<OIdentifiable>) indexEngine.get(key);
      if (values == null)
        continue;
      if (!values.isEmpty()) {
        for (final OIdentifiable value : values) {
          // Limit is checked before each add so at most maxEntriesToFetch entries are returned.
          if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
            return result;
          final ODocument document = new ODocument();
          document.field("key", key);
          document.field("rid", value.getIdentity());
          // Mark clean so the synthetic result document is not scheduled for saving.
          document.unsetDirty();
          result.add(document);
        }
      }
    }
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Total number of stored values across all keys (multi-values counted individually,
 * via the multi-value transformer).
 */
public long getSize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    final long total = indexEngine.size(MultiValuesTransformer.INSTANCE);
    return total;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Number of distinct keys in the index (no transformer, so each key counts once
 * regardless of how many values it holds).
 */
public long getKeySize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    final long keys = indexEngine.size(null);
    return keys;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Iterator over every stored value, flattening the per-key multi-value sets.
 * The returned iterator re-acquires the shared resource on each step via
 * {@link OSharedResourceIterator}.
 */
public Iterator<OIdentifiable> valuesIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    final OMultiCollectionIterator<OIdentifiable> flattened =
        new OMultiCollectionIterator<OIdentifiable>(indexEngine.valuesIterator());
    return new OSharedResourceIterator<OIdentifiable>(this, flattened);
  } finally {
    releaseSharedLock();
  }
}
// Iterator over every stored value using the engine's inverse cursor (per the
// inverseValuesIterator name, presumably reverse key order — confirm with the engine docs).
// Raw types are required because OSharedResourceIterator's generic signature does not line
// up with the engine's inverse cursor; the suppression is scoped to this one method.
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
// Adapter that lets generic engine operations view the stored Set values as plain
// Collections and convert them back. Stateless singleton.
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
// A Set already is a Collection: pass through without copying.
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
// Assumes the engine only hands back the same Set obtained from transformFromValue;
// a genuine non-Set Collection here would throw ClassCastException — TODO confirm.
return (Set<OIdentifiable>) collection;
}
}
}
Right
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
/**
 * Abstract index implementation that supports multiple values for the same key.
 * Each key maps to a Set of OIdentifiable. Read paths run under the shared lock;
 * write paths additionally take the modification and exclusive locks.
 */
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
// True when per-key values live in SB-tree backed RID containers rather than OMVRBTreeRIDSet.
protected final boolean useSBTreeRIDSet;
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
// SB-tree containers are used only on the local paginated storage and only when enabled
// by the INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT global flag.
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
// Returns a defensive copy of the RIDs under 'key'; an unknown key yields an empty set, never null.
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
// Number of values stored under 'key' (0 when the key is absent).
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
// Adds 'iSingleValue' to the set stored under 'key', creating the per-key container on
// first insert. Write path: modification lock first, then exclusive lock.
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
// Keep raw RIDs; do not auto-load records while iterating the set.
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
// Save first when the record has no valid identity yet, so a definitive RID gets indexed.
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
// Re-put so the engine persists the (possibly newly created) container.
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
// Removes the record via the engine's value-removal path, returning the engine's removal count.
// NOTE(review): unlike put()/remove(key, value), this takes only the exclusive lock and not
// the modification lock — confirm the asymmetry is intentional.
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
// Removes one (key, value) association; drops the key entirely when its set becomes empty,
// otherwise re-puts the shrunken set so the engine persists the change.
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
// Creates the index structure, picking the value serializer that matches the RID-container choice.
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (useSBTreeRIDSet)
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
// Range query: values whose keys fall in [rangeFrom, rangeTo] with per-bound inclusiveness.
// Read-only; the transformer flattens the stored sets into the result collection.
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Values for keys greater than (or equal to, when inclusive) 'fromKey'.
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Values for keys smaller than (or equal to, when inclusive) 'toKey'.
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Gathers values for several keys, visited in ODefaultComparator order; stops once
// maxValuesToFetch values (when > -1) have been gathered.
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
// Key/RID entry documents for keys greater than (or equal to, when inclusive) 'fromKey'.
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Key/RID entry documents for keys smaller than (or equal to, when inclusive) 'toKey'.
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Entry documents for keys within [rangeFrom, rangeTo]; single-typed indexes coerce both
// bounds to the key type first.
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Counts values in the key range; rejects bounds of different classes after conversion.
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Builds one {key, rid} document per stored pair for the given keys; documents are marked
// clean (unsetDirty) so they are not scheduled for saving.
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
// Total number of stored values across all keys (multi-values counted individually).
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Number of distinct keys (null transformer: each key counts once).
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
// Iterator over every stored value, flattening the per-key sets.
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
// Same as valuesIterator but using the engine's inverse cursor; raw types are required by
// OSharedResourceIterator's signature.
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
// Adapter letting generic engine operations view stored Set values as Collections and back.
// transformToValue assumes it is only handed back a Set (otherwise ClassCastException).
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
/**
 * Abstract index implementation that supports multiple values for the same key.
 * Each key maps to a Set of OIdentifiable. Read paths run under the shared lock;
 * write paths additionally take the modification and exclusive locks.
 */
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
// True when per-key values live in SB-tree backed RID containers rather than OMVRBTreeRIDSet.
protected final boolean useSBTreeRIDSet;
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
// SB-tree containers are used only on the local paginated storage and only when enabled
// by the INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT global flag.
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
// Returns a defensive copy of the RIDs under 'key'; an unknown key yields an empty set, never null.
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
// Number of values stored under 'key' (0 when the key is absent).
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
// Adds 'iSingleValue' to the set stored under 'key', creating the per-key container on
// first insert. Write path: modification lock first, then exclusive lock.
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
// Keep raw RIDs; do not auto-load records while iterating the set.
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
// Save first when the record has no valid identity yet, so a definitive RID gets indexed.
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
// Re-put so the engine persists the (possibly newly created) container.
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
// Removes the record via the engine's value-removal path, returning the engine's removal count.
// NOTE(review): unlike put()/remove(key, value), this takes only the exclusive lock and not
// the modification lock — confirm the asymmetry is intentional.
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
// Removes one (key, value) association; drops the key entirely when its set becomes empty,
// otherwise re-puts the shrunken set so the engine persists the change.
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
// Creates the index structure, picking the value serializer that matches the RID-container choice.
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (useSBTreeRIDSet)
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
// Range query: values whose keys fall in [rangeFrom, rangeTo] with per-bound inclusiveness.
// Read-only; the transformer flattens the stored sets into the result collection.
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Values for keys greater than (or equal to, when inclusive) 'fromKey'.
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Values for keys smaller than (or equal to, when inclusive) 'toKey'.
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Gathers values for several keys, visited in ODefaultComparator order; stops once
// maxValuesToFetch values (when > -1) have been gathered.
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
// Key/RID entry documents for keys greater than (or equal to, when inclusive) 'fromKey'.
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Key/RID entry documents for keys smaller than (or equal to, when inclusive) 'toKey'.
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Entry documents for keys within [rangeFrom, rangeTo]; single-typed indexes coerce both
// bounds to the key type first.
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Counts values in the key range; rejects bounds of different classes after conversion.
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Builds one {key, rid} document per stored pair for the given keys; documents are marked
// clean (unsetDirty) so they are not scheduled for saving.
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
// Total number of stored values across all keys (multi-values counted individually).
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
// Number of distinct keys (null transformer: each key counts once).
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
// Iterator over every stored value, flattening the per-key sets.
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
// Same as valuesIterator but using the engine's inverse cursor; raw types are required by
// OSharedResourceIterator's signature.
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
// Adapter letting generic engine operations view stored Set values as Collections and back.
// transformToValue assumes it is only handed back a Set (otherwise ClassCastException).
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
MergeMethods
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
protected final boolean useSBTreeRIDSet;
// Creates a multi-value index of the given engine type.
// NOTE(review): the 'algorithm' parameter is accepted but never used or forwarded to
// super(type, indexEngine) in this visible body — confirm whether it should be passed on.
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
// SB-tree RID containers are only available on the local paginated storage engine and
// must additionally be enabled via global configuration.
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME) && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
/**
 * Looks up the RIDs stored under {@code key}.
 * Returns a defensive copy so callers cannot mutate index internals;
 * an unknown key yields an empty set, never {@code null}.
 */
public Set<OIdentifiable> get(final Object key) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Set<OIdentifiable> stored = indexEngine.get(key);
    if (stored == null)
      return Collections.emptySet();
    return new HashSet<OIdentifiable>(stored);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Number of values stored under {@code key}; 0 when the key is absent.
 */
public long count(final Object key) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Set<OIdentifiable> stored = indexEngine.get(key);
    return stored == null ? 0 : stored.size();
  } finally {
    releaseSharedLock();
  }
}
// Adds 'iSingleValue' to the value set of 'key', creating the per-key RID container on
// first insert. Write path: modification lock first, then exclusive lock (same nesting as
// remove(key, value)).
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
// SB-tree backed container, named after this index.
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
// Keep raw RIDs; do not auto-load records while iterating the set.
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
// Persist the record first if it has no valid identity yet, so a definitive RID is indexed.
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
// Re-put so the engine persists the (possibly newly created) container.
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
// Removes the record through the engine's value-removal path (apparently across all keys,
// per the removeValue name) and returns the engine-reported removal count.
// NOTE(review): unlike put()/remove(key, value), this takes only the exclusive lock and
// never requests the modification lock — confirm the asymmetry is intentional.
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
// Removes a single (key, value) association.
// Returns true only when the value was actually present under the key. When the value set
// becomes empty the key itself is dropped; otherwise the shrunken set is re-put so the
// engine persists the change.
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
/**
 * Creates the underlying index structure, choosing the value serializer that matches the
 * configured RID-container implementation (SB-tree container vs. RID list).
 */
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName, final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
  final OStreamSerializer serializer = useSBTreeRIDSet
      ? OStreamSerializerSBTreeIndexRIDContainer.INSTANCE
      : OStreamSerializerListRID.INSTANCE;
  return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild,
      progressListener, serializer);
}
/**
 * Returns the values whose keys fall within [rangeFrom, rangeTo]; each bound's
 * inclusiveness is controlled independently. Limited to {@code maxValuesToFetch}
 * results when that limit is non-negative. Read-only (shared lock).
 */
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Collection<OIdentifiable> matches = indexEngine.getValuesBetween(rangeFrom, fromInclusive,
        rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    return matches;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Returns the values stored under keys greater than (or equal to, when inclusive)
 * {@code fromKey}, up to {@code maxValuesToFetch} when that limit is non-negative.
 */
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Collection<OIdentifiable> matches = indexEngine.getValuesMajor(fromKey, isInclusive,
        maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    return matches;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Returns the values stored under keys smaller than (or equal to, when inclusive)
 * {@code toKey}, up to {@code maxValuesToFetch} when that limit is non-negative.
 */
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Collection<OIdentifiable> matches = indexEngine.getValuesMinor(toKey, isInclusive,
        maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    return matches;
  } finally {
    releaseSharedLock();
  }
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
  checkForRebuild();
  // Sort the keys so the engine is probed in a deterministic order.
  final List<Object> keys = new ArrayList<Object>(iKeys);
  Collections.sort(keys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    for (final Object key : keys) {
      final Set<OIdentifiable> bucket = indexEngine.get(key);
      if (bucket == null || bucket.isEmpty())
        continue;
      for (final OIdentifiable rid : bucket) {
        // Stop once the caller-imposed limit is reached (-1 means unlimited).
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return result;
        result.add(rid);
      }
    }
    return result;
  } finally {
    releaseSharedLock();
  }
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    // Key/RID entry documents for keys >= fromKey (or > fromKey when not inclusive).
    final Collection<ODocument> entries = indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    return entries;
  } finally {
    releaseSharedLock();
  }
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    // Key/RID entry documents for keys <= toKey (or < toKey when not inclusive).
    final Collection<ODocument> entries = indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    return entries;
  } finally {
    releaseSharedLock();
  }
}
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
  checkForRebuild();
  // For single-field indexes, coerce both bounds to the key's declared Java type.
  final OType[] keyTypes = getDefinition().getTypes();
  if (keyTypes.length == 1) {
    final Class<?> javaType = keyTypes[0].getDefaultJavaType();
    rangeFrom = OType.convert(rangeFrom, javaType);
    rangeTo = OType.convert(rangeTo, javaType);
  }
  acquireSharedLock();
  try {
    return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  // For single-field indexes, coerce both bounds to the key's declared Java type.
  final OType[] keyTypes = getDefinition().getTypes();
  if (keyTypes.length == 1) {
    final Class<?> javaType = keyTypes[0].getDefaultJavaType();
    rangeFrom = OType.convert(rangeFrom, javaType);
    rangeTo = OType.convert(rangeTo, javaType);
  }
  // Mixed-type bounds cannot be compared consistently; reject them up front.
  if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
    throw new IllegalArgumentException("Range from-to parameters are of different types");
  acquireSharedLock();
  try {
    return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
  checkForRebuild();
  // Sort the keys so the engine is probed in a deterministic order.
  final List<Object> keys = new ArrayList<Object>(iKeys);
  Collections.sort(keys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    for (final Object key : keys) {
      final Set<OIdentifiable> bucket = indexEngine.get(key);
      if (bucket == null || bucket.isEmpty())
        continue;
      for (final OIdentifiable rid : bucket) {
        // Stop once the caller-imposed limit is reached (-1 means unlimited).
        if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
          return result;
        final ODocument entry = new ODocument();
        entry.field("key", key);
        entry.field("rid", rid.getIdentity());
        entry.unsetDirty();
        result.add(entry);
      }
    }
    return result;
  } finally {
    releaseSharedLock();
  }
}
public long getSize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    // Counts every (key, rid) association: the transformer expands each multi-value set.
    final long total = indexEngine.size(MultiValuesTransformer.INSTANCE);
    return total;
  } finally {
    releaseSharedLock();
  }
}
public long getKeySize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    // A null transformer makes the engine count distinct keys only, not individual RIDs.
    final long keyCount = indexEngine.size(null);
    return keyCount;
  } finally {
    releaseSharedLock();
  }
}
public Iterator<OIdentifiable> valuesIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    // Flatten the per-key value sets, then guard iteration with the shared-resource wrapper.
    final OMultiCollectionIterator<OIdentifiable> flattened = new OMultiCollectionIterator<OIdentifiable>(indexEngine.valuesIterator());
    return new OSharedResourceIterator<OIdentifiable>(this, flattened);
  } finally {
    releaseSharedLock();
  }
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    // Same as valuesIterator() but walks keys in reverse order; raw type kept for the wrapper.
    final OMultiCollectionIterator<OIdentifiable> flattened = new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator());
    return new OSharedResourceIterator(this, flattened);
  } finally {
    releaseSharedLock();
  }
}
// Adapts the engine's per-key Set<OIdentifiable> to/from the generic Collection view it expects.
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
  private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

  @Override
  public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> storedSet) {
    // A stored Set is already a Collection; expose it unchanged.
    return storedSet;
  }

  @Override
  public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
    // Callers always hand back the same Set instance obtained above, so this cast holds.
    return (Set<OIdentifiable>) collection;
  }
}
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
  // True when a key's RIDs live in an on-disk SB-Tree container; false when an in-memory MVRB-Tree RID set is used.
  protected final boolean useSBTreeRIDSet;

  public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
    // NOTE(review): 'algorithm' is accepted but not forwarded to super — confirm this is intentional for this revision.
    super(type, indexEngine);
    OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
    // SB-Tree containers apply only on the local paginated engine and must be enabled by configuration.
    useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME) && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
  }

  // Returns a defensive copy of the RIDs stored under 'key'; empty set when the key is absent.
  public Set<OIdentifiable> get(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null)
        return Collections.emptySet();
      // Copy so callers cannot mutate the engine's internal set outside the lock.
      return new HashSet<OIdentifiable>(values);
    } finally {
      releaseSharedLock();
    }
  }

  // Number of RIDs stored under a single key (0 when absent).
  public long count(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null)
        return 0;
      return values.size();
    } finally {
      releaseSharedLock();
    }
  }

  // Associates one more RID with 'key', creating the value container on first use.
  public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
    checkForRebuild();
    // Lock order: modification lock first, then the exclusive lock (same order as remove()).
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        checkForKeyType(key);
        Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null) {
          if (useSBTreeRIDSet) {
            values = new OSBTreeIndexRIDContainer(getName());
          } else {
            values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
            ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
          }
        }
        // Save the record first if it has no valid identity yet, so a definitive RID is indexed.
        if (!iSingleValue.getIdentity().isValid())
          ((ORecord<?>) iSingleValue).save();
        values.add(iSingleValue.getIdentity());
        indexEngine.put(key, values);
        return this;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  // Removes 'iRecord' wherever it appears; the engine reports how many entries were dropped.
  public int remove(final OIdentifiable iRecord) {
    checkForRebuild();
    acquireExclusiveLock();
    try {
      return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseExclusiveLock();
    }
  }

  // Removes a single (key, value) association; drops the key when its set becomes empty.
  @Override
  public boolean remove(final Object key, final OIdentifiable value) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        Set<OIdentifiable> recs = indexEngine.get(key);
        if (recs == null)
          return false;
        if (recs.remove(value)) {
          // Write the updated set back (or remove the key) so the change is persisted.
          if (recs.isEmpty())
            indexEngine.remove(key);
          else
            indexEngine.put(key, recs);
          return true;
        }
        return false;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  // Creates the index, choosing the stream serializer that matches the configured RID container.
  public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName, final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
    final OStreamSerializer serializer;
    if (useSBTreeRIDSet)
      serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
    else
      serializer = OStreamSerializerListRID.INSTANCE;
    return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener, serializer);
  }

  // Values for keys in [rangeFrom, rangeTo] honoring each bound's inclusiveness.
  public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Values for keys >= fromKey (or > fromKey when not inclusive).
  public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Values for keys <= toKey (or < toKey when not inclusive).
  public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Union of values for the given keys; keys are sorted first for deterministic probing.
  public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      for (final Object key : sortedKeys) {
        final Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // -1 means unlimited; otherwise stop at the limit.
            if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
              return result;
            result.add(value);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Key/RID entry documents for keys >= fromKey.
  public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Key/RID entry documents for keys <= toKey.
  public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Entry documents for keys in the given range; single-field keys are coerced to their declared type first.
  public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    acquireSharedLock();
    try {
      return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Counts values in the given key range; rejects bounds of mismatched types.
  public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
      throw new IllegalArgumentException("Range from-to parameters are of different types");
    acquireSharedLock();
    try {
      return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Builds {key, rid} documents for the given keys, sorted for deterministic probing.
  public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      for (final Object key : sortedKeys) {
        final Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // -1 means unlimited; otherwise stop at the limit.
            if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
              return result;
            final ODocument document = new ODocument();
            document.field("key", key);
            document.field("rid", value.getIdentity());
            document.unsetDirty();
            result.add(document);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Total number of (key, rid) associations (multi-value sets expanded by the transformer).
  public long getSize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Number of distinct keys (null transformer = count keys, not RIDs).
  public long getKeySize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(null);
    } finally {
      releaseSharedLock();
    }
  }

  // Iterator over all stored RIDs, flattened across keys and guarded by the shared-resource wrapper.
  public Iterator<OIdentifiable> valuesIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.valuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  // Same as valuesIterator() but walks keys in reverse order.
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public Iterator<OIdentifiable> valuesInverseIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  // Adapts the engine's per-key Set<OIdentifiable> to/from the generic Collection view it expects.
  private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
    private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

    @Override
    public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
      return value;
    }

    @Override
    public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
      // Callers always hand back the same Set instance obtained above, so this cast holds.
      return (Set<OIdentifiable>) collection;
    }
  }
}
// KeepBothMethods  (merge-resolution marker — commented out: a bare token is not valid Java)
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
  // True when a key's RIDs live in an on-disk SB-Tree container; false when an in-memory MVRB-Tree RID set is used.
  protected final boolean useSBTreeRIDSet;

  public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
    super(type, algorithm, indexEngine);
    // FIX: 'useSBTreeRIDSet' is a final field and must be definitely assigned in every
    // constructor; this constructor previously left it unassigned, which fails compilation
    // (JLS definite-assignment rules). Initialize it the same way the legacy constructor does.
    useSBTreeRIDSet = resolveUseSBTreeRIDSet();
  }

  public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
    super(type, indexEngine);
    useSBTreeRIDSet = resolveUseSBTreeRIDSet();
  }

  // SB-Tree containers apply only on the local paginated engine and must be enabled by configuration.
  private static boolean resolveUseSBTreeRIDSet() {
    final OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
    return storage.getType().equals(OEngineLocalPaginated.NAME)
        && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
  }

  // Returns a defensive copy of the RIDs stored under 'key'; empty set when the key is absent.
  public Set<OIdentifiable> get(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null)
        return Collections.emptySet();
      // Copy so callers cannot mutate the engine's internal set outside the lock.
      return new HashSet<OIdentifiable>(values);
    } finally {
      releaseSharedLock();
    }
  }

  // Number of RIDs stored under a single key (0 when absent).
  public long count(final Object key) {
    checkForRebuild();
    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null)
        return 0;
      return values.size();
    } finally {
      releaseSharedLock();
    }
  }

  // Associates one more RID with 'key', creating the value container on first use.
  public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
    checkForRebuild();
    // Lock order: modification lock first, then the exclusive lock (same order as remove()).
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        checkForKeyType(key);
        Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null) {
          if (useSBTreeRIDSet) {
            values = new OSBTreeIndexRIDContainer(getName());
          } else {
            values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
            ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
          }
        }
        // Save the record first if it has no valid identity yet, so a definitive RID is indexed.
        if (!iSingleValue.getIdentity().isValid())
          ((ORecord<?>) iSingleValue).save();
        values.add(iSingleValue.getIdentity());
        indexEngine.put(key, values);
        return this;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  // Removes 'iRecord' wherever it appears; the engine reports how many entries were dropped.
  public int remove(final OIdentifiable iRecord) {
    checkForRebuild();
    acquireExclusiveLock();
    try {
      return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseExclusiveLock();
    }
  }

  // Removes a single (key, value) association; drops the key when its set becomes empty.
  @Override
  public boolean remove(final Object key, final OIdentifiable value) {
    checkForRebuild();
    modificationLock.requestModificationLock();
    try {
      acquireExclusiveLock();
      try {
        Set<OIdentifiable> recs = indexEngine.get(key);
        if (recs == null)
          return false;
        if (recs.remove(value)) {
          // Write the updated set back (or remove the key) so the change is persisted.
          if (recs.isEmpty())
            indexEngine.remove(key);
          else
            indexEngine.put(key, recs);
          return true;
        }
        return false;
      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  // Creates the index, choosing the stream serializer that matches the configured RID container.
  public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName, final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
    final OStreamSerializer serializer;
    if (useSBTreeRIDSet)
      serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
    else
      serializer = OStreamSerializerListRID.INSTANCE;
    return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener, serializer);
  }

  // Values for keys in [rangeFrom, rangeTo] honoring each bound's inclusiveness.
  public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Values for keys >= fromKey (or > fromKey when not inclusive).
  public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Values for keys <= toKey (or < toKey when not inclusive).
  public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Union of values for the given keys; keys are sorted first for deterministic probing.
  public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      for (final Object key : sortedKeys) {
        final Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // -1 means unlimited; otherwise stop at the limit.
            if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
              return result;
            result.add(value);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Key/RID entry documents for keys >= fromKey.
  public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Key/RID entry documents for keys <= toKey.
  public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Entry documents for keys in the given range; single-field keys are coerced to their declared type first.
  public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    acquireSharedLock();
    try {
      return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Counts values in the given key range; rejects bounds of mismatched types.
  public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
    checkForRebuild();
    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
      rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
    }
    if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
      throw new IllegalArgumentException("Range from-to parameters are of different types");
    acquireSharedLock();
    try {
      return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Builds {key, rid} documents for the given keys, sorted for deterministic probing.
  public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
    checkForRebuild();
    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      for (final Object key : sortedKeys) {
        final Set<OIdentifiable> values = indexEngine.get(key);
        if (values == null)
          continue;
        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            // -1 means unlimited; otherwise stop at the limit.
            if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
              return result;
            final ODocument document = new ODocument();
            document.field("key", key);
            document.field("rid", value.getIdentity());
            document.unsetDirty();
            result.add(document);
          }
        }
      }
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Total number of (key, rid) associations (multi-value sets expanded by the transformer).
  public long getSize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  // Number of distinct keys (null transformer = count keys, not RIDs).
  public long getKeySize() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return indexEngine.size(null);
    } finally {
      releaseSharedLock();
    }
  }

  // Iterator over all stored RIDs, flattened across keys and guarded by the shared-resource wrapper.
  public Iterator<OIdentifiable> valuesIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.valuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  // Same as valuesIterator() but walks keys in reverse order.
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public Iterator<OIdentifiable> valuesInverseIterator() {
    checkForRebuild();
    acquireSharedLock();
    try {
      return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  // Adapts the engine's per-key Set<OIdentifiable> to/from the generic Collection view it expects.
  private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
    private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

    @Override
    public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
      return value;
    }

    @Override
    public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
      // Callers always hand back the same Set instance obtained above, so this cast holds.
      return (Set<OIdentifiable>) collection;
    }
  }
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
  super(type, algorithm, indexEngine);
  // FIX: 'useSBTreeRIDSet' is a final field and must be definitely assigned in every
  // constructor; this constructor previously left it unassigned, which fails compilation
  // (JLS definite-assignment rules). Initialize it exactly as the legacy constructor does.
  OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
  useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME) && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}

// True when a key's RIDs live in an on-disk SB-Tree container; false when an in-memory MVRB-Tree RID set is used.
protected final boolean useSBTreeRIDSet;

public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
  super(type, indexEngine);
  OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
  // SB-Tree containers apply only on the local paginated engine and must be enabled by configuration.
  useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME) && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
// Returns a defensive copy of the RIDs stored under 'key'; empty set when the key is absent.
public Set<OIdentifiable> get(final Object key) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Set<OIdentifiable> values = indexEngine.get(key);
    if (values == null)
      return Collections.emptySet();
    // Copy so callers cannot mutate the engine's internal set outside the lock.
    return new HashSet<OIdentifiable>(values);
  } finally {
    releaseSharedLock();
  }
}
// Number of RIDs stored under a single key (0 when absent).
public long count(final Object key) {
  checkForRebuild();
  acquireSharedLock();
  try {
    final Set<OIdentifiable> values = indexEngine.get(key);
    if (values == null)
      return 0;
    return values.size();
  } finally {
    releaseSharedLock();
  }
}
// Associates one more RID with 'key', creating the value container on first use.
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
  checkForRebuild();
  // Lock order: modification lock first, then the exclusive lock (same order as remove()).
  modificationLock.requestModificationLock();
  try {
    acquireExclusiveLock();
    try {
      checkForKeyType(key);
      Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null) {
        if (useSBTreeRIDSet) {
          values = new OSBTreeIndexRIDContainer(getName());
        } else {
          values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
          ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
        }
      }
      // Save the record first if it has no valid identity yet, so a definitive RID is indexed.
      if (!iSingleValue.getIdentity().isValid())
        ((ORecord<?>) iSingleValue).save();
      values.add(iSingleValue.getIdentity());
      indexEngine.put(key, values);
      return this;
    } finally {
      releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
// Removes 'iRecord' wherever it appears; the engine reports how many entries were dropped.
public int remove(final OIdentifiable iRecord) {
  checkForRebuild();
  acquireExclusiveLock();
  try {
    return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseExclusiveLock();
  }
}
// Removes a single (key, value) association; drops the key when its set becomes empty.
@Override
public boolean remove(final Object key, final OIdentifiable value) {
  checkForRebuild();
  modificationLock.requestModificationLock();
  try {
    acquireExclusiveLock();
    try {
      Set<OIdentifiable> recs = indexEngine.get(key);
      if (recs == null)
        return false;
      if (recs.remove(value)) {
        // Write the updated set back (or remove the key) so the change is persisted.
        if (recs.isEmpty())
          indexEngine.remove(key);
        else
          indexEngine.put(key, recs);
        return true;
      }
      return false;
    } finally {
      releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
// Creates the index, choosing the stream serializer that matches the configured RID container.
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName, final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
  final OStreamSerializer serializer;
  if (useSBTreeRIDSet)
    serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
  else
    serializer = OStreamSerializerListRID.INSTANCE;
  return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener, serializer);
}
// Values for keys in [rangeFrom, rangeTo] honoring each bound's inclusiveness.
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
Safe
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
protected final boolean useSBTreeRIDSet;
<<<<<<< MINE
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
=======
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, algorithm, indexEngine);
}
>>>>>>> YOURS
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (useSBTreeRIDSet)
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
protected final boolean useSBTreeRIDSet;
<<<<<<< MINE
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
}
=======
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, algorithm, indexEngine);
}
>>>>>>> YOURS
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (useSBTreeRIDSet)
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
Unstructured
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
<<<<<<< MINE
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, algorithm, indexEngine);
=======
protected final boolean useSBTreeRIDSet;
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
>>>>>>> YOURS
}
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
/**
 * Creates the index, picking the value serializer that matches the per-key RID
 * container chosen at construction time (SBTree container vs. RID list).
 */
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
    final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
  final OStreamSerializer serializer;
  if (useSBTreeRIDSet)
    serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
  else
    serializer = OStreamSerializerListRID.INSTANCE;
  return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
      serializer);
}
/** Range query delegated to the engine; flattens multi-value entries via the transformer. */
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
    final boolean toInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
        MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Values for keys >= {@code fromKey} (or > when not inclusive), capped at {@code maxValuesToFetch}. */
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Values for keys <= {@code toKey} (or < when not inclusive), capped at {@code maxValuesToFetch}. */
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Collects the RIDs of all given keys, visiting keys in sorted order and stopping as
 * soon as {@code maxValuesToFetch} distinct RIDs are gathered (-1 means unlimited).
 */
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
  checkForRebuild();
  final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
  Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    for (final Object key : sortedKeys) {
      final Set<OIdentifiable> bucket = indexEngine.get(key);
      if (bucket == null || bucket.isEmpty())
        continue;
      for (final OIdentifiable rid : bucket) {
        // Check the cap before adding so exactly maxValuesToFetch entries are returned.
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return result;
        result.add(rid);
      }
    }
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** Key/RID documents for keys >= {@code fromKey}, capped at {@code maxEntriesToFetch}. */
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Key/RID documents for keys <= {@code toKey}, capped at {@code maxEntriesToFetch}. */
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Key/RID documents for keys in [rangeFrom, rangeTo]. For single-type (non-composite)
 * definitions both bounds are first coerced to the key's Java type.
 */
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
  checkForRebuild();
  final OType[] types = getDefinition().getTypes();
  if (types.length == 1) {
    rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
    rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
  }
  acquireSharedLock();
  try {
    return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Counts entries whose key lies in the given range. Single-type bounds are coerced to
 * the key's Java type first; mixed-type bounds are rejected up front.
 *
 * @throws IllegalArgumentException when rangeFrom and rangeTo have different classes
 */
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
    final int maxValuesToFetch) {
  checkForRebuild();
  final OType[] types = getDefinition().getTypes();
  if (types.length == 1) {
    rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
    rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
  }
  if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
    throw new IllegalArgumentException("Range from-to parameters are of different types");
  acquireSharedLock();
  try {
    return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds one {key, rid} document per (key, value) pair for the given keys, visiting
 * keys in sorted order and stopping once {@code maxEntriesToFetch} documents exist
 * (-1 means unlimited). Documents are marked not-dirty so they are read-only snapshots.
 */
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
  checkForRebuild();
  final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
  Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    for (final Object key : sortedKeys) {
      final Set<OIdentifiable> values = indexEngine.get(key);
      if (values == null)
        continue;
      if (!values.isEmpty()) {
        for (final OIdentifiable value : values) {
          // Cap checked before adding so exactly maxEntriesToFetch documents are returned.
          if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
            return result;
          final ODocument document = new ODocument();
          document.field("key", key);
          document.field("rid", value.getIdentity());
          document.unsetDirty();
          result.add(document);
        }
      }
    }
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** Total number of (key, value) pairs — multi-value entries counted per RID via the transformer. */
public long getSize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.size(MultiValuesTransformer.INSTANCE);
  } finally {
    releaseSharedLock();
  }
}
/** Number of distinct keys — null transformer makes the engine count keys, not values. */
public long getKeySize() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return indexEngine.size(null);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Iterator over all indexed RIDs in key order. Wrapped in OSharedResourceIterator so
 * the index lock discipline is honored while the caller iterates.
 */
public Iterator<OIdentifiable> valuesIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
        indexEngine.valuesIterator()));
  } finally {
    releaseSharedLock();
  }
}
/** Same as {@link #valuesIterator()} but walking keys in descending order. */
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
  checkForRebuild();
  acquireSharedLock();
  try {
    return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
  } finally {
    releaseSharedLock();
  }
}
/**
 * Adapts the engine's per-key Set container to the generic ValuesTransformer contract.
 * transformToValue performs an unchecked downcast — callers must pass a Set (true for
 * all call sites in this class).
 */
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
  private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

  @Override
  public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
    return value;
  }

  @Override
  public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
    return (Set<OIdentifiable>) collection;
  }
}
}/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index;
import java.util.*;
import com.orientechnologies.common.collection.OMultiCollectionIterator;
import com.orientechnologies.common.comparator.ODefaultComparator;
import com.orientechnologies.common.concur.resource.OSharedResourceIterator;
import com.orientechnologies.common.listener.OProgressListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.db.record.ridset.sbtree.OSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.engine.local.OEngineLocalPaginated;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerListRID;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializerSBTreeIndexRIDContainer;
import com.orientechnologies.orient.core.storage.OStorage;
import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet;
/**
* Abstract index implementation that supports multi-values for the same key.
*
* @author Luca Garulli
*
*/
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
<<<<<<< MINE
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, algorithm, indexEngine);
=======
protected final boolean useSBTreeRIDSet;
public OIndexMultiValues(final String type, OIndexEngine<Set<OIdentifiable>> indexEngine) {
super(type, indexEngine);
OStorage storage = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage();
useSBTreeRIDSet = storage.getType().equals(OEngineLocalPaginated.NAME)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean();
>>>>>>> YOURS
}
public Set<OIdentifiable> get(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(final Object key) {
checkForRebuild();
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(final Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (useSBTreeRIDSet) {
values = new OSBTreeIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public int remove(final OIdentifiable iRecord) {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.removeValue(iRecord, MultiValuesTransformer.INSTANCE);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(final Object key, final OIdentifiable value) {
checkForRebuild();
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (useSBTreeRIDSet)
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
public Collection<OIdentifiable> getValuesBetween(final Object rangeFrom, final boolean fromInclusive, final Object rangeTo,
final boolean toInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch,
MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMajor(fromKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive, final int maxValuesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getValuesMinor(toKey, isInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<OIdentifiable> getValues(final Collection<?> iKeys, final int maxValuesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxValuesToFetch > -1 && maxValuesToFetch == result.size())
return result;
result.add(value);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive, final int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMajor(fromKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, int maxEntriesToFetch) {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.getEntriesMinor(toKey, isInclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, int maxEntriesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
return indexEngine.getEntriesBetween(rangeFrom, rangeTo, inclusive, maxEntriesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long count(Object rangeFrom, final boolean fromInclusive, Object rangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
rangeFrom = OType.convert(rangeFrom, types[0].getDefaultJavaType());
rangeTo = OType.convert(rangeTo, types[0].getDefaultJavaType());
}
if (rangeFrom != null && rangeTo != null && rangeFrom.getClass() != rangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(rangeFrom, fromInclusive, rangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public Collection<ODocument> getEntries(Collection<?> iKeys, int maxEntriesToFetch) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
for (final Object key : sortedKeys) {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (maxEntriesToFetch > -1 && maxEntriesToFetch == result.size())
return result;
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
result.add(document);
}
}
}
return result;
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
Diff Result
No diff
Case 81 - java_orientdb.rev_368c0_ff746..OSBTreeIndexEngine.java
Base
package com.orientechnologies.orient.core.index.engine;
import java.util.*;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTreeBucket;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
/** Configures the shared-resource lock (concurrency flag + timeout) from global configuration. */
public OSBTreeIndexEngine() {
  super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
      .getValueAsInteger(), true);
}
/** No-op: the tree is created/loaded explicitly via create() or load(). */
@Override
public void init() {
}
/** Flushes the SBTree to disk. NOTE(review): uses the shared lock for a write path — confirm intended. */
@Override
public void flush() {
  acquireSharedLock();
  try {
    sbTree.flush();
  } finally {
    releaseSharedLock();
  }
}
/**
 * Creates the on-disk SBTree: picks a key serializer from the index definition
 * (runtime-provided, composite, or single-type; simple-key fallback when there is no
 * definition), persists an identity record in the given cluster, then creates the tree.
 * The (OBinarySerializer<V>) cast of valueSerializer is unchecked — callers must pass
 * a binary serializer.
 */
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
    OStreamSerializer valueSerializer, boolean isAutomatic) {
  acquireExclusiveLock();
  try {
    final OBinarySerializer keySerializer;
    if (indexDefinition != null) {
      if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
        // Runtime definitions carry their own serializer; key size is fixed at 1.
        sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
            OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
        keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
      } else {
        if (indexDefinition.getTypes().length > 1) {
          keySerializer = OCompositeKeySerializer.INSTANCE;
        } else {
          keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
        }
        sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
            OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
      }
    } else {
      sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
          OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
      keySerializer = new OSimpleKeySerializer();
    }
    // Identity record saved first so the engine has a RID even before any entries exist.
    final ORecordBytes identityRecord = new ORecordBytes();
    ODatabaseRecord database = getDatabase();
    final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
    database.save(identityRecord, clusterIndexName);
    identity = identityRecord.getIdentity();
    sbTree.create(indexName, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
  } finally {
    releaseExclusiveLock();
  }
}
/** Deletes the tree's on-disk data. NOTE(review): shared lock on a destructive path — confirm intended. */
@Override
public void delete() {
  acquireSharedLock();
  try {
    sbTree.delete();
  } finally {
    releaseSharedLock();
  }
}
/**
 * Loads an existing tree by name from the current database's storage.
 * NOTE(review): the indexRid and isAutomatic parameters are ignored here — confirm
 * that is intended (identity stays unset on the load path).
 */
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
  acquireExclusiveLock();
  try {
    sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
    ODatabaseRecord database = getDatabase();
    final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
    sbTree.load(indexName, storageLocalAbstract);
  } finally {
    releaseExclusiveLock();
  }
}
/** True when the key maps to a non-null value (a lookup, not a separate containment probe). */
@Override
public boolean contains(Object key) {
  acquireSharedLock();
  try {
    return sbTree.get(key) != null;
  } finally {
    releaseSharedLock();
  }
}
/** Removes a key; true if it was present. NOTE(review): shared lock on a write path — confirm intended. */
@Override
public boolean remove(Object key) {
  acquireSharedLock();
  try {
    return sbTree.remove(key) != null;
  } finally {
    releaseSharedLock();
  }
}
/** RID of the identity record created in create(); set only on the create path. */
@Override
public ORID getIdentity() {
  acquireSharedLock();
  try {
    return identity;
  } finally {
    releaseSharedLock();
  }
}
/** Removes all entries. NOTE(review): shared lock on a write path — confirm intended. */
@Override
public void clear() {
  acquireSharedLock();
  try {
    sbTree.clear();
  } finally {
    releaseSharedLock();
  }
}
/** Ascending-key entry iterator (prefetching wrapper over range loads). */
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
  acquireSharedLock();
  try {
    return new MapEntryIterator<V>(sbTree);
  } finally {
    releaseSharedLock();
  }
}
/** Descending-key entry iterator. */
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
  acquireSharedLock();
  try {
    return new InverseMapEntryIterator<V>(sbTree);
  } finally {
    releaseSharedLock();
  }
}
/** Ascending iterator over values only, adapting the entry iterator. */
@Override
public Iterator<V> valuesIterator() {
  acquireSharedLock();
  try {
    return new Iterator<V>() {
      private final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);

      @Override
      public boolean hasNext() {
        return entryIterator.hasNext();
      }

      @Override
      public V next() {
        return entryIterator.next().getValue();
      }

      @Override
      public void remove() {
        // Delegates; the underlying entry iterator throws UnsupportedOperationException.
        entryIterator.remove();
      }
    };
  } finally {
    releaseSharedLock();
  }
}
/** Descending iterator over values only, adapting the inverse entry iterator. */
@Override
public Iterator<V> inverseValuesIterator() {
  acquireSharedLock();
  try {
    return new Iterator<V>() {
      private final InverseMapEntryIterator<V> entryIterator = new InverseMapEntryIterator<V>(sbTree);

      @Override
      public boolean hasNext() {
        return entryIterator.hasNext();
      }

      @Override
      public V next() {
        return entryIterator.next().getValue();
      }

      @Override
      public void remove() {
        // Delegates; the underlying entry iterator throws UnsupportedOperationException.
        entryIterator.remove();
      }
    };
  } finally {
    releaseSharedLock();
  }
}
/** Iterable over keys in ascending order, adapting the entry iterator per iteration. */
@Override
public Iterable<Object> keys() {
  acquireSharedLock();
  try {
    return new Iterable<Object>() {
      @Override
      public Iterator<Object> iterator() {
        return new Iterator<Object>() {
          final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);

          @Override
          public boolean hasNext() {
            return entryIterator.hasNext();
          }

          @Override
          public Object next() {
            return entryIterator.next().getKey();
          }

          @Override
          public void remove() {
            entryIterator.remove();
          }
        };
      }
    };
  } finally {
    releaseSharedLock();
  }
}
/** No-op: the SBTree needs no explicit unload step here. */
@Override
public void unload() {
}
/** No-op: transactions require no engine-side setup. */
@Override
public void startTransaction() {
}
/** No-op: transactions require no engine-side teardown. */
@Override
public void stopTransaction() {
}
/** No-op: nothing to undo engine-side after a rollback. */
@Override
public void afterTxRollback() {
}
/** No-op: nothing to finalize engine-side after a commit. */
@Override
public void afterTxCommit() {
}
/** No-op: database close needs no engine-side handling. */
@Override
public void closeDb() {
}
/** Closes the underlying tree. NOTE(review): shared lock on a lifecycle path — confirm intended. */
@Override
public void close() {
  acquireSharedLock();
  try {
    sbTree.close();
  } finally {
    releaseSharedLock();
  }
}
/** No-op: no engine-side work needed before a transaction begins. */
@Override
public void beforeTxBegin() {
}
/** Point lookup; returns null for an absent key. */
@Override
public V get(Object key) {
  acquireSharedLock();
  try {
    return sbTree.get(key);
  } finally {
    releaseSharedLock();
  }
}
/** Inserts/replaces a key's value. NOTE(review): shared lock on a write path — confirm intended. */
@Override
public void put(Object key, V value) {
  acquireSharedLock();
  try {
    sbTree.put(key, value);
  } finally {
    releaseSharedLock();
  }
}
/**
 * Removes every key whose value matches {@code value}, scanning the full key range.
 * With a transformer, multi-value entries are unpacked and the whole key is dropped
 * when any contained identifiable matches. Returns the number of keys removed.
 */
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
  acquireExclusiveLock();
  try {
    // Collect first, remove after — avoids mutating the tree while the range scan runs.
    final Set<Object> keySetToRemove = new HashSet<Object>();
    if (sbTree.size() == 0)
      return 0;
    final Object firstKey = sbTree.firstKey();
    final Object lastKey = sbTree.lastKey();
    sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        if (transformer == null) {
          if (entry.value.equals(value))
            keySetToRemove.add(entry.key);
        } else {
          Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.value);
          for (OIdentifiable identifiable : identifiables) {
            if (identifiable.equals(value))
              keySetToRemove.add(entry.key);
          }
        }
        return true;
      }
    });
    for (Object keyToRemove : keySetToRemove)
      sbTree.remove(keyToRemove);
    return keySetToRemove.size();
  } finally {
    releaseExclusiveLock();
  }
}
/**
 * Collects identifiables for keys in [rangeFrom, rangeTo], flattening multi-value
 * entries; the scan stops once maxValuesToFetch results exist (-1 = unlimited).
 */
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
    final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        addToResult(transformer, result, entry.value, maxValuesToFetch);
        // Returning false tells the tree to stop the range scan.
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** Identifiables for keys >= fromKey (or >), capped at maxValuesToFetch (-1 = unlimited). */
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        addToResult(transformer, result, entry.value, maxValuesToFetch);
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** Identifiables for keys <= toKey (or <), capped at maxValuesToFetch (-1 = unlimited). */
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        addToResult(transformer, result, entry.value, maxValuesToFetch);
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** {key, rid} documents for keys >= fromKey, capped at maxEntriesToFetch (-1 = unlimited). */
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        final Object key = entry.key;
        final V value = entry.value;
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/** {key, rid} documents for keys <= toKey, capped at maxEntriesToFetch (-1 = unlimited). */
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        final Object key = entry.key;
        final V value = entry.value;
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * {key, rid} documents for keys in the range; the single {@code inclusive} flag is
 * applied to both bounds. Capped at maxEntriesToFetch (-1 = unlimited).
 */
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        final Object key = entry.key;
        final V value = entry.value;
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
          return false;
        return true;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Without a transformer: number of keys (tree size). With one: total value count,
 * computed by scanning the full key range; 0 for an empty tree (null first/last key).
 */
@Override
public long size(final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    if (transformer == null)
      return sbTree.size();
    else {
      final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
      final Object firstKey = sbTree.firstKey();
      final Object lastKey = sbTree.lastKey();
      if (firstKey != null && lastKey != null) {
        sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
        return counter.count;
      }
      return 0;
    }
  } finally {
    releaseSharedLock();
  }
}
/**
 * Counts values in the key range via an ItemsCounter; a null rangeTo degrades to an
 * open-ended "major" scan from rangeFrom.
 */
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
    ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
    if (rangeTo != null)
      sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
    else
      sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
    return itemsCounter.count;
  } finally {
    releaseSharedLock();
  }
}
/** SBTrees are ordered, so range queries are supported natively. */
@Override
public boolean hasRangeQuerySupport() {
  return true;
}
/** Current thread-bound database instance. */
private ODatabaseRecord getDatabase() {
  return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/**
 * Adds a tree value to the result set: unpacked through the transformer when present
 * (stopping early at the fetch cap), otherwise cast directly to OIdentifiable.
 */
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
  if (transformer != null) {
    Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
    for (OIdentifiable transformedValue : transformResult) {
      result.add(transformedValue);
      if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
        return;
    }
  } else
    result.add((OIdentifiable) value);
}
/**
 * Builds read-only {key, rid} documents for a tree value: one per identifiable when a
 * transformer unpacks it (stopping at the cap), or a single document otherwise.
 */
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
  if (transformer != null) {
    Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
    for (OIdentifiable transformedValue : transformResult) {
      final ODocument document = new ODocument();
      document.field("key", key);
      document.field("rid", transformedValue.getIdentity());
      document.unsetDirty();
      result.add(document);
      if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
        return;
    }
  } else {
    final ODocument document = new ODocument();
    document.field("key", key);
    document.field("rid", ((OIdentifiable) value).getIdentity());
    document.unsetDirty();
    result.add(document);
  }
}
/**
 * Ascending entry iterator that prefetches batches (up to 8001 entries per load) via
 * loadEntriesMajor. A null preFetchedValues list means exhaustion; firstKey tracks the
 * resume point for the next batch (exclusive, because firstTime=false on refills).
 * NOTE(review): next() assumes hasNext() was checked — it NPEs otherwise; confirm that
 * contract is acceptable for all callers.
 */
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
  private LinkedList<Map.Entry<Object, V>> preFetchedValues;
  private final OSBTree<Object, V> sbTree;
  private Object firstKey;

  MapEntryIterator(OSBTree<Object, V> sbTree) {
    this.sbTree = sbTree;
    if (sbTree.size() == 0) {
      this.preFetchedValues = null;
      return;
    }
    this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
    firstKey = sbTree.firstKey();
    prefetchData(true);
  }

  private void prefetchData(boolean firstTime) {
    sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(final OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        preFetchedValues.add(new Map.Entry<Object, V>() {
          @Override
          public Object getKey() {
            return entry.key;
          }

          @Override
          public V getValue() {
            return entry.value;
          }

          @Override
          public V setValue(V v) {
            throw new UnsupportedOperationException("setValue");
          }
        });
        // Cap each prefetch batch; returning false stops the range scan.
        return preFetchedValues.size() <= 8000;
      }
    });
    if (preFetchedValues.isEmpty())
      preFetchedValues = null;
    else
      firstKey = preFetchedValues.getLast().getKey();
  }

  @Override
  public boolean hasNext() {
    return preFetchedValues != null;
  }

  @Override
  public Map.Entry<Object, V> next() {
    final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
    if (preFetchedValues.isEmpty())
      prefetchData(false);
    return entry;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove");
  }
}
/**
 * Reverse iterator over the SB-tree, prefetching entries in batches of up to
 * 8000 via loadEntriesMinor. {@code preFetchedValues == null} marks exhaustion.
 * Fix vs. original: {@code next()} on an exhausted iterator now throws
 * {@link java.util.NoSuchElementException} per the Iterator contract instead
 * of failing with a NullPointerException.
 */
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
  private final OSBTree<Object, V> sbTree;
  // Current batch; null once the tree is exhausted.
  private LinkedList<Map.Entry<Object, V>> preFetchedValues;
  // Key to resume prefetching from (inclusive only on the very first fetch).
  private Object lastKey;

  InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
    this.sbTree = sbTree;
    if (sbTree.size() == 0) {
      this.preFetchedValues = null;
      return;
    }
    this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
    lastKey = sbTree.lastKey();
    prefetchData(true);
  }

  // Loads the next batch ending at lastKey; the first call includes lastKey
  // itself, subsequent calls start strictly below it.
  private void prefetchData(boolean firstTime) {
    sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(final OSBTreeBucket.SBTreeEntry<Object, V> entry) {
        preFetchedValues.add(new Map.Entry<Object, V>() {
          @Override
          public Object getKey() {
            return entry.key;
          }

          @Override
          public V getValue() {
            return entry.value;
          }

          @Override
          public V setValue(V v) {
            throw new UnsupportedOperationException("setValue");
          }
        });
        // Stop the range scan once the batch is full.
        return preFetchedValues.size() <= 8000;
      }
    });
    if (preFetchedValues.isEmpty())
      preFetchedValues = null;
    else
      lastKey = preFetchedValues.getLast().getKey();
  }

  @Override
  public boolean hasNext() {
    return preFetchedValues != null;
  }

  @Override
  public Map.Entry<Object, V> next() {
    if (preFetchedValues == null)
      throw new java.util.NoSuchElementException();
    final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
    if (preFetchedValues.isEmpty())
      prefetchData(false);
    return entry;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove");
  }
}
/**
 * Range-result listener that counts entries. With a transformer each entry
 * contributes the size of its transformed collection; otherwise each entry
 * counts as one. Aborts the scan once {@code maxValuesToFetch} (when positive)
 * has been reached; note the total may overshoot the limit by up to one
 * transformed collection.
 */
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
  private final ValuesTransformer<V> valuesTransformer;
  private final int maxValuesToFetch;
  // Running total, read by the caller after the scan completes.
  private int count;

  private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
    this.valuesTransformer = valuesTransformer;
    this.maxValuesToFetch = maxValuesToFetch;
  }

  @Override
  public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
    count += (valuesTransformer == null) ? 1 : valuesTransformer.transformFromValue(entry.value).size();
    // Continue scanning only while below the (positive) limit.
    return maxValuesToFetch <= 0 || count < maxValuesToFetch;
  }
}
}
package com.orientechnologies.orient.core.index.engine;
import java.util.*;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTreeBucket;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new MapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new InverseMapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final InverseMapEntryIterator<V> entryIterator = new InverseMapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
if (transformer == null) {
if (entry.value.equals(value))
keySetToRemove.add(entry.key);
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.value);
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.key);
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
addToResult(transformer, result, entry.value, maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
addToResult(transformer, result, entry.value, maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
addToResult(transformer, result, entry.value, maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
final Object key = entry.key;
final V value = entry.value;
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
final Object key = entry.key;
final V value = entry.value;
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
final Object key = entry.key;
final V value = entry.value;
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final OSBTreeBucket.SBTreeEntry<Object, V> entry) {
preFetchedValues.add(new Map.Entry<Object, V>() {
@Override
public Object getKey() {
return entry.key;
}
@Override
public V getValue() {
return entry.value;
}
@Override
public V setValue(V v) {
throw new UnsupportedOperationException("setValue");
}
});
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final OSBTreeBucket.SBTreeEntry<Object, V> entry) {
preFetchedValues.add(new Map.Entry<Object, V>() {
@Override
public Object getKey() {
return entry.key;
}
@Override
public V getValue() {
return entry.value;
}
@Override
public V setValue(V v) {
throw new UnsupportedOperationException("setValue");
}
});
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
public boolean addResult(OSBTreeBucket.SBTreeEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.value).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
// ===== Left (revision marker from concatenated file versions) =====
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new MapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new InverseMapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final InverseMapEntryIterator<V> entryIterator = new InverseMapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/**
 * Returns the identifiables of all entries whose key is less than
 * (or equal to, when {@code isInclusive}) {@code toKey}.
 * Stops early once {@code maxValuesToFetch} results were collected
 * (a negative limit means "unbounded").
 */
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
// Abort the tree traversal once the optional fetch limit is reached.
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    // Each matching entry is materialized as a {key, rid} document.
    final Set<ODocument> entries = new ODocumentFieldsHashSet();
    sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(Map.Entry<Object, V> entry) {
        addToEntriesResult(transformer, entries, entry.getKey(), entry.getValue(), maxEntriesToFetch);
        // Continue only while the optional fetch limit has not been hit.
        return maxEntriesToFetch <= -1 || entries.size() != maxEntriesToFetch;
      }
    });
    return entries;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Returns {key, rid} documents for all entries whose key is less than
 * (or equal to, when {@code isInclusive}) {@code toKey}, stopping once
 * {@code maxEntriesToFetch} documents were collected (negative = unbounded).
 */
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
// Abort the traversal once the optional fetch limit is reached.
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/**
 * Returns {key, rid} documents for all entries with key between
 * {@code rangeFrom} and {@code rangeTo}. Note the single {@code inclusive}
 * flag applies to BOTH range ends. Stops once {@code maxEntriesToFetch}
 * documents were collected (negative = unbounded).
 */
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
// Abort the traversal once the optional fetch limit is reached.
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/**
 * Returns the number of index items. Without a transformer this is simply
 * the tree size; with one, every stored container value is expanded and its
 * elements counted, which requires a full scan from first to last key.
 */
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
// -1 disables the counter's fetch limit: count everything.
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
// Empty tree: no boundary keys, nothing to count.
return 0;
}
} finally {
releaseSharedLock();
}
}
/**
 * Counts items in the key range [rangeFrom, rangeTo]; when {@code rangeTo}
 * is null the count runs from {@code rangeFrom} to the end of the tree.
 * {@code maxValuesToFetch} caps the count (values <= 0 mean "unbounded" —
 * see ItemsCounter).
 */
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
/** SB-tree keys are ordered, so range queries are supported natively. */
@Override
public boolean hasRangeQuerySupport() {
return true;
}
/** Returns the database bound to the current thread. */
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/**
 * Appends the identifiable(s) represented by {@code value} to {@code result}.
 * With a transformer the stored value is a container and is unfolded into
 * individual identifiables, stopping once the fetch limit is reached;
 * without one the value itself is the identifiable.
 */
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
  if (transformer == null) {
    result.add((OIdentifiable) value);
    return;
  }
  for (final OIdentifiable identifiable : transformer.transformFromValue(value)) {
    result.add(identifiable);
    // Honour the optional fetch limit while unfolding a single container value.
    if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
      break;
  }
}
/**
 * Builds a {key, rid} document for each identifiable represented by
 * {@code value} and adds it to {@code result}. With a transformer the value
 * is unfolded into individual identifiables, stopping once the fetch limit
 * is reached; without one the value itself is the identifiable.
 */
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
// Mark clean so the synthetic document is not picked up for saving.
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
/**
 * Forward iterator over the SB-tree entries. Entries are pre-fetched in
 * batches (up to ~8000 at a time) to bound the number of tree traversals;
 * {@code preFetchedValues == null} marks exhaustion.
 */
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
  private LinkedList<Map.Entry<Object, V>> preFetchedValues;
  private final OSBTree<Object, V> sbTree;
  // Key to resume the next batch from (last key of the previous batch).
  private Object firstKey;

  MapEntryIterator(OSBTree<Object, V> sbTree) {
    this.sbTree = sbTree;
    if (sbTree.size() == 0) {
      // Empty tree: the iterator is exhausted from the start.
      this.preFetchedValues = null;
      return;
    }
    this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
    firstKey = sbTree.firstKey();
    prefetchData(true);
  }

  /**
   * Loads the next batch of entries starting at {@code firstKey};
   * {@code firstTime} controls whether that key itself is included
   * (it is the already-consumed resume point on later calls).
   */
  private void prefetchData(boolean firstTime) {
    sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(final Map.Entry<Object, V> entry) {
        preFetchedValues.add(entry);
        return preFetchedValues.size() <= 8000;
      }
    });
    if (preFetchedValues.isEmpty())
      preFetchedValues = null;
    else
      firstKey = preFetchedValues.getLast().getKey();
  }

  @Override
  public boolean hasNext() {
    return preFetchedValues != null;
  }

  @Override
  public Map.Entry<Object, V> next() {
    // Honour the Iterator contract instead of failing with a NullPointerException.
    if (preFetchedValues == null)
      throw new java.util.NoSuchElementException("No more entries in the SB-tree");
    final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
    if (preFetchedValues.isEmpty())
      prefetchData(false);
    return entry;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove");
  }
}
/**
 * Reverse iterator over the SB-tree entries (last key first). Entries are
 * pre-fetched in batches (up to ~8000 at a time) to bound the number of
 * tree traversals; {@code preFetchedValues == null} marks exhaustion.
 */
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
  private final OSBTree<Object, V> sbTree;
  private LinkedList<Map.Entry<Object, V>> preFetchedValues;
  // Key to resume the next batch from (last key of the previous batch).
  private Object lastKey;

  InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
    this.sbTree = sbTree;
    if (sbTree.size() == 0) {
      // Empty tree: the iterator is exhausted from the start.
      this.preFetchedValues = null;
      return;
    }
    this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
    lastKey = sbTree.lastKey();
    prefetchData(true);
  }

  /**
   * Loads the next batch of entries ending at {@code lastKey};
   * {@code firstTime} controls whether that key itself is included
   * (it is the already-consumed resume point on later calls).
   */
  private void prefetchData(boolean firstTime) {
    sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(final Map.Entry<Object, V> entry) {
        preFetchedValues.add(entry);
        return preFetchedValues.size() <= 8000;
      }
    });
    if (preFetchedValues.isEmpty())
      preFetchedValues = null;
    else
      lastKey = preFetchedValues.getLast().getKey();
  }

  @Override
  public boolean hasNext() {
    return preFetchedValues != null;
  }

  @Override
  public Map.Entry<Object, V> next() {
    // Honour the Iterator contract instead of failing with a NullPointerException.
    if (preFetchedValues == null)
      throw new java.util.NoSuchElementException("No more entries in the SB-tree");
    final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
    if (preFetchedValues.isEmpty())
      prefetchData(false);
    return entry;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove");
  }
}
/**
 * Range listener that counts items, optionally expanding each stored
 * container value through a {@link ValuesTransformer}. A positive
 * {@code maxValuesToFetch} caps the count; zero or negative means unbounded.
 */
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
  private final ValuesTransformer<V> valuesTransformer;
  private final int maxValuesToFetch;
  // long, not int: callers (size()/count()) return long, and with a
  // transformer the expanded total can exceed Integer.MAX_VALUE.
  private long count;

  private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
    this.valuesTransformer = valuesTransformer;
    this.maxValuesToFetch = maxValuesToFetch;
  }

  @Override
  public boolean addResult(Map.Entry<Object, V> entry) {
    if (valuesTransformer != null)
      count += valuesTransformer.transformFromValue(entry.getValue()).size();
    else
      count++;
    // '>=' because a transformer may add several items at once and overshoot the cap.
    if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
      return false;
    return true;
  }
}
}
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
/**
 * Index engine backed by a local on-disk SB-tree. All public operations are
 * guarded by the shared/exclusive lock inherited from
 * OSharedResourceAdaptiveExternal; structural operations (create/load,
 * removeValue) take the exclusive lock, everything else the shared lock
 * (mutators such as put/remove rely on the tree's own synchronization —
 * NOTE(review): verify this against OSBTree's thread-safety guarantees).
 */
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
// File extension of the on-disk SB-tree data file.
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
/**
 * Creates the on-disk tree, choosing the key serializer from the index
 * definition (runtime-provided, composite, or single-type), and saves an
 * identity record in the given cluster.
 */
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
// Composite keys need the composite serializer; single-type keys use the type's own.
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
// No definition: fall back to a single generic key.
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
// The saved record's RID becomes this engine's identity.
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
/** Re-opens an existing tree; the key size is derived from the definition. */
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
// NOTE(review): cannot distinguish a missing key from a key mapped to null.
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new MapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new InverseMapEntryIterator<V>(sbTree);
} finally {
releaseSharedLock();
}
}
/** Iterates values in ascending key order by delegating to the entry iterator. */
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
/** Iterates values in descending key order by delegating to the inverse entry iterator. */
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final InverseMapEntryIterator<V> entryIterator = new InverseMapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
/** Iterable over keys in ascending order, backed by the entry iterator. */
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final MapEntryIterator<V> entryIterator = new MapEntryIterator<V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
// Transaction hooks are no-ops: the SB-tree engine has no per-transaction state here.
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
/**
 * Removes every entry whose value (directly, or after expansion through the
 * transformer) equals the given identifiable. Keys are collected during a
 * full scan and removed afterwards to avoid mutating the tree mid-traversal.
 * Returns the number of removed keys.
 */
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
/** Identifiables of entries with key in the given range; negative limit = unbounded. */
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** Identifiables of entries with key >= fromKey (or > when not inclusive). */
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** Identifiables of entries with key <= toKey (or < when not inclusive). */
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** {key, rid} documents for entries with key >= fromKey. */
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** {key, rid} documents for entries with key <= toKey. */
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** {key, rid} documents for entries in range; the single flag covers both ends. */
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/** Item count; with a transformer every container value is expanded (full scan). */
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
/** Counts items in [rangeFrom, rangeTo]; null rangeTo counts to the end of the tree. */
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
/** Returns the database bound to the current thread. */
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/** Unfolds value into identifiables (via transformer if present), honouring the limit. */
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
/** Builds {key, rid} documents for the value's identifiables, honouring the limit. */
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
/** Forward batch-prefetching iterator over the tree entries (batches of ~8000). */
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
// Resume point: last key of the previous batch.
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
// null list marks exhaustion (see hasNext()).
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
// NOTE(review): next() after exhaustion throws NPE rather than NoSuchElementException.
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
/** Reverse batch-prefetching iterator over the tree entries (batches of ~8000). */
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
// Resume point: last key of the previous batch.
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
// null list marks exhaustion (see hasNext()).
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
// NOTE(review): next() after exhaustion throws NPE rather than NoSuchElementException.
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
/** Counts items, expanding container values via the transformer when present. */
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
// '>=' because a transformer may add several items at once and overshoot the cap.
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
Right
/*
* Copyright 2010-2012 Luca Garulli (l.garulli(at)orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
/**
 * Returns an iterator over the values only, in ascending key order.
 * Implemented as a thin adapter around the entry iterator.
 */
@Override
public Iterator<V> valuesIterator() {
  acquireSharedLock();
  try {
    return new Iterator<V>() {
      private final OSBTreeMapEntryIterator<Object, V> entries = new OSBTreeMapEntryIterator<Object, V>(sbTree);

      @Override
      public boolean hasNext() {
        return entries.hasNext();
      }

      @Override
      public V next() {
        // Delegate and strip the key.
        return entries.next().getValue();
      }

      @Override
      public void remove() {
        entries.remove();
      }
    };
  } finally {
    releaseSharedLock();
  }
}
/**
 * Returns an iterator over the values only, in descending key order.
 * Implemented as a thin adapter around the inverse entry iterator.
 */
@Override
public Iterator<V> inverseValuesIterator() {
  acquireSharedLock();
  try {
    return new Iterator<V>() {
      private final OSBTreeInverseMapEntryIterator<Object, V> entries =
          new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);

      @Override
      public boolean hasNext() {
        return entries.hasNext();
      }

      @Override
      public V next() {
        // Delegate and strip the key.
        return entries.next().getValue();
      }

      @Override
      public void remove() {
        entries.remove();
      }
    };
  } finally {
    releaseSharedLock();
  }
}
/**
 * Returns an {@link Iterable} over the keys in ascending order. Each call to
 * {@code iterator()} starts a fresh scan over the tree.
 */
@Override
public Iterable<Object> keys() {
  acquireSharedLock();
  try {
    return new Iterable<Object>() {
      @Override
      public Iterator<Object> iterator() {
        return new Iterator<Object>() {
          final OSBTreeMapEntryIterator<Object, V> underlying = new OSBTreeMapEntryIterator<Object, V>(sbTree);

          @Override
          public boolean hasNext() {
            return underlying.hasNext();
          }

          @Override
          public Object next() {
            // Delegate and strip the value.
            return underlying.next().getKey();
          }

          @Override
          public void remove() {
            underlying.remove();
          }
        };
      }
    };
  } finally {
    releaseSharedLock();
  }
}
@Override
public void unload() {
  // Intentionally a no-op: this engine keeps no per-session state to release.
}
@Override
public void startTransaction() {
  // Intentionally a no-op: transaction hooks are not needed by this engine.
}
@Override
public void stopTransaction() {
  // Intentionally a no-op: transaction hooks are not needed by this engine.
}
@Override
public void afterTxRollback() {
  // Intentionally a no-op: nothing to undo outside the tree itself.
}
@Override
public void afterTxCommit() {
  // Intentionally a no-op: the tree is updated eagerly, not at commit time.
}
@Override
public void closeDb() {
  // Intentionally a no-op: the engine does not hold a database reference.
}
@Override
public void close() {
  // Closes the underlying tree (flushes and releases its file handles).
  acquireSharedLock();
  try {
    sbTree.close();
  } finally {
    releaseSharedLock();
  }
}
@Override
public void beforeTxBegin() {
  // Intentionally a no-op: transaction hooks are not needed by this engine.
}
/** Looks up the value stored under {@code key}; {@code null} when absent. */
@Override
public V get(Object key) {
  acquireSharedLock();
  try {
    final V stored = sbTree.get(key);
    return stored;
  } finally {
    releaseSharedLock();
  }
}
@Override
public void put(Object key, V value) {
  // Inserts or replaces the value for the key.
  // NOTE(review): mutator under the shared lock — see delete().
  acquireSharedLock();
  try {
    sbTree.put(key, value);
  } finally {
    releaseSharedLock();
  }
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
  // Removes every key whose stored value (after optional transformation into a
  // collection of rids) equals the given record id.
  // Returns the number of keys removed.
  acquireExclusiveLock();
  try {
    final Set<Object> keySetToRemove = new HashSet<Object>();
    if (sbTree.size() == 0)
      return 0;
    // Full-range scan collecting matching keys first, then a second pass that
    // removes them — presumably to avoid mutating the tree while the range
    // cursor is open; confirm against OSBTree's iteration contract.
    final Object firstKey = sbTree.firstKey();
    final Object lastKey = sbTree.lastKey();
    sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        if (transformer == null) {
          if (entry.getValue().equals(value))
            keySetToRemove.add(entry.getKey());
        } else {
          // A container value may map to several rids; match any of them.
          Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
          for (OIdentifiable identifiable : identifiables) {
            if (identifiable.equals(value))
              keySetToRemove.add(entry.getKey());
          }
        }
        // Always continue: every key must be inspected.
        return true;
      }
    });
    for (Object keyToRemove : keySetToRemove)
      sbTree.remove(keyToRemove);
    return keySetToRemove.size();
  } finally {
    releaseExclusiveLock();
  }
}
/**
 * Collects the rids stored under keys in {@code [rangeFrom, rangeTo]} (bound
 * inclusiveness per the flags), up to {@code maxValuesToFetch} results.
 *
 * @param maxValuesToFetch maximum number of rids to return; any negative value
 *          means "no limit"
 * @param transformer optional expansion of a stored value into several rids
 * @return a set of matching rids (a set, so duplicates collapse)
 */
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
    final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
        // Fixed: the previous "size() == maxValuesToFetch" test could never
        // fire for maxValuesToFetch == 0, so a zero limit scanned the whole
        // range. A ">=" bound terminates for every non-negative limit.
        return maxValuesToFetch < 0 || result.size() < maxValuesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Collects the rids stored under keys greater than (or equal to, when
 * {@code isInclusive}) {@code fromKey}, up to {@code maxValuesToFetch} results.
 *
 * @param maxValuesToFetch maximum number of rids to return; negative = no limit
 * @param transformer optional expansion of a stored value into several rids
 */
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
        // Fixed: ">=" bound instead of "=="; a zero limit previously never
        // terminated the scan early.
        return maxValuesToFetch < 0 || result.size() < maxValuesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Collects the rids stored under keys smaller than (or equal to, when
 * {@code isInclusive}) {@code toKey}, up to {@code maxValuesToFetch} results.
 *
 * @param maxValuesToFetch maximum number of rids to return; negative = no limit
 * @param transformer optional expansion of a stored value into several rids
 */
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
    sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
        // Fixed: ">=" bound instead of "=="; a zero limit previously never
        // terminated the scan early.
        return maxValuesToFetch < 0 || result.size() < maxValuesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds key/rid documents for keys greater than (or equal to, when
 * {@code isInclusive}) {@code fromKey}, up to {@code maxEntriesToFetch}.
 *
 * @param maxEntriesToFetch maximum number of documents; negative = no limit
 * @param transformer optional expansion of a stored value into several rids
 */
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        final Object key = entry.getKey();
        final V value = entry.getValue();
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        // Fixed: ">=" bound instead of "=="; a zero limit previously never
        // terminated the scan early.
        return maxEntriesToFetch < 0 || result.size() < maxEntriesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds key/rid documents for keys smaller than (or equal to, when
 * {@code isInclusive}) {@code toKey}, up to {@code maxEntriesToFetch}.
 *
 * @param maxEntriesToFetch maximum number of documents; negative = no limit
 * @param transformer optional expansion of a stored value into several rids
 */
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        final Object key = entry.getKey();
        final V value = entry.getValue();
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        // Fixed: ">=" bound instead of "=="; a zero limit previously never
        // terminated the scan early.
        return maxEntriesToFetch < 0 || result.size() < maxEntriesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Builds key/rid documents for keys in {@code [rangeFrom, rangeTo]} (one
 * {@code inclusive} flag governs both bounds), up to {@code maxEntriesToFetch}.
 *
 * @param maxEntriesToFetch maximum number of documents; negative = no limit
 * @param transformer optional expansion of a stored value into several rids
 */
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
    final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final Set<ODocument> result = new ODocumentFieldsHashSet();
    sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
      @Override
      public boolean addResult(OTree.BucketEntry<Object, V> entry) {
        final Object key = entry.getKey();
        final V value = entry.getValue();
        addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
        // Fixed: ">=" bound instead of "=="; a zero limit previously never
        // terminated the scan early.
        return maxEntriesToFetch < 0 || result.size() < maxEntriesToFetch;
      }
    });
    return result;
  } finally {
    releaseSharedLock();
  }
}
@Override
public long size(final ValuesTransformer<V> transformer) {
  // Number of indexed items. Without a transformer this is simply the key
  // count; with one, every stored value is expanded and the expansions summed.
  acquireSharedLock();
  try {
    if (transformer == null)
      return sbTree.size();
    else {
      // -1 limit = count everything.
      final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
      final Object firstKey = sbTree.firstKey();
      final Object lastKey = sbTree.lastKey();
      if (firstKey != null && lastKey != null) {
        sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
        return counter.count;
      }
      // Empty tree: no boundary keys, nothing to count.
      return 0;
    }
  } finally {
    releaseSharedLock();
  }
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
    ValuesTransformer<V> transformer) {
  // Counts items in the key range, optionally expanding values through the
  // transformer and capping at maxValuesToFetch (see ItemsCounter).
  // NOTE(review): a null rangeTo falls back to an open-ended "major" scan; the
  // behavior for a null rangeFrom is not handled here — presumably callers
  // always pass one, verify against OSBTree.loadEntriesMajor.
  acquireSharedLock();
  try {
    final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
    if (rangeTo != null)
      sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
    else
      sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
    return itemsCounter.count;
  } finally {
    releaseSharedLock();
  }
}
@Override
public boolean hasRangeQuerySupport() {
  // The SB-tree keeps keys ordered, so range scans are supported natively.
  return true;
}
// Returns the database bound to the current thread.
private ODatabaseRecord getDatabase() {
  return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/**
 * Adds the rids represented by {@code value} to {@code result}, honoring the
 * fetch limit while expanding through the transformer.
 *
 * @param maxValuesToFetch stop once the result holds this many rids; any
 *          negative value disables the limit. Fixed: the limit is now checked
 *          with ">=" *before* each add — the old post-add "==" test never
 *          matched for a limit of 0 and so added every expanded rid.
 */
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
  if (transformer != null) {
    for (OIdentifiable transformedValue : transformer.transformFromValue(value)) {
      if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
        return;
      result.add(transformedValue);
    }
  } else
    // No transformer: the stored value is itself the rid. The range listener
    // enforces the limit in this path, matching the original behavior.
    result.add((OIdentifiable) value);
}
/**
 * Adds key/rid documents for {@code value} to {@code result}, honoring the
 * fetch limit while expanding through the transformer.
 *
 * @param maxValuesToFetch stop once the result holds this many documents; any
 *          negative value disables the limit. Fixed: the limit is now checked
 *          with ">=" *before* each add — the old post-add "==" test never
 *          matched for a limit of 0. Document construction, previously
 *          duplicated in both branches, is factored into a helper.
 */
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
  if (transformer != null) {
    for (OIdentifiable transformedValue : transformer.transformFromValue(value)) {
      if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
        return;
      result.add(createEntryDocument(key, transformedValue.getIdentity()));
    }
  } else {
    // No transformer: the stored value is itself the rid. The range listener
    // enforces the limit in this path, matching the original behavior.
    result.add(createEntryDocument(key, ((OIdentifiable) value).getIdentity()));
  }
}

// Builds the detached {"key", "rid"} document returned by the getEntries* queries.
private ODocument createEntryDocument(Object key, ORID rid) {
  final ODocument document = new ODocument();
  document.field("key", key);
  document.field("rid", rid);
  // Detach from any dirty-tracking so the synthetic document is never saved.
  document.unsetDirty();
  return document;
}
/**
 * Range listener that counts visited items, expanding each stored value
 * through an optional transformer, and stops early once a positive
 * {@code maxValuesToFetch} has been reached.
 */
private static final class ItemsCounter<V> implements OTree.RangeResultListener<Object, V> {
  private final ValuesTransformer<V> valuesTransformer;
  private final int maxValuesToFetch;
  private int count;

  private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
    this.valuesTransformer = valuesTransformer;
    this.maxValuesToFetch = maxValuesToFetch;
  }

  @Override
  public boolean addResult(OTree.BucketEntry<Object, V> entry) {
    final int increment = valuesTransformer == null ? 1 : valuesTransformer.transformFromValue(entry.getValue()).size();
    count += increment;
    // Continue unless a positive cap has been reached (non-positive = no cap).
    return !(maxValuesToFetch > 0 && count >= maxValuesToFetch);
  }
}
}
/*
* Copyright 2010-2012 Luca Garulli (l.garulli(at)orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
/**
 * Index engine backed by a local on-disk SB-tree ({@link OSBTree}).
 *
 * <p>Read operations and most per-entry operations run under the shared lock
 * of {@link OSharedResourceAdaptiveExternal}; {@code create}, {@code load} and
 * {@code removeValue} take the exclusive lock because they replace or rewrite
 * the tree. NOTE(review): {@code delete()}, {@code remove()}, {@code clear()}
 * and {@code put()} mutate the tree under the *shared* lock — presumably
 * OSBTree serializes writers internally; confirm before relying on
 * concurrent callers.
 */
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
  // Extension of the data file that stores the serialized tree.
  public static final String DATA_FILE_EXTENSION = ".sbt";

  // Record id of the marker record saved by create(); identifies this index.
  private ORID identity;
  // Backing tree; (re)assigned by create()/load() under the exclusive lock.
  private OSBTree<Object, V> sbTree;

  public OSBTreeIndexEngine() {
    super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
        .getValueAsInteger(), true);
  }

  @Override
  public void init() {
    // Intentionally a no-op: all initialization happens in create()/load().
  }

  // Forces pending tree pages to disk.
  @Override
  public void flush() {
    acquireSharedLock();
    try {
      sbTree.flush();
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Creates a new index: picks a key serializer from the index definition
   * (runtime-provided, composite, or single-type; a simple key serializer when
   * no definition is given), saves a marker record whose rid becomes this
   * engine's identity, and creates the on-disk tree.
   */
  @Override
  public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
      OStreamSerializer valueSerializer, boolean isAutomatic) {
    acquireExclusiveLock();
    try {
      final OBinarySerializer keySerializer;
      if (indexDefinition != null) {
        if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
          sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
              OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
          keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
        } else {
          if (indexDefinition.getTypes().length > 1) {
            // Multi-field key: composite serializer.
            keySerializer = OCompositeKeySerializer.INSTANCE;
          } else {
            keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
          }
          sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
              OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
        }
      } else {
        sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
            OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
        keySerializer = new OSimpleKeySerializer();
      }
      // The empty marker record gives the index a stable identity rid.
      final ORecordBytes identityRecord = new ORecordBytes();
      ODatabaseRecord database = getDatabase();
      final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
      database.save(identityRecord, clusterIndexName);
      identity = identityRecord.getIdentity();
      sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
    } finally {
      releaseExclusiveLock();
    }
  }

  // Drops the on-disk tree (NOTE(review): mutator under the shared lock).
  @Override
  public void delete() {
    acquireSharedLock();
    try {
      sbTree.delete();
    } finally {
      releaseSharedLock();
    }
  }

  // Opens an existing index from the thread-bound database's storage.
  // indexRid is accepted for interface compatibility but not consulted.
  @Override
  public void load(ORID indexRid, String indexName, boolean isAutomatic) {
    acquireExclusiveLock();
    try {
      sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
      ODatabaseRecord database = getDatabase();
      final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
      sbTree.load(indexName, 0, storageLocalAbstract);
    } finally {
      releaseExclusiveLock();
    }
  }

  // True when a non-null value is stored under the key.
  @Override
  public boolean contains(Object key) {
    acquireSharedLock();
    try {
      return sbTree.get(key) != null;
    } finally {
      releaseSharedLock();
    }
  }

  // True when an entry was actually removed (previous value non-null).
  @Override
  public boolean remove(Object key) {
    acquireSharedLock();
    try {
      return sbTree.remove(key) != null;
    } finally {
      releaseSharedLock();
    }
  }

  // Rid of the marker record persisted by create(); null before creation.
  @Override
  public ORID getIdentity() {
    acquireSharedLock();
    try {
      return identity;
    } finally {
      releaseSharedLock();
    }
  }

  // Removes all entries (NOTE(review): mutator under the shared lock).
  @Override
  public void clear() {
    acquireSharedLock();
    try {
      sbTree.clear();
    } finally {
      releaseSharedLock();
    }
  }

  // Ascending-key iterator over all entries.
  @Override
  public Iterator<Map.Entry<Object, V>> iterator() {
    acquireSharedLock();
    try {
      return new OSBTreeMapEntryIterator<Object, V>(sbTree);
    } finally {
      releaseSharedLock();
    }
  }

  // Descending-key iterator over all entries.
  @Override
  public Iterator<Map.Entry<Object, V>> inverseIterator() {
    acquireSharedLock();
    try {
      return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
    } finally {
      releaseSharedLock();
    }
  }

  // Values only, ascending key order; adapter over the entry iterator.
  @Override
  public Iterator<V> valuesIterator() {
    acquireSharedLock();
    try {
      return new Iterator<V>() {
        private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);

        @Override
        public boolean hasNext() {
          return entryIterator.hasNext();
        }

        @Override
        public V next() {
          return entryIterator.next().getValue();
        }

        @Override
        public void remove() {
          entryIterator.remove();
        }
      };
    } finally {
      releaseSharedLock();
    }
  }

  // Values only, descending key order; adapter over the inverse entry iterator.
  @Override
  public Iterator<V> inverseValuesIterator() {
    acquireSharedLock();
    try {
      return new Iterator<V>() {
        private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
            sbTree);

        @Override
        public boolean hasNext() {
          return entryIterator.hasNext();
        }

        @Override
        public V next() {
          return entryIterator.next().getValue();
        }

        @Override
        public void remove() {
          entryIterator.remove();
        }
      };
    } finally {
      releaseSharedLock();
    }
  }

  // Iterable over the keys in ascending order; each iterator() starts a fresh scan.
  @Override
  public Iterable<Object> keys() {
    acquireSharedLock();
    try {
      return new Iterable<Object>() {
        @Override
        public Iterator<Object> iterator() {
          return new Iterator<Object>() {
            final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);

            @Override
            public boolean hasNext() {
              return entryIterator.hasNext();
            }

            @Override
            public Object next() {
              return entryIterator.next().getKey();
            }

            @Override
            public void remove() {
              entryIterator.remove();
            }
          };
        }
      };
    } finally {
      releaseSharedLock();
    }
  }

  // The following lifecycle/transaction hooks are intentionally no-ops:
  // this engine keeps no per-session or per-transaction state.
  @Override
  public void unload() {
  }

  @Override
  public void startTransaction() {
  }

  @Override
  public void stopTransaction() {
  }

  @Override
  public void afterTxRollback() {
  }

  @Override
  public void afterTxCommit() {
  }

  @Override
  public void closeDb() {
  }

  // Closes the underlying tree (flushes and releases its file handles).
  @Override
  public void close() {
    acquireSharedLock();
    try {
      sbTree.close();
    } finally {
      releaseSharedLock();
    }
  }

  @Override
  public void beforeTxBegin() {
  }

  // Value stored under the key; null when absent.
  @Override
  public V get(Object key) {
    acquireSharedLock();
    try {
      return sbTree.get(key);
    } finally {
      releaseSharedLock();
    }
  }

  // Inserts or replaces (NOTE(review): mutator under the shared lock).
  @Override
  public void put(Object key, V value) {
    acquireSharedLock();
    try {
      sbTree.put(key, value);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Removes every key whose value (after optional transformation into rids)
   * equals the given record id; returns the number of keys removed. Keys are
   * collected during a full-range scan and removed afterwards — presumably to
   * avoid mutating the tree while the range cursor is open.
   */
  @Override
  public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
    acquireExclusiveLock();
    try {
      final Set<Object> keySetToRemove = new HashSet<Object>();
      if (sbTree.size() == 0)
        return 0;
      final Object firstKey = sbTree.firstKey();
      final Object lastKey = sbTree.lastKey();
      sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          if (transformer == null) {
            if (entry.getValue().equals(value))
              keySetToRemove.add(entry.getKey());
          } else {
            // A container value may map to several rids; match any of them.
            Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
            for (OIdentifiable identifiable : identifiables) {
              if (identifiable.equals(value))
                keySetToRemove.add(entry.getKey());
            }
          }
          // Always continue: every key must be inspected.
          return true;
        }
      });
      for (Object keyToRemove : keySetToRemove)
        sbTree.remove(keyToRemove);
      return keySetToRemove.size();
    } finally {
      releaseExclusiveLock();
    }
  }

  // Rids under keys in [rangeFrom, rangeTo] (inclusiveness per flags), capped
  // at maxValuesToFetch (negative = no limit).
  // NOTE(review): the "== maxValuesToFetch" stop test never fires for a limit
  // of 0, so a zero limit scans the whole range.
  @Override
  public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
      final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
          if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Rids under keys >(=) fromKey, capped at maxValuesToFetch (negative = no limit).
  @Override
  public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
      final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
          if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Rids under keys <(=) toKey, capped at maxValuesToFetch (negative = no limit).
  @Override
  public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
      final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
      sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
          if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // {"key","rid"} documents for keys >(=) fromKey, capped at maxEntriesToFetch.
  @Override
  public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
      final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          final Object key = entry.getKey();
          final V value = entry.getValue();
          addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
          if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // {"key","rid"} documents for keys <(=) toKey, capped at maxEntriesToFetch.
  @Override
  public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
      final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          final Object key = entry.getKey();
          final V value = entry.getValue();
          addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
          if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // {"key","rid"} documents for keys in the range (one flag for both bounds).
  @Override
  public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
      final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final Set<ODocument> result = new ODocumentFieldsHashSet();
      sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
        @Override
        public boolean addResult(OTree.BucketEntry<Object, V> entry) {
          final Object key = entry.getKey();
          final V value = entry.getValue();
          addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
          if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
            return false;
          return true;
        }
      });
      return result;
    } finally {
      releaseSharedLock();
    }
  }

  // Number of indexed items; with a transformer, sums the expanded rid counts.
  @Override
  public long size(final ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      if (transformer == null)
        return sbTree.size();
      else {
        // -1 limit = count everything.
        final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
        final Object firstKey = sbTree.firstKey();
        final Object lastKey = sbTree.lastKey();
        if (firstKey != null && lastKey != null) {
          sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
          return counter.count;
        }
        // Empty tree: no boundary keys, nothing to count.
        return 0;
      }
    } finally {
      releaseSharedLock();
    }
  }

  // Counts items in the key range; null rangeTo means an open-ended major scan.
  @Override
  public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
      ValuesTransformer<V> transformer) {
    acquireSharedLock();
    try {
      final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
      if (rangeTo != null)
        sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
      else
        sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
      return itemsCounter.count;
    } finally {
      releaseSharedLock();
    }
  }

  // The SB-tree keeps keys ordered, so range queries are supported natively.
  @Override
  public boolean hasRangeQuerySupport() {
    return true;
  }

  // Database bound to the current thread.
  private ODatabaseRecord getDatabase() {
    return ODatabaseRecordThreadLocal.INSTANCE.get();
  }

  // Adds the rids represented by value to result, expanding through the
  // transformer and stopping once the limit is reached (negative = no limit).
  private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
    if (transformer != null) {
      Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
      for (OIdentifiable transformedValue : transformResult) {
        result.add(transformedValue);
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return;
      }
    } else
      result.add((OIdentifiable) value);
  }

  // Adds {"key","rid"} documents for value to result; same limit convention as
  // addToResult. Documents are unset-dirty so they are never saved.
  private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
    if (transformer != null) {
      Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
      for (OIdentifiable transformedValue : transformResult) {
        final ODocument document = new ODocument();
        document.field("key", key);
        document.field("rid", transformedValue.getIdentity());
        document.unsetDirty();
        result.add(document);
        if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
          return;
      }
    } else {
      final ODocument document = new ODocument();
      document.field("key", key);
      document.field("rid", ((OIdentifiable) value).getIdentity());
      document.unsetDirty();
      result.add(document);
    }
  }

  // Range listener that counts visited items (transformer-expanded when one is
  // given) and stops early once a positive maxValuesToFetch is reached.
  private static final class ItemsCounter<V> implements OTree.RangeResultListener<Object, V> {
    private final ValuesTransformer<V> valuesTransformer;
    private final int maxValuesToFetch;

    private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
      this.valuesTransformer = valuesTransformer;
      this.maxValuesToFetch = maxValuesToFetch;
    }

    // Running total, read directly by size()/count().
    private int count;

    @Override
    public boolean addResult(OTree.BucketEntry<Object, V> entry) {
      if (valuesTransformer != null)
        count += valuesTransformer.transformFromValue(entry.getValue()).size();
      else
        count++;
      if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
        return false;
      return true;
    }
  }
}
MergeMethods
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
<<<<<<< MINE
private static final class InverseMapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private static final class MapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
  // A key is present exactly when the tree stores a value for it.
  acquireSharedLock();
  try {
    final V storedValue = sbTree.get(key);
    return storedValue != null;
  } finally {
    releaseSharedLock();
  }
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
// Expands one stored value into the result set. With a transformer the stored
// value may represent several identifiables (multi-value entry), each added
// until the optional cap is reached; without one, the raw value is assumed to
// be a single OIdentifiable.
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
// Stop early once the requested number of values has been collected
// (maxValuesToFetch == -1 means unbounded).
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
// Builds "key"/"rid" documents for one stored value and adds them to the
// result set. With a transformer the value expands to several identifiables
// (one document each, capped by maxValuesToFetch when > -1); otherwise the
// value is cast to OIdentifiable directly.
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
// NOTE(review): presumably clears the modified flag so these synthetic
// documents are not persisted — confirm against ODocument semantics.
document.unsetDirty();
result.add(document);
// Stop early once the requested number of entries has been collected.
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
<<<<<<< MINE
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
=======
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
>>>>>>> YOURS
}
}
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
// NOTE(merge): conflict resolved by dropping the private InverseMapEntryIterator
// copy (the YOURS side deleted it). inverseIterator() and inverseValuesIterator()
// already use the shared
// com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator.
// NOTE(merge): conflict resolved by dropping the private MapEntryIterator copy
// (the YOURS side deleted it). iterator(), keys() and valuesIterator() already
// use the shared com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator.
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
<<<<<<< MINE
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
=======
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
>>>>>>> YOURS
}
}
KeepBothMethods
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
<<<<<<< MINE
private static final class InverseMapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private static final class MapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
<<<<<<< MINE
private static final class InverseMapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private static final class MapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
Safe
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
// Merge resolved (YOURS side): the private InverseMapEntryIterator and
// MapEntryIterator inner classes were deleted. This class already uses the
// shared OSBTreeMapEntryIterator / OSBTreeInverseMapEntryIterator
// implementations imported from com.orientechnologies.orient.core.index.sbtree
// (see iterator() and inverseIterator() below), so the inner copies were dead code.
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
<<<<<<< MINE
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
=======
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
>>>>>>> YOURS
}
}
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine <V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
<<<<<<< MINE
private static final class InverseMapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
private static final class MapEntryIterator <V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
=======
>>>>>>> YOURS
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, boolean isAutomatic) {
acquireExclusiveLock();
try {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
private static final class ItemsCounter <V> implements OTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
<<<<<<< MINE
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
=======
@Override
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
>>>>>>> YOURS
}
}
Unstructured
/*
* Copyright 2010-2012 Luca Garulli (l.garulli(at)orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
<<<<<<< MINE
import java.util.LinkedList;
=======
>>>>>>> YOURS
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
/**
 * Number of logical index entries. Without a transformer this is simply the
 * tree size; with one, every stored value is expanded via the transformer and
 * the expanded items are counted over the full key range.
 */
@Override
public long size(final ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    if (transformer == null)
      return sbTree.size();

    final Object firstKey = sbTree.firstKey();
    final Object lastKey = sbTree.lastKey();
    // An empty tree has no boundary keys — nothing to count.
    if (firstKey == null || lastKey == null)
      return 0;

    final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
    sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
    return counter.count;
  } finally {
    releaseSharedLock();
  }
}
/**
 * Counts entries in the given key range, expanding values through the
 * transformer when one is supplied. A null {@code rangeTo} means
 * "everything from {@code rangeFrom} upward".
 */
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
    ValuesTransformer<V> transformer) {
  acquireSharedLock();
  try {
    final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, maxValuesToFetch);
    if (rangeTo == null)
      sbTree.loadEntriesMajor(rangeFrom, fromInclusive, counter);
    else
      sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, counter);
    return counter.count;
  } finally {
    releaseSharedLock();
  }
}
// SB-tree keys are ordered, so range queries are always supported.
@Override
public boolean hasRangeQuerySupport() {
return true;
}
// Returns the database bound to the current thread.
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/**
 * Adds the identifiable(s) represented by {@code value} to {@code result}.
 * With a transformer the value may expand to several identifiables; insertion
 * stops as soon as the result reaches {@code maxValuesToFetch} ({@code -1}
 * disables the limit).
 */
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
  if (transformer == null) {
    // No expansion: the value itself is the identifiable.
    result.add((OIdentifiable) value);
    return;
  }
  for (OIdentifiable transformed : transformer.transformFromValue(value)) {
    result.add(transformed);
    if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
      return;
  }
}
/**
 * Adds {key, rid} documents for {@code value} to {@code result}. With a
 * transformer the value may expand to several rids; insertion stops as soon as
 * the result reaches {@code maxValuesToFetch} ({@code -1} disables the limit).
 *
 * The duplicated document-building code of the original was extracted into
 * {@link #toEntryDocument(Object, OIdentifiable)}.
 */
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
  if (transformer == null) {
    result.add(toEntryDocument(key, ((OIdentifiable) value).getIdentity()));
    return;
  }
  for (OIdentifiable transformedValue : transformer.transformFromValue(value)) {
    result.add(toEntryDocument(key, transformedValue.getIdentity()));
    if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
      return;
  }
}

// Builds one {key, rid} entry document; unsetDirty keeps it from being scheduled for save.
private ODocument toEntryDocument(Object key, OIdentifiable rid) {
  final ODocument document = new ODocument();
  document.field("key", key);
  document.field("rid", rid);
  document.unsetDirty();
  return document;
}
<<<<<<< MINE
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
=======
private static final class ItemsCounter<V> implements OTree.RangeResultListener<Object, V> {
>>>>>>> YOURS
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}/*
* Copyright 2010-2012 Luca Garulli (l.garulli(at)orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.index.engine;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import com.orientechnologies.common.concur.resource.OSharedResourceAdaptiveExternal;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.index.ODocumentFieldsHashSet;
import com.orientechnologies.orient.core.index.OIndexDefinition;
import com.orientechnologies.orient.core.index.OIndexEngine;
import com.orientechnologies.orient.core.index.ORuntimeKeyIndexDefinition;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeInverseMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OSBTreeMapEntryIterator;
import com.orientechnologies.orient.core.index.sbtree.OTree;
import com.orientechnologies.orient.core.index.sbtree.local.OSBTree;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import com.orientechnologies.orient.core.serialization.serializer.binary.OBinarySerializerFactory;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OCompositeKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.binary.impl.index.OSimpleKeySerializer;
import com.orientechnologies.orient.core.serialization.serializer.stream.OStreamSerializer;
import com.orientechnologies.orient.core.storage.impl.local.OStorageLocalAbstract;
/**
* @author Andrey Lomakin
* @since 8/30/13
*/
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, 0, keySerializer, (OBinarySerializer<V>) valueSerializer, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, 0, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public int removeValue(final OIdentifiable value, final ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final Set<Object> keySetToRemove = new HashSet<Object>();
if (sbTree.size() == 0)
return 0;
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (transformer == null) {
if (entry.getValue().equals(value))
keySetToRemove.add(entry.getKey());
} else {
Collection<OIdentifiable> identifiables = transformer.transformFromValue(entry.getValue());
for (OIdentifiable identifiable : identifiables) {
if (identifiable.equals(value))
keySetToRemove.add(entry.getKey());
}
}
return true;
}
});
for (Object keyToRemove : keySetToRemove)
sbTree.remove(keyToRemove);
return keySetToRemove.size();
} finally {
releaseExclusiveLock();
}
}
@Override
public Collection<OIdentifiable> getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final int maxValuesToFetch, final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive, final int maxValuesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
addToResult(transformer, result, entry.getValue(), maxValuesToFetch);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public Collection<ODocument> getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final int maxEntriesToFetch,
final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
final Object key = entry.getKey();
final V value = entry.getValue();
addToEntriesResult(transformer, result, key, value, maxEntriesToFetch);
if (maxEntriesToFetch > -1 && result.size() == maxEntriesToFetch)
return false;
return true;
}
});
return result;
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private void addToResult(ValuesTransformer<V> transformer, Set<OIdentifiable> result, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
result.add(transformedValue);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else
result.add((OIdentifiable) value);
}
private void addToEntriesResult(ValuesTransformer<V> transformer, Set<ODocument> result, Object key, V value, int maxValuesToFetch) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
result.add(document);
if (maxValuesToFetch > -1 && result.size() == maxValuesToFetch)
return;
}
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
result.add(document);
}
}
<<<<<<< MINE
private static final class MapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private final OSBTree<Object, V> sbTree;
private Object firstKey;
MapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
firstKey = sbTree.firstKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMajor(firstKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
firstKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class InverseMapEntryIterator<V> implements Iterator<Map.Entry<Object, V>> {
private final OSBTree<Object, V> sbTree;
private LinkedList<Map.Entry<Object, V>> preFetchedValues;
private Object lastKey;
InverseMapEntryIterator(OSBTree<Object, V> sbTree) {
this.sbTree = sbTree;
if (sbTree.size() == 0) {
this.preFetchedValues = null;
return;
}
this.preFetchedValues = new LinkedList<Map.Entry<Object, V>>();
lastKey = sbTree.lastKey();
prefetchData(true);
}
private void prefetchData(boolean firstTime) {
sbTree.loadEntriesMinor(lastKey, firstTime, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(final Map.Entry<Object, V> entry) {
preFetchedValues.add(entry);
return preFetchedValues.size() <= 8000;
}
});
if (preFetchedValues.isEmpty())
preFetchedValues = null;
else
lastKey = preFetchedValues.getLast().getKey();
}
@Override
public boolean hasNext() {
return preFetchedValues != null;
}
@Override
public Map.Entry<Object, V> next() {
final Map.Entry<Object, V> entry = preFetchedValues.removeFirst();
if (preFetchedValues.isEmpty())
prefetchData(false);
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
=======
private static final class ItemsCounter<V> implements OTree.RangeResultListener<Object, V> {
>>>>>>> YOURS
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
<<<<<<< MINE
public boolean addResult(Map.Entry<Object, V> entry) {
=======
public boolean addResult(OTree.BucketEntry<Object, V> entry) {
>>>>>>> YOURS
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
Diff Result
No diff
Case 82 - java_realmjava.rev_2eb11_b899c..TableQuery.java
Base
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
// Wraps an already-allocated native query object.
// nativeQueryPtr: pointer to the native query; this object takes over releasing it (see close()).
// immutable: stored flag; its enforcement is not visible in this chunk — confirm where it is checked.
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Safety net: releases the native query on GC if close() was never called.
// NOTE(review): finalize() is deprecated in modern Java; a Cleaner/AutoCloseable
// design would be preferable — confirm the supported JDK range before changing.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Frees the native query. Idempotent: nativePtr is zeroed after the first call,
// and subsequent calls return early. synchronized guards against concurrent
// double-free between an explicit close and the finalizer.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts the query to the rows of the given table view; returns this for chaining.
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// Opens a condition group (logical parenthesis); pair with endGroup().
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
// Closes the group opened by group().
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into the subtable at columnIndex; pair with endSubTable().
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
// Returns to the parent table of the current subtable (native call is "parent").
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
// ORs the previous and next conditions instead of the default AND.
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Each condition method appends a predicate on the given column and returns
// this query for fluent chaining; the short aliases (eq/neq/gt/gte/lt/lte)
// delegate to the same native calls as their long-named counterparts.
public TableQuery equal(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
// Matches values in [value1, value2]; boundary inclusivity is decided by the native layer.
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Float overloads of the fluent condition methods; same pattern as the
// integer group above (aliases delegate to the same native calls).
public TableQuery equal(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqual(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
// Matches values in [value1, value2]; boundary inclusivity is decided by the native layer.
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equal(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqual(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Only equality is exposed for boolean columns; forwards to the JNI
// layer and returns `this` for fluent chaining.
public TableQuery equal(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of equal(long, boolean).
public TableQuery eq(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
//
// Dates are handed to the native layer as whole seconds since the Unix
// epoch: Date.getTime() (milliseconds) is divided by 1000 using Java
// integer division, so sub-second precision is silently discarded.
// NOTE(review): integer division truncates toward zero, so pre-1970
// dates are rounded toward the epoch rather than floored — confirm that
// this is what the native comparison expects.
// All predicates forward to a JNI counterpart and return `this` for
// fluent chaining. Passing a null Date throws NullPointerException.

/** Converts a Date to the whole-second epoch timestamp used by the native layer. */
private static long toEpochSeconds(Date value) {
    return value.getTime() / 1000;
}

// Matches rows whose date column equals `value` (second precision).
public TableQuery equal(long columnIndex, Date value){
    nativeEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of equal(long, Date).
public TableQuery eq(long columnIndex, Date value){
    nativeEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeEqualDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column differs from `value` (second precision).
public TableQuery notEqual(long columnIndex, Date value){
    nativeNotEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of notEqual(long, Date).
public TableQuery neq(long columnIndex, Date value){
    nativeNotEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeNotEqualDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column is after `value`.
public TableQuery greaterThan(long columnIndex, Date value){
    nativeGreaterDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of greaterThan(long, Date).
public TableQuery gt(long columnIndex, Date value){
    nativeGreaterDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column is at or after `value`.
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
    nativeGreaterEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of greaterThanOrEqual(long, Date).
public TableQuery gte(long columnIndex, Date value){
    nativeGreaterEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeGreaterEqualDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column is before `value`.
public TableQuery lessThan(long columnIndex, Date value){
    nativeLessDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of lessThan(long, Date).
public TableQuery lt(long columnIndex, Date value){
    nativeLessDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column is at or before `value`.
public TableQuery lessThanOrEqual(long columnIndex, Date value){
    nativeLessEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
// Alias of lessThanOrEqual(long, Date).
public TableQuery lte(long columnIndex, Date value){
    nativeLessEqualDate(nativePtr, columnIndex, toEpochSeconds(value));
    return this;
}
protected native void nativeLessEqualDate(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date column lies between value1 and value2
// (inclusivity decided by the native layer; TODO confirm).
public TableQuery between(long columnIndex, Date value1, Date value2){
    nativeBetweenDate(nativePtr, columnIndex, toEpochSeconds(value1), toEpochSeconds(value2));
    return this;
}
protected native void nativeBetweenDate(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Each predicate has a caseSensitive overload; the overload without the
// flag defaults to case-sensitive matching (caseSensitive = true).
// All forward to the JNI layer and return `this` for fluent chaining.
// Equal
public TableQuery equal(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Alias of equal(long, String, boolean).
public TableQuery eq(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Case-sensitive equality.
public TableQuery equal(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
// Alias of equal(long, String).
public TableQuery eq(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqual(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Alias of notEqual(long, String, boolean).
public TableQuery neq(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Case-sensitive inequality.
public TableQuery notEqual(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
// Alias of notEqual(long, String).
public TableQuery neq(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Matches rows whose string column starts with `value`.
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Case-sensitive beginsWith.
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Matches rows whose string column ends with `value`.
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Case-sensitive endsWith.
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Matches rows whose string column contains `value` as a substring.
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Case-sensitive contains.
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// Returns the index of the next row matching this query after `lastRow`
// (semantics of the sentinel return value for "no match" are defined by
// the native layer — presumably -1; TODO confirm).
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
// Finds the first matching row, searching the whole table.
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes all matching rows in [start, end) into a TableView,
// returning at most `limit` rows. The view shares this query's
// immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
// Materializes every matching row in the table into a TableView.
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Each aggregate exists in two forms: a ranged form over rows
// [start, end) considering at most `limit` matches, and a no-arg form
// that aggregates over the whole table (0, Table.INFINITE,
// Table.INFINITE). All delegate to the JNI layer.
//
// Integer aggregation
public long sum(long columnIndex, long start, long end, long limit){
return nativeSum(nativePtr, columnIndex, start, end, limit);
}
// Sum of an integer column over all matching rows.
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximum(long columnIndex, long start, long end, long limit){
return nativeMaximum(nativePtr, columnIndex, start, end, limit);
}
// Maximum of an integer column over all matching rows.
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimum(long columnIndex, long start, long end, long limit){
return nativeMinimum(nativePtr, columnIndex, start, end, limit);
}
// Minimum of an integer column over all matching rows.
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double average(long columnIndex, long start, long end, long limit){
return nativeAverage(nativePtr, columnIndex, start, end, limit);
}
// Average of an integer column over all matching rows.
public double average(long columnIndex){
return nativeAverage(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverage(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
// Sum of a float column over all matching rows.
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
// Maximum of a float column over all matching rows.
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
// Minimum of a float column over all matching rows.
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
// Average of a float column over all matching rows.
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
// Sum of a double column over all matching rows.
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
// Maximum of a double column over all matching rows.
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
// Minimum of a double column over all matching rows.
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
// Average of a double column over all matching rows.
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
// Number of rows in [start, end) matching the query, counting at most
// `limit` matches.
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
// Number of matching rows in the whole table.
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// Removes matching rows in [start, end); throws IllegalStateException
// (via throwImmutable) when called inside a read transaction. Returns
// the value reported by the native layer — presumably the number of
// rows removed; TODO confirm.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
// Removes every matching row in the table.
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Shared guard for mutating calls made while the query is immutable
// (i.e. created inside a read transaction).
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
/**
 * Fluent query builder backed by a native (JNI) query object.
 *
 * Every predicate method records a condition on the native query
 * identified by {@code nativePtr} and returns {@code this} so calls can
 * be chained. The short-form methods (eq, neq, gt, gte, lt, lte) are
 * exact aliases of their spelled-out counterparts. Mutating operations
 * (remove) are rejected with IllegalStateException while
 * {@code immutable} is set. Not thread-safe beyond the synchronized
 * close(); NOTE(review): confirm intended threading model with callers.
 */
public class TableQuery {
protected boolean DEBUG = false;
// Address of the native query object; 0 once closed.
protected long nativePtr;
// True when the query was created inside a read transaction.
protected boolean immutable = false;
// TODO: Can we protect this?
// Wraps an already-created native query. Ownership of nativeQueryPtr
// transfers to this object; it is released in close()/finalize().
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Safety net that releases the native object if close() was never
// called. NOTE(review): finalization is deprecated in modern JDKs;
// consider Cleaner/try-with-resources when the API can change.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native query exactly once; subsequent calls are no-ops
// because nativePtr is zeroed. Synchronized so the finalizer thread
// and user code cannot double-free.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts the query to the rows contained in the given view.
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// Opens a parenthesized condition group; pair with endGroup().
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
// Closes the group opened by group().
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into a subtable column; pair with endSubTable().
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
// Returns to the parent table after subTable().
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
// Combines the previous and next condition with logical OR
// (conditions are ANDed by default).
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Comparison predicates for integer (long) columns.
public TableQuery equal(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of equal(long, long).
public TableQuery eq(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of notEqual(long, long).
public TableQuery neq(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThan(long, long).
public TableQuery gt(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThanOrEqual(long, long).
public TableQuery gte(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThan(long, long).
public TableQuery lt(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThanOrEqual(long, long).
public TableQuery lte(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
// Range match; inclusivity decided by the native layer (TODO confirm).
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Same structure as the integer predicates, for float columns.
public TableQuery equal(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of equal(long, float).
public TableQuery eq(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqual(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of notEqual(long, float).
public TableQuery neq(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThan(long, float).
public TableQuery gt(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThanOrEqual(long, float).
public TableQuery gte(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThan(long, float).
public TableQuery lt(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThanOrEqual(long, float).
public TableQuery lte(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Same structure as the integer predicates, for double columns.
public TableQuery equal(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of equal(long, double).
public TableQuery eq(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqual(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of notEqual(long, double).
public TableQuery neq(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThan(long, double).
public TableQuery gt(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of greaterThanOrEqual(long, double).
public TableQuery gte(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThan(long, double).
public TableQuery lt(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of lessThanOrEqual(long, double).
public TableQuery lte(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
public TableQuery equal(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
// Alias of equal(long, boolean).
public TableQuery eq(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
// Dates are passed to the native layer as whole seconds since the Unix
// epoch (getTime()/1000, integer division): sub-second precision is
// discarded, and pre-1970 dates truncate toward zero rather than
// flooring — NOTE(review): confirm the native layer expects that.
public TableQuery equal(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of equal(long, Date).
public TableQuery eq(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of notEqual(long, Date).
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeNotEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of greaterThan(long, Date).
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of greaterThanOrEqual(long, Date).
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of lessThan(long, Date).
public TableQuery lt(long columnIndex, Date value){
nativeLessDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
// Alias of lessThanOrEqual(long, Date).
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDate(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDate(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without a caseSensitive flag default to case-sensitive.
// Equal
public TableQuery equal(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Alias of equal(long, String, boolean).
public TableQuery eq(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equal(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
// Alias of equal(long, String).
public TableQuery eq(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqual(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
// Alias of notEqual(long, String, boolean).
public TableQuery neq(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqual(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
// Alias of notEqual(long, String).
public TableQuery neq(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Prefix match on a string column.
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Suffix match on a string column.
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Substring match on a string column.
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// Index of the next matching row after lastRow ("no match" sentinel is
// defined by the native layer — presumably -1; TODO confirm).
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes matching rows in [start, end), at most `limit`, into a
// TableView sharing this query's immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Each aggregate has a ranged form over [start, end) with a match
// limit, and a no-arg form over the whole table.
//
// Integer aggregation
public long sum(long columnIndex, long start, long end, long limit){
return nativeSum(nativePtr, columnIndex, start, end, limit);
}
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximum(long columnIndex, long start, long end, long limit){
return nativeMaximum(nativePtr, columnIndex, start, end, limit);
}
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimum(long columnIndex, long start, long end, long limit){
return nativeMinimum(nativePtr, columnIndex, start, end, limit);
}
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double average(long columnIndex, long start, long end, long limit){
return nativeAverage(nativePtr, columnIndex, start, end, limit);
}
public double average(long columnIndex){
return nativeAverage(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverage(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// Removes matching rows; rejected with IllegalStateException during a
// read transaction.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Shared guard for mutating calls made on an immutable query.
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// ===== Left revision (TableQuery.java) =====
package com.tightdb;
import java.util.Date;
/**
 * Java-side handle for a native (JNI) tightdb query.
 *
 * Each condition method (equal, notEqual, greaterThan, ...) forwards to a
 * native function and returns {@code this}, so conditions can be chained
 * fluently. The short aliases (eq, neq, gt, gte, lt, lte) call exactly the
 * same native functions as their long-named counterparts.
 *
 * The class owns the native query pointer ({@code nativePtr}) and frees it
 * in {@link #close()}. NOTE(review): query methods read {@code nativePtr}
 * without synchronization, so concurrent use with close() is unchecked —
 * confirm single-threaded use is intended.
 */
public class TableQuery {
// Debug tracing toggle; when true, construction/close are logged to stderr.
protected boolean DEBUG = false;
// Raw pointer to the native query object; 0 once closed.
protected long nativePtr;
// When true, mutating operations (remove) throw IllegalStateException.
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Last-resort cleanup: frees the native query if close() was never called.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent: nativePtr is zeroed after the first successful call, so a
// second close() (including the one from finalize()) is a no-op.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts this query to the rows contained in the given view.
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// group()/endGroup() bracket conditions so they combine as one parenthesized
// sub-expression in the native query.
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into a subtable column for subsequent conditions; endSubTable()
// returns to the parent level (the native call is named "parent").
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
// Logical OR between the previous and the next condition.
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
public TableQuery equal(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
public TableQuery equal(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqual(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equal(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqual(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
public TableQuery equal(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
// getTime() is milliseconds since the epoch; the /1000 converts to seconds —
// presumably the resolution the native datetime column stores. NOTE(review):
// a null Date throws NullPointerException here; confirm callers guarantee
// non-null.
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without a caseSensitive argument default to case-sensitive
// matching (caseSensitive = true).
// Equal
public TableQuery equal(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery eq(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equal(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
public TableQuery eq(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqual(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery neq(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqual(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
public TableQuery neq(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// NOTE(review): the no-arg findNext() passes Table.INFINITE as lastRow —
// presumably the native sentinel for "start from the beginning"; confirm
// against the native API.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Overloads without an explicit range aggregate over the whole result set
// (start=0, end/limit=Table.INFINITE).
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// remove() mutates the table and is the only operation guarded by the
// immutable flag.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
public class TableQuery {
// Debug tracing toggle; when true, construction and close are logged to stderr.
protected boolean DEBUG = false;
// Raw pointer to the native (JNI) query object; 0 once closed.
protected long nativePtr;
// When true, mutating operations (remove) throw IllegalStateException.
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Last-resort cleanup: frees the native query if close() was never called.
// close() is idempotent, so an earlier explicit close makes this a no-op.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native query resources. Idempotent: after the first
// successful call nativePtr is zeroed, so later calls (including the one
// issued by finalize()) do nothing.
private synchronized void close() {
    if (DEBUG) {
        System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
    }
    if (nativePtr != 0) {
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts this query to the rows contained in the given view.
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// group()/endGroup() bracket conditions so they combine as one parenthesized
// sub-expression in the native query.
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into a subtable column for subsequent conditions; endSubTable()
// returns to the parent level (the native call is named "parent").
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
// Logical OR between the previous and the next condition.
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Each method appends a condition to the native query and returns this for
// fluent chaining; the short aliases (eq, neq, gt, gte, lt, lte) call the
// same native functions as their long-named counterparts.
public TableQuery equal(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Same fluent pattern as the integer conditions; the float overloads resolve
// to native functions taking a float value.
public TableQuery equal(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqual(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Same fluent pattern as the integer/float conditions, with double overloads.
public TableQuery equal(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqual(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery neq(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
public TableQuery gt(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery gte(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
public TableQuery lt(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery lte(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Only equality is supported for booleans (no ordering conditions).
public TableQuery equal(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
public TableQuery eq(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
// getTime() is milliseconds since the epoch; /1000 converts to seconds —
// presumably the resolution the native datetime column stores. NOTE(review):
// a null Date throws NullPointerException here; confirm callers guarantee
// non-null.
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Each condition has a caseSensitive overload; the overloads without the
// flag default to caseSensitive = true.
// Equal
public TableQuery equal(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery eq(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equal(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
public TableQuery eq(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqual(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery neq(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqual(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
public TableQuery neq(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Substring conditions: prefix, suffix, and containment.
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// findNext(lastRow) resumes the search after lastRow; the no-arg overload
// passes Table.INFINITE — presumably "start from the beginning"; TODO confirm
// the sentinel's meaning against the native implementation.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// findAll materializes every matching row (within [start, end), up to limit)
// into a TableView; the view inherits this query's immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Each aggregate has a ranged form (start, end, limit) and a convenience
// form that covers all rows (0 .. Table.INFINITE, no limit).
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Average of an integer column is reported as double.
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
// Note: sum and average of a float column widen to double; min/max stay float.
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// Number of rows matching the query, optionally restricted to a row range
// and capped by limit.
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// Removes matching rows; returns the number of rows removed (per the native
// call's long result). Guarded by the immutable flag: calling this while in
// a read transaction throws IllegalStateException instead of mutating.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Single place to raise the read-transaction violation error.
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Right
package com.tightdb;
import java.util.Date;
/**
 * Fluent query builder over a native (JNI) tightdb query object.
 *
 * Every condition method forwards to a {@code protected native} counterpart
 * and returns {@code this} for chaining. Native method names and signatures
 * must match the compiled JNI bindings exactly.
 *
 * Lifecycle: the instance owns the native pointer {@code nativePtr}; it is
 * released by the idempotent {@link #close()} (invoked from {@code finalize}).
 * Thread-safety: only {@code close()} is synchronized; concurrent use of the
 * query itself is not guarded here.
 */
public class TableQuery {
// Enables construction/close tracing on System.err.
protected boolean DEBUG = false;
// Handle to the native query object; 0 once closed.
protected long nativePtr;
// When true, mutating calls (remove) throw instead of writing —
// see throwImmutable()'s message: set during a read transaction.
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Safety net: release native memory if the owner never closed the query.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent release of the native query; nativePtr is zeroed so a second
// call (e.g. finalize after explicit close) is a no-op.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
public TableQuery equalTo(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
// Inclusive range [value1, value2].
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
public TableQuery equalTo(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equalTo(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
public TableQuery equalTo(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
// Dates are truncated to whole seconds (getTime()/1000) before crossing the
// JNI boundary; a null value throws NullPointerException.
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeNotEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDate(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDate(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without the caseSensitive flag default it to true.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// findAll materializes all matches into a TableView; the view inherits the
// query's immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Each aggregate has a ranged form (start, end, limit) and a convenience
// form covering all rows (0 .. Table.INFINITE, no limit).
// Integer aggregation
public long sum(long columnIndex, long start, long end, long limit){
return nativeSum(nativePtr, columnIndex, start, end, limit);
}
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximum(long columnIndex, long start, long end, long limit){
return nativeMaximum(nativePtr, columnIndex, start, end, limit);
}
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimum(long columnIndex, long start, long end, long limit){
return nativeMinimum(nativePtr, columnIndex, start, end, limit);
}
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double average(long columnIndex, long start, long end, long limit){
return nativeAverage(nativePtr, columnIndex, start, end, limit);
}
public double average(long columnIndex){
return nativeAverage(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverage(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
// sum/average widen to double; min/max stay float.
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// Guarded by the immutable flag: throws IllegalStateException during a read
// transaction instead of mutating.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
public class TableQuery {
// Enables construction/close tracing on System.err.
protected boolean DEBUG = false;
// Handle to the native query object; 0 once closed.
protected long nativePtr;
// NOTE(review): presumably set when the query is created during a read
// transaction so that mutating calls can refuse to run — TODO confirm.
protected boolean immutable = false;
// TODO: Can we protect this?
// Wraps an already-created native query pointer; ownership transfers to
// this instance (released via close()/finalize()).
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Safety net: release the native query if the owner never closed it.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent release of the native resource; nativePtr is zeroed so a
// second call (e.g. finalize after an explicit close) is a no-op.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts the query to the rows of an existing TableView.
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// group()/endGroup() bracket a parenthesized sub-expression; or() combines
// adjacent conditions disjunctively. All return this for chaining.
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// subTable()/endSubTable() descend into and return from a subtable column.
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Each condition forwards to its native counterpart and returns this.
public TableQuery equalTo(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
// Inclusive range [value1, value2].
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Same shape as the integer conditions, resolved to the float overloads
// of the native methods.
public TableQuery equalTo(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Same shape as the integer/float conditions, resolved to the double
// overloads of the native methods.
public TableQuery equalTo(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Only equality is supported for booleans.
public TableQuery equalTo(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
// Dates are passed to the native layer as epoch seconds; the integer
// division by 1000 truncates any sub-second (millisecond) precision.
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeNotEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessEqualDate(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDate(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDate(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without the caseSensitive flag default to case-sensitive matching.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// findNext(lastRow) returns the index of the next matching row after the
// given one; the no-argument overload searches from the start
// (Table.INFINITE acts as the "no previous row" sentinel here).
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes matching rows between start and end (capped by limit) as a
// TableView; the view inherits this query's immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
// The single-argument overloads aggregate over all rows
// (0 .. Table.INFINITE); start/end/limit bound the rows examined
// (exact bound semantics are defined by the native layer).
public long sum(long columnIndex, long start, long end, long limit){
return nativeSum(nativePtr, columnIndex, start, end, limit);
}
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximum(long columnIndex, long start, long end, long limit){
return nativeMaximum(nativePtr, columnIndex, start, end, limit);
}
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimum(long columnIndex, long start, long end, long limit){
return nativeMinimum(nativePtr, columnIndex, start, end, limit);
}
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimum(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double average(long columnIndex, long start, long end, long limit){
return nativeAverage(nativePtr, columnIndex, start, end, limit);
}
public double average(long columnIndex){
return nativeAverage(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverage(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// NOTE(review): presumably deletes matching rows and returns the number
// removed — verify against the native implementation. Rejected while the
// query belongs to a read transaction (immutable == true).
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Uniform failure path for mutating calls made during a read transaction.
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// ----- MergeMethods: merged copies of TableQuery follow -----
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
// Pointer to the underlying native (C++) query object; 0 once closed.
protected long nativePtr;
// True when the query lives in a read transaction; mutating calls
// (remove) are rejected in that state.
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable) {
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Finalizer safety net: releases the native object if close() was never
// called explicitly.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent: the nativePtr == 0 guard makes repeated calls safe; the
// pointer is zeroed after the native handle is released.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts this query to the rows contained in the given view.
public TableQuery tableview(TableView tv) {
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// group()/endGroup() bracket conditions; or() inserts a logical OR between
// the surrounding conditions (conditions are otherwise ANDed).
public TableQuery group() {
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup() {
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into a subtable column; endSubTable() returns to the parent.
public TableQuery subTable(long columnIndex) {
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable() {
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or() {
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Each comparison records a condition on the native query and returns
// 'this' for fluent chaining.
public TableQuery equalTo(long columnIndex, long value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
public TableQuery equalTo(long columnIndex, float value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equalTo(long columnIndex, double value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Only equality is supported for booleans.
public TableQuery equalTo(long columnIndex, boolean value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values.
// Dates are marshalled to the native layer as epoch seconds; the integer
// division by 1000 truncates sub-second (millisecond) precision.
// NOTE(review): the merge introduced five duplicate equalTo(long, Date)
// overloads that shadowed greaterThan/greaterThanOrEqual/lessThan/
// lessThanOrEqual — duplicate method signatures do not compile, and each
// duplicate delegated to the wrong native comparison. They are removed so
// every comparison maps to its matching native call exactly once.
public TableQuery equalTo(long columnIndex, Date value) {
    nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, Date value) {
    nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value) {
    nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value) {
    nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value) {
    nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value) {
    nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2) {
    nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime() / 1000, value2.getTime() / 1000);
    return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without the caseSensitive flag default to case-sensitive matching.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive) {
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value) {
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive) {
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value) {
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive) {
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value) {
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive) {
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value) {
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive) {
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value) {
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// findNext(lastRow) returns the index of the next matching row after the
// given one; the no-argument overload searches from the start.
public long findNext(long lastRow) {
return nativeFindNext(nativePtr, lastRow);
}
public long findNext() {
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes matching rows between start and end (capped by limit) as a
// TableView; the view inherits this query's immutability flag.
public TableView findAll(long start, long end, long limit) {
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll() {
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
// The single-argument overloads aggregate over all rows
// (0 .. Table.INFINITE); start/end/limit bound the rows examined
// (exact bound semantics are defined by the native layer).
public long sumInt(long columnIndex, long start, long end, long limit) {
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit) {
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit) {
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit) {
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit) {
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit) {
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit) {
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit) {
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit) {
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit) {
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit) {
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit) {
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit) {
return nativeCount(nativePtr, start, end, limit);
}
public long count() {
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
// NOTE(review): presumably deletes matching rows and returns the number
// removed — verify against the native implementation. Rejected while the
// query belongs to a read transaction (immutable == true).
public long remove(long start, long end) {
if (immutable)
throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove() {
if (immutable)
throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Uniform failure path for mutating calls made during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
// Pointer to the underlying native (C++) query object; 0 once closed.
protected long nativePtr;
// True when the query lives in a read transaction; mutating calls
// (remove) are rejected in that state.
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable) {
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Finalizer safety net: releases the native object if close() was never
// called explicitly.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent: the nativePtr == 0 guard makes repeated calls safe; the
// pointer is zeroed after the native handle is released.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
// Restricts this query to the rows contained in the given view.
public TableQuery tableview(TableView tv) {
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
// group()/endGroup() bracket conditions; or() inserts a logical OR between
// the surrounding conditions (conditions are otherwise ANDed).
public TableQuery group() {
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup() {
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
// Descends into a subtable column; endSubTable() returns to the parent.
public TableQuery subTable(long columnIndex) {
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable() {
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or() {
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Each condition narrows the query to rows whose long column matches and
// returns this query for fluent chaining.
public TableQuery equalTo(long columnIndex, long value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose value lies between value1 and value2 (bound inclusivity
// is decided by the native layer — presumably inclusive; confirm there).
public TableQuery between(long columnIndex, long value1, long value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Same pattern as the long overloads above, for float columns.
public TableQuery equalTo(long columnIndex, float value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Same pattern as the long/float overloads above, for double columns.
public TableQuery equalTo(long columnIndex, double value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Only equality is meaningful for booleans, hence the single condition.
public TableQuery equalTo(long columnIndex, boolean value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values.
// Dates are compared with second precision: java.util.Date carries
// milliseconds while the native layer stores seconds, hence getTime() / 1000.
//
// FIX(review): the previous revision contained five extra overloads all named
// equalTo(long, Date) — identical signatures (a compile error), four of which
// were mis-renamed copies that actually invoked the greater/less natives.
// They are removed; the correctly-named comparison methods below remain.
public TableQuery equalTo(long columnIndex, Date value) {
nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, Date value) {
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value) {
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value) {
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value) {
nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value) {
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date lies between value1 and value2 (bound inclusivity
// is decided by the native layer — presumably inclusive; confirm there).
public TableQuery between(long columnIndex, Date value1, Date value2) {
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime() / 1000, value2.getTime() / 1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Overloads without the caseSensitive flag default to case-sensitive matching.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive) {
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value) {
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive) {
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value) {
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Begins with
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive) {
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value) {
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Ends with
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive) {
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value) {
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Contains
public TableQuery contains(long columnIndex, String value, boolean caseSensitive) {
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value) {
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// Returns the index of the next matching row after lastRow. The no-argument
// overload passes Table.INFINITE as the starting point — sentinel semantics
// are defined by the native layer (presumably "search from the beginning";
// confirm against the native implementation).
public long findNext(long lastRow) {
return nativeFindNext(nativePtr, lastRow);
}
public long findNext() {
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes all matching rows as a TableView. The (start, end, limit)
// overload restricts the scanned row range and the result count; the view
// inherits this query's immutability flag.
public TableView findAll(long start, long end, long limit) {
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll() {
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
// Aggregates are computed over rows matching the query. The (start, end,
// limit) overloads restrict the scanned row range; the single-argument
// overloads scan everything (0 .. Table.INFINITE).
public long sumInt(long columnIndex, long start, long end, long limit) {
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit) {
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit) {
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit) {
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Float aggregation
// (Header fixed: this section previously repeated the "Integer aggregation"
// banner above it.) Note sumFloat/averageFloat return double; min/max return float.
public double sumFloat(long columnIndex, long start, long end, long limit) {
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit) {
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit) {
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit) {
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Double aggregation
// Same range conventions as the integer aggregates above.
public double sumDouble(long columnIndex, long start, long end, long limit) {
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit) {
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit) {
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit) {
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// Number of rows matching the query, optionally restricted to a row range
// and capped by limit.
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit) {
return nativeCount(nativePtr, start, end, limit);
}
public long count() {
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
/**
 * Removes the matching rows within the given row range (range semantics are
 * defined by the native layer — presumably inclusive bounds; confirm there).
 *
 * @return the number of rows removed, as reported by the native layer
 * @throws IllegalStateException if the table is in a read transaction
 */
public long remove(long start, long end) {
if (immutable) {
throwImmutable();
}
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
/** Removes all rows matching the query. */
public long remove() {
if (immutable) {
throwImmutable();
}
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Centralized failure for mutating calls made during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// MERGE ARTIFACT: stray "KeepBothMethods" marker left by a merge tool.
// Everything below this line is a duplicated copy of the TableQuery class
// (including a second package declaration, which is invalid Java) and should
// be removed when the merge is resolved.
package com.tightdb;
import java.util.Date;
public class TableQuery {
// When true, construction and close events are traced to stderr.
protected boolean DEBUG = false;
// Pointer to the underlying native query object; 0 once closed.
protected long nativePtr;
// Mirrors the owning transaction's read-only state; mutating calls check it.
protected boolean immutable = false;
// TODO: Can we protect this?
// Wraps an already-created native query object. The caller transfers
// ownership of nativeQueryPtr to this instance.
public TableQuery(long nativeQueryPtr, boolean immutable) {
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
// Finalizer safety net: releases the native object if close() was never
// called explicitly.
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native query object. Idempotent: nativePtr is zeroed after
// the native close, so a second call returns early.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
public TableQuery tableview(TableView tv) {
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
public TableQuery group() {
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup() {
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
public TableQuery subTable(long columnIndex) {
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable() {
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or() {
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Query for integer values.
public TableQuery equalTo(long columnIndex, long value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Query for float values.
public TableQuery equalTo(long columnIndex, float value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Query for double values.
public TableQuery equalTo(long columnIndex, double value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value) {
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value) {
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value) {
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value) {
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value) {
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2) {
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Query for boolean values.
public TableQuery equalTo(long columnIndex, boolean value) {
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values.
// Both the legacy short names (equal/eq, notEqual/neq, gt, gte, lt, lte) and
// the newer *To names are kept for backward compatibility; each pair/triple
// delegates to the same native call. Dates are compared with second
// precision: java.util.Date carries milliseconds, hence getTime() / 1000.
//
// FIX(review): equalTo previously called nativeEqualDate and notEqualTo
// called nativeNotEqualDate — neither native is declared anywhere, a compile
// error. Both are routed to the declared *DateTime natives their synonyms use.
public TableQuery equal(long columnIndex, Date value) {
nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery eq(long columnIndex, Date value) {
nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery equalTo(long columnIndex, Date value) {
nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqual(long columnIndex, Date value) {
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery neq(long columnIndex, Date value) {
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery notEqualTo(long columnIndex, Date value) {
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value) {
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery gt(long columnIndex, Date value) {
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value) {
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery gte(long columnIndex, Date value) {
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value) {
nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery lt(long columnIndex, Date value) {
nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value) {
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
public TableQuery lte(long columnIndex, Date value) {
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
// Matches rows whose date lies between value1 and value2 (bound inclusivity
// is decided by the native layer — presumably inclusive; confirm there).
public TableQuery between(long columnIndex, Date value1, Date value2) {
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime() / 1000, value2.getTime() / 1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Equal
// Query for String values.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive) {
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value) {
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive) {
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value) {
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive) {
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value) {
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive) {
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value) {
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive) {
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value) {
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
public long findNext(long lastRow) {
return nativeFindNext(nativePtr, lastRow);
}
public long findNext() {
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
public TableView findAll(long start, long end, long limit) {
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll() {
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit) {
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit) {
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit) {
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit) {
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit) {
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit) {
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit) {
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit) {
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// ---- Aggregation over double columns ----
// The (columnIndex) convenience overloads scan the whole result set.

/** Sum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double sumDouble(long columnIndex, long start, long end, long limit) {
    return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Sum of a double column over every matching row. */
public double sumDouble(long columnIndex) {
    return sumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Maximum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double maximumDouble(long columnIndex, long start, long end, long limit) {
    return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Maximum of a double column over every matching row. */
public double maximumDouble(long columnIndex) {
    return maximumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Minimum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double minimumDouble(long columnIndex, long start, long end, long limit) {
    return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Minimum of a double column over every matching row. */
public double minimumDouble(long columnIndex) {
    return minimumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Mean of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double averageDouble(long columnIndex, long start, long end, long limit) {
    return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}

/** Mean of a double column over every matching row. */
public double averageDouble(long columnIndex) {
    return averageDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// ---- Counting ----
// TODO: Rename all start, end parameter names to firstRow, lastRow

/** Number of matching rows within [start, end), visiting at most {@code limit} matches. */
public long count(long start, long end, long limit) {
    return nativeCount(nativePtr, start, end, limit);
}

/** Number of matching rows in the whole table. */
public long count() {
    return count(0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// ---- Deletion ----

/**
 * Deletes every matching row within [start, end).
 *
 * @return the number of rows removed
 * @throws IllegalStateException when the owning transaction is read-only
 */
public long remove(long start, long end) {
    if (immutable) {
        throwImmutable();
    }
    return nativeRemove(nativePtr, start, end, Table.INFINITE);
}

/** Deletes every matching row in the table; see {@link #remove(long, long)}. */
public long remove() {
    if (immutable) {
        throwImmutable();
    }
    return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);

// Shared guard for all mutating entry points.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
/**
 * Wraps an already-created native query object.
 *
 * @param nativeQueryPtr pointer to the underlying native query
 * @param immutable      true when the owning transaction is read-only
 */
public TableQuery(long nativeQueryPtr, boolean immutable) {
    if (DEBUG) {
        System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
    }
    this.nativePtr = nativeQueryPtr;
    this.immutable = immutable;
}

/** Releases the native query when the object is garbage collected. */
@Override
public void finalize() throws Throwable {
    try {
        close();
    } finally {
        super.finalize();
    }
}

// Frees the native resource exactly once; later calls are no-ops.
private synchronized void close() {
    if (DEBUG) {
        System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
    }
    if (nativePtr != 0) {
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}

protected native void nativeClose(long nativeQueryPtr);
// ---- Scoping and boolean structure ----
// All builder methods return this so calls can be chained fluently.

/** Restricts the query to the rows contained in the given view. */
public TableQuery tableview(TableView tv) {
    nativeTableview(nativePtr, tv.nativePtr);
    return this;
}

protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);

/** Opens a parenthesized condition group. */
public TableQuery group() {
    nativeGroup(nativePtr);
    return this;
}

protected native void nativeGroup(long nativeQueryPtr);

/** Closes the group opened by {@link #group()}. */
public TableQuery endGroup() {
    nativeEndGroup(nativePtr);
    return this;
}

protected native void nativeEndGroup(long nativeQueryPtr);

/** Descends into the subtable column at {@code columnIndex}. */
public TableQuery subTable(long columnIndex) {
    nativeSubTable(nativePtr, columnIndex);
    return this;
}

protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);

/** Returns from the subtable entered via {@link #subTable(long)}. */
public TableQuery endSubTable() {
    nativeParent(nativePtr);
    return this;
}

protected native void nativeParent(long nativeQueryPtr);

/** ORs the previous condition with the next one (default is AND). */
public TableQuery or() {
    nativeOr(nativePtr);
    return this;
}

protected native void nativeOr(long nativeQueryPtr);
// ---- Conditions on integer (long) columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, long value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, long value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, long value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, long value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, long value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, long value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, long value1, long value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// ---- Conditions on float columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, float value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, float value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, float value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, float value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, float value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, float value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, float value1, float value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// ---- Conditions on double columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, double value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, double value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, double value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, double value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, double value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, double value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, double value1, double value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// ---- Conditions on boolean columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, boolean value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// ---- Conditions on Date columns ----
// Dates are handed to native code as epoch seconds (value.getTime() / 1000).
// FIX: equalTo(long, Date) and notEqualTo(long, Date) previously called
// nativeEqualDate / nativeNotEqualDate, which are declared nowhere in this
// class (only the *DateTime natives are), so the class could not compile or
// link. They now use the declared natives, matching equal/eq/notEqual/neq.

/** Column value equal to {@code value}. */
public TableQuery equal(long columnIndex, Date value) {
    nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #equal(long, Date)}. */
public TableQuery eq(long columnIndex, Date value) {
    nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);

/** Column value equal to {@code value}. */
public TableQuery equalTo(long columnIndex, Date value) {
    nativeEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Column value different from {@code value}. */
public TableQuery notEqual(long columnIndex, Date value) {
    nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #notEqual(long, Date)}. */
public TableQuery neq(long columnIndex, Date value) {
    nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);

/** Column value different from {@code value}. */
public TableQuery notEqualTo(long columnIndex, Date value) {
    nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Column value after {@code value}. */
public TableQuery greaterThan(long columnIndex, Date value) {
    nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #greaterThan(long, Date)}. */
public TableQuery gt(long columnIndex, Date value) {
    nativeGreaterDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);

/** Column value at or after {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, Date value) {
    nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #greaterThanOrEqual(long, Date)}. */
public TableQuery gte(long columnIndex, Date value) {
    nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);

/** Column value before {@code value}. */
public TableQuery lessThan(long columnIndex, Date value) {
    nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #lessThan(long, Date)}. */
public TableQuery lt(long columnIndex, Date value) {
    nativeLessDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);

/** Column value at or before {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, Date value) {
    nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

/** Short alias of {@link #lessThanOrEqual(long, Date)}. */
public TableQuery lte(long columnIndex, Date value) {
    nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime() / 1000);
    return this;
}

protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, Date value1, Date value2) {
    nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime() / 1000, value2.getTime() / 1000);
    return this;
}

protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// ---- Conditions on String columns ----
// Each condition has a case-sensitive flag; the short overloads default to
// case-sensitive matching.

/** Column value equal to {@code value}. */
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive) {
    nativeEqual(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive equality. */
public TableQuery equalTo(long columnIndex, String value) {
    return equalTo(columnIndex, value, true);
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value different from {@code value}. */
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive) {
    nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive inequality. */
public TableQuery notEqualTo(long columnIndex, String value) {
    return notEqualTo(columnIndex, value, true);
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value starting with {@code value}. */
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive) {
    nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive prefix match. */
public TableQuery beginsWith(long columnIndex, String value) {
    return beginsWith(columnIndex, value, true);
}

protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value ending with {@code value}. */
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive) {
    nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive suffix match. */
public TableQuery endsWith(long columnIndex, String value) {
    return endsWith(columnIndex, value, true);
}

protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value containing {@code value}. */
public TableQuery contains(long columnIndex, String value, boolean caseSensitive) {
    nativeContains(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive substring match. */
public TableQuery contains(long columnIndex, String value) {
    return contains(columnIndex, value, true);
}

protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// ---- Executing the query ----

/** Index of the next matching row after {@code lastRow}. */
public long findNext(long lastRow) {
    return nativeFindNext(nativePtr, lastRow);
}

/** Index of the first matching row. */
public long findNext() {
    return findNext(Table.INFINITE);
}

protected native long nativeFindNext(long nativeQueryPtr, long lastRow);

/** All matches within [start, end), visiting at most {@code limit} rows. */
public TableView findAll(long start, long end, long limit) {
    long viewPtr = nativeFindAll(nativePtr, start, end, limit);
    return new TableView(viewPtr, immutable);
}

/** All matching rows in the table. */
public TableView findAll() {
    return findAll(0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
// ---- Aggregation over integer (long) columns ----
// The (columnIndex) convenience overloads scan the whole result set.

/** Sum of a long column over rows [start, end), visiting at most {@code limit} matches. */
public long sumInt(long columnIndex, long start, long end, long limit) {
    return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}

/** Sum of a long column over every matching row. */
public long sumInt(long columnIndex) {
    return sumInt(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Maximum of a long column over rows [start, end), visiting at most {@code limit} matches. */
public long maximumInt(long columnIndex, long start, long end, long limit) {
    return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}

/** Maximum of a long column over every matching row. */
public long maximumInt(long columnIndex) {
    return maximumInt(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Minimum of a long column over rows [start, end), visiting at most {@code limit} matches. */
public long minimumInt(long columnIndex, long start, long end, long limit) {
    return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}

/** Minimum of a long column over every matching row. */
public long minimumInt(long columnIndex) {
    return minimumInt(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Mean of a long column over rows [start, end), visiting at most {@code limit} matches. */
public double averageInt(long columnIndex, long start, long end, long limit) {
    return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}

/** Mean of a long column over every matching row. */
public double averageInt(long columnIndex) {
    return averageInt(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// ---- Aggregation over float columns ----
// (The duplicated "Integer aggregation" header that used to sit here was a
// leftover of a merge; this section aggregates float columns.)

/** Sum of a float column over rows [start, end), visiting at most {@code limit} matches. */
public double sumFloat(long columnIndex, long start, long end, long limit) {
    return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}

/** Sum of a float column over every matching row. */
public double sumFloat(long columnIndex) {
    return sumFloat(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Maximum of a float column over rows [start, end), visiting at most {@code limit} matches. */
public float maximumFloat(long columnIndex, long start, long end, long limit) {
    return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}

/** Maximum of a float column over every matching row. */
public float maximumFloat(long columnIndex) {
    return maximumFloat(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Minimum of a float column over rows [start, end), visiting at most {@code limit} matches. */
public float minimumFloat(long columnIndex, long start, long end, long limit) {
    return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}

/** Minimum of a float column over every matching row. */
public float minimumFloat(long columnIndex) {
    return minimumFloat(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Mean of a float column over rows [start, end), visiting at most {@code limit} matches. */
public double averageFloat(long columnIndex, long start, long end, long limit) {
    return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}

/** Mean of a float column over every matching row. */
public double averageFloat(long columnIndex) {
    return averageFloat(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// ---- Aggregation over double columns ----

/** Sum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double sumDouble(long columnIndex, long start, long end, long limit) {
    return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Sum of a double column over every matching row. */
public double sumDouble(long columnIndex) {
    return sumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Maximum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double maximumDouble(long columnIndex, long start, long end, long limit) {
    return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Maximum of a double column over every matching row. */
public double maximumDouble(long columnIndex) {
    return maximumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Minimum of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double minimumDouble(long columnIndex, long start, long end, long limit) {
    return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}

/** Minimum of a double column over every matching row. */
public double minimumDouble(long columnIndex) {
    return minimumDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);

/** Mean of a double column over rows [start, end), visiting at most {@code limit} matches. */
public double averageDouble(long columnIndex, long start, long end, long limit) {
    return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}

/** Mean of a double column over every matching row. */
public double averageDouble(long columnIndex) {
    return averageDouble(columnIndex, 0, Table.INFINITE, Table.INFINITE);
}

protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// ---- Counting ----
// TODO: Rename all start, end parameter names to firstRow, lastRow

/** Number of matching rows within [start, end), visiting at most {@code limit} matches. */
public long count(long start, long end, long limit) {
    return nativeCount(nativePtr, start, end, limit);
}

/** Number of matching rows in the whole table. */
public long count() {
    return count(0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// ---- Deletion ----

/**
 * Deletes every matching row within [start, end).
 *
 * @return the number of rows removed
 * @throws IllegalStateException when the owning transaction is read-only
 */
public long remove(long start, long end) {
    if (immutable) {
        throwImmutable();
    }
    return nativeRemove(nativePtr, start, end, Table.INFINITE);
}

/** Deletes every matching row in the table; see {@link #remove(long, long)}. */
public long remove() {
    if (immutable) {
        throwImmutable();
    }
    return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}

protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);

// Shared guard for all mutating entry points.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Safe
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
/**
 * Wraps an already-created native query object.
 *
 * @param nativeQueryPtr pointer to the underlying native query
 * @param immutable      true when the owning transaction is read-only
 */
public TableQuery(long nativeQueryPtr, boolean immutable) {
    if (DEBUG) {
        System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
    }
    this.nativePtr = nativeQueryPtr;
    this.immutable = immutable;
}

/** Releases the native query when the object is garbage collected. */
@Override
public void finalize() throws Throwable {
    try {
        close();
    } finally {
        super.finalize();
    }
}

// Frees the native resource exactly once; later calls are no-ops.
private synchronized void close() {
    if (DEBUG) {
        System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
    }
    if (nativePtr != 0) {
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}

protected native void nativeClose(long nativeQueryPtr);
// ---- Scoping and boolean structure ----
// All builder methods return this so calls can be chained fluently.

/** Restricts the query to the rows contained in the given view. */
public TableQuery tableview(TableView tv) {
    nativeTableview(nativePtr, tv.nativePtr);
    return this;
}

protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);

/** Opens a parenthesized condition group. */
public TableQuery group() {
    nativeGroup(nativePtr);
    return this;
}

protected native void nativeGroup(long nativeQueryPtr);

/** Closes the group opened by {@link #group()}. */
public TableQuery endGroup() {
    nativeEndGroup(nativePtr);
    return this;
}

protected native void nativeEndGroup(long nativeQueryPtr);

/** Descends into the subtable column at {@code columnIndex}. */
public TableQuery subTable(long columnIndex) {
    nativeSubTable(nativePtr, columnIndex);
    return this;
}

protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);

/** Returns from the subtable entered via {@link #subTable(long)}. */
public TableQuery endSubTable() {
    nativeParent(nativePtr);
    return this;
}

protected native void nativeParent(long nativeQueryPtr);

/** ORs the previous condition with the next one (default is AND). */
public TableQuery or() {
    nativeOr(nativePtr);
    return this;
}

protected native void nativeOr(long nativeQueryPtr);
// ---- Conditions on integer (long) columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, long value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, long value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, long value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, long value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, long value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, long value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, long value1, long value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// ---- Conditions on float columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, float value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, float value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, float value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, float value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, float value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, float value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, float value1, float value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// ---- Conditions on double columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, double value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value != {@code value}. */
public TableQuery notEqualTo(long columnIndex, double value) {
    nativeNotEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value &gt; {@code value}. */
public TableQuery greaterThan(long columnIndex, double value) {
    nativeGreater(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);

/** Column value &gt;= {@code value}. */
public TableQuery greaterThanOrEqual(long columnIndex, double value) {
    nativeGreaterEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);

/** Column value &lt; {@code value}. */
public TableQuery lessThan(long columnIndex, double value) {
    nativeLess(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);

/** Column value &lt;= {@code value}. */
public TableQuery lessThanOrEqual(long columnIndex, double value) {
    nativeLessEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);

/** value1 &lt;= column value &lt;= value2. */
public TableQuery between(long columnIndex, double value1, double value2) {
    nativeBetween(nativePtr, columnIndex, value1, value2);
    return this;
}

protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// ---- Conditions on boolean columns ----

/** Column value == {@code value}. */
public TableQuery equalTo(long columnIndex, boolean value) {
    nativeEqual(nativePtr, columnIndex, value);
    return this;
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
<<<<<<< MINE
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
<<<<<<< MINE
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// ---- Conditions on String columns ----
// Each condition has a case-sensitive flag; the short overloads default to
// case-sensitive matching.

/** Column value equal to {@code value}. */
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive) {
    nativeEqual(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive equality. */
public TableQuery equalTo(long columnIndex, String value) {
    return equalTo(columnIndex, value, true);
}

protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value different from {@code value}. */
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive) {
    nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive inequality. */
public TableQuery notEqualTo(long columnIndex, String value) {
    return notEqualTo(columnIndex, value, true);
}

protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value starting with {@code value}. */
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive) {
    nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive prefix match. */
public TableQuery beginsWith(long columnIndex, String value) {
    return beginsWith(columnIndex, value, true);
}

protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value ending with {@code value}. */
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive) {
    nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive suffix match. */
public TableQuery endsWith(long columnIndex, String value) {
    return endsWith(columnIndex, value, true);
}

protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);

/** Column value containing {@code value}. */
public TableQuery contains(long columnIndex, String value, boolean caseSensitive) {
    nativeContains(nativePtr, columnIndex, value, caseSensitive);
    return this;
}

/** Case-sensitive substring match. */
public TableQuery contains(long columnIndex, String value) {
    return contains(columnIndex, value, true);
}

protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
// Returns the row index of the next match after lastRow; the no-arg
// overload searches from the start (Table.INFINITE is the native
// "unbounded" sentinel — exact not-found value is defined by the core;
// TODO confirm against the native layer).
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
// Materializes matching rows in the given range (capped at limit) as a
// TableView that inherits this query's immutability flag.
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation. Each aggregate has a ranged form (start, end, limit
// forwarded untouched to the native layer) and a convenience form over all
// rows using the Table.INFINITE sentinel.
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Average is returned as double even for integer columns.
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Float aggregation. (Dropped a duplicated "Aggregation methods /
// Integer aggregation" header that was copy-pasted here by mistake.)
// Note: sum and average widen to double; min/max keep the column's float type.
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// Double aggregation — same ranged/whole-table overload pattern as above.
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion. Removes matching rows; mutating calls are rejected while the
// underlying table is in a read transaction (immutable == true).
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Shared guard for all mutating entry points.
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
// Query for integer values.
public TableQuery equalTo(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
// Query for float values.
public TableQuery equalTo(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
// Query for double values.
public TableQuery equalTo(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
// Query for boolean values.
public TableQuery equalTo(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
<<<<<<< MINE
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
<<<<<<< MINE
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
=======
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
>>>>>>> YOURS
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Equal
// Query for String values.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// ===== Unstructured merge variant of TableQuery (conflicts serialized inline) =====
package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
public TableQuery equalTo(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
public TableQuery equalTo(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equalTo(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
public TableQuery equalTo(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
<<<<<<< MINE
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
<<<<<<< MINE
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
=======
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
>>>>>>> YOURS
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
<<<<<<< MINE
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
=======
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
>>>>>>> YOURS
public TableQuery lessThanOrEqual(long columnIndex, Date value){
<<<<<<< MINE
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
// Shared failure path for all mutating calls made while the query's owner is in
// a read transaction (immutable == true).
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}package com.tightdb;
import java.util.Date;
public class TableQuery {
protected boolean DEBUG = false;
protected long nativePtr;
protected boolean immutable = false;
// TODO: Can we protect this?
public TableQuery(long nativeQueryPtr, boolean immutable){
if (DEBUG)
System.err.println("++++++ new TableQuery, ptr= " + nativeQueryPtr);
this.immutable = immutable;
this.nativePtr = nativeQueryPtr;
}
@Override
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Frees the native query exactly once. synchronized so that an explicit close
// and the finalizer cannot race; nativePtr == 0 marks an already-closed query
// and makes repeated calls no-ops.
private synchronized void close() {
if (DEBUG)
System.err.println("++++ Query CLOSE, ptr= " + nativePtr);
if (nativePtr == 0) {
return;
}
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeQueryPtr);
// Query TableView
public TableQuery tableview(TableView tv){
nativeTableview(nativePtr, tv.nativePtr);
return this;
}
protected native void nativeTableview(long nativeQueryPtr, long nativeTableViewPtr);
// Grouping
public TableQuery group(){
nativeGroup(nativePtr);
return this;
}
protected native void nativeGroup(long nativeQueryPtr);
public TableQuery endGroup(){
nativeEndGroup(nativePtr);
return this;
}
protected native void nativeEndGroup(long nativeQueryPtr);
public TableQuery subTable(long columnIndex){
nativeSubTable(nativePtr, columnIndex);
return this;
}
protected native void nativeSubTable(long nativeQueryPtr, long columnIndex);
public TableQuery endSubTable(){
nativeParent(nativePtr);
return this;
}
protected native void nativeParent(long nativeQueryPtr);
public TableQuery or(){
nativeOr(nativePtr);
return this;
}
protected native void nativeOr(long nativeQueryPtr);
// Query for integer values.
public TableQuery equalTo(long columnIndex, long value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery notEqualTo(long columnIndex, long value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, long value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThanOrEqual(long columnIndex, long value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, long value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThanOrEqual(long columnIndex, long value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, long value1, long value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for float values.
public TableQuery equalTo(long columnIndex, float value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery notEqualTo(long columnIndex, float value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThan(long columnIndex, float value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, float value);
public TableQuery greaterThanOrEqual(long columnIndex, float value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThan(long columnIndex, float value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, float value);
public TableQuery lessThanOrEqual(long columnIndex, float value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, float value);
public TableQuery between(long columnIndex, float value1, float value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, float value1, float value2);
// Query for double values.
public TableQuery equalTo(long columnIndex, double value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery notEqualTo(long columnIndex, double value){
nativeNotEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThan(long columnIndex, double value){
nativeGreater(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreater(long nativeQueryPtr, long columnIndex, double value);
public TableQuery greaterThanOrEqual(long columnIndex, double value){
nativeGreaterEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeGreaterEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThan(long columnIndex, double value){
nativeLess(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLess(long nativeQueryPtr, long columnIndex, double value);
public TableQuery lessThanOrEqual(long columnIndex, double value){
nativeLessEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeLessEqual(long nativeQueryPtr, long columnIndex, double value);
public TableQuery between(long columnIndex, double value1, double value2){
nativeBetween(nativePtr, columnIndex, value1, value2);
return this;
}
protected native void nativeBetween(long nativeQueryPtr, long columnIndex, double value1, double value2);
// Query for boolean values.
public TableQuery equalTo(long columnIndex, boolean value){
nativeEqual(nativePtr, columnIndex, value);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, boolean value);
// Query for Date values
<<<<<<< MINE
public TableQuery equal(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery eq(long columnIndex, Date value){
nativeEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
public TableQuery equalTo(long columnIndex, Date value){
nativeEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
<<<<<<< MINE
public TableQuery notEqual(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery neq(long columnIndex, Date value){
nativeNotEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
public TableQuery notEqualTo(long columnIndex, Date value){
nativeNotEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeNotEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery greaterThan(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery gt(long columnIndex, Date value){
nativeGreaterDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeGreaterDateTime(long nativeQueryPtr, long columnIndex, long value);
=======
protected native void nativeGreaterDate(long nativeQueryPtr, long columnIndex, long value);
>>>>>>> YOURS
public TableQuery greaterThanOrEqual(long columnIndex, Date value){
<<<<<<< MINE
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery gte(long columnIndex, Date value){
nativeGreaterEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
nativeGreaterEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeGreaterEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery lessThan(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
<<<<<<< MINE
public TableQuery lt(long columnIndex, Date value){
nativeLessDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
protected native void nativeLessDateTime(long nativeQueryPtr, long columnIndex, long value);
=======
protected native void nativeLessDate(long nativeQueryPtr, long columnIndex, long value);
>>>>>>> YOURS
public TableQuery lessThanOrEqual(long columnIndex, Date value){
<<<<<<< MINE
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
return this;
}
public TableQuery lte(long columnIndex, Date value){
nativeLessEqualDateTime(nativePtr, columnIndex, value.getTime()/1000);
=======
nativeLessEqualDate(nativePtr, columnIndex, value.getTime()/1000);
>>>>>>> YOURS
return this;
}
protected native void nativeLessEqualDateTime(long nativeQueryPtr, long columnIndex, long value);
public TableQuery between(long columnIndex, Date value1, Date value2){
nativeBetweenDateTime(nativePtr, columnIndex, value1.getTime()/1000, value2.getTime()/1000);
return this;
}
protected native void nativeBetweenDateTime(long nativeQueryPtr, long columnIndex, long value1, long value2);
// Query for String values.
// Equal
public TableQuery equalTo(long columnIndex, String value, boolean caseSensitive){
nativeEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery equalTo(long columnIndex, String value){
nativeEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Not Equal
public TableQuery notEqualTo(long columnIndex, String value, boolean caseSensitive){
nativeNotEqual(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery notEqualTo(long columnIndex, String value){
nativeNotEqual(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeNotEqual(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery beginsWith(long columnIndex, String value, boolean caseSensitive){
nativeBeginsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery beginsWith(long columnIndex, String value){
nativeBeginsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeBeginsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery endsWith(long columnIndex, String value, boolean caseSensitive){
nativeEndsWith(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery endsWith(long columnIndex, String value){
nativeEndsWith(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeEndsWith(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
public TableQuery contains(long columnIndex, String value, boolean caseSensitive){
nativeContains(nativePtr, columnIndex, value, caseSensitive);
return this;
}
public TableQuery contains(long columnIndex, String value){
nativeContains(nativePtr, columnIndex, value, true);
return this;
}
protected native void nativeContains(long nativeQueryPtr, long columnIndex, String value, boolean caseSensitive);
// Searching methods.
public long findNext(long lastRow){
return nativeFindNext(nativePtr, lastRow);
}
public long findNext(){
return nativeFindNext(nativePtr, Table.INFINITE);
}
protected native long nativeFindNext(long nativeQueryPtr, long lastRow);
public TableView findAll(long start, long end, long limit){
return new TableView(nativeFindAll(nativePtr, start, end, limit), immutable);
}
public TableView findAll(){
return new TableView(nativeFindAll(nativePtr, 0, Table.INFINITE, Table.INFINITE), immutable);
}
protected native long nativeFindAll(long nativeQueryPtr, long start, long end, long limit);
//
// Aggregation methods
//
// Integer aggregation
public long sumInt(long columnIndex, long start, long end, long limit){
return nativeSumInt(nativePtr, columnIndex, start, end, limit);
}
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeSumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long maximumInt(long columnIndex, long start, long end, long limit){
return nativeMaximumInt(nativePtr, columnIndex, start, end, limit);
}
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMaximumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public long minimumInt(long columnIndex, long start, long end, long limit){
return nativeMinimumInt(nativePtr, columnIndex, start, end, limit);
}
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeMinimumInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageInt(long columnIndex, long start, long end, long limit){
return nativeAverageInt(nativePtr, columnIndex, start, end, limit);
}
public double averageInt(long columnIndex){
return nativeAverageInt(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageInt(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// float aggregation
public double sumFloat(long columnIndex, long start, long end, long limit){
return nativeSumFloat(nativePtr, columnIndex, start, end, limit);
}
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float maximumFloat(long columnIndex, long start, long end, long limit){
return nativeMaximumFloat(nativePtr, columnIndex, start, end, limit);
}
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMaximumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public float minimumFloat(long columnIndex, long start, long end, long limit){
return nativeMinimumFloat(nativePtr, columnIndex, start, end, limit);
}
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native float nativeMinimumFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageFloat(long columnIndex, long start, long end, long limit){
return nativeAverageFloat(nativePtr, columnIndex, start, end, limit);
}
public double averageFloat(long columnIndex){
return nativeAverageFloat(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageFloat(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// double aggregation
public double sumDouble(long columnIndex, long start, long end, long limit){
return nativeSumDouble(nativePtr, columnIndex, start, end, limit);
}
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeSumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double maximumDouble(long columnIndex, long start, long end, long limit){
return nativeMaximumDouble(nativePtr, columnIndex, start, end, limit);
}
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMaximumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double minimumDouble(long columnIndex, long start, long end, long limit){
return nativeMinimumDouble(nativePtr, columnIndex, start, end, limit);
}
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeMinimumDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
public double averageDouble(long columnIndex, long start, long end, long limit){
return nativeAverageDouble(nativePtr, columnIndex, start, end, limit);
}
public double averageDouble(long columnIndex){
return nativeAverageDouble(nativePtr, columnIndex, 0, Table.INFINITE, Table.INFINITE);
}
protected native double nativeAverageDouble(long nativeQueryPtr, long columnIndex, long start, long end, long limit);
// count
// TODO: Rename all start, end parameter names to firstRow, lastRow
public long count(long start, long end, long limit){
return nativeCount(nativePtr, start, end, limit);
}
public long count(){
return nativeCount(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeCount(long nativeQueryPtr, long start, long end, long limit);
// Deletion.
public long remove(long start, long end){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, start, end, Table.INFINITE);
}
public long remove(){
if (immutable) throwImmutable();
return nativeRemove(nativePtr, 0, Table.INFINITE, Table.INFINITE);
}
protected native long nativeRemove(long nativeQueryPtr, long start, long end, long limit);
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Diff Result
No diff
Case 83 - java_realmjava.rev_36612_8eb3b..Table.java
Base
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class automatically inherits from this class via the
* tightdb class generator.
*
* As an example, let's create a table to keep employee records for a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
// Allocates the underlying native table; returns 0 on allocation failure.
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable cell).
// NOTE(review): 'parent' is accepted but never stored, so this constructor
// does NOT keep the parent alive as the class javadoc suggests - confirm intended.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Finalizer releases the native object if the owner never closed it.
// NOTE(review): relies on GC timing; finalize() is deprecated in modern JDKs.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table exactly once; later calls are no-ops (nativePtr == 0).
// Serialized on a global mutex so finalizer and user threads cannot race.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid; otherwise ask the core.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Content equality: two Table instances are equal when the native core
 * reports their data as equal. Works for generated typed subclasses as
 * well, since they all wrap a native table pointer.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    // instanceof is false for null, so no separate null check is required.
    if (!(other instanceof Table)) {
        return false;
    }
    // NOTE(review): hashCode() is not overridden, so content-equal tables
    // may hash differently - avoid using Table as a hash-map key.
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to native code.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null (previously this
 *         surfaced as a bare NPE from {@code name.length()})
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name is null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a definition handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table; subtables cannot hand out definitions.
 */
public TableDefinition getSubTableDefinition(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the subtable column: a single step.
    return new SubTableDefinition(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column, see {@link ColumnType}
 * @param name column name (non-null, max 63 characters)
 * @return Index of the new column.
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public long addColumn(ColumnType type, String name) {
    // BUGFIX: schema changes are mutations; guard like clear()/remove()/updateFromSpec().
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void removeColumn(long columnIndex)
{
    // BUGFIX: schema changes are mutations; guard like the other mutators.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName new column name (non-null, max 63 characters)
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void renameColumn(long columnIndex, String newName) {
    // BUGFIX: schema changes are mutations; guard like the other mutators.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's schema from a TableSpec structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the full specification (column types and names) to apply to
 *            this table. (The previous javadoc documented non-existent
 *            columnType/columnName parameters.)
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 * Throws via throwImmutable() when called inside a read transaction.
 */
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the table's current schema as a TableSpec.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name (must not be null)
 * @return the index, -1 if not found
 */
public long getColumnIndex(String name) {
    final long total = getColumnCount();
    long index = 0;
    while (index < total) {
        if (name.equals(getColumnName(index)))
            return index;
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
public ColumnType getColumnType(long columnIndex)
{
// Native returns an int code; ColumnType maps it back to the enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Presumably replaces the row at rowIndex with the last row and drops the
 * last row (a delete that does not preserve row order) - confirm against
 * the native implementation before relying on it.
 */
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends a single empty row. Returns the value reported by the native
// layer (presumably the index of the new row - confirm).
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; rows must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row with the given values (one per column, in column order).
 *
 * @return the index at which the new row was inserted.
 */
public long add(Object... values) {
    // Appending is simply inserting at the current end of the table.
    final long position = size();
    insert(position, values);
    return position;
}
/**
 * Inserts a row at {@code rowIndex}, shifting existing rows at or after
 * that position down by one. The number and types of {@code values} must
 * match the table's columns; the row is committed with insertDone().
 *
 * @param rowIndex 0-based position for the new row; must be &lt;= size().
 * @param values   one value per column, in column order.
 * @throws IllegalArgumentException if the index is out of range, the
 *         value count differs from the column count, or a value is not
 *         compatible with its column's type.
 * @throws IllegalStateException if the table is immutable.
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: previously called value.getClass() unconditionally, which
            // threw NullPointerException for a null value instead of this
            // descriptive IllegalArgumentException.
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " +
                    (value == null ? "null" : value.getClass()) + ".");
        }
    }
    // Insert values column by column; the row only becomes visible at insertDone().
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // Core stores dates at second precision (epoch seconds).
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    insertDone();
}
// Recursively fills a just-inserted subtable cell from an Object[][]
// (one Object[] per subtable row). A null value leaves the subtable empty.
// NOTE(review): the temporary subtable is not closed here; its native
// resources are reclaimed via finalize() - confirm this is intended.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.insert(i, (Object[])rowArr);
}
}
}
/**
 * Replaces the row at {@code rowIndex} with the given values.
 * Implemented as remove-then-insert (not atomic, and any subtable content
 * in the old row is discarded).
 *
 * @param rowIndex 0-based index of the row to replace; must be &lt; size().
 * @param values   one value per column, in column order.
 * @throws IllegalArgumentException if the index is out of range, the
 *         value count differs from the column count, or a value is not
 *         compatible with its column's type.
 * @throws IllegalStateException if the table is immutable.
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: previously called value.getClass() unconditionally, which
            // threw NullPointerException for a null value instead of this
            // descriptive IllegalArgumentException (same fix as insert()).
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " +
                    (value == null ? "null" : value.getClass()) + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
// Low-level row building: each insertXxx() stages one cell of a row being
// built; the row is committed by a final insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Dates are stored at second precision (epoch seconds); a null date throws NPE.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
// Low-level: stage a String cell for a row being built; finish with insertDone().
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
// Low-level: stage a Mixed cell for a row being built.
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Low-level: stage a binary cell from a ByteBuffer.
// Only direct buffers (ByteBuffer.allocateDirect) are supported.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Low-level: stage a binary cell from a byte array.
// NOTE(review): check order differs from the ByteBuffer overload above
// (null check before the immutable check) - harmless but inconsistent.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Low-level: stage a subtable cell, optionally pre-filled from Object[][].
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row staged by the insertXxx() calls above.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Returns the long value of an integer cell.
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the boolean value of a cell.
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the float value of a cell.
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the double value of a cell.
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Core stores dates as epoch seconds; converted to milliseconds for java.util.Date.
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the binary cell contents as a byte array.
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Returns the Mixed value of a cell.
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a mixed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the row count of the subtable in the given cell without materializing it.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable in the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Sets the long value of an integer cell.
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
// Sets the boolean value of a cell.
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Sets the float value of a cell.
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
// Sets the double value of a cell.
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored at second precision (epoch seconds); a null date throws NPE.
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
// Sets the String value of a cell.
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Sets a binary cell from a byte array (no direct-buffer restriction).
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the new value; must not be null
 */
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value the amount added to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True when the column has a search index (see setIndex / lookup).
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
/** Sum of all values in an integer column. */
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
/** Maximum value of an integer column. */
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
/** Minimum value of an integer column. */
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
// FIX: parameter was misspelled 'columnnIndex' (JNI binds by method name and
// signature, so the rename is linkage-safe).
protected native long nativeMinimum(long nativePtr, long columnIndex);
/** Average of an integer column. */
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
/** Sum of a float column (widened to double). */
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Maximum value of a float column. */
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Minimum value of a float column. */
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// FIX: parameter was misspelled 'columnnIndex'.
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
/** Average of a float column. */
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
/** Sum of a double column. */
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Maximum value of a double column. */
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Minimum value of a double column. */
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// FIX: parameter was misspelled 'columnnIndex'.
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
/** Average of a double column. */
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given long value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
// Number of cells in the column equal to the given float value.
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
// Number of cells in the column equal to the given double value.
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
// Number of cells in the column equal to the given String value.
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Returns a new query bound to this table; the query inherits the
// table's immutability flag.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// The findFirstXxx methods below return the native search result -
// presumably the row index of the first match (and -1 when absent;
// confirm against the native implementation).
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Date comparison is done at second precision (epoch seconds).
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// Each findAllXxx returns a TableView over the matching rows; the view
// inherits this table's immutability flag.
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// Date comparison is done at second precision (epoch seconds).
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Fast lookup by value in column 0, which must be an indexed string column.
 * @throws RuntimeException when column 0 is not an indexed string column.
 */
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Bound searches in an integer column - presumably lower/upper bound in the
// binary-search sense, requiring the column to be sorted; confirm against
// the native implementation.
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a TableView of the column's distinct values (native semantics;
// presumably requires an indexed column - confirm).
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the core to optimize its internal storage; exact effect is native-side.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table as a JSON string (produced natively).
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** @return a string dump of the whole table (no row limit). */
@Override   // FIX: annotate the Object.toString() override.
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
/** @return a string dump limited to the first {@code maxRows} rows. */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** @return a string dump of a single row. */
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Uniform failure for any mutating call made during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/* TODO: add an isEqual(Table) convenience method. */
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
// (This is a second, duplicated copy of the same file content seen above.)
// Sentinel row count meaning "no limit"; used by toString()/nativeToString().
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; set to 0 once close() has run.
protected long nativePtr;
// True when the table belongs to a read transaction; mutators then call throwImmutable().
protected boolean immutable = false;
// test:
// Debug-only bookkeeping below. NOTE(review): TableCount is a static mutable
// counter updated without synchronization; its value is only indicative.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Load the native TightDB library before any native method can be invoked.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
// Safety net: releases the native table if the user never called close().
// NOTE(review): finalizers are unreliable for prompt release — callers should close() explicitly.
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the underlying native table exactly once; subsequent calls are no-ops.
// The global CloseMutex serializes closes across threads — presumably the native
// layer is not thread-safe for concurrent destruction (TODO confirm).
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated, all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
/**
 * Tells whether this table can still be used. A table becomes invalid once it
 * has been closed, or (for subtables) once a parent has been changed.
 *
 * @return true when the table is safe to operate on, false otherwise.
 */
public boolean isValid() {
    // A zeroed pointer means close() already ran; otherwise ask the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
@Override
public boolean equals(Object other) {
// Content-based equality delegated to the native core; two distinct Table
// objects wrapping equal data compare equal.
// NOTE(review): equals() is overridden without hashCode(), breaking the
// Object contract (equal tables may hash differently). A consistent hash
// would need native support — flagging rather than guessing.
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is passed to the native layer.
 *
 * @param name the proposed column name.
 * @throws NullPointerException if {@code name} is null (previously surfaced as a
 *         bare NPE from {@code name.length()} with no message).
 * @throws IllegalArgumentException if the name exceeds 63 characters.
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a definition object for the subtable column at {@code columnIndex},
 * allowing its schema to be edited.
 *
 * @param columnIndex index of a subtable column in this (root) table.
 * @throws UnsupportedOperationException if called on a subtable rather than a root table.
 */
public TableDefinition getSubTableDefinition(long columnIndex) {
    // Idiom: replaced "== false" comparison with a direct negation.
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the subtable column (single hop).
    return new SubTableDefinition(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
public void removeColumn(long columnIndex)
{
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Replaces this table's column layout with the one described by the given
 * specification. Supported column types - refer to @see ColumnType.
 * Registering a spec is only allowed on empty tables.
 *
 * @param tableSpec
 *            the specification describing the columns to create.
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Looks up the 0-based index of the column with the given name.
 *
 * @param name column name to search for.
 * @return the column's index, or -1 when no column has that name.
 */
public long getColumnIndex(String name) {
    final long columns = getColumnCount();
    // Linear scan over the column names; tables have few columns so this is fine.
    for (long index = 0; index < columns; ++index) {
        if (name.equals(getColumnName(index)))
            return index;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/**
 * Appends one empty row to the table.
 *
 * @return the index of the newly added row.
 */
public long addEmptyRow() {
    if (immutable) {
        throwImmutable();
    }
    return nativeAddEmptyRow(nativePtr, 1);
}
/**
 * Appends {@code rows} empty rows to the table.
 *
 * @param rows how many rows to add; must be positive.
 * @return the index of the first newly added row.
 * @throws IllegalArgumentException if {@code rows} is less than 1.
 */
public long addEmptyRows(long rows) {
    if (immutable) {
        throwImmutable();
    }
    if (rows < 1) {
        throw new IllegalArgumentException("'rows' must be > 0.");
    }
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a new row at {@code rowIndex}, shifting later rows down.
 *
 * @param rowIndex position for the new row; must be {@code <= size()}.
 * @param values   one value per column, in column order; each must be
 *                 compatible with the corresponding column's type.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if the index, value count, or any value's type is invalid.
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: value.getClass() used to NPE here when a null value failed
            // the type check, hiding the real error behind a NullPointerException.
            String got = (value == null) ? "null" : String.valueOf(value.getClass());
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + got + ".");
        }
    }
    // Insert values — each column of the pending row, then commit with insertDone().
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // Dates are stored with second precision (epoch seconds).
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            // matchObject() above is assumed to restrict value to byte[]/ByteBuffer —
            // TODO confirm; anything else is silently skipped here.
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    insertDone();
}
/**
 * Recursively fills the subtable cell at (columnIndex, rowIndex) from a nested
 * Object[][] of row values; a null value leaves the subtable empty.
 */
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    // insert rows in subtable recursively
    final Object[] rowValues = (Object[]) value;
    final Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    for (int row = 0; row < rowValues.length; ++row) {
        subtable.insert(row, (Object[]) rowValues[row]);
    }
}
/**
 * Replaces the row at {@code rowIndex} with the given values
 * (implemented as remove + insert, which also clears any subtables).
 *
 * @param rowIndex index of the row to overwrite; must be {@code < size()}.
 * @param values   one value per column, in column order.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if the index, value count, or any value's type is invalid.
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: value.getClass() used to NPE here when a null value failed
            // the type check, hiding the real error behind a NullPointerException.
            String got = (value == null) ? "null" : String.valueOf(value.getClass());
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + got + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary value (during row construction) from a direct ByteBuffer.
 *
 * @param data must be allocated with {@code ByteBuffer.allocateDirect(len)}.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws NullPointerException if {@code data} is null.
 * @throws RuntimeException if {@code data} is a non-direct buffer.
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // Cleanup: removed commented-out System.err debug prints.
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or, better yet, call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return Table the subtable at the requested cell
 */
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column.
 *
 * @param columnIndex the column to index; must be a string column.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if the column is not a string column.
 */
public void setIndex(long columnIndex) {
    if (immutable) {
        throwImmutable();
    }
    final ColumnType type = getColumnType(columnIndex);
    if (type != ColumnType.ColumnTypeString) {
        throw new IllegalArgumentException("Index is only supported on string columns.");
    }
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Renders the whole table (no row limit). */
@Override
public String toString() {
    // Idiom: added the missing @Override carried by equals()/finalize() in this file.
    return nativeToString(nativePtr, INFINITE);
}
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Left
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Constructs a Table base object. It can be used to register columns in this
 * table. Registering columns is allowed only for empty tables. It creates a
 * native reference to the underlying table and keeps hold of it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    // A zero pointer is the native layer's allocation-failure signal.
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
/**
 * Wraps an existing native table pointer (e.g. a subtable handle returned by
 * another native call).
 *
 * NOTE(review): the 'parent' argument is not stored, so it cannot keep the
 * parent alive against garbage collection — confirm whether that is intended.
 *
 * @param parent    the owning object (currently unused by this constructor)
 * @param nativePtr native handle to adopt; ownership passes to this Table
 * @param immutable true if mutating calls must be rejected (read transaction)
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Last-resort cleanup of the native table if the caller never invoked close().
// NOTE(review): finalize() is deprecated in later Java versions; callers
// should close() explicitly and treat this only as a safety net.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        // Always run Object.finalize(), even if close() throws.
        super.finalize();
    }
}
// Releases the native table and zeroes the pointer. Safe to call more than
// once; all close operations are serialized on a process-wide mutex shared
// via CloseMutex.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            // Already closed (or never successfully created) — nothing to do.
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        // Zeroing the pointer makes close() idempotent and lets isValid()
        // report false afterwards.
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated, all of its subtables are invalidated.
 * You can no longer perform any actions on an invalidated table, and if you try anyway, an exception is thrown.
 * The only method you can still call is 'isValid()'.
 */
public boolean isValid(){
    // Pointer cleared by close() means definitely invalid; otherwise defer
    // to the native layer's validity check.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Compares table contents via the native layer.
 *
 * NOTE(review): this class overrides equals() but defines no hashCode()
 * anywhere in this file, which breaks the equals/hashCode contract for
 * hash-based collections (two equal tables may land in different buckets).
 * A content-based hash would need native support, so this is flagged
 * rather than fixed here.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    if (other == null)
        return false;
    // Has to work for all the typed tables as well
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native layer.
 *
 * @param name the proposed column name; must be non-null and at most 63 characters
 * @throws IllegalArgumentException if the name is null or too long
 */
private void verifyColumnName(String name) {
    if (name == null) {
        // Fail fast with a clear message instead of the NPE that
        // name.length() used to throw.
        throw new IllegalArgumentException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 *
 * @param columnIndex 0-based index of a subtable column
 * @return a TableSchema rooted at the given column
 * @throws UnsupportedOperationException if called on a subtable rather than the root table
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    // Schema navigation must start from the root table.
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the requested column; one element per nesting level.
    long[] newPath = new long[] { columnIndex };
    return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column structure from a TableSpec specification.
 * For the supported column types, refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification describing the columns to create. Duplicate column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
    // Linear scan over column names; tables typically have few columns.
    final long columnCount = getColumnCount();
    for (long index = 0; index < columnCount; index++) {
        if (name.equals(getColumnName(index)))
            return index;
    }
    // No column carries the requested name.
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row holding the given values and returns its index.
 * Appending is simply an insert at the current end of the table.
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a new row at {@code rowIndex}, pushing existing rows down.
 *
 * @param rowIndex 0-based position for the new row; must be &lt;= size()
 * @param values   one value per column, in column order; each must be
 *                 compatible with the corresponding column type
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Validate every value before touching the native layer, so a bad row
    // never leaves a half-inserted native state.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Guard against value == null: calling getClass() on null threw an
            // NPE here that masked the real type-mismatch error.
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " +
                    (value == null ? "null" : value.getClass()) + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // The native layer stores seconds since the epoch, not milliseconds.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            else
                // Previously this case fell through silently, leaving the
                // native row insertion incomplete. matchObject() should have
                // rejected the value already, so this is a defensive failure.
                throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                        ". Binary columns accept byte[] or ByteBuffer, but got " +
                        (value == null ? "null" : value.getClass()) + ".");
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    // Commit the pending row to the native table.
    insertDone();
}
/**
 * Recursively fills a just-inserted subtable cell. 'value' is expected to be
 * an Object[] of rows, each row itself an Object[] of column values; null
 * means the subtable stays empty.
 */
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null)
        return;
    // The row count is not updated until insertDone(), so use the
    // insert-time accessor for the subtable handle.
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] rowValues = (Object[]) value;
    for (int row = 0; row < rowValues.length; row++) {
        subtable.insert(row, (Object[]) rowValues[row]);
    }
}
/**
 * Replaces the row at {@code rowIndex} with the given values.
 *
 * Implemented as remove + insert, so any subtables in the row are rebuilt.
 *
 * @param rowIndex 0-based index of the row to overwrite; must be &lt; size()
 * @param values   one value per column, in column order
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Guard against value == null: calling getClass() on null threw an
            // NPE here that masked the real type-mismatch error.
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " +
                    (value == null ? "null" : value.getClass()) + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary value supplied as a direct ByteBuffer.
 *
 * @param columnIndex column of the cell
 * @param rowIndex    row of the cell
 * @param data        buffer allocated with ByteBuffer.allocateDirect()
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws NullPointerException  if data is null
 * @throws RuntimeException      if the buffer is not direct
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // Only direct buffers expose a stable native address for the JNI layer.
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that, or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
// Parameter-name typo fixed ("columnnIndex"); JNI resolves natives by method
// name and signature, so parameter names are free to correct.
protected native long nativeMinimum(long nativePtr, long columnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
// Parameter-name typo fixed ("columnnIndex"); JNI resolves natives by method
// name and signature, so parameter names are free to correct.
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
// Parameter-name typo fixed ("columnnIndex"); JNI resolves natives by method
// name and signature, so parameter names are free to correct.
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
    // Precondition: column 0 must be an indexed string column.
    boolean hasStringIndex = this.hasIndex(0) && this.getColumnType(0) == ColumnType.ColumnTypeString;
    if (!hasStringIndex)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;   // sentinel passed to nativeToString(): no row limit
protected long nativePtr;                 // handle to the native C++ table; 0 once closed
protected boolean immutable = false;      // true inside a read transaction; mutators then throw
// Debug-only bookkeeping (see DEBUG prints in constructors/close):
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
    // Make sure the native TightDB library is loaded before any native call.
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    // A zero pointer means the native allocation failed.
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
/**
 * Wraps an existing native table pointer (e.g. a subtable handle).
 *
 * @param parent    owning object; kept only by the caller's reference chain
 * @param nativePtr native table handle, already allocated by the caller
 * @param immutable whether the table was obtained inside a read transaction
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
@Override
public void finalize() throws Throwable {
    // Safety net: release the native table if the user never called close().
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Releases the native table exactly once; subsequent calls are no-ops.
// Serialized through a global mutex so finalizer and explicit close cannot race.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            // Already closed (or never successfully created) — nothing to free.
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        // Zero the handle so a later close()/finalize() is ignored.
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid; otherwise ask native side.
    if (nativePtr == 0)
        return false;
    return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * Works for all generated typed tables as well, hence the instanceof test.
 */
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // A null reference fails instanceof, so no separate null check is needed.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a proposed column name.
 *
 * @param name the column name to validate
 * @throws IllegalArgumentException if the name is null or longer than 63 characters
 */
private void verifyColumnName(String name) {
    // Previously a null name surfaced as a bare NullPointerException from
    // name.length(); fail with a descriptive message instead.
    if (name == null) {
        throw new IllegalArgumentException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at the given index.
 * Only valid on a root table; subtable schemas are addressed via paths
 * starting from the root.
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    return new SubTableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column, see {@code ColumnType}
 * @param name name of the column (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Consistency fix: every other mutating method (clear, updateFromSpec,
    // remove, ...) guards against read transactions; schema changes must too.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName     the new name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's specification from a TableSpec structure.
 * Supported types - refer to {@code ColumnType}.
 *
 * @param tableSpec
 *            the specification describing the columns (name + type) to apply.
 *            Duplicate column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/**
 * Returns the table's specification (column names and types).
 */
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the column with the given name.
 * Linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columns = getColumnCount();
    for (long index = 0; index != columns; index++) {
        if (name.equals(getColumnName(index)))
            return index;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    // Native side returns the raw int type code; map it to the enum.
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function.
 * NOTE(review): by its name this replaces the row at rowIndex with the last
 * row (avoiding a full shift) — confirm against the native implementation.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; returns the index of the new row. */
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}
/** Appends 'rows' empty rows; returns the index reported by the native layer. */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row built from the given values (one per column, in column order).
 *
 * @return the index of the newly added row
 */
public long add(Object... values) {
    long rowIndex = size();
    insert(rowIndex, values);
    return rowIndex;
}
/**
 * Inserts a row at rowIndex built from the given values, one value per
 * column in column order. All values are type-checked against the column
 * types before anything is written, so a bad argument leaves the table
 * unmodified.
 *
 * @param rowIndex position of the new row; must be &lt;= size()
 * @param values   one value per column, compatible with the column types
 * @throws IllegalArgumentException if the index, value count, or any value
 *         type is invalid
 * @throws IllegalStateException if called during a read transaction
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Validate every value first so no partial row is ever written.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            // Accept any Number, consistent with ColumnTypeInt above; the
            // previous (Float) cast threw ClassCastException for other
            // Number subtypes.
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Number)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Number)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // Dates are stored with second precision on the native side.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            else
                // Previously an unexpected type fell through silently, leaving
                // this column unset and desynchronizing the row insert.
                throw new IllegalArgumentException("Binary column " + String.valueOf(columnIndex) +
                        " expects byte[] or ByteBuffer, but got " + value.getClass() + ".");
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    // Commit the pending row on the native side.
    insertDone();
}
// Recursively fills a freshly-inserted subtable cell from a nested
// Object[][] (one Object[] per subtable row). A null value leaves the
// subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.insert(i, (Object[])rowArr);
        }
    }
}
// Replaces the row at rowIndex with the given values. All values are
// verified up-front; the row is then removed and re-inserted, so the
// validation must fully pass before any mutation happens.
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
// Low-level per-type insert wrappers. Each guards against read
// transactions and delegates straight to the native layer. A full row
// insert must be finished with insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Dates are stored with second precision (epoch seconds).
    nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // Only direct buffers can be handed to JNI without copying.
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
    if (immutable) throwImmutable();
    nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
    insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Finalizes a pending row insert on the native side.
public void insertDone() {
    if (immutable) throwImmutable();
    nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Per-type cell accessors; each delegates directly to the native layer.
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
    // Stored as epoch seconds; Date wants milliseconds.
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a mixed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
    // The returned Table wraps a fresh native handle; immutability is inherited.
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the row count of the subtable in the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable in the given cell. */
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Per-type cell mutators; each guards against read transactions and
// delegates to the native layer.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Stored with second precision (epoch seconds).
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    // Only direct buffers can be handed to JNI without copying.
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/** Creates a search index on a string column; required by lookup(). */
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the given column has a search index. */
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// One sum/max/min/average family per numeric column type; all delegate
// straight to the native layer.
// Integers
@Override
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to 'value', per value type.
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Starts a new query on this table. */
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: row index of the first match, per value type.
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
    // Compared with second precision, matching how dates are stored.
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: view over all matching rows, per value type.
@Override
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with an index.
@Override
public long lookup(String value) {
    // Fast indexed lookup; only defined for an indexed string column 0.
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
    // NOTE(review): presumably the first row index with value >= 'value' on a
    // sorted integer column — confirm against the native implementation.
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
    // NOTE(review): presumably the first row index with value > 'value' —
    // confirm against the native implementation.
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/** Returns a view of rows with unique values in the given column. */
public TableView distinct(long columnIndex) {
    return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize internal storage (mutating operation, hence the guard).
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
    // INFINITE (-1) means "no row limit".
    return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Right
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
public static final long INFINITE = -1;   // sentinel meaning "no row limit" (see toString(long))
protected long nativePtr;                 // pointer to the underlying native table; 0 after close()
protected boolean immutable = false;      // true while the table belongs to a read transaction
// test:
protected int tableNo;                    // debug-only: instance id assigned at construction
protected boolean DEBUG = false;          // debug-only: enables lifecycle tracing on stderr
protected static int TableCount = 0;      // debug-only: count of live Table instances

static {
    // Ensure the JNI library is loaded before any native method can be called.
    TightDB.loadLibrary();
}

/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();

// Wraps an existing native table pointer (e.g. a subtable).
// NOTE(review): 'parent' is not stored, so it does not actually keep the
// owning object reachable — confirm whether that is intentional.
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}

// Safety net: release the native table if the user never closed it explicitly.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}

// Idempotent release of the native table. Serialized on a global mutex so a
// concurrent finalizer and an explicit close cannot double-free the pointer.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;   // mark closed so isValid() and repeated close() behave correctly
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is still valid.
 * Whenever a Table/subtable is changed/updated, all of its subtables are
 * invalidated: no further operations are allowed on them (an exception is
 * thrown if attempted). The only method that may still be called is isValid().
 */
public boolean isValid(){
    // A zero pointer means the table was closed; otherwise ask the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Content equality, delegated to the native layer so it works for all typed
// table subclasses as well.
// NOTE(review): equals is overridden without hashCode, so Table instances are
// unsafe as hash-map keys — confirm whether that is acceptable here.
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // instanceof is false for null, so no separate null check is needed.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Guard shared by addColumn()/renameColumn(): the native layer caps column
// names at 63 characters.
private void verifyColumnName(String name) {
    if (name.length() <= 63) {
        return;
    }
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
// Returns a definition handle for the subtable column at columnIndex.
// Only allowed on a root table; subtables must go through their root.
public TableDefinition getSubTableDefinition(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    // The path to the column, starting from this (root) table.
    return new SubTableDefinition(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column, see {@link ColumnType}
 * @param name name of the new column (max 63 characters)
 * @return Index of the new column.
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the name is too long
 */
public long addColumn (ColumnType type, String name) {
    // Fix: schema changes are mutations; every other mutator guards against
    // immutable tables, but this method previously did not.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column from the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void removeColumn(long columnIndex) {
    // Fix: schema changes are mutations; guard like every other mutator.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName new column name (max 63 characters)
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the name is too long
 */
public void renameColumn(long columnIndex, String newName) {
    // Fix: schema changes are mutations; guard like every other mutator.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's schema from a {@link TableSpec} structure.
 * Supported column types - refer to {@link ColumnType}.
 *
 * @param tableSpec
 *            specification of the columns (type and name per column;
 *            duplicate column names are not allowed)
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);

// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table contains any rows.
 *
 * @return true if the table has no rows, otherwise false.
 */
public boolean isEmpty() {
    long rowCount = size();
    return rowCount == 0;
}
/**
 * Clears the table, i.e., deletes all rows in the table.
 */
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);

// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);

// Returns this table's current schema as a TableSpec structure.
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);

/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name (must not be null)
 * @return the index, -1 if not found
 */
public long getColumnIndex(String name) {
    final long columns = getColumnCount();
    for (long index = 0; index != columns; ++index) {
        // name.equals(...) keeps the original NPE behavior for a null name.
        if (name.equals(getColumnName(index)))
            return index;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
public ColumnType getColumnType(long columnIndex)
{
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);

/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table; subsequent rows shift up by one.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);

// Removes the last row of the table.
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);

/**
 * EXPERIMENTAL function.
 * NOTE(review): presumably replaces the row at rowIndex with the last row
 * (avoiding the shift a plain remove causes) — confirm against native docs.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);

// Row Handling methods.

// Appends a single empty (default-valued) row; returns its index.
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}

// Appends 'rows' empty rows; returns the index of the first appended row.
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);

/**
 * Appends the specified row to the end of the table.
 * NOTE(review): size() and addAt() are not executed atomically — concurrent
 * mutation could race here; confirm the intended threading model.
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    long rowIndex = size();
    addAt(rowIndex, values);
    return rowIndex;
}

/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * All values are validated BEFORE any native insert call is issued, so a
 * validation failure leaves the table unchanged.
 * @param rowIndex insertion position, 0 <= rowIndex <= size()
 * @param values one value per column, in column order
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // First pass: validate every value against its column type (no mutation yet).
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Second pass: issue one native insert per column, then commit the row
    // with nativeInsertDone() — the native layer requires this exact protocol.
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // Native dates have second precision.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively fills a just-inserted subtable cell from a nested Object[][]
// (one Object[] per subtable row). A null value means "leave the subtable empty".
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    // Acquire the subtable first (as before), then cast — preserves the
    // original exception ordering for malformed input.
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] subRows = (Object[]) value;
    for (int subRowIndex = 0; subRowIndex < subRows.length; ++subRowIndex) {
        subtable.addAt(subRowIndex, (Object[]) subRows[subRowIndex]);
    }
}
/**
 * Returns a view over all rows, sorted by the specified column in the
 * default order.
 * @param columnIndex column to sort by
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex){
    TableView view = this.where().findAll();
    view.sort(columnIndex);
    return view;
}

/**
 * Returns a view over all rows, sorted by the specified column and order.
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex, Order order){
    TableView view = this.where().findAll();
    view.sort(columnIndex, order);
    return view;
}
/**
 * Replaces the row at the specified position with the specified row.
 * All values are validated BEFORE the old row is removed, so a validation
 * failure leaves the table unchanged.
 *
 * @param rowIndex index of the row to replace (0-based, must be < size())
 * @param values   one value per column, in column order
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException on bad index, wrong value count, or type mismatch
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Bug fix: 'value' may legitimately be null here. The previous code
            // called value.getClass() unconditionally, throwing a
            // NullPointerException instead of the intended diagnostic
            // (addAt() already handled null correctly).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();

//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable.
public InternalMethods getInternalMethods(){
    return this.internal;
}

// Holds the low-level per-cell insert methods that must be publicly available
// for AbstractTable (the generated typed wrappers). Not intended for the
// dynamic interface; obtain via getInternalMethods(). Each insert must be
// followed by insertDone() to commit the row — see addAt() for the protocol.
public class InternalMethods{
    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        // Native dates have second precision.
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }
    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }
    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */
    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }
    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}

// Native per-cell insert primitives used by addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//

public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);

public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);

public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);

public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);

public Date getDate(long columnIndex, long rowIndex) {
    // Native dates are stored in seconds; convert back to millisecond epoch.
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);

public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}

// Returns the runtime type of the value stored in a MIXED cell.
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
public Table getSubTable(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);

// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);

// Returns the row count of the subtable in the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);

// Removes all rows from the subtable in the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);

//
// Setters
//

public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);

public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);

public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);

public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);

public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Native dates have second precision.
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);

public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);

/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);

/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data the value to store (must not be null)
 */
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);

/**
 * Adds 'value' to every cell in the given (integer) column.
 *
 * @param columnIndex column index of the cells to adjust
 * @param value amount to add to each cell
 */
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);

// Builds a search index on a string column (required for lookup()).
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);

public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the index of the first row whose cell in 'columnIndex'
// equals 'value'. (The not-found return value is determined by the native
// layer -- TODO confirm whether it is -1.)
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are stored natively with second precision; sub-second parts of
// 'date' are truncated by the integer division.
public long findFirstDate(long columnIndex, Date date) {
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a view of all rows whose cell in 'columnIndex' equals
// 'value'. The view carries over this table's immutability flag.
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// See findFirstDate for the seconds conversion.
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/* // Requires that the first column is a string column with unique values. Also index required?
@Override
public long lookup(String value) {
if (this.getColumnType(0) != ColumnType.STRING)
throw new RuntimeException("lookup() requires a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value); */
// Experimental feature
// Lower/upper bound lookups on an integer column. NOTE(review): bound
// searches normally assume the column is sorted -- confirm against the
// native implementation before relying on these.
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing the distinct values of the given column.
public TableView getDistinctView(long columnIndex) {
    return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize the table's internal storage.
// Mutating call: rejected while the table is immutable.
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);

// Serializes the entire table to a JSON string via the native layer.
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/**
 * Returns a string representation of the entire table (no row limit).
 */
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}

/**
 * Returns a string representation of the table, limited to the first rows.
 *
 * @param maxRows maximum number of rows to render, or INFINITE for all
 */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
// Returns a string representation of a single row.
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);

// Shared guard invoked by every mutating method when the table is immutable.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
 TODO: Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
 * 1. Employee.java: Represents one employee of the employee table, i.e., a single row. Getter/setter
 * methods are declared, through which you can set and get values
 * for a particular employee.
 * 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
 * of rows. The class inherits from this Table class as described above.
 * It has all the high-level methods to manipulate Employee objects in the table.
 * 3. EmployeeView.java: Represents a view of the employee table, i.e., the result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {

    // Sentinel passed to nativeToString meaning "render all rows".
    public static final long INFINITE = -1;

    // Pointer to the underlying native (C++) table; 0 after close().
    protected long nativePtr;
    // When true, all mutating methods throw (see throwImmutable:
    // "Mutable method call during read transaction.").
    protected boolean immutable = false;

    // test:
    // Debug-only bookkeeping of live Table instances (see DEBUG blocks below).
    protected int tableNo;
    protected boolean DEBUG = false;
    protected static int TableCount = 0;

    static {
        // Ensure the native library is loaded before any native call is made.
        TightDB.loadLibrary();
    }
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();

// Wraps an existing native table pointer (used for e.g. subtables).
// 'parent' is accepted but not stored here -- presumably it exists to keep
// the parent reachable at call sites; verify against callers.
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Releases the native table when the object is garbage collected.
// NOTE(review): finalize() is deprecated in modern JDKs; consider an
// explicit close()/Cleaner migration when the toolchain allows.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}

// Frees the native table exactly once. Serialized on a global mutex so
// concurrent finalization cannot double-free; nativePtr == 0 marks "closed".
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid.
    if (nativePtr == 0)
        return false;
    return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Compares this table with another object for equality. The content
 * comparison is delegated to the native layer, so it also works for all
 * generated typed-table subclasses.
 *
 * NOTE(review): no matching hashCode() override is visible in this file;
 * verify the equals/hashCode contract before using tables as hash keys.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    // instanceof is false for null, so null arguments are rejected here too.
    if (!(other instanceof Table)) {
        return false;
    }
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is passed to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null (previously this
 *         surfaced as an unexplained NPE from {@code name.length()})
 * @throws IllegalArgumentException if {@code name} exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
// Returns a definition object for the subtable column at 'columnIndex'.
// Only legal on a root table; subtables must go through their root.
public TableDefinition getSubTableDefinition(long columnIndex) {
    if(nativeIsRootTable(nativePtr) == false)
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    long[] newPath = new long[1];
    newPath[0] = columnIndex;
    return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);

/**
 * Add a column to the table dynamically.
 *
 * @param type the data type of the new column
 * @param name the column name (max 63 characters, see verifyColumnName)
 * @return Index of the new column.
 */
public long addColumn (ColumnType type, String name) {
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 */
public void removeColumn(long columnIndex) {
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);

/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column
 * @param newName the new column name (max 63 characters)
 */
public void renameColumn(long columnIndex, String newName) {
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);

/**
 * Updates the table's column structure from a table specification.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            specification describing the columns (names and types) to apply
 * @throws IllegalStateException if the table is immutable
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);

/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
public boolean isEmpty() {
    return size() == 0;
}

/**
 * Clears the table i.e., deleting all rows in the table.
 *
 * @throws IllegalStateException if the table is immutable
 */
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);

// Returns the table's full column specification (names and types).
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);

/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Looks up the 0-based index of the column with the given name by scanning
 * the column names in order.
 *
 * @param name column name to look for
 * @return the column index, or -1 if no column has that name
 */
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
public ColumnType getColumnType(long columnIndex)
{
    // The native layer reports an int code; map it to the enum.
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 * @throws IllegalStateException if the table is immutable
 */
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);

// Removes the last row of the table.
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);

/**
 * EXPERIMENTAL function
 * Replaces the row at rowIndex with the last row, per the native call's
 * name -- TODO confirm exact semantics against the native implementation.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row; returns its row index (per nativeAddEmptyRow).
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}

// Appends 'rows' empty rows; 'rows' must be at least 1.
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that
 * row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex insertion position; must be <= size()
 * @param values one value per column, each compatible with its column type
 * @throws IllegalStateException if the table is immutable
 * @throws IllegalArgumentException if the index, the number of values, or a
 *         value's type is invalid
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    validateRowIndexForInsert(rowIndex);
    ColumnType colTypes[] = validateValueTypes(values);
    // Insert values
    for (int columnIndex = 0; columnIndex < colTypes.length; columnIndex++) {
        insertValue(columnIndex, rowIndex, values[columnIndex], colTypes[columnIndex]);
    }
    // Insert done. Use native, no need to check for immutable again here.
    nativeInsertDone(nativePtr);
}

// Verifies that rowIndex is a legal insertion position (0..size()).
private void validateRowIndexForInsert(long rowIndex) {
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
}

// Verifies that 'values' holds exactly one entry per column and that every
// entry is compatible with its column's type; returns the types for reuse.
private ColumnType[] validateValueTypes(Object... values) {
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // String representation of the provided value type; null-safe.
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    return colTypes;
}

// Writes one cell using the native insert call that matches the column type.
// Dates are stored with second precision (millis / 1000).
private void insertValue(long columnIndex, long rowIndex, Object value, ColumnType colType) {
    switch (colType) {
    case BOOLEAN:
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
        break;
    case INTEGER:
        nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
        break;
    case FLOAT:
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
        break;
    case DOUBLE:
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
        break;
    case STRING:
        nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
        break;
    case DATE:
        nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
        break;
    case MIXED:
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
        break;
    case BINARY:
        nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
        break;
    case TABLE:
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, value);
        break;
    default:
        throw new RuntimeException("Unexpected columnType: " + String.valueOf(colType));
    }
}
// Recursively fills a just-inserted subtable cell from a nested Object[]
// (one Object[] per subtable row). A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.addAt(i, (Object[])rowArr);
        }
    }
}
/**
 * Returns a view of all rows sorted by the specified column using the
 * default order.
 *
 * @param columnIndex column to sort by
 * @return a sorted view over the whole table
 */
public TableView getSortedView(long columnIndex){
    TableView sorted = this.where().findAll();
    sorted.sort(columnIndex);
    return sorted;
}

/**
 * Returns a view of all rows sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order sort direction
 * @return a sorted view over the whole table
 */
public TableView getSortedView(long columnIndex, Order order){
    TableView sorted = this.where().findAll();
    sorted.sort(columnIndex, order);
    return sorted;
}
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, each compatible with its column type
 * @throws IllegalStateException if the table is immutable
 * @throws IllegalArgumentException if the index, the number of values, or a
 *         value's type is invalid
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Null-safe type description: previously value.getClass() threw
            // an NPE when a null value failed the type check (addAt already
            // handled this case).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
// Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();

// Returns the InternalMethods instance with public internal methods.
// Should only be called by AbstractTable.
public InternalMethods getInternalMethods(){
    return this.internal;
}
// Holds methods that must be publicly available for AbstractClass.
// Should not be called when using the dynamic interface. The methods can be
// accessed by calling getInternalMethods() in the Table class.
public class InternalMethods{

    // Raw cell inserts. Each checks immutability, then delegates to the
    // corresponding native insert; the caller must finish the row with
    // insertDone().
    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }

    // Dates are stored with second precision (millis / 1000).
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }

    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }

    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */

    // Inserts a binary cell; null is rejected (use an empty array instead).
    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }

    // Inserts a subtable cell and fills it from the nested Object[][].
    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }

    // Completes a row started with the insert* calls above.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
// Native cell-insert primitives backing addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Cell getters: each returns the value of the cell at (columnIndex, rowIndex)
// for the matching column type.
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);

public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);

public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);

public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);

// Dates are stored natively in seconds; convert back to milliseconds.
public Date getDate(long columnIndex, long rowIndex) {
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);

public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}

// Returns the runtime type of the value stored in a mixed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
public Table getSubTable(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);

// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);

// Returns the row count of the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);

// Removes all rows of the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Cell setters: each checks immutability, then overwrites the cell at
// (columnIndex, rowIndex) for the matching column type.
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);

public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);

public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);

public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);

// Dates are stored with second precision; sub-second parts are truncated.
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);

public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/

// Overwrites a binary cell; null is rejected.
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);

/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the value to store; must not be null
 */
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount to add to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);

// Creates a search index on the given column; only string columns are
// supported.
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);

// Reports whether the given column has a search index.
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate delegates to the native layer, operating on all rows of the
// given column. (Parameter name typos "columnnIndex" in three native
// declarations fixed; Java parameter names do not affect JNI linkage.)

// Integers
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);

public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);

public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnIndex);

public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);

// Floats
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);

public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);

public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Counts the cells in the given column that are equal to 'value', one
// overload per column type.
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);

public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);

public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);

@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: row index of the first row whose cell in 'columnIndex' equals
// the given value (no-match result is defined by the native layer).
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Date cells use second precision: milliseconds are truncated here.
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: TableView over every row whose cell in 'columnIndex' equals the
// given value; the view inherits this instance's immutability flag.
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// Second-precision date comparison, matching findFirstDate above.
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/* // Requires that the first column is a string column with unique values. Also index required?
@Override
public long lookup(String value) {
if (this.getColumnType(0) != ColumnType.STRING)
throw new RuntimeException("lookup() requires a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value); */
// Experimental feature

// Lower/upper bound of 'value' within an integer column, computed natively
// (exact semantics defined by the core library).
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//

// View containing rows with distinct values in the given column.
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize

// Asks the native layer to optimize internal storage. Mutating call, so it
// is rejected during a read transaction.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);

// Serializes the whole table to a JSON string.
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);

// Human-readable dump; the no-arg form prints all rows (INFINITE limit).
// NOTE(review): toString() is missing @Override (it overrides Object.toString).
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);

// Human-readable dump of a single row.
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);

// Uniform error for mutating calls attempted on an immutable instance.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
MergeMethods
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class produced by the tightdb-class generator
* automatically inherits from this class.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel meaning "no row limit" for toString(maxRows).
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; set to 0 once close() has run.
protected long nativePtr;
// True when this wrapper belongs to a read transaction; mutating calls then
// throw IllegalStateException via throwImmutable().
protected boolean immutable = false;
// test: debug-only bookkeeping; the fields below are used only when DEBUG is true.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native TightDB library once per classloader.
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();

// Wraps an existing native table (e.g. a subtable or a group member).
// NOTE(review): 'parent' is not stored here -- presumably it exists to keep
// the owner reachable at call sites; confirm intended lifetime management.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Finalizer releases the native object if close() was never called.
// NOTE(review): finalization is deprecated in modern Java; freeing native
// memory from the GC is best-effort only.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}

// Frees the native table exactly once. Guarded by a process-wide mutex so an
// explicit close and the finalizer cannot race; nativePtr == 0 marks "already
// closed" and makes the call idempotent.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all its subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Value equality delegated to a native table comparison. Any Table instance
// (including typed-table subclasses) is accepted via the instanceof check.
// NOTE(review): hashCode() is not overridden alongside equals(), which breaks
// the equals/hashCode contract for hash-based collections. A correct hash
// would need native content hashing -- flagging rather than guessing a fix.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Rejects column names longer than the current native limit.
 *
 * @param name the proposed column name.
 * @throws IllegalArgumentException if the name exceeds 63 characters.
 */
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() > maxNameLength) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table; subtables cannot navigate their own schema.
 *
 * @param columnIndex index of the subtable column.
 * @return a SubTableSchema rooted at this table's native pointer.
 * @throws UnsupportedOperationException if called on a subtable.
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    // Idiom fix: '!cond' instead of 'cond == false'.
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the requested column (a single hop).
    long[] newPath = new long[] { columnIndex };
    return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column (see ColumnType).
 * @param name column name; limited to 63 characters by verifyColumnName().
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename.
 * @param newName new column name; limited to 63 characters.
 */
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates a table specification from a Table specification structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec the specification (column names and types) to apply
 *        to this table. Rejected during a read transaction.
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 * Rejected during a read transaction.
 */
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the native-backed specification (column names/types) of this table.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        ++index;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
// The native layer reports an int code; map it onto the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table. Rejected during a read transaction.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Replaces the row at 'rowIndex' with the last row, then shrinks the table
 * by one (native "move last over" delete; does not preserve row order).
 */
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.

// Appends one empty row; the return value comes from the native call --
// presumably the new row's index (TODO confirm against the native layer).
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);

// Appends a row built from 'values' (one per column) and returns its index.
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * @param rowIndex index to insert at; must be &lt;= size().
 * @param values one value per column, in column order; each must be
 *        compatible with the column's type (see ColumnType.matchObject).
 * @throws IllegalArgumentException on a bad index, wrong value count,
 *         or a type mismatch.
 */
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Validate every value before inserting anything, so a type error leaves
// the table unmodified.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Date cells are stored with second precision; millis truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Insert the subtable cell first, then fill it recursively.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills the subtable cell at (rowIndex, columnIndex) with the
// rows of 'value' (expected to be an Object[] of row arrays); a null value
// leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column by the default order
 * @param columnIndex the column to sort by.
 * @return a TableView over all rows, sorted by the given column.
 */
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
 * Returns a view sorted by the specified column and order
 * @param columnIndex the column to sort by.
 * @param order sort direction (see TableView.Order).
 * @return a TableView over all rows, sorted accordingly.
 */
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
/**
 * Replaces the row at {@code rowIndex} with the given values
 * (implemented as remove + insert; clears any subtables in the row).
 *
 * @param rowIndex index of the row to replace; must be &lt; size().
 * @param values one value per column, in column order.
 * @throws IllegalArgumentException on a bad index, wrong value count,
 *         or a type mismatch.
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Fix: guard against a null value. Previously value.getClass() threw
            // an unintended NullPointerException here instead of the intended
            // IllegalArgumentException; addAt() already handles null this way.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods {
// Each insert* wrapper rejects mutation during a read transaction, then
// delegates to the matching native insert for one cell at
// (columnIndex, rowIndex). insertDone() must be called after a full row.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
// Date cells are stored with second precision; millis truncated here.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
// Null is rejected explicitly; pass an empty array for an empty cell.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
// Inserts a subtable cell and recursively fills it from 'values'.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Finalizes a row built with the insert* calls above.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// JNI declarations backing the insert operations above (one per column type).
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//

// Cell getters: each reads one cell at (columnIndex, rowIndex) via JNI.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Date cells are stored as seconds; scaled back to millis for java.util.Date.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
// NOTE(review): nativeGetByteBuffer is not declared in this excerpt --
// presumably declared elsewhere in this file; confirm.
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Mixed cells: value plus its runtime type.
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that, or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable cell at (columnIndex, rowIndex).
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable cell; rejected during a read transaction.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//

// Cell setters: each rejects mutation during a read transaction, then
// writes one cell at (columnIndex, rowIndex) via JNI.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Date cells are stored with second precision; millis truncated.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
// byte[] variant of the binary setter; null is rejected explicitly.
// NOTE(review): nativeSetByteBuffer is not declared in this excerpt --
// presumably declared elsewhere in this file; confirm.
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the Mixed value to store; must not be null.
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in an integer column.
 *
 * @param columnIndex column index of the cell
 * @param value amount to add to every cell in the column.
 */
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a string column; other column types are rejected.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//

// Integers

// Fix: the wrappers called nativeSum/nativeMaximum/nativeMinimum/nativeAverage,
// but the JNI methods declared below (and the naming used by the sibling class
// earlier in this file) are nativeSumInt/nativeMaximumInt/nativeMinimumInt/
// nativeAverageInt. Calls aligned to the adjacent declarations.

/** Returns the sum of all values in an integer column. */
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}

/** Returns the maximum value in an integer column. */
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}

/** Returns the minimum value in an integer column. */
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}

/** Returns the average of all values in an integer column. */
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}

protected native long nativeSumInt(long nativePtr, long columnIndex);
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
// Fixed misspelled parameter name ("columnnIndex").
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
/** Returns the sum of all values in the given float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Returns the maximum value in the given float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Returns the minimum value in the given float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex); // param typo "columnnIndex" fixed
/** Returns the average of all values in the given float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
/** Returns the sum of all values in the given double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Returns the maximum value in the given double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Returns the minimum value in the given double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex); // param typo "columnnIndex" fixed
/** Returns the average of all values in the given double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
/** Counts the cells in the given long column that equal {@code value}. */
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
/** Counts the cells in the given float column that equal {@code value}. */
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
/** Counts the cells in the given double column that equal {@code value}. */
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
/** Counts the cells in the given string column that equal {@code value}. */
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Returns a query object for building and running searches against this table. */
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: locate the first row whose cell in the given column equals the value.
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
    // Dates cross the JNI boundary as seconds since the epoch (millis truncated).
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a view over every row whose cell in the given column equals the value.
@Override
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
    // Same second-precision conversion as findFirstDate().
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Looks up {@code value} in column 0, which must be an indexed string column.
 *
 * @param value the string to search for
 * @return the result of the native lookup
 * @throws RuntimeException if column 0 is not an indexed string column
 */
@Override
public long lookup(String value) {
    // Fixed: compare against ColumnType.STRING, the constant used everywhere else
    // in this class (see setIndex()); "ColumnType.ColumnTypeString" does not match
    // the enum naming used here.
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
// Experimental feature
// NOTE(review): lowerBound/upperBound presumably mirror C++ lower_bound/upper_bound
// and assume the column is sorted — confirm against the native implementation.
@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
//
/** Returns a view produced by the native "distinct" operation on the given column. */
public TableView getDistinctView(long columnIndex) {
    return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
/** Asks the native layer to optimize the table's internal storage. */
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the table to a JSON string via the native layer. */
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Returns a string dump of the table, passing INFINITE (-1) as the row limit. */
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
/** Returns a string dump of at most {@code maxRows} rows. */
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Returns a string dump of a single row. */
@Override
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Called by every mutator when the table is accessed inside a read transaction.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of the employees in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table, i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high-level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents a view of the employee table, i.e., the result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Passed as maxRows by toString(); presumably means "no row limit" — see nativeToString().
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 after close().
protected long nativePtr;
// True when the table is accessed inside a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
protected int tableNo;          // per-instance id, assigned only when DEBUG is on
protected boolean DEBUG = false;
protected static int TableCount = 0;  // live-instance counter, maintained only when DEBUG is on
static {
    // Ensure the native TightDB library is loaded before any native call.
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table object could not be allocated
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
/**
 * Wraps an existing native table pointer (used e.g. by getSubTable()).
 * NOTE(review): {@code parent} is accepted but never stored, so it does not
 * keep the owning object reachable — confirm whether that is intended.
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Last-resort cleanup of the native object if close() was never called explicitly.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Frees the native table object. Idempotent: a second call is ignored because
// nativePtr is zeroed under the shared CloseMutex.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/**
 * Checks whether this Table is still valid.
 * Whenever a table or subtable is changed/updated, all of its subtables are
 * invalidated; from then on, isValid() is the only method that may safely be
 * called on them.
 */
public boolean isValid() {
    // A closed table (nativePtr == 0) is never valid; otherwise ask the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * Uses instanceof (not exact class comparison) so every generated typed-table
 * subclass compares correctly; a null argument fails the instanceof test.
 */
@Override
public boolean equals(Object other) {
    if (other == this) {
        return true;
    }
    if (!(other instanceof Table)) {
        return false;
    }
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the 63 characters the implementation supports.
private void verifyColumnName(String name) {
    if (name.length() >= 64) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema object addressing the subtable column at {@code columnIndex}.
 * Only valid on a root table; subtables must go through their root.
 *
 * @param columnIndex index of the subtable column
 * @throws UnsupportedOperationException if this table is itself a subtable
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    // Idiom fix: was "nativeIsRootTable(nativePtr) == false".
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the addressed column; a single hop here.
    long[] newPath = { columnIndex };
    return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters)
 * @return Index of the new column.
 */
// NOTE(review): unlike most mutators in this class, there is no 'immutable'
// check here — confirm whether a schema change during a read transaction
// should throw like clear()/remove() do.
@Override
public long addColumn (ColumnType type, String name) {
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName new column name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column layout from a TableSpec structure.
 * Supported types - refer to {@link ColumnType}.
 * (Javadoc fixed: the previous version documented non-existent
 * columnType/columnName parameters.)
 *
 * @param tableSpec specification describing the columns of this table
 * @throws IllegalStateException if called during a read transaction
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 *
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the table's column specification as reported by the native layer. */
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    // Linear scan over column names; column counts are typically small.
    long columnCount = getColumnCount();
    for (long i = 0; i < columnCount; i++) {
        if (name.equals(getColumnName(i))) {
            return i;
        }
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * NOTE(review): presumably replaces the row at rowIndex with the current last
 * row ("move last over") — confirm against the native implementation.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Adds one empty row; returns the value reported by the native layer. */
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}
/** Adds {@code rows} empty rows; {@code rows} must be positive. */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends one row built from {@code values} and returns its row index.
 * Type checking and the actual insertion are delegated to addAt().
 */
public long add(Object... values) {
    long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex index to insert at; must be <= size()
 * @param values one value per column, in column order; each must be
 *               compatible with the corresponding column type
 * @throws IllegalArgumentException on index/arity/type mismatch
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
        " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
        String.valueOf(values.length) +
        ") does not match the number of columns in the table (" +
        String.valueOf(columns) + ").");
    }
    // First pass: validate every value against its column type, so a type error
    // cannot leave the row half-inserted.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
            ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values (second pass); dates are converted to seconds since the epoch.
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively fills a freshly-inserted subtable cell. 'value' is an Object[]
// of rows, each row itself an Object[] of cell values; null means "leave empty".
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] rows = (Object[]) value;
    for (int rowNo = 0; rowNo < rows.length; rowNo++) {
        subtable.addAt(rowNo, (Object[]) rows[rowNo]);
    }
}
/**
 * Returns a view sorted by the specified column by the default order
 * @param columnIndex column to sort by
 * @return sorted view over all rows of the table
 */
public TableView getSortedView(long columnIndex){
    TableView view = this.where().findAll();
    view.sort(columnIndex);
    return view;
}
/**
 * Returns a view sorted by the specified column and order
 * @param columnIndex column to sort by
 * @param order sort direction
 * @return sorted view over all rows of the table
 */
public TableView getSortedView(long columnIndex, Order order){
    TableView view = this.where().findAll();
    view.sort(columnIndex, order);
    return view;
}
/**
 * Replaces the row at {@code rowIndex} with the given values.
 * Implemented as remove-then-insert; all values are validated up front so a
 * type error cannot destroy the existing row.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on index/arity/type mismatch
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
        " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
        String.valueOf(values.length) +
        ") does not match the number of columns in the table (" +
        String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
            ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
    return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods {
    // All insert* methods are raw wrappers around the native insert primitives;
    // unlike addAt(), they perform no type checking. Callers must finish a row
    // with insertDone().
    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        // Stored with second precision; millis are truncated.
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }
    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }
    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */
    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }
    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }
    // Commits the row started by the insert* calls above.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
// Native insert primitives used by addAt() and InternalMethods. Dates are
// passed as seconds since the epoch (callers divide Date.getTime() by 1000).
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
    // Dates cross the JNI boundary with second precision; convert back to millis.
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
// NOTE(review): nativeGetByteBuffer has no visible declaration in this class —
// confirm it is declared elsewhere in the file.
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the value stored in a MIXED cell. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the number of rows in the subtable at the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable at the given cell. */
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// All setters reject writes during a read transaction via throwImmutable().
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Stored with second precision; millis are truncated.
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
/** Sets the value for a (binary) cell from a byte array; {@code data} must not be null. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the new value; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in the column.
 *
 * @param columnIndex column index to adjust
 * @param value amount added to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column. Only columns of type
 * {@code ColumnType.STRING} are accepted.
 */
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if a search index exists on the given column. */
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
// BUGFIX: these methods previously delegated to natives that are not declared
// anywhere in this class (nativeSum, nativeMaximum, nativeMinimum,
// nativeAverage), which cannot link. The declared natives are the *Int
// variants below; call those instead. Also fixed the "columnnIndex" typo in
// the nativeMinimumInt declaration.
/** Sum of all values in an integer column. */
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
/** Largest value in an integer column. */
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
/** Smallest value in an integer column. */
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
/** Average of all values in an integer column (as a double). */
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
// Aggregates over float columns; sum/average widen to double.
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
// NOTE(review): "columnnIndex" is a typo in the parameter name (harmless for JNI linkage).
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
// Aggregates over double columns.
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
// NOTE(review): "columnnIndex" is a typo in the parameter name (harmless for JNI linkage).
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value, one overload per type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a query on this table; results are materialized via the returned TableQuery.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match (typed per column kind).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are compared with second precision (millis / 1000), matching setDate().
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView containing every matching row.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/**
 * Looks up a value via the search index on column 0.
 * Requires that the first column is a string column with an index
 * (see {@code setIndex(long)}).
 *
 * @param value the string to look up
 * @return the row index of the match
 * @throws RuntimeException if column 0 is not an indexed string column
 */
@Override
public long lookup(String value) {
// BUGFIX: was ColumnType.ColumnTypeString, which does not match the enum
// constant used everywhere else in this class (ColumnType.STRING, see
// setIndex()) and would not compile against it.
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
// Experimental feature
// lowerBound/upperBound: binary-search style bounds on a sorted integer column.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
//
// Returns a view containing one row per distinct value in the given column.
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the core to optimize internal storage (e.g. string enumeration); mutating.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the entire table to a JSON string (produced by the native core). */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// toString(): INFINITE (-1) means "no row limit".
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Human-readable dump of a single row. */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by all mutating methods while inside a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// MERGE NOTE: "KeepBothMethods" — leftover conflict-resolution marker from a file
// merge (both versions of the duplicated methods were kept). Not valid Java; kept
// here as a comment so it cannot be parsed as code.
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the underlying native (C++) table object; 0 once closed.
protected long nativePtr;
// True while inside a read transaction; all mutating calls must then throw.
protected boolean immutable = false;
// test:
// Debug-only bookkeeping: per-instance id and a live-instance counter.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Ensure the native TightDB library is loaded before any native call.
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
// Allocates a fresh native table and returns its pointer (0 on failure).
protected native long createNative();
/**
 * Wraps an existing native table pointer (e.g. a subtable).
 *
 * @param parent    owning object — NOTE(review): not stored in the visible code;
 *                  presumably kept only to document ownership — confirm against callers
 * @param nativePtr pointer to the already-created native table
 * @param immutable true if created inside a read transaction
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Safety net: frees the native table when the Java object is collected.
// NOTE(review): finalize() is deprecated in modern Java; a Cleaner/explicit
// close() pattern would be preferable — confirm against the library's
// supported JDK versions before changing.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table exactly once; idempotent (nativePtr is zeroed).
// Serialized on a global mutex so finalizers and explicit closes cannot race.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid; otherwise ask the core.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Content-based equality, delegated to the native core so it also works for
// typed table subclasses.
// NOTE(review): equals() is overridden without hashCode(); two tables that
// compare equal may hash differently. Do not use Table as a hash-map key —
// TODO: add a consistent hashCode() or document this limitation.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
// (instanceof also covers the null case, but the explicit check is kept.)
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native core.
 * The core stores names in a bounded buffer, so overly long names are
 * rejected up front with a descriptive exception.
 *
 * @param name candidate column name
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    final boolean tooLong = name.length() > 63;
    if (tooLong) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
// Returns a schema handle for a subtable column; only legal on a root table,
// since subtable schemas are addressed by a path of column indices from the root.
@Override
public TableSchema getSubTableSchema(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type the column's data type
 * @param name the column name (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName the new name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates a table specification from a Table specification structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification (column types and names) to apply to this table
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the full column specification (types/names), built natively.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Performs a linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Replaces the row at rowIndex with the last row, then removes the last row
 * (fast delete that does not preserve row order).
 */
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; returns the index of the first (only) new row. */
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
/** Appends 'rows' empty rows; returns the index of the first new row. */
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/** Appends a row built from 'values' (one per column); returns its index. */
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * All values are validated against the column types BEFORE any native insert
 * call is made, so a type error leaves the table unmodified.
 *
 * @param rowIndex index to insert at; must be &lt;= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, the value count, or any
 *         value's type does not match the table
 */
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
// The native protocol requires one nativeInsert* call per column, in column
// order, terminated by nativeInsertDone(); do not reorder these calls.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Core stores dates with second precision.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Subtable cell: insert the cell first, then fill it recursively.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills a just-inserted subtable cell from a nested Object[][]
// (null means "leave the subtable empty"). Uses getSubTableDuringInsert()
// because the enclosing row is not visible until nativeInsertDone().
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column by the default order.
 * Implemented as a full find-all over the table followed by an in-view sort.
 *
 * @param columnIndex column to sort by
 * @return a sorted view over all rows
 */
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @return a sorted view over all rows
 */
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
/**
 * Replaces the row at rowIndex with the given values (one per column).
 * All values are validated BEFORE the row is touched, so a type error
 * leaves the table unmodified.
 *
 * @param rowIndex index of the row to replace; must be &lt; size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, the value count, or any
 *         value's type does not match the table
 */
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// BUGFIX: value.getClass() NPE'd here when a mismatching value was null,
// masking the intended IllegalArgumentException. Report "null" instead,
// matching the behavior of addAt().
String providedType = (value == null) ? "null" : value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
// All insert* methods participate in the native row-insert protocol: one
// insert call per column for the row being built, then insertDone().
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
// Dates are stored by the core with second precision.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
// Inserts a subtable cell and fills it from a nested Object[][] (may be null).
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Terminates the row-insert protocol; must be called after all column inserts.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native counterparts of the row-insert protocol used by addAt() and
// InternalMethods; one per column type, plus the terminating insertDone.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed cell getters: read-only delegates to the native core.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Core stores dates with second precision; convert back to Java millis.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type currently stored in a mixed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Returns the subtable stored in the given cell.
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Number of rows in the subtable stored at the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable stored at the given cell (mutating). */
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Typed cell setters (second copy, kept by the merge — see the "KeepBothMethods"
// marker earlier in this file): guard against read transactions, then delegate
// to the native core.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored by the core with second precision, hence /1000.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value of a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    // Only direct buffers can be handed to the native layer; fail fast otherwise.
    if (!data.isDirect())
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value of a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the Mixed value to store; must not be null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the given value to all cells in the column.
*
* @param columnIndex column index
* @param value amount to add to each cell
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
/**
* Adds the given value to all cells in the column.
* NOTE(review): identical to addLong() above — both delegate to nativeAddInt();
* candidates for consolidation.
*
* @param columnIndex column index
* @param value amount to add to each cell
*/
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the given column. Only STRING columns are
// currently supported by the native layer.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the given column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integer columns (TableOrView interface variants)
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
//
// Integer aggregates, explicit *Int variants.
// NOTE(review): these appear to duplicate sum()/maximum()/minimum()/average()
// above but call different native entry points — confirm which set is current.
//
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float columns
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double columns
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count: number of cells in the column equal to the given value,
// one overload per supported column type.
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a new query on this table. The query shares this table's mutability.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match in the given column.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Dates are matched at second precision (native stores seconds since epoch).
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView holding every matching row in the given column.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Returns the row index of the first row whose column 0 equals {@code value},
 * using the search index on column 0.
 *
 * @param value the string to look up
 * @return row index of the first match
 * @throws RuntimeException if column 0 is not an indexed string column
 */
@Override
public long lookup(String value) {
    // Fixed: this check previously compared against ColumnType.ColumnTypeString,
    // inconsistent with the ColumnType.STRING constant used by setIndex() and
    // the rest of this class.
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
// Experimental feature
// lowerBound/upperBound: binary-search style bounds on an integer column.
// Presumably requires the column to be sorted — TODO confirm against native docs.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the given column.
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize: lets the native layer compact internal storage (e.g. string enums).
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE (-1) means "no row limit".
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Thrown by every mutating method when this table handle is read-only.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel row limit meaning "no limit" (see toString()).
public static final long INFINITE = -1;
// Handle to the underlying native (C++) table; 0 once close() has run.
protected long nativePtr;
// True when this handle was obtained inside a read transaction; mutators throw.
protected boolean immutable = false;
// test:
// Debug-only instance counter fields.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Load the native TightDB library before any native method can be called.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable). `parent` keeps the
// owner reachable; ownership of the pointer transfers to this instance.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table on GC as a safety net; callers should prefer an
// explicit close(). NOTE(review): finalize() is deprecated in modern Java —
// consider Cleaner/PhantomReference if the toolchain allows.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table exactly once; idempotent. The global CloseMutex
// serializes closes across threads so the pointer cannot be double-freed.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Zero the handle so a second close() (or the finalizer) is a no-op.
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * Works across generated typed-table subclasses, since any of them is a Table.
 * NOTE(review): hashCode() is not overridden alongside equals(); confirm Table
 * instances are never used as hash-map keys.
 */
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // instanceof is false for null, so no separate null check is needed.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/** Rejects column names longer than the 63-character limit imposed by the native layer. */
private void verifyColumnName(String name) {
    if (name.length() <= 63)
        return;
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table; subtables cannot start a schema path.
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the subtable column: a single step.
    return new SubTableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* NOTE(review): unlike the data mutators, schema methods here do not check
* `immutable` — confirm schema changes are rejected elsewhere for read transactions.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*
* @param columnIndex index of the column to rename
* @param newName new column name (max 63 characters)
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's column layout from a TableSpec structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification describing the columns to create
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns a snapshot of this table's column specification.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the column with the given name.
 * Linear scan over the column names.
 *
 * @param name column name
 * @return the column index, or -1 if no column has that name
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index)))
            return index;
        ++index;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then shrinks the table by
* one. Faster than remove() but does not preserve row order.
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row; returns the index of the new row.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends `rows` empty rows; returns the index reported by the native layer.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends a row with the given values (one per column, in column order).
*
* @return the index of the newly added row
*/
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
* All values are type-checked against the column types before anything is
* inserted, so a bad argument leaves the table unchanged.
*
* @param rowIndex insertion position, 0 <= rowIndex <= size()
* @param values one value per column, in column order
* @throws IllegalArgumentException on bad index, wrong arity, or a value
* incompatible with its column type
*/
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// First pass: validate every value and cache the column types, so the
// insert pass below cannot fail halfway through.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Second pass: insert each value with the type-specific native call.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Stored as seconds since epoch; millisecond precision is truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills the freshly inserted subtable cell from a nested
// Object[][] (one Object[] per subtable row). A null value leaves it empty.
// NOTE(review): the subtable handle is never explicitly closed here; it is
// left to the finalizer — confirm this is intentional.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
* Returns a view of all rows sorted by the specified column in the default order.
* @param columnIndex column to sort by
* @return a sorted TableView over the whole table
*/
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
* Returns a view of all rows sorted by the specified column and order.
* @param columnIndex column to sort by
* @param order ascending or descending
* @return a sorted TableView over the whole table
*/
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
/**
* Replaces the row at rowIndex with the given values (one per column).
* Values are fully validated before the row is touched. Implemented as
* remove + re-insert, so any subtables in the row are rebuilt.
*
* @param rowIndex row to replace, 0 <= rowIndex < size()
* @param values one value per column, in column order
*/
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): unlike addAt(), a null value here would NPE on
// value.getClass() while building the message — confirm nulls cannot
// reach this branch.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
// Each insert* method writes one cell of a row under construction; the row
// becomes visible only after insertDone(). All reject writes on immutable tables.
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Seconds since epoch; millisecond precision is truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the row under construction to the table.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native insert entry points backing addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter reads one cell (columnIndex, rowIndex) via the native layer.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native stores seconds since epoch; Date wants milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or, better yet, call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
/**
 * Add the value to all cells in the column.
 *
 * NOTE(review): identical to addLong() above — both delegate to nativeAddInt.
 * One of the two is presumably a transitional alias; confirm before removing either.
 *
 * @param columnIndex column index of the cell
 * @param value
 */
//!!!TODO: New. Support in highlevel API
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
//
// Aggregate functions
//
// Integers
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Looks up a value in column 0, which must be an indexed String column.
 *
 * @param value the string to search for
 * @return row index of the first match
 * @throws RuntimeException if column 0 is not an indexed String column
 */
@Override
public long lookup(String value) {
    // Use the same enum constant as the rest of this class (see setIndex()),
    // rather than the stale ColumnType.ColumnTypeString name.
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
//
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Safe
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically inherit
* from this class via the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Disposes the underlying native table. Serialized on the global CloseMutex so
// concurrent finalizers cannot race each other; idempotent because nativePtr is
// zeroed after the native close, making repeated calls a no-op.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            // Already closed — nothing to release.
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0; // mark as closed; isValid() relies on this sentinel
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
    // A zero pointer means the native table has already been closed;
    // otherwise defer to the core library's validity check.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// NOTE(review): equals() is overridden here without a matching hashCode()
// override, which violates the Object contract and breaks hash-based
// collections (equal tables may land in different buckets). A content-based
// hashCode would need native support (equality is decided by nativeEquals),
// so this is flagged rather than patched here.
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    if (other == null)
        return false;
    // Has to work for all the typed tables as well
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    // Content comparison is delegated to the core library.
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
private void verifyColumnName(String name) {
if (name.length() > 63) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
/**
 * Returns the schema of the subtable column at the given index.
 * Only valid on a root table; subtables navigate via their root's schema.
 *
 * @param columnIndex index of the subtable column
 * @return schema handle for that subtable column
 * @throws UnsupportedOperationException if called on a subtable
 */
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    return new SubTableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName new column name (max 63 characters)
 * @throws IllegalArgumentException if the name is too long
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Added the immutability guard present in all other mutating methods
    // (clear, remove, updateFromSpec, the setters); it was missing here.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param columnType
* data type of the column @see <code>ColumnType</code>
* @param columnName
* name of the column. Duplicate column name is not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        // name.equals(...) keeps the original NPE behavior for a null argument.
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex 0-based position of the new row; must be <= size()
 * @param values one value per column, each compatible with that column's type
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid
 * @throws IllegalStateException if called during a read transaction
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // Validate every value BEFORE inserting anything, so a type error cannot
    // leave a half-inserted row in the native table.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values — each column gets its type-specific native insert call;
    // the sequence must cover every column and end with nativeInsertDone().
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // Date is stored as epoch seconds in core (see getDate's *1000 inverse).
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            // Subtable cell is created first, then its rows are inserted recursively.
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
* Returns a view sorted by the specified column by the default order
* @param columnIndex
* @return
*/
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
* Returns a view sorted by the specified column and order
* @param columnIndex
* @param order
* @return
*/
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
/**
 * Replaces the row at {@code rowIndex} with the given values.
 * Implemented as remove + re-insert, so the operation is not atomic and
 * row identity is not preserved.
 *
 * @param rowIndex 0-based index of an existing row; must be < size()
 * @param values one value per column, each compatible with that column's type
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid
 * @throws IllegalStateException if called during a read transaction
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Fixed: a null value that fails matchObject used to NPE on
            // value.getClass() while building this message; handle null the
            // same way addAt() does so the intended exception is thrown.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
<<<<<<< MINE
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
=======
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
>>>>>>> YOURS
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
<<<<<<< MINE
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
=======
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
=======
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
=======
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
=======
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
>>>>>>> YOURS
protected native long nativeSumInt(long nativePtr, long columnIndex);
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Looks up a row by the value of column 0, which must be an indexed String column.
 *
 * Fix: the type check previously compared against {@code ColumnType.ColumnTypeString},
 * which is inconsistent with the enum constant used everywhere else in this class
 * (see {@code setIndex}, which uses {@code ColumnType.STRING}).
 *
 * @param value the string to look up in column 0
 * @return row index of the match (as reported by the native core)
 * @throws RuntimeException if column 0 is not an indexed String column
 */
@Override
public long lookup(String value) {
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
//
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table handle exactly once.
// Guarded by the process-wide CloseMutex so an explicit close() cannot race
// a finalizer running on the GC thread; a second call is a no-op because
// nativePtr is zeroed under the same lock.
private void close() {
synchronized (CloseMutex.getInstance()) {
// Already closed (or never successfully created): nothing to release.
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
// Free the native object, then zero the pointer so isValid() reports false
// and any later close()/finalize() takes the early-return path above.
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated, all its subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
/**
 * Reports whether this table reference is still usable.
 * A closed table (nativePtr == 0) is never valid; otherwise the
 * native core decides (subtables are invalidated when a parent changes).
 */
public boolean isValid() {
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Equality is delegated to the native core so it also holds across the
 * generated typed-table subclasses.
 *
 * NOTE(review): equals() is overridden without hashCode(); hash-based
 * collections will still use identity hashing — confirm this is intended.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null, so this also covers the null check;
    // any Table subclass (generated typed tables) is accepted.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
private void verifyColumnName(String name) {
if (name.length() > 63) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
@Override
public TableSchema getSubTableSchema(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's schema from a Table specification structure.
* Supported column types - refer to @see ColumnType.
*
* @param tableSpec
* the specification describing the columns of this table.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Looks up a column by its name (linear scan over the column names).
 *
 * @param name column name to search for
 * @return 0-based index of the first column with that name, or -1 if absent
 */
@Override
public long getColumnIndex(String name) {
    final long total = getColumnCount();
    for (long idx = 0; idx < total; idx++) {
        if (name.equals(getColumnName(idx)))
            return idx;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
* @param rowIndex
* @param values
*/
// Inserts one row at rowIndex using the native insert protocol:
// (1) validate the index, (2) validate every value against its column type,
// (3) per-column nativeInsert* calls, (4) a single nativeInsertDone().
// All validation happens before the first native insert so a bad argument
// cannot leave a half-inserted row behind.
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index: rowIndex == size() appends; anything larger is rejected.
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types: one value per column, in column order.
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Cache the column types so the insert loop below does not have to
// re-query the native layer per column.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values: dispatch each value to the matching typed native call.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Native layer stores seconds since epoch, Java Date uses milliseconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Insert the subtable cell, then fill its rows recursively.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
* Returns a view sorted by the specified column by the default order
* @param columnIndex
* @return
*/
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
* Returns a view sorted by the specified column and order
* @param columnIndex
* @param order
* @return
*/
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods {
    // Every method below follows the same pattern: reject mutation during a
    // read transaction, then delegate straight to the native (JNI) insert
    // routine. Callers are expected to finish a row with insertDone().

    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        // Dates are stored natively with second precision (epoch millis / 1000).
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }

    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }

    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */

    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        // A null array is rejected explicitly; pass an empty array for "no data".
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }

    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        // Populate the freshly inserted subtable cell row by row.
        insertSubtableValues(rowIndex, columnIndex, values);
    }

    // Commits the pending row insert on the native side.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
<<<<<<< MINE
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
=======
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
>>>>>>> YOURS
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
<<<<<<< MINE
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
=======
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
=======
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
=======
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
=======
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
>>>>>>> YOURS
protected native long nativeSumInt(long nativePtr, long columnIndex);
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
/**
 * Looks up the row index of the first match of {@code value} in column 0.
 * Requires column 0 to be an indexed string column.
 *
 * @param value the string to look up
 * @return the 0-based row index of the match
 * @throws RuntimeException if column 0 is not an indexed string column
 */
@Override
public long lookup(String value) {
    // Fix: the enum constant is STRING (as used by setIndex() and addAt()),
    // not ColumnTypeString -- the latter did not compile against this enum.
    if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
//
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Unstructured
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table. Serialized on a global mutex so that finalizers
// and explicit closes cannot race; idempotent -- a second call sees
// nativePtr == 0 and returns without touching native memory.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            // Already closed (or never successfully created): nothing to free.
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        // Zero the pointer so isValid()/double-close see the table as gone.
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * NOTE(review): equals() is overridden without a matching hashCode(); equal
 * tables may land in different hash buckets -- confirm whether Table is ever
 * used as a hash key.
 */
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // Has to work for all the typed tables as well; instanceof also rejects null.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Rejects column names exceeding the 63-character limit.
 *
 * @param name proposed column name
 * @throws IllegalArgumentException if the name is longer than 63 characters
 */
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() <= maxNameLength)
        return;
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
@Override
public TableSchema getSubTableSchema(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param columnType
* data type of the column @see <code>ColumnType</code>
* @param columnName
* name of the column. Duplicate column name is not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Linear scan over the column names for the first one equal to {@code name}.
 *
 * @param name column name to find
 * @return the 0-based column index, or -1 when no column has that name
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index)))
            return index;
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table
* @param values
* @return The row index of the appended row
*/
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that
 * row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex 0-based insert position; must be &lt;= size() (== size() appends)
 * @param values   one value per column, in column order
 * @throws IllegalArgumentException if rowIndex is out of range, the number of
 *         values does not match the column count, or a value is incompatible
 *         with its column's type
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Validate every value BEFORE inserting anything, so a bad value cannot
    // leave a half-written row behind; column types are cached for reuse below.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values. The native insert protocol is order-sensitive: one
    // nativeInsert* call per column, left to right, finished by nativeInsertDone().
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // Dates are stored natively with second precision.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            // Insert the subtable cell first, then recursively fill it.
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively fills a subtable cell during row insertion. 'value' must be an
// Object[] of rows, each row itself an Object[] of cell values; a null value
// leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        // Uses getSubTableDuringInsert() because row counts are not yet updated
        // until nativeInsertDone() runs.
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.addAt(i, (Object[])rowArr);
        }
    }
}
/**
 * Returns a view of the entire table sorted by the specified column using the
 * default sort order.
 *
 * @param columnIndex column to sort by
 * @return a sorted TableView over all rows
 */
public TableView getSortedView(long columnIndex){
    TableView sorted = where().findAll();
    sorted.sort(columnIndex);
    return sorted;
}
/**
 * Returns a view of the entire table sorted by the specified column in the
 * given order.
 *
 * @param columnIndex column to sort by
 * @param order sort direction
 * @return a sorted TableView over all rows
 */
public TableView getSortedView(long columnIndex, Order order){
    TableView sorted = where().findAll();
    sorted.sort(columnIndex, order);
    return sorted;
}
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex 0-based index of the row to replace; must be less than size()
 * @param values one value per column, each compatible with the column's type
 * @throws IllegalArgumentException if the index is out of range, the value count
 *         does not match the column count, or a value's type does not match its column
 * @throws IllegalStateException if called during a read transaction
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: guard against value == null before calling getClass() —
            // a null value that fails matchObject() previously threw an NPE here
            // instead of the intended IllegalArgumentException. This mirrors the
            // equivalent null handling in addAt().
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
// (i.e. by generated typed-table code, not by users of the dynamic interface).
public InternalMethods getInternalMethods(){
    return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
// Each insert* wrapper guards against mutation during a read transaction, then
// forwards directly to the corresponding native insert primitive of the outer Table.
public class InternalMethods{
    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        // Dates are stored natively as whole seconds since the epoch.
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }
    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }
    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */
    // Only byte[] payloads are supported for binary columns; the ByteBuffer
    // variant above is disabled.
    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }
    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }
    // Commits a pending row insertion started via the insert* calls above.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
// Native insert primitives backing the insert wrappers above. Callers are
// responsible for performing the immutability check before invoking these.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed cell accessors; each forwards to the native table.
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Dates are stored natively as epoch seconds; convert back to milliseconds here.
@Override
public Date getDate(long columnIndex, long rowIndex) {
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
<<<<<<< MINE
@Override
=======
/*
>>>>>>> YOURS
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Typed cell mutators; each checks the read-transaction guard, then forwards
// to the native table.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored natively as whole seconds since the epoch.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
// Resolved merge conflict: keep the ByteBuffer-based setter disabled (the "/*"
// side); the matching "*/" terminator below only makes sense with that side, and
// binary mutation goes through setBinaryByteArray below.
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Sets a binary cell from a byte[]; null is rejected (use an empty array instead).
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
<<<<<<< MINE
@Override
public void addLong(long columnIndex, long value) {
=======
public void adjust(long columnIndex, long value) {
>>>>>>> YOURS
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column. Only STRING columns are currently indexable.
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if the column has a search index.
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
<<<<<<< MINE
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
=======
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
<<<<<<< MINE
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
=======
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
<<<<<<< MINE
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
=======
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
<<<<<<< MINE
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
=======
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of rows whose cell in the column equals the given value.
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a query builder over this table.
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: row index of the first match (typed per column type).
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Date comparisons use epoch seconds, matching the native storage format.
@Override
public long findFirstDate(long columnIndex, Date date) {
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: view of every matching row (typed per column type).
@Override
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Resolved merge conflict: keep lookup() disabled (the "/*" side) — the matching
// "*/" terminator below only makes sense with that side.
/* // Requires that the first column is a string column with unique values. Also index required?
@Override
public long lookup(String value) {
    if (this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value); */
// Experimental feature
// Binary-search style bounds over a sorted integer column (native semantics).
@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
<<<<<<< MINE
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
=======
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
>>>>>>> YOURS
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native table to optimize its internal storage (e.g. string enumeration).
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE (-1) means "no row limit" for the string dump.
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Central guard used by every mutating method when the table was opened read-only.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their own design will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
protected long nativePtr;           // pointer to the native C++ table; 0 once closed
protected boolean immutable = false; // true while inside a read transaction
// test:
protected int tableNo;               // debug-only instance counter id
protected boolean DEBUG = false;
protected static int TableCount = 0; // debug-only count of live tables
static {
    // Load the native TightDB library before any native method can be called.
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable). 'parent' is unused
// here but keeps the owning object reachable at the call sites.
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Finalizer safety net: releases the native table if client code never closed it.
// NOTE(review): relies on GC timing — prefer explicit close() in client code.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Releases the native table. Serialized on a global mutex so concurrent
// finalizers cannot race; idempotent — nativePtr == 0 marks "already closed".
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid; otherwise ask the native side.
    if (nativePtr == 0)
        return false;
    return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Equality is by native table *contents* (nativeEquals), not identity.
// NOTE(review): equals() is overridden without hashCode(); two tables comparing
// equal may hash differently — confirm Tables are never used as hash-map keys,
// or add a contract-consistent hashCode().
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    if (other == null)
        return false;
    // Has to work for all the typed tables as well
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects over-long column names before they reach the native layer.
private void verifyColumnName(String name) {
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
// Returns a schema handle for the subtable column at columnIndex; only permitted
// on a root table (subtable schemas must be navigated from the root).
@Override
public TableSchema getSubTableSchema(long columnIndex) {
    if(nativeIsRootTable(nativePtr) == false)
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    long[] newPath = new long[1];
    newPath[0] = columnIndex;
    return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters)
 * @return Index of the new column.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Schema changes are mutations; guard like every other mutating method
    // (clear(), remove(), set*, ...) — previously missing here.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName new column name (max 63 characters)
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Schema changes are mutations; guard like every other mutating method
    // (clear(), remove(), set*, ...) — previously missing here.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a TableSpec structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 * the specification (column types and names; duplicate names not allowed)
 * to apply to this table
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of this table's column layout (types and names).
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the column with the given name.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        if (name.equals(getColumnName(index)))
            return index;
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIdex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
// Replaces the row at rowIndex with the last row, then shrinks the table by one
// (per the native call's name) — faster than remove() but does not preserve order.
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.

/** Appends a single empty row and returns the value reported by the native layer. */
public long addEmptyRow() {
    return addEmptyRows(1);
}

/**
 * Appends {@code rows} empty rows.
 *
 * @param rows number of rows to append; must be positive
 * @return the value reported by the native layer (presumably the first new row's index -- TODO confirm)
 * @throws IllegalArgumentException if {@code rows < 1}
 */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * All values are validated against the column types BEFORE any native insert
 * is issued, so a bad argument leaves the table unchanged.
 *
 * @param rowIndex insertion position; must be <= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong value count, or type mismatch
 */
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Cache the column types so the insert loop below need not re-query them.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Native stores timestamps at second precision.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// The subtable cell must exist before its rows can be filled in.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills a just-inserted subtable cell from a nested Object[] of rows.
// A null value means the subtable stays empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        Object[] subRows = (Object[]) value;
        for (int r = 0; r < subRows.length; ++r) {
            subtable.addAt(r, (Object[]) subRows[r]);
        }
    }
}
/**
 * Returns a view sorted by the specified column by the default order.
 *
 * Built by materializing an unfiltered query and sorting the resulting view.
 *
 * @param columnIndex column to sort by
 * @return sorted view over all rows
 */
public TableView getSortedView(long columnIndex){
TableView view = this.where().findAll();
view.sort(columnIndex);
return view;
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order sort order
 * @return sorted view over all rows
 */
public TableView getSortedView(long columnIndex, Order order){
TableView view = this.where().findAll();
view.sort(columnIndex, order);
return view;
}
/**
 * Replaces the row at the specified position with the specified row.
 *
 * All values are validated before the row is touched, so a bad argument
 * leaves the table unchanged.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong value count, or type mismatch
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // BUGFIX: a null value used to trigger an NPE here instead of the
            // intended IllegalArgumentException (addAt() already handled null).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods{
// Thin typed wrappers over the native insert primitives; each checks the
// enclosing table's immutable flag first.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
// Dates are stored at second precision in the native layer.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
// Rejects null explicitly; an empty array must be used for "no data".
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
// Creates the subtable cell, then fills it row by row from 'values'.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Must be called once after a full row of inserts to commit the row.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native insert primitives used by addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Thin wrappers delegating cell reads to the native layer; no bounds checks here.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native stores timestamps at second precision; convert to millis for java.util.Date.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
// NOTE(review): resolved merge conflict. The closing */ below was unconditional,
// so the comment-out side is the only syntactically valid resolution: the
// ByteBuffer accessor is retired in favor of getBinaryByteArray().
/*
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
// Returns the binary cell contents as a byte array.
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Returns the boxed value of a mixed-typed cell.
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type currently stored in a mixed-typed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the row count of the subtable at the given cell without opening it. */
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Deletes all rows of the subtable at the given cell. Rejected while immutable. */
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Typed cell writers; each checks the immutable flag, then delegates to native.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored at second precision; sub-second part of 'date' is truncated.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
// NOTE(review): resolved merge conflict. The closing */ below was unconditional,
// so the comment-out side is the only syntactically valid resolution: the
// ByteBuffer setter is retired in favor of setBinaryByteArray().
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Overwrites a binary cell; null is rejected (use an empty array for "no data").
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the new value; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the given value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
<<<<<<< MINE
@Override
public void addLong(long columnIndex, long value) {
=======
public void adjust(long columnIndex, long value) {
>>>>>>> YOURS
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Reports whether the column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
<<<<<<< MINE
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
=======
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
<<<<<<< MINE
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
=======
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
<<<<<<< MINE
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
=======
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
<<<<<<< MINE
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
=======
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
// Aggregates over float columns; sums and averages widen to double.
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Counts the rows whose cell in the given column equals 'value'.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a new query over this table; the query inherits the immutable flag.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the index of the first matching row (delegated to native).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are matched at second precision (getTime() / 1000).
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a view over every matching row (delegated to native).
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// Dates are matched at second precision (getTime() / 1000).
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// NOTE(review): resolved merge conflict. The trailing */ on the last line was
// unconditional, so the side opening the /* is the only syntactically valid
// resolution: lookup() stays disabled.
/* // Requires that the first column is a string column with unique values. Also index required?
@Override
public long lookup(String value) {
    if (this.getColumnType(0) != ColumnType.STRING)
        throw new RuntimeException("lookup() requires a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value); */
// Experimental feature
// Binary-search style bounds on an integer column (delegated to native).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
<<<<<<< MINE
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
=======
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
>>>>>>> YOURS
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Lets the native layer optimize internal storage. Rejected while immutable.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the whole table to a JSON string via the native layer. */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Returns a human-readable dump of the table (all rows). */
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
/** Returns a human-readable dump limited to maxRows rows. */
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
<<<<<<< MINE
@Override
=======
>>>>>>> YOURS
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Fails fast when a mutating call arrives during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Diff Result
No diff
Case 84 - java_realmjava.rev_36612_8eb3b..TableView.java
Base
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview as a subset of a table. It contains at most as many
* entries as the table it is based on. A table view is often the
* result of a query.
*
* The view doesn't copy data from the table; it merely holds a list of
* row references into the original table, which owns the real data.
*
* The class serves as the base class of all table views. It is a raw-level
* table view; users are discouraged from using this class directly. Instead,
* users are advised to use the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
 * Creates a TableView from an already created native TableView reference,
 * with no parent Java TableView. This method is not supposed to be
 * called by a user of this db. It is for internal use only.
 *
 * @param nativePtr pointer to the native view object
 * @param immutable true if mutating calls must be rejected
 */
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView with an already created Java TableView object and a
 * native TableView object reference. The method is not supposed to
 * be called by the user of the db. The method is for internal use only.
 *
 * @param tableView the parent Java table view
 * @param nativePtr pointer to the native view object
 * @param immutable true if mutating calls must be rejected
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// Last-resort cleanup: releases the native view if close() was never called.
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent: zeroes nativePtr after freeing so a second call is a no-op.
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
public boolean isEmpty(){
return size() == 0;
}
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * Linear scan over the view's column names; O(column count).
 *
 * @param name column name
 * @return the index, -1 if not found
 */
public long getColumnIndex(String name) {
    for (long col = 0, count = getColumnCount(); col < count; col++) {
        if (name.equals(getColumnName(col))) {
            return col;
        }
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
// Returns the raw native type code; mapped to ColumnType by the caller above.
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
// Typed cell readers; indices are view-relative and delegated to native.
/**
 * Get the value of the particular (integer) cell.
 *
 * @param columnIndex 0 based index value of the column.
 * @param rowIndex 0 based row value of the column.
 * @return value of the particular cell.
 */
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (boolean) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (float) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (double) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (date) cell.
 *
 * Native stores timestamps at second precision; converted to millis here.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo "columnInde" -> "columnIndex". JNI resolves
// native methods by class, method name and descriptor only, so the Java-side
// parameter name rename cannot affect the native binding.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
// Byte-array variant of getBinaryByteBuffer: returns the cell contents as a
// Java byte array instead of a ByteBuffer.
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
// Returns the runtime type of the value stored in a mixed-typed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the value stored in a mixed-typed cell.
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the subtable stored in the given cell. The returned Table
// inherits this view's immutability flag.
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the number of rows of the subtable stored in the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable stored in the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
// All setters first reject the call via throwImmutable() when this view was
// obtained inside a read transaction.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
* Note: the native layer stores seconds since the epoch, so the
* millisecond fraction of java.util.Date is truncated by the /1000.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the date to store
*/
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (string) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the string to store
*/
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data the bytes to store
*/
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
// Byte-array variant of setBinaryByteBuffer.
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data the value to store
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value the amount to add to every cell in the column
*/
//!!!TODO: New
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
// Removes all rows referenced by this view (and from the underlying table).
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also gets deleted.
*
* @param rowIndex the row index
*/
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
// Removes the last row of the view; a no-op when the view is empty.
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
// Each findFirst* method returns the row index of the first cell in the
// column matching the value.
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
// Date matching happens at second precision (native stores seconds).
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
// Each findAll* method returns a new TableView containing every matching
// row; the result inherits this view's immutability flag.
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativeViewPtr, long columnIndex);
// Average of an integer column, returned as a double.
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Float aggregates
// Same contract as the integer aggregates, for float columns. Note that
// sumFloat/averageFloat accumulate in double precision.
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
// Same contract as the integer aggregates, for double columns.
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
public enum Order { ascending, descending };
// Sorts the view in place by the given column, in the given order.
public void sort(long columnIndex, Order order) {
if (immutable) throwImmutable();
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
// Sorts the view in place by the given column, ascending.
public void sort(long columnIndex) {
if (immutable) throwImmutable();
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/**
 * Serializes the rows of this view to a JSON string via the native layer.
 *
 * @return JSON representation of the view's contents
 */
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
/**
 * Returns a human-readable dump of the view, limited to the first 500 rows.
 */
@Override // added: overrides Object.toString()
public String toString() {
return nativeToString(nativePtr, 500);
}
/**
 * Returns a human-readable dump of the view.
 *
 * @param maxRows maximum number of rows to include
 */
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/**
 * Returns a human-readable dump of a single row.
 *
 * @param rowIndex the row index
 */
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Raised by all mutating methods when the view belongs to a read transaction.
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView; set to 0 once close() has run.
protected long nativePtr;
// True when the view was obtained inside a read transaction.
protected boolean immutable = false;
// Parent view this view was derived from, or null when created directly.
protected TableView tableView;
// Not implemented yet; declared by the TableOrView interface.
@Override
public long lookup(String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
// Not implemented yet; declared by the TableOrView interface.
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview as a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view doesn't copy data from the table, but merely holds a list of
* row references into the original table containing the real data.
*
* The class serves as a base class for all table views. It is a raw-level
* table view; users are not encouraged to use this class directly. Instead,
* users are advised to use the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
// Set to true to trace native view open/close calls on stderr.
protected boolean DEBUG = false; //true;
/**
* Creates a TableView from an already created native reference to a
* TableView. This method is not supposed to be called by a user of this
* db. It is for internal use only.
*
* @param nativePtr pointer to the native TableView.
* @param immutable true when created inside a read transaction.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with an already created Java TableView object and a
* native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView the parent table view.
* @param nativePtr pointer to the native TableView.
* @param immutable true when created inside a read transaction.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
/**
 * Releases the native TableView when this object is garbage collected.
 * Any error from close() propagates, but Object.finalize() always runs.
 */
@Override // added: overrides Object.finalize()
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Idempotent: the native pointer is zeroed after the first successful close,
// so later calls return immediately. Synchronized so the finalizer thread
// and an explicit caller cannot double-free the native view.
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
// Frees the underlying native TableView; invoked exactly once from close().
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Looks up the 0-based index of the column with the given name by scanning
 * the column names in order.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
public long getColumnIndex(String name) {
    final long total = getColumnCount();
    long index = 0;
    while (index < total) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
* Note: the native layer stores date-times as seconds since the epoch;
* the value is converted to milliseconds for java.util.Date here.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo "columnInde" -> "columnIndex". JNI resolves
// native methods by class, method name and descriptor only, so the Java-side
// parameter name rename cannot affect the native binding.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
// Byte-array variant of getBinaryByteBuffer: returns the cell contents as a
// Java byte array instead of a ByteBuffer.
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
// Returns the runtime type of the value stored in a mixed-typed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the value stored in a mixed-typed cell.
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the subtable stored in the given cell. The returned Table
// inherits this view's immutability flag.
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the number of rows of the subtable stored in the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable stored in the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
// All setters first reject the call via throwImmutable() when this view was
// obtained inside a read transaction.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the value to store
*/
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
* Note: the native layer stores seconds since the epoch, so the
* millisecond fraction of java.util.Date is truncated by the /1000.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the date to store
*/
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (string) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value the string to store
*/
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data the bytes to store
*/
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
// Byte-array variant of setBinaryByteBuffer.
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data the value to store
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value the amount to add to every cell in the column
*/
//!!!TODO: New
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
// Removes all rows referenced by this view (and from the underlying table).
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also gets deleted.
*
* @param rowIndex the row index
*/
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
// Removes the last row of the view; a no-op when the view is empty.
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
// Each findFirst* method returns the row index of the first cell in the
// column matching the value.
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
// Date matching happens at second precision (native stores seconds).
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
// Each findAll* method returns a new TableView containing every matching
// row; the result inherits this view's immutability flag.
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativeViewPtr, long columnIndex);
/**
 * Calculates the average of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the average of the values in the column
 */
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Float aggregates
/**
 * Calculates the sum of the values in a float column, as a double.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
/**
 * Returns the maximum value of the cells in a float column.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
/**
 * Returns the minimum value of the cells in a float column.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
/**
 * Calculates the average of the values in a float column.
 *
 * @param columnIndex column index
 * @return the average of the values in the column
 */
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
/**
 * Calculates the sum of the values in a double column.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
/**
 * Returns the maximum value of the cells in a double column.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
/**
 * Returns the minimum value of the cells in a double column.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
/**
 * Calculates the average of the values in a double column.
 *
 * @param columnIndex column index
 * @return the average of the values in the column
 */
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
/** Sort order used by {@link #sort(long, Order)}. */
public enum Order { ascending, descending }
/**
 * Sorts this view in place by the given column.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @throws IllegalStateException if the view was opened in a read transaction
 */
public void sort(long columnIndex, Order order) {
if (immutable) throwImmutable();
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
/**
 * Sorts this view in place by the given column, in ascending order.
 *
 * @param columnIndex column to sort by
 * @throws IllegalStateException if the view was opened in a read transaction
 */
public void sort(long columnIndex) {
// Delegate to the two-argument overload so the immutability check and
// the native call exist in exactly one place.
sort(columnIndex, Order.ascending);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/**
 * Returns a JSON representation of this view, produced by the native layer.
 */
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
// Row cap applied by the no-argument toString() overload.
private static final long TO_STRING_DEFAULT_MAX_ROWS = 500;
/**
 * Returns a human-readable dump of this view, limited to the first
 * {@value #TO_STRING_DEFAULT_MAX_ROWS} rows.
 */
public String toString() {
// Delegate to the parameterized overload instead of duplicating the
// native call with a magic number.
return toString(TO_STRING_DEFAULT_MAX_ROWS);
}
/**
 * Returns a human-readable dump of at most {@code maxRows} rows.
 *
 * @param maxRows maximum number of rows to include
 */
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/**
 * Returns a human-readable dump of a single row of this view.
 *
 * @param rowIndex 0 based index of the row
 */
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Guard invoked by every mutating method when this view was opened in a
// read transaction (immutable == true).
private void throwImmutable()
{
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView object; 0 after the view is closed.
protected long nativePtr;
// True when this view belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Parent view this one was derived from, or null; presumably held so the
// parent stays reachable while this view is alive -- TODO confirm intent.
protected TableView tableView;
/**
 * Looks up {@code value} in this view.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long lookup(String value) {
// TODO: implement
// UnsupportedOperationException subclasses RuntimeException, so callers
// catching the old type still work.
throw new UnsupportedOperationException("Not implemented yet.");
}
/**
 * Counts the rows whose cell in the given string column equals
 * {@code value}.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new UnsupportedOperationException("Not implemented yet.");
}
}
Left
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
 * Creates a TableView wrapping an already created native TableView
 * reference. This method is not supposed to be called by a user of this
 * db. It is for internal use only.
 *
 * @param nativePtr pointer to the native TableView.
 * @param immutable true if the view was opened in a read transaction.
 */
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView with already created Java TableView Object and a
 * native TableView object reference. The method is not supposed to
 * be called by the user of the db. The method is for internal use only.
 *
 * @param tableView the parent table view this view is derived from.
 * @param nativePtr pointer to the native TableView.
 * @param immutable true if the view was opened in a read transaction.
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
/**
 * Releases the underlying native TableView when this object is garbage
 * collected. NOTE(review): relying on finalize() for native cleanup is
 * fragile (it is deprecated in modern Java); an explicit close()/
 * Closeable pattern would be more reliable -- confirm before changing,
 * since close() is currently private.
 */
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Frees the native object exactly once; synchronized so a concurrent
// finalizer and caller cannot double-free. nativePtr == 0 marks "closed".
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
// Linear scan over the columns, returning the first whose name matches.
final long count = getColumnCount();
for (long index = 0; index != count; ++index) {
if (name.equals(getColumnName(index))) {
return index;
}
}
return -1; // no column with that name
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo ("columnInde"). JNI binds native methods by
// method name and signature only, so renaming a parameter is ABI-neutral.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
/**
 * Finds the lower bound of {@code value} in the given column.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long lowerBoundLong(long columnIndex, long value) {
// UnsupportedOperationException is the conventional JDK type for an
// unimplemented operation and subclasses RuntimeException, so existing
// callers catching the old type are unaffected.
throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Finds the upper bound of {@code value} in the given column.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativeViewPtr, long columnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
/** Sort order used by {@link #sort(long, Order)}. */
public enum Order { ascending, descending }
/**
 * Sorts this view in place by the given column.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @throws IllegalStateException if the view was opened in a read transaction
 */
public void sort(long columnIndex, Order order) {
if (immutable) throwImmutable();
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
/**
 * Sorts this view in place by the given column, in ascending order.
 *
 * @param columnIndex column to sort by
 * @throws IllegalStateException if the view was opened in a read transaction
 */
public void sort(long columnIndex) {
// Delegate to the two-argument overload so the immutability check and
// the native call exist in exactly one place.
sort(columnIndex, Order.ascending);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/**
 * Returns a JSON representation of this view, produced by the native layer.
 */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
// Row cap applied by the no-argument toString() overload.
private static final long TO_STRING_DEFAULT_MAX_ROWS = 500;
/**
 * Returns a human-readable dump of this view, limited to the first
 * {@value #TO_STRING_DEFAULT_MAX_ROWS} rows.
 */
@Override
public String toString() {
// Delegate to the parameterized overload instead of duplicating the
// native call with a magic number.
return toString(TO_STRING_DEFAULT_MAX_ROWS);
}
/**
 * Returns a human-readable dump of at most {@code maxRows} rows.
 *
 * @param maxRows maximum number of rows to include
 */
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Guard invoked by every mutating method when this view was opened in a
// read transaction (immutable == true).
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView object; set to 0 by close().
protected long nativePtr;
// True when this view belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Parent view this one was derived from, or null; presumably held so the
// parent stays reachable while this view is alive -- TODO confirm intent.
protected TableView tableView;
/**
 * Looks up {@code value} in this view.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long lookup(String value) {
// TODO: implement
// UnsupportedOperationException subclasses RuntimeException, so callers
// catching the old type still work.
throw new UnsupportedOperationException("Not implemented yet.");
}
/**
 * Counts the rows whose cell in the given string column equals
 * {@code value}.
 *
 * @throws UnsupportedOperationException always -- not implemented yet.
 */
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new UnsupportedOperationException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex){
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}

// Parameter renamed from the misspelled "columnInde". This is safe: JNI binds
// native methods by name and type descriptor only, never by parameter name.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell as a Java byte array.
 *
 * @param columnIndex 0 based index value of the cell column
 * @param rowIndex 0 based index value of the cell row
 * @return value of the particular cell (presumably a copy produced by the
 *         native layer — confirm against the JNI implementation)
 */
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
/**
 * Get the runtime type stored in a mixed-typed cell.
 *
 * @param columnIndex 0 based index of the cell column
 * @param rowIndex 0 based index of the cell row
 * @return the ColumnType decoded from the native type code
 */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a mixed-typed cell.
 *
 * @param columnIndex 0 based index of the cell column
 * @param rowIndex 0 based index of the cell row
 * @return the cell value wrapped in a Mixed
 */
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the subtable stored in a cell. The returned Table wraps a new native
 * pointer and inherits this view's immutability flag.
 *
 * @param columnIndex 0 based index of the cell column
 * @param rowIndex 0 based index of the cell row
 * @return the subtable in the cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the number of rows in the subtable stored in a cell, without
 * materializing a Table wrapper.
 *
 * @param columnIndex 0 based index of the cell column
 * @param rowIndex 0 based index of the cell row
 * @return row count of the subtable
 */
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Removes all rows from the subtable stored in a cell.
 * Throws IllegalStateException when called during a read transaction.
 *
 * @param columnIndex 0 based index of the cell column
 * @param rowIndex 0 based index of the cell row
 */
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (string) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
    // UnsupportedOperationException is the idiomatic type for an
    // unimplemented operation; it extends RuntimeException, so callers
    // catching the old type still work.
    throw new UnsupportedOperationException("Not implemented yet");
}

@Override
public long upperBoundLong(long columnIndex, long value) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativeViewPtr, long columnIndex);
/**
 * Returns the average of the values in an integer column.
 *
 * @param columnIndex column index
 * @return the average value
 */
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Float aggregates
/** Sum of the values in a float column. */
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
/** Average of the values in a float column. */
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
/** Sum of the values in a double column. */
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
/** Average of the values in a double column. */
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
public enum Order { ascending, descending }

/**
 * Sorts the view in place on the given column.
 * Throws IllegalStateException when called during a read transaction.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
    if (immutable) throwImmutable();
    nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}

/** Sorts the view in place on the given column in ascending order. */
public void sort(long columnIndex) {
    // Delegate to the two-argument overload so the immutability check and
    // native call live in exactly one place.
    sort(columnIndex, Order.ascending);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/**
 * Serializes the whole view to a JSON string (produced by the native layer).
 */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
/**
 * Human-readable dump of the view, capped at 500 rows.
 */
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
/**
 * Human-readable dump of the view.
 *
 * @param maxRows maximum number of rows to include
 */
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/**
 * Human-readable dump of a single row.
 *
 * @param rowIndex 0 based row index
 */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Called by every mutating method when the view was obtained inside a
// read transaction (immutable == true).
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the underlying native (C++) table view; presumably zeroed when
// the view is closed — confirm against close() in this class.
protected long nativePtr;
// True when the view belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Parent view this view was derived from, or null when created directly.
protected TableView tableView;
@Override
public long lookup(String value) {
    // TODO: implement
    // UnsupportedOperationException is the idiomatic type for an
    // unimplemented operation and extends RuntimeException, so callers
    // catching the old type still work.
    throw new UnsupportedOperationException("Not implemented yet.");
}

@Override
public long count(long columnIndex, String value) {
    // TODO: implement
    throw new UnsupportedOperationException("Not implemented yet.");
}
}
Right
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview as a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view doesn't copy data from the table, but merely holds a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table views. It is a raw level table view; users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
 * Creates a TableView wrapping an already created native TableView pointer.
 * This method is not supposed to be called by a user of this db. It is for
 * internal use only.
 *
 * (Note: earlier doc mentioned a Table parameter; this constructor takes
 * only the native pointer and the mutability flag.)
 *
 * @param nativePtr pointer to the native table view.
 * @param immutable true when created inside a read transaction; mutating
 *                  methods will then throw IllegalStateException.
 */
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
/**
 * Releases the native view when this object is garbage collected.
 * Safe because close() checks for an already-zeroed pointer; the finally
 * block guarantees super.finalize() runs even if close() throws.
 */
@Override
public void finalize() throws Throwable {
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Frees the native view. Idempotent: the pointer is zeroed after the first
// successful close, so repeated calls (e.g. from finalize) are no-ops.
// Synchronized to prevent a double free from concurrent callers.
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Returns the 0-based index of the column with the given name.
 *
 * @param name column name; a null name matches nothing
 * @return the index, -1 if not found
 */
public long getColumnIndex(String name) {
    // Guard: a null name can never match a column, so report "not found"
    // instead of throwing a NullPointerException from String.equals below.
    if (name == null) {
        return -1;
    }
    long columnCount = getColumnCount();
    for (long i = 0; i < columnCount; i++) {
        if (name.equals(getColumnName(i))) {
            return i;
        }
    }
    return -1; // not found
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex){
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}

// Parameter renamed from the misspelled "columnInde". This is safe: JNI binds
// native methods by name and type descriptor only, never by parameter name.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
*/
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (string) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
public long lowerBoundLong(long columnIndex, long value) {
    // UnsupportedOperationException is the idiomatic type for an
    // unimplemented operation; it extends RuntimeException, so callers
    // catching the old type still work.
    throw new UnsupportedOperationException("Not implemented yet");
}
public long upperBoundLong(long columnIndex, long value) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Returns a new view containing every row of this view whose integer cell
 * in the given column equals {@code value}.
 *
 * @param columnIndex 0-based index of an integer column
 * @param value the value to match
 * @return a TableView of all matching rows (inherits this view's mutability)
 */
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
/**
 * Returns a new view of every row whose boolean cell in the given column
 * equals {@code value}.
 */
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
/**
 * Returns a new view of every row whose float cell in the given column
 * equals {@code value}.
 */
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
/**
 * Returns a new view of every row whose double cell in the given column
 * equals {@code value}.
 */
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
/**
 * Returns a new view of every row whose date cell equals {@code date}.
 * The Date is converted to whole seconds (millis/1000) before matching,
 * so sub-second precision is dropped — same caveat as findFirstDate.
 */
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
/**
 * Returns a new view of every row whose string cell in the given column
 * equals {@code value}.
 */
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
 * Calculate the sum of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
 * Returns the maximum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
 * Returns the minimum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
/**
 * Returns the average of the values in an integer column.
 * (Behavior for an empty view is defined by the native layer — TODO confirm.)
 *
 * @param columnIndex column index (must be ColumnType.ColumnTypeInt)
 * @return the average value as a double
 */
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates — same contract as the integer versions, for
// ColumnType.ColumnTypeFloat columns.
/** Sum of a float column; widened to double by the native layer. */
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
/** Maximum value of a float column. */
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
/** Minimum value of a float column. */
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
/** Average of a float column. */
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates — same contract, for ColumnType.ColumnTypeDouble columns.
/** Sum of a double column. */
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
/** Maximum value of a double column. */
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
/** Minimum value of a double column. */
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
/** Average of a double column. */
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting

/** Sort direction used by {@link #sort(long, Order)}. */
public enum Order { ascending, descending };

/**
 * Sorts this view in place on the given column.
 * Deliberately skips the immutability check: sorting only reorders this
 * view's row references and never modifies the underlying table.
 */
public void sort(long columnIndex, Order order) {
    final boolean ascending = (order == Order.ascending);
    nativeSort(nativePtr, columnIndex, ascending);
}

/** Sorts this view in place on the given column, ascending. */
public void sort(long columnIndex) {
    // Delegate so both overloads share one code path.
    sort(columnIndex, Order.ascending);
}

protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/**
 * Serializes the whole view to a JSON string via the native layer.
 *
 * @return JSON representation of this view's rows
 */
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
/**
 * Human-readable dump of the view, truncated to 500 rows.
 * {@code @Override} added: this overrides Object.toString and should be
 * marked as such so the compiler catches signature drift.
 */
@Override
public String toString() {
    return nativeToString(nativePtr, 500);
}
/**
 * Human-readable dump of the view, truncated to {@code maxRows} rows.
 *
 * @param maxRows maximum number of rows to include
 */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/**
 * Human-readable dump of a single row of this view.
 *
 * @param rowIndex 0-based view row index
 */
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
/** Raised by every mutating method when this view is read-only. */
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Handle to the native (C++) TableView object; 0 once closed.
protected long nativePtr;
// True while inside a read transaction; mutating calls then throw.
protected boolean immutable = false;
// Parent view this one was derived from (null when created from a table).
protected TableView tableView;
/* @Override
public long lookup(String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}*/
/**
 * Counts the rows whose string cell in the given column equals
 * {@code value}. Declared by TableOrView; not implemented yet.
 *
 * @throws RuntimeException always — placeholder implementation.
 */
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
 * Creates a TableView wrapping an already-created native TableView.
 * Not supposed to be called by users of this db; internal use only.
 * (Javadoc fixed: the previous text documented a nonexistent "table" param.)
 *
 * @param nativePtr pointer to the native TableView object.
 * @param immutable true when created inside a read transaction; mutating
 *        calls will then throw IllegalStateException.
 */
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView derived from an existing Java TableView plus a
 * native TableView reference. Internal use only.
 *
 * @param tableView the parent view this one was derived from.
 * @param nativePtr pointer to the native TableView object.
 * @param immutable true when created inside a read transaction.
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
/**
 * Finalizer safety net: releases the native TableView if close() was never
 * called. {@code @Override} added — this overrides Object.finalize and
 * should be marked as such. NOTE(review): finalization makes release timing
 * GC-dependent; prefer calling close() deterministically.
 */
@Override
public void finalize() throws Throwable {
    try {
        close();
    } finally {
        super.finalize();
    }
}
/**
 * Frees the underlying native TableView. Idempotent: nativePtr is zeroed so
 * a second call is a no-op. Synchronized so an explicit close and the
 * finalizer thread cannot double-free the native object.
 */
private synchronized void close(){
    if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
    if (nativePtr == 0)
        return;
    nativeClose(nativePtr);
    nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Looks up a column by name.
 *
 * @param name column name to search for (compared with String.equals)
 * @return the 0-based column index, or -1 when no column has that name
 */
public long getColumnIndex(String name) {
    final long columns = getColumnCount();
    long index = 0;
    while (index < columns) {
        // name.equals(...) keeps the original null semantics: a null
        // native column name compares false rather than throwing.
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (date) cell.
 * The native layer stores whole seconds; the value is multiplied by 1000
 * to rebuild a millisecond-based java.util.Date, so sub-second precision
 * is always zero. NOTE(review): the setter divides with truncation toward
 * zero, so pre-1970 timestamps may round-trip off by one second — confirm.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativeViewPtr, long columnInde, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
*/
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
/*
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
/**
 * Removes the last row of this view (and the corresponding row of the
 * underlying table). Does nothing when the view is empty.
 *
 * @throws IllegalStateException when the view is immutable.
 */
public void removeLast() {
    if (immutable) throwImmutable();
    if (size() > 0) {
        nativeRemoveRow(nativePtr, size() - 1);
    }
}
// Search for first match
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
public enum Order { ascending, descending };
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
public void sort(long columnIndex) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
public String toString() {
return nativeToString(nativePtr, 500);
}
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
/* @Override
public long lookup(String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}*/
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
// FIXME(review): stray "MergeMethods" merge-tool marker left in the file.
// Everything below is another concatenated copy of the TableView class;
// the duplicate copies must be reconciled into a single class before this
// file can compile ("MergeMethods" preserved here for traceability).
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close() {
if (DEBUG)
System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
 * Get the value of the particular (integer) cell.
 *
 * @param columnIndex 0 based index value of the column.
 * @param rowIndex 0 based row value of the column.
 * @return value of the particular cell.
 */
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (boolean) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (float) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (double) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (date) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public Date getDate(long columnIndex, long rowIndex) {
// The native layer stores date-time values as seconds since the epoch;
// java.util.Date expects milliseconds, hence the * 1000.
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo ("columnInde"); JNI linkage is unaffected by
// parameter names, so this is purely a readability fix.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex 0 based index value of the cell column
 * @param rowIndex 0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
// NOTE(review): nativeGetBinary has no native declaration visible in this
// chunk — presumably declared elsewhere in the class; verify.
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
// Same cell data as above, returned as a Java byte array instead of a ByteBuffer.
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
// Returns the runtime type of the value stored in a mixed-typed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the value of a mixed-typed cell wrapped in a Mixed object.
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the subtable stored in a cell; the returned Table shares this
// view's immutability flag.
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable stored in a cell.
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable stored in a cell; rejected while immutable.
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
// All setters are rejected while the view is immutable (i.e. inside a read
// transaction); see throwImmutable().
/**
 * Sets the value for a particular (integer) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
 * Sets the value for a particular (boolean) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
 * Sets the value for a particular (float) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
 * Sets the value for a particular (double) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
 * Sets the value for a particular (date) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setDate(long columnIndex, long rowIndex, Date value) {
if (immutable)
throwImmutable();
// Date carries milliseconds; the native layer stores seconds, hence / 1000.
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime() / 1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
 * Sets the value for a particular (string) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value
 */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a particular (binary) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param data
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
// NOTE(review): nativeSetBinary has no native declaration visible in this
// chunk — presumably declared elsewhere in the class; verify.
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
// Byte-array variant of the binary setter.
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a particular (mixed typed) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param data
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
 * Add the value for to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value
 */
//!!!TODO: New
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
// Removes all rows from the view (and the underlying table); rejected while immutable.
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
 * Removes a particular row identified by the index from the tableview.
 * The corresponding row of the underlying table also get deleted.
 *
 * @param rowIndex the row index
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
/**
 * Removes the last row of the view (and the corresponding row of the
 * underlying table). Does nothing when the view is empty.
 */
@Override
public void removeLast() {
    if (immutable)
        throwImmutable();
    if (isEmpty()) {
        return;
    }
    nativeRemoveRow(nativePtr, size() - 1);
}
// Search for first match
// Each findFirst* returns the row index of the first matching row.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
// Date milliseconds are converted to the native layer's seconds.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
/**
 * Not implemented for views.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long lowerBoundLong(long columnIndex, long value) {
    // UnsupportedOperationException is the idiomatic type for an unimplemented
    // operation; it subclasses RuntimeException, so existing callers that
    // catch RuntimeException are unaffected.
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Not implemented for views.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long upperBoundLong(long columnIndex, long value) {
    throw new UnsupportedOperationException("Not implemented yet");
}
// Each findAll* returns a new TableView containing every matching row; the
// result inherits this view's immutability flag.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Date milliseconds are converted to the native layer's seconds.
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
// BUG FIX: these methods previously called nativeSum/nativeMaximum/
// nativeMinimum/nativeAverage, none of which are declared in this class,
// while the declared native methods below (nativeSumInt etc.) were unused.
// The calls now target the declared natives.
/**
 * Calculate the sum of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
/**
 * Returns the maximum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
/**
 * Returns the minimum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
/**
 * Returns the average of the values in an integer column.
 *
 * @param columnIndex column index
 * @return the average value
 */
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
// Sum of a float column; the column must be of type ColumnTypeFloat.
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
// Maximum of a float column.
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
// Minimum of a float column.
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
// Average of a float column.
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
// Sum of a double column; the column must be of type ColumnTypeDouble.
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
// Maximum of a double column.
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
// Minimum of a double column.
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
// Average of a double column.
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
/**
 * Sort order used by the sort methods of this view.
 */
public enum Order {
    // Empty constructor parentheses and two stray empty declarations (";")
    // after the enum removed — pure syntax noise, no behavioral change.
    ascending, descending
}
// Sorts the view by the given column in the given order.
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
// Sorts the view by the given column in ascending order.
public void sort(long columnIndex) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
// Allocates a native TableView for the given table; returns its pointer.
protected native long createNativeTableView(Table table, long nativeTablePtr);
// Serializes the view's rows to a JSON string via the native layer.
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
// Human-readable dump, capped at 500 rows.
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
// Human-readable dump, capped at the given number of rows.
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
// Human-readable dump of a single row.
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Raised by all mutating methods when the view was opened in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView; 0 once closed.
protected long nativePtr;
// True when the view belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Parent view this one was derived from, or null.
protected TableView tableView;
// Not implemented yet for views.
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview as a subset of a table: it contains at most as many
* rows as the table it is derived from. A table view is often the
* result of a query.
*
* The view doesn't copy data from the table; it merely holds a list of
* row references into the original table, which owns the real data.
*
* This class serves as the base class of all table views. It is a low-level
* table view; users are not encouraged to use it directly. Instead, users
* are advised to use the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
// When true, close() logs the native pointer being released.
protected boolean DEBUG = false;
/**
 * Creates a TableView wrapping an already created native TableView
 * reference. This method is not supposed to be called by a user of this
 * db. It is for internal use only.
 *
 * (Note: the original javadoc documented a "table" parameter that this
 * constructor does not take; the parent view is set to null here.)
 *
 * @param nativePtr pointer to the native TableView.
 * @param immutable true when the view belongs to a read transaction.
 */
protected TableView(long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView with already created Java TableView Object and a
 * native native TableView object reference. The method is not supposed to
 * be called by the user of the db. The method is for internal use only.
 *
 * @param tableView A table view.
 * @param nativePtr pointer to table.
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// Finalizer safety net: releases the native view if close() was never called.
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native TableView. Idempotent: nativePtr is zeroed after the
// first call so a second call (e.g. from the finalizer) is a no-op.
private synchronized void close() {
if (DEBUG)
System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Resolves a column name to its 0-based index.
 *
 * @param name column name
 * @return the matching column's index, or -1 when no column matches
 */
@Override
public long getColumnIndex(String name) {
    final long total = getColumnCount();
    for (long col = 0; col < total; col++) {
        if (name.equals(getColumnName(col))) {
            return col;
        }
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo ("columnInde"); JNI linkage is unaffected by
// parameter names, so this is purely a readability fix.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value) {
if (immutable)
throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime() / 1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
if (immutable)
throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
// findAllX: each returns a new sub-view containing every row of this view
// whose cell in the given column matches the value.
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Epoch-seconds conversion, consistent with the other Date-based APIs here.
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer aggregates
//
/**
 * Calculate the sum of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public long sumInt(long columnIndex) {
// FIX: was nativeSum(...), which has no declaration in this class; the JNI
// entry points were renamed to the *Int variants declared below.
return nativeSumInt(nativePtr, columnIndex);
}
/**
 * Returns the maximum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public long maximumInt(long columnIndex) {
// FIX: was nativeMaximum(...) — see sumInt.
return nativeMaximumInt(nativePtr, columnIndex);
}
/**
 * Returns the minimum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public long minimumInt(long columnIndex) {
// FIX: was nativeMinimum(...) — see sumInt.
return nativeMinimumInt(nativePtr, columnIndex);
}
/**
 * Returns the average of the values in an integer column.
 *
 * @param columnIndex column index
 * @return the average value
 */
public double averageInt(long columnIndex) {
// FIX: was nativeAverage(...) — see sumInt.
return nativeAverageInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates (column must be of float type).
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates (column must be of double type).
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
/** Sort order used by {@link #sort(long, Order)}. */
public enum Order {
// FIX: dropped the redundant empty "()" constructor invocations and the two
// stray empty member declarations (";;") that followed the enum.
ascending, descending
}
/**
 * Sorts this view by the given column.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table —
// it only reorders this view's row references.
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
/** Sorts this view by the given column, ascending. */
public void sort(long columnIndex) {
// Don't check for immutable. Sorting does not modify original table.
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/** Serializes the view's rows as a JSON array (rendered natively). */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
// Render at most 500 rows by default.
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Helper thrown by mutating methods when this view was obtained in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView (0 once closed).
protected long nativePtr;
// True when this view belongs to a read transaction; mutators must refuse to run.
protected boolean immutable = false;
// Parent view this view was derived from, or null.
protected TableView tableView;
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
// KeepBothMethods — merge/concatenation artifact: this stray token is not valid
// Java and breaks compilation; a second full copy of TableView.java follows it.
// Remove the marker and deduplicate the file before building.
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view doesn't copy data from the table; it merely holds a list of
* row references into the original table, which contains the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
// Enables close() tracing to stderr.
protected boolean DEBUG = false;
/**
 * Creates a TableView wrapping an already-created native TableView.
 * Not intended to be called by users of this db; internal use only.
 *
 * @param nativePtr pointer to the native view.
 * @param immutable true when created inside a read transaction.
 */
protected TableView(long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView derived from an existing Java TableView plus a
 * native TableView reference. Internal use only.
 *
 * @param tableView the parent table view.
 * @param nativePtr pointer to the native view.
 * @param immutable true when created inside a read transaction.
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// NOTE(review): finalization is the safety net that releases the native view
// if close() is never reached; finalize() is deprecated in modern Java —
// consider an explicit close()/Cleaner migration. Kept as-is here.
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native view exactly once; subsequent calls are no-ops
// (nativePtr is zeroed as the "already closed" sentinel).
private synchronized void close() {
if (DEBUG)
System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
 * Checks whether this view is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Get the number of entries/rows of this view.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
 * Returns the number of columns in the view.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Looks up a column by name.
 *
 * @param name column name
 * @return the 0-based column index, or -1 when no column has that name
 */
@Override
public long getColumnIndex(String name) {
final long columnCount = getColumnCount();
long index = 0;
while (index < columnCount) {
// name.equals(...) keeps the original null-handling: a null name throws.
if (name.equals(getColumnName(index))) {
return index;
}
index++;
}
return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
// The native layer reports an int code; map it to the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
 * Get the value of the particular (integer) cell.
 *
 * @param columnIndex 0 based index value of the column.
 * @param rowIndex 0 based row value of the column.
 * @return value of the particular cell.
 */
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (boolean) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (float) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (double) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of the particular (date) cell.
 *
 * @param columnIndex 0 based index value of the cell column.
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell.
 */
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native stores epoch seconds; Date wants millis, hence *1000.
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// FIX: second parameter was misspelled "columnInde". JNI linkage ignores
// parameter names, so correcting it is behavior-neutral.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex 0 based index value of the cell column
 * @param rowIndex 0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
// nativeGetBinary is declared elsewhere in this class.
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
/** Binary cell as a byte[] copy rather than a ByteBuffer. */
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
/** Runtime type of a mixed cell, mapped from the native int code. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
/** Subtable stored in the given cell, wrapped in a Java Table. */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values. Each setter first rejects the call if this view
// was obtained inside a read transaction (immutable), then writes through to
// the underlying table via JNI.
/**
 * Sets the value for a particular (integer) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the value to store
 */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
 * Sets the value for a particular (boolean) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the value to store
 */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
 * Sets the value for a particular (float) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the value to store
 */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
 * Sets the value for a particular (double) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the value to store
 */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
 * Sets the value for a particular (date) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the date to store (stored as epoch seconds; millis truncated)
 */
@Override
public void setDate(long columnIndex, long rowIndex, Date value) {
if (immutable)
throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime() / 1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
 * Sets the value for a particular (string) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the string to store
 */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a particular (binary) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param data the bytes to store
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
// nativeSetBinary is declared elsewhere in this class.
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
/** byte[] variant of the binary setter. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a particular (mixed typed) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param data the mixed value to store
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
 * Adds the given value to every cell in the column.
 *
 * @param columnIndex column index of the cell
 * @param value the amount to add to each cell
 */
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
/**
 * Adds the given value to every cell in the column.
 *
 * NOTE(review): identical body to addLong above — presumably one is the
 * older name kept for compatibility; consider delegating one to the other.
 *
 * @param columnIndex column index of the cell
 * @param value the amount to add to each cell
 */
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting. These write through to the underlying table.
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
 * Removes a particular row identified by the index from the tableview.
 * The corresponding row of the underlying table also gets deleted.
 *
 * @param rowIndex the row index
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
/** Removes the last row of the view, if any (no-op when empty). */
@Override
public void removeLast() {
if (immutable)
throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match: returns the row index of the first matching row
// (not-found sentinel defined by the native layer — presumably -1; TODO confirm).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Dates cross JNI as epoch seconds, hence /1000 of Date's millis.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches: each findAllX wraps the native result in a new
// sub-view that inherits this view's immutability flag.
@Override
public long lowerBoundLong(long columnIndex, long value) {
// TODO: not implemented for views yet.
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
// TODO: not implemented for views yet.
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Epoch-seconds conversion, consistent with the other Date-based APIs here.
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer aggregates (legacy names, kept for interface compatibility)
//
/**
 * Calculate the sum of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
@Override
public long sum(long columnIndex) {
// FIX: was nativeSum(...), which is not declared anywhere in this class
// (compile error); the JNI entry points were renamed to the *Int variants
// declared further down.
return nativeSumInt(nativePtr, columnIndex);
}
/**
 * Returns the maximum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
@Override
public long maximum(long columnIndex) {
// FIX: was nativeMaximum(...) — see sum.
return nativeMaximumInt(nativePtr, columnIndex);
}
/**
 * Returns the minimum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
@Override
public long minimum(long columnIndex) {
// FIX: was nativeMinimum(...) — see sum.
return nativeMinimumInt(nativePtr, columnIndex);
}
/**
 * Returns the average of the values in an integer column.
 *
 * @param columnIndex column index
 * @return the average value
 */
@Override
public double average(long columnIndex) {
// FIX: was nativeAverage(...) — see sum.
return nativeAverageInt(nativePtr, columnIndex);
}
//
// Integer aggregates (new *Int names backed by the renamed JNI entry points)
//
/**
 * Calculate the sum of the values in a particular column of this
 * tableview.
 *
 * Note: the type of the column marked by the columnIndex has to be of
 * type ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the sum of the values in the column
 */
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
 * Returns the maximum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the maximum value
 */
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
 * Returns the minimum value of the cells in a column.
 *
 * Note: for this method to work the Type of the column
 * identified by the columnIndex has to be ColumnType.ColumnTypeInt.
 *
 * @param columnIndex column index
 * @return the minimum value
 */
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
/** Average of the values in an integer column. */
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates (column must be of float type).
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates (column must be of double type).
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
/** Sort order used by {@link #sort(long, Order)}. */
public enum Order {
// FIX: dropped the redundant empty "()" constructor invocations and the two
// stray empty member declarations (";;") that followed the enum.
ascending, descending
}
/**
 * Sorts this view by the given column.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table —
// it only reorders this view's row references.
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
/** Sorts this view by the given column, ascending. */
public void sort(long columnIndex) {
// Don't check for immutable. Sorting does not modify original table.
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
/** Serializes the view's rows as a JSON array (rendered natively). */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
// Render at most 500 rows by default.
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Helper thrown by mutating methods when this view was obtained in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
// Pointer to the native TableView (0 once closed).
protected long nativePtr;
// True when this view belongs to a read transaction; mutators must refuse to run.
protected boolean immutable = false;
// Parent view this view was derived from, or null.
protected TableView tableView;
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view doesn't copy data from the table; it merely holds a list of
* row references into the original table, which contains the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
// NOTE(review): this is a third concatenated copy of TableView.java — two
// top-level classes with the same name cannot coexist; deduplicate the file.
// Enables close() tracing to stderr.
protected boolean DEBUG = false;
/**
 * Creates a TableView wrapping an already-created native TableView.
 * Not intended to be called by users of this db; internal use only.
 *
 * @param nativePtr pointer to the native view.
 * @param immutable true when created inside a read transaction.
 */
protected TableView(long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
 * Creates a TableView derived from an existing Java TableView plus a
 * native TableView reference. Internal use only.
 *
 * @param tableView the parent table view.
 * @param nativePtr pointer to the native view.
 * @param immutable true when created inside a read transaction.
 */
protected TableView(TableView tableView, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// Safety net: releases the native view if close() was never triggered.
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Releases the native view exactly once; nativePtr == 0 marks "already closed".
private synchronized void close() {
if (DEBUG)
System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
 * Checks whether this view is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Get the number of entries/rows of this view.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
 * Returns the number of columns in the view.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
// The native layer reports an int code; map it to the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex) {
// The native layer stores date-times as epoch seconds; java.util.Date
// expects milliseconds, hence the * 1000 (mirrors the / 1000 in setDate).
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo ("columnInde" -> "columnIndex"); safe because
// JNI resolves native methods by name + type descriptor, never by
// parameter names.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value) {
if (immutable)
throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime() / 1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
    if (immutable)
        throwImmutable();
    // Read the row count once and delete the final row, if any.
    final long rows = size();
    if (rows > 0) {
        nativeRemoveRow(nativePtr, rows - 1);
    }
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
/**
 * Sort direction for {@code sort(long, Order)}.
 *
 * Cleanup: removed the two stray member-level semicolons that followed the
 * enum and the redundant empty constructor-call parentheses on the
 * constants; neither changed semantics, both were noise.
 */
public enum Order {
    ascending, descending
}
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
public void sort(long columnIndex) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, true);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for every mutating method invoked while this view is
// immutable (i.e. during a read transaction).
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
/* ===== Second (merge-resolved) copy of TableView.java begins below ===== */
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false;
//true;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo ("columnInde" -> "columnIndex"); safe because
// JNI resolves native methods by name + type descriptor, never by
// parameter names.
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
<<<<<<< MINE
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
=======
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
>>>>>>> YOURS
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
<<<<<<< MINE
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
}
=======
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
>>>>>>> YOURS
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
<<<<<<< MINE
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
}
=======
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
>>>>>>> YOURS
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
<<<<<<< MINE
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
}
=======
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
=======
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
>>>>>>> YOURS
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
/** Sort direction for {@link #sort(long, Order)}. */
public enum Order { ascending, descending }

/**
 * Sorts the view by the given column in the given order.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
    // No immutability check: sorting reorders only this view's row
    // references, it does not modify the underlying table.
    nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}

/**
 * Sorts the view by the given column in ascending order.
 *
 * @param columnIndex column to sort by
 */
public void sort(long columnIndex) {
    // No immutability check: sorting does not modify the underlying table.
    nativeSort(nativePtr, columnIndex, true);
}

protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false;
//true;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// Releases the native TableView when this object is garbage collected.
// NOTE(review): finalizers run at unpredictable times (or never); callers
// with a deterministic lifecycle should rely on an explicit close path.
@Override
public void finalize() throws Throwable {
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Releases the underlying native TableView and zeroes the pointer so a
// repeated close (e.g. an explicit call followed by finalize()) is a no-op.
// Synchronized so the finalizer thread and a user thread cannot both free
// the same native pointer.
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
/**
 * Looks up a column by its name.
 *
 * @param name column name
 * @return the 0-based column index, or -1 if no column has that name
 */
@Override
public long getColumnIndex(String name) {
    final long count = getColumnCount();
    long index = 0;
    while (index < count) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex 0 based index value of the column
 * @param rowIndex 0 based index of the row
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex){
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
// Fixed parameter-name typo: "columnInde" -> "columnIndex".
protected native String nativeGetString(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
<<<<<<< MINE
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
=======
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
>>>>>>> YOURS
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
if (!isEmpty()) {
nativeRemoveRow(nativePtr, size() - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
<<<<<<< MINE
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
}
=======
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
}
>>>>>>> YOURS
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
<<<<<<< MINE
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
}
=======
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
}
>>>>>>> YOURS
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
<<<<<<< MINE
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
}
=======
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
}
>>>>>>> YOURS
<<<<<<< MINE
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
=======
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
>>>>>>> YOURS
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
/** Sort direction for {@link #sort(long, Order)}. */
public enum Order { ascending, descending }

/**
 * Sorts the view by the given column in the given order.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
    // No immutability check: sorting reorders only this view's row
    // references, it does not modify the underlying table.
    nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}

/**
 * Sorts the view by the given column in ascending order.
 *
 * @param columnIndex column to sort by
 */
public void sort(long columnIndex) {
    // No immutability check: sorting does not modify the underlying table.
    nativeSort(nativePtr, columnIndex, true);
}

protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
// ===== "Unstructured" section marker (non-Java artifact from the merge-conflict dataset; kept as a comment so it cannot be parsed as code) =====
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
// Finalizer safety-net: releases the native TableView if close() was never
// called explicitly. NOTE(review): relies on GC finalization, which is
// non-deterministic — callers should not depend on timely native cleanup.
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
// Frees the underlying native TableView exactly once; nativePtr == 0 marks
// the view as already closed. synchronized so a racing finalizer and an
// explicit close cannot double-free the native object.
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
// Linear scan over the column names; the view has no name index.
final long columns = getColumnCount();
long index = 0;
while (index < columns) {
if (name.equals(getColumnName(index))) {
return index;
}
index++;
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativeViewPtr, long columnInde, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
<<<<<<< MINE
@Override
=======
/*
>>>>>>> YOURS
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
<<<<<<< MINE
@Override
=======
*/
>>>>>>> YOURS
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
* Sets the value for a particular (date) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (sting) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
<<<<<<< MINE
@Override
=======
/*
>>>>>>> YOURS
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
<<<<<<< MINE
@Override
public void addLong(long columnIndex, long value) {
=======
public void adjust(long columnIndex, long value) {
>>>>>>> YOURS
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
/**
 * Removes the last row of the view; the corresponding row of the
 * underlying table is deleted as well. Does nothing when the view is empty.
 */
@Override
public void removeLast() {
if (immutable) throwImmutable();
long rows = size();
if (rows > 0) {
nativeRemoveRow(nativePtr, rows - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
<<<<<<< MINE
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
=======
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
<<<<<<< MINE
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
=======
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
<<<<<<< MINE
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
=======
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
<<<<<<< MINE
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
=======
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
/** Sort order used by {@link #sort(long, Order)}. */
public enum Order { ascending, descending };
/**
 * Sorts the view by the given column. No immutability check is performed:
 * sorting only rearranges the view's row references, never the table data.
 *
 * @param columnIndex column to sort by
 * @param order ascending or descending
 */
public void sort(long columnIndex, Order order) {
boolean ascending = (order == Order.ascending);
nativeSort(nativePtr, columnIndex, ascending);
}
/**
 * Sorts the view by the given column in ascending order.
 *
 * @param columnIndex column to sort by
 */
public void sort(long columnIndex) {
sort(columnIndex, Order.ascending);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
/* @Override
public long lookup(String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}*/
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
/**
* This class represents a view of a particular table. We can think of
* a tableview is a subset of a table. It contains less than or
* equal to the number of entries of a table. A table view is often a
* result of a query.
*
* The view don't copy data from the table, but merely has a list of
* row-references into the original table with the real data.
*
* The class serves as a base class of all table view. It is a raw level table view, users
* are not encouraged to use this class. Instead users are advised to use
* the generated subclass version of the table.
*
* Let us assume we are going to keep track of a table to store the employees
* of a company or an organization.
*
* Following is a small example how to use the autogenerated class of the
* tableview. For that purpose we will first define the spec of an employee
* entity
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* Once this class is compiled along with TightDB annotation processor
* this will produce following classes.
*
* 1. Employee
* 2. EmployeeTable
* 3. EmployeeView.
*
* In this class context our interest is EmployeeView which will be inherited from
* this class.
*
* The generated class will have more specialized method to do operations on TableView.
*
*/
public class TableView implements TableOrView {
protected boolean DEBUG = false; //true;
/**
* Creates a TableViewBase with a Java Object Table and a already created
* native reference to a TableView. This method is not supposed to be
* called by a user of this db. It is for internal use only.
*
* @param table The table.
* @param nativePtr pointer to table.
*/
protected TableView(long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = null;
this.nativePtr = nativePtr;
}
/**
* Creates a TableView with already created Java TableView Object and a
* native native TableView object reference. The method is not supposed to
* be called by the user of the db. The method is for internal use only.
*
* @param tableView A table view.
* @param nativePtr pointer to table.
*/
protected TableView(TableView tableView, long nativePtr, boolean immutable){
this.immutable = immutable;
this.tableView = tableView;
this.nativePtr = nativePtr;
}
public void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private synchronized void close(){
if (DEBUG) System.err.println("==== TableView CLOSE, ptr= " + nativePtr);
if (nativePtr == 0)
return;
nativeClose(nativePtr);
nativePtr = 0;
}
protected native void nativeClose(long nativeViewPtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty(){
return size() == 0;
}
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeViewPtr);
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeViewPtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeViewPtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeViewPtr, long columnIndex);
/**
* Get the value of the particular (integer) cell.
*
* @param columnIndex 0 based index value of the column.
* @param rowIndex 0 based row value of the column.
* @return value of the particular cell.
*/
@Override
public long getLong(long columnIndex, long rowIndex){
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (boolean) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public boolean getBoolean(long columnIndex, long rowIndex){
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (float) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public float getFloat(long columnIndex, long rowIndex){
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (double) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public double getDouble(long columnIndex, long rowIndex){
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of the particular (date) cell.
*
* @param columnIndex 0 based index value of the cell column.
* @param rowIndex 0 based index of the row.
* @return value of the particular cell.
*/
@Override
public Date getDate(long columnIndex, long rowIndex){
return new Date(nativeGetDateTimeValue(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTimeValue(long nativeViewPtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex 0 based index value of the column
* @param rowIndex 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex){
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativeViewPtr, long columnInde, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex 0 based index value of the cell column
* @param rowIndex 0 based index value of the cell row
* @return value of the particular cell.
*/
<<<<<<< MINE
@Override
=======
/*
>>>>>>> YOURS
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex){
return nativeGetBinary(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetBinary(long nativeViewPtr, long columnIndex, long rowIndex);
<<<<<<< MINE
@Override
=======
*/
>>>>>>> YOURS
public byte[] getBinaryByteArray(long columnIndex, long rowIndex){
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
//TODO: NEW!!!
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex){
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
protected native Mixed nativeGetMixed(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public Table getSubTable(long columnIndex, long rowIndex){
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeViewPtr, long columnIndex, long rowIndex);
@Override
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Methods for setting values.
/**
* Sets the value for a particular (integer) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setLong(long columnIndex, long rowIndex, long value){
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeViewPtr, long columnIndex, long rowIndex, long value);
/**
* Sets the value for a particular (boolean) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value){
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeViewPtr, long columnIndex, long rowIndex, boolean value);
/**
* Sets the value for a particular (float) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setFloat(long columnIndex, long rowIndex, float value){
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeViewPtr, long columnIndex, long rowIndex, float value);
/**
* Sets the value for a particular (double) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setDouble(long columnIndex, long rowIndex, double value){
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeViewPtr, long columnIndex, long rowIndex, double value);
/**
 * Sets the value for a particular (date) cell.
 *
 * Note: the value is stored natively with one-second precision; the
 * millisecond part of {@code value.getTime()} is truncated (integer
 * division, so pre-1970 dates round toward zero).
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param value the Date to store (sub-second precision is lost)
 */
@Override
public void setDate(long columnIndex, long rowIndex, Date value){
if (immutable) throwImmutable();
// getTime() is milliseconds since the epoch; the core stores seconds.
nativeSetDateTimeValue(nativePtr, columnIndex, rowIndex, value.getTime()/1000);
}
protected native void nativeSetDateTimeValue(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
/**
* Sets the value for a particular (string) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param value
*/
@Override
public void setString(long columnIndex, long rowIndex, String value){
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeViewPtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a particular (binary) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
// NOTE(merge): resolved conflict. The closing "*/" below only pairs with the
// YOURS-side "/*", so the ByteBuffer-based binary setter must remain
// commented out; callers should use setBinaryByteArray() instead.
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data){
if (immutable) throwImmutable();
nativeSetBinary(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetBinary(long nativeViewPtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data){
if (immutable) throwImmutable();
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a particular (mixed typed) cell.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data){
if (immutable) throwImmutable();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeViewPtr, long columnIndex, long rowIndex, Mixed value);
/**
* Adds the given value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New
<<<<<<< MINE
@Override
public void addLong(long columnIndex, long value) {
=======
public void adjust(long columnIndex, long value) {
>>>>>>> YOURS
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Methods for deleting.
@Override
public void clear(){
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeViewPtr);
/**
* Removes a particular row identified by the index from the tableview.
* The corresponding row of the underlying table also get deleted.
*
* @param rowIndex the row index
*/
@Override
public void remove(long rowIndex){
if (immutable) throwImmutable();
nativeRemoveRow(nativePtr, rowIndex);
}
protected native void nativeRemoveRow(long nativeViewPtr, long rowIndex);
/**
 * Removes the last row of the view (and the corresponding row of the
 * underlying table). Does nothing when the view is empty.
 */
@Override
public void removeLast() {
if (immutable) throwImmutable();
final long rows = size();
if (rows > 0) {
nativeRemoveRow(nativePtr, rows - 1);
}
}
// Search for first match
@Override
public long findFirstLong(long columnIndex, long value){
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTableViewPtr, long columnIndex, long value);
//!!!TODO: New
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime()/1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value){
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativePtr, long columnIndex, String value);
// Search for all matches
// TODO..
@Override
public long lowerBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public long upperBoundLong(long columnIndex, long value) {
throw new RuntimeException("Not implemented yet");
}
@Override
public TableView findAllLong(long columnIndex, long value){
return new TableView(this, nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
//!!!TODO: New
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(this, nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
//!!!TODO: New
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(this, nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
//!!!TODO: New
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(this, nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
//!!!TODO: New
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(this, nativeFindAllDate(nativePtr, columnIndex, date.getTime()/1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value){
return new TableView(this, nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
//
// Integer Aggregates
//
/**
* Calculate the sum of the values in a particular column of this
* tableview.
*
* Note: the type of the column marked by the columnIndex has to be of
* type ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the sum of the values in the column
*/
<<<<<<< MINE
@Override
public long sum(long columnIndex){
return nativeSum(nativePtr, columnIndex);
=======
public long sumInt(long columnIndex){
return nativeSumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeSumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the maximum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the maximum value
*/
<<<<<<< MINE
@Override
public long maximum(long columnIndex){
return nativeMaximum(nativePtr, columnIndex);
=======
public long maximumInt(long columnIndex){
return nativeMaximumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMaximumInt(long nativeViewPtr, long columnIndex);
/**
* Returns the minimum value of the cells in a column.
*
* Note: for this method to work the Type of the column
* identified by the columnIndex has to be ColumnType.ColumnTypeInt.
*
* @param columnIndex column index
* @return the minimum value
*/
<<<<<<< MINE
@Override
public long minimum(long columnIndex){
return nativeMinimum(nativePtr, columnIndex);
=======
public long minimumInt(long columnIndex){
return nativeMinimumInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native long nativeMinimumInt(long nativeViewPtr, long columnIndex);
<<<<<<< MINE
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
=======
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
>>>>>>> YOURS
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Float aggregates
@Override
public double sumFloat(long columnIndex){
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativeViewPtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex){
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativeViewPtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex){
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativeViewPtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Double aggregates
@Override
public double sumDouble(long columnIndex){
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativeViewPtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex){
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativeViewPtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex){
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativeViewPtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Sorting
public enum Order { ascending, descending };
public void sort(long columnIndex, Order order) {
// Don't check for immutable. Sorting does not modify original table
nativeSort(nativePtr, columnIndex, (order == Order.ascending));
}
/**
 * Sorts the view by the given column in ascending order.
 * Sorting does not modify the underlying table, so no immutability check.
 */
public void sort(long columnIndex) {
sort(columnIndex, Order.ascending);
}
protected native void nativeSort(long nativeTableViewPtr, long columnIndex, boolean ascending);
protected native long createNativeTableView(Table table, long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeViewPtr);
@Override
public String toString() {
return nativeToString(nativePtr, 500);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
protected long nativePtr;
protected boolean immutable = false;
protected TableView tableView;
/* @Override
public long lookup(String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}*/
@Override
public long count(long columnIndex, String value) {
// TODO: implement
throw new RuntimeException("Not implemented yet.");
}
}
// ==== Diff Result: no diff ====
// ==== Case 85 - java_realmjava.rev_528df_698a7..Table.java (Base) ====
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table handle and zeroes nativePtr so repeated calls
// are no-ops. Guarded by the process-wide CloseMutex so an explicit close()
// and the finalizer cannot race on the same pointer.
private void close() {
synchronized (CloseMutex.getInstance()) {
// Already closed (or the native allocation never succeeded): nothing to do.
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Mark as closed so a later close()/finalize() cannot double-free.
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Structural equality: delegates to the native core comparison, so two
 * Table instances are equal when their underlying native tables compare
 * equal. Works for generated typed tables too, since they all subclass
 * Table (hence the instanceof check rather than a class comparison).
 *
 * NOTE(review): equals() is overridden without a matching hashCode(), so
 * equal Tables may hash differently - confirm whether Table instances are
 * ever used as hash keys, and whether the core exposes a content hash.
 */
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names that exceed the core's 63-character limit.
private void verifyColumnName(String name) {
final boolean tooLong = name.length() > 63;
if (tooLong) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
public TableDefinition getSubTableDefinition(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
public void removeColumn(long columnIndex)
{
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param columnType
* data type of the column @see <code>ColumnType</code>
* @param columnName
* name of the column. Duplicate column name is not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
/**
 * Checks whether this table holds no rows.
 *
 * @return true if empty, otherwise false.
 */
public boolean isEmpty() {
final long rowCount = size();
return rowCount == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
// Linear scan over the column names; returns the first exact match, -1 if
// no column carries the given name.
public long getColumnIndex(String name) {
final long columns = getColumnCount();
for (long col = 0; col < columns; col++) {
if (name.equals(getColumnName(col))) {
return col;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
insertDone();
}
// Recursively fills a freshly inserted subtable cell. 'value' is expected
// to be an Object[] of rows, each row itself an Object[] of column values
// (a ClassCastException is raised otherwise - presumably callers guarantee
// the shape; confirm against insert()/insertSubTable()). A null value
// leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.insert(i, (Object[])rowArr);
}
}
}
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the binary cell at (columnIndex, rowIndex) as a byte array. */
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
/** Returns the value of a mixed-typed cell wrapped in a {@link Mixed}. */
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the value stored in a mixed-typed cell. */
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the number of rows in the subtable at the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable at the given cell. */
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter first rejects the call if the table handle is immutable
// (i.e. obtained inside a read transaction), then forwards to the JNI layer.
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Dates are stored natively with second precision; sub-second precision is dropped.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
/** Sets the value of a (binary) cell from a byte array. */
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/** Creates a search index on the given column. Only string columns are supported. */
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the given column has a search index. */
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate forwards to the JNI layer; the column must have the matching
// element type (integer, float, or double respectively).
// Integers
/** Sum of all values in an integer column. */
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
/** Maximum value in an integer column. */
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
/** Minimum value in an integer column. */
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
// Fixed parameter-name typo: "columnnIndex" -> "columnIndex" (names in native
// declarations are documentation only; the JNI binding is by signature).
protected native long nativeMinimum(long nativePtr, long columnIndex);
/** Average of all values in an integer column. */
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
/** Sum of all values in a float column. */
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Maximum value in a float column. */
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Minimum value in a float column. */
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
/** Average of all values in a float column. */
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
/** Sum of all values in a double column. */
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Maximum value in a double column. */
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Minimum value in a double column. */
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
/** Average of all values in a double column. */
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Overloads returning the number of rows whose cell in the given column equals 'value'.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Starts a new query on this table. */
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match.
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
public long findFirstDate(long columnIndex, Date date) {
// Compared with second precision, matching how dates are stored.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView containing every matching row.
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on a sorted integer column.
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/** Returns a view containing one row per distinct value in the given column. */
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
/** Asks the native layer to optimize the table's internal storage. */
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the whole table to a JSON string. */
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/**
 * Returns a human-readable dump of the whole table (all rows).
 * Added the missing {@code @Override} — this overrides {@link Object#toString()}.
 */
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
/**
 * Returns a human-readable dump of at most {@code maxRows} rows.
 *
 * @param maxRows maximum number of rows to include; INFINITE (-1) for all rows
 */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Returns a human-readable dump of a single row. */
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by all mutating methods when the handle is read-only.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
TODO: Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically
* inherit from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Handle to the underlying native (C++) table object; 0 once closed.
protected long nativePtr;
// True when this handle was obtained inside a read transaction; all mutating
// methods then throw via throwImmutable().
protected boolean immutable = false;
// test:
// Debug-only bookkeeping: per-instance id and a global live-instance counter.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Load the native library before any native method can be called.
TightDB.loadLibrary();
}
/**
 * Creates an empty table and allocates its native (C++) counterpart.
 * Columns may be registered on the new table; registering is only allowed
 * while the table is empty. Generated typed Table classes wrap this class
 * and never need to touch the native functions directly.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
    nativePtr = createNative();
    if (nativePtr == 0) {
        throw new OutOfMemoryError("Out of native memory.");
    }
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
// Wraps an already-allocated native table (e.g. a subtable or a table obtained
// from a group/transaction). 'parent' keeps the owner reachable for GC purposes.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Safety net: releases the native table if close() was never called explicitly.
// NOTE(review): Object.finalize() is deprecated in modern JDKs; migrating to a
// Cleaner/PhantomReference would be a design change beyond this class — TODO confirm.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table exactly once. Serialized on a global mutex so that
// concurrent finalizers / explicit closes cannot double-free; nativePtr == 0
// marks an already-closed handle and makes close() idempotent.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/**
 * Checks whether this Table handle is still valid.
 * Whenever a table or subtable is changed/updated, all of its subtables are
 * invalidated; after that the only method that may be called on them is
 * {@code isValid()} — anything else throws.
 *
 * @return true if the handle is open and the native table reports it as valid
 */
public boolean isValid(){
    // A closed handle (nativePtr == 0) is never valid; otherwise ask the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Content-based equality delegated to the native layer. Uses {@code instanceof}
 * (not a class check) so generated typed-table subclasses compare correctly;
 * {@code instanceof} also returns false for null, covering the null case.
 */
@Override
public boolean equals(Object other) {
    if (other == this) {
        return true;
    }
    if (!(other instanceof Table)) {
        return false; // also handles other == null
    }
    Table that = (Table) other;
    return nativeEquals(nativePtr, that.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null (previously an unexplained
 *         NPE was thrown from {@code name.length()}; now the message says why)
 * @throws IllegalArgumentException if the name exceeds the native 63-character limit
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a definition handle for the subtable column at {@code columnIndex},
 * allowing its schema to be modified. Only valid on a root table.
 *
 * @throws UnsupportedOperationException if called on a subtable
 */
public TableDefinition getSubTableDefinition(long columnIndex) {
    // Idiom fix: use '!' instead of comparing a boolean to 'false'.
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    // Path from the root to the subtable column (single hop).
    long[] newPath = new long[] { columnIndex };
    return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
public void removeColumn(long columnIndex)
{
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates the table's schema from a TableSpec structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification (column types and names) to apply to this table
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the table's schema as a TableSpec structure. */
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names; column counts are small in practice.
 *
 * @param name column name (must not be null)
 * @return the index, -1 if not found
 * @throws NullPointerException if {@code name} is null (previously an unexplained
 *         NPE was thrown from {@code name.equals(...)})
 */
public long getColumnIndex(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    long columnCount = getColumnCount();
    for (long i = 0; i < columnCount; i++) {
        if (name.equals(getColumnName(i))) {
            return i;
        }
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
// Replaces the row at rowIndex with the last row, then drops the last row
// (avoids shifting all following rows).
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/**
 * Appends one empty row to the table.
 *
 * @return index of the new row
 */
public long addEmptyRow() {
    // Delegate to the general method; same immutability check, same native call.
    return addEmptyRows(1);
}
/**
 * Appends {@code rows} empty rows to the table.
 *
 * @param rows number of rows to append; must be positive
 * @return index of the first new row
 */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1) {
        throw new IllegalArgumentException("'rows' must be > 0.");
    }
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
// Appends a row built from 'values' (one value per column, in column order)
// and returns its index. Validation happens inside insert().
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
// Inserts a row at rowIndex from 'values' (one value per column, in column order).
// Two-pass design: first validate every value against its column type so that a
// bad argument cannot leave a half-inserted row, then perform the native inserts
// and commit with insertDone().
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Dates are stored with second precision.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// NOTE(review): a value that is neither byte[] nor ByteBuffer is silently
// skipped here — presumably unreachable because matchObject() validated it
// above, but worth confirming.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
insertDone();
}
// Recursively fills the subtable cell at (columnIndex, rowIndex) from nested
// row data (an Object[] of row arrays). A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    Object[] subRows = (Object[]) value;
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    for (int i = 0; i < subRows.length; ++i) {
        subtable.insert(i, (Object[]) subRows[i]);
    }
}
// Replaces the row at rowIndex with 'values'. All values are validated first;
// the update is then performed as remove + insert (see TODO below).
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Typed single-cell insert wrappers. Each one guards against mutation during a
// read transaction and forwards to the JNI layer; the native declaration is
// kept directly after its wrapper (they were previously interleaved).
public void insertLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Dates are stored with second precision.
    nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Inserts a binary cell from a direct ByteBuffer. Heap (non-direct) buffers are
// rejected because the native layer needs a stable memory address.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
/**
 * Inserts a binary cell from a byte array.
 * Consistency fix: the immutability guard now runs first, matching every other
 * insert/set method in this class (previously the null check preceded it, so an
 * immutable table with null data threw NPE instead of IllegalStateException).
 *
 * @param data the bytes to store; must not be null
 */
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell and fills it from 'values' (rows x columns of the subtable).
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits a sequence of per-cell insert calls; must be called once a full row
// has been inserted (see insert()).
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed single-cell getters; each forwards to the JNI layer.
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
public Date getDate(long columnIndex, long rowIndex) {
// Stored natively with second precision; scale back up to milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the binary cell at (columnIndex, rowIndex) as a byte array. */
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
/** Returns the value of a mixed-typed cell wrapped in a {@link Mixed}. */
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the value stored in a mixed-typed cell. */
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the number of rows in the subtable at the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable at the given cell. */
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter rejects writes while in a read transaction (immutable flag),
// then delegates to the native core.
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored with second precision: getTime() (ms) is divided by 1000.
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void setString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 * @throws NullPointerException if {@code data} is null
 * @throws RuntimeException if {@code data} is not a direct buffer
 */
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        // Fixed message: the argument is a ByteBuffer, not an array.
        throw new NullPointerException("Null ByteBuffer");
    if (!data.isDirect())
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Sets a binary cell from a byte[]; rejects null data and writes during
// a read transaction.
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the value to store; must not be null
 */
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds {@code value} to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value the amount added to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
// NOTE(review): Java name is addLong but the native hook is nativeAddInt;
// binding is by name so this works, but the naming is inconsistent.
public void addLong(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the given column. Only string columns are
// supported (enforced below before calling into native code).
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the column has a search index.
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Column-wide aggregates, delegating to the native core. One family per
// numeric column type. (Fixed: three native declarations misspelled their
// parameter as "columnnIndex"; parameter names have no effect on JNI binding,
// so this is purely a readability fix.)
// Integers
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnIndex);
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Returns the number of cells in the column equal to the given value;
// one overload per supported value type.
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Creates a new query builder rooted at this table/view.
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: returns the row index of the first match (per-type overloads).
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are matched at second precision (getTime()/1000).
public long findFirstDate(long columnIndex, Date date) {
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: returns a TableView of every row matching the value
// (per-type overloads). The view inherits this object's immutability.
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// Dates are matched at second precision (getTime()/1000).
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Fast indexed lookup of a string value in column 0.
// Requires that the first column is a string column with a search index.
public long lookup(String value) {
    // De Morgan of the original guard; short-circuit order is preserved so
    // getColumnType(0) is only consulted when an index exists.
    boolean usable = this.hasIndex(0) && this.getColumnType(0) == ColumnType.ColumnTypeString;
    if (!usable) {
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on an integer column (delegated to native code).
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the column.
public TableView distinct(long columnIndex) {
    return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the native core to optimize the table's internal representation.
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the table contents to a JSON string (produced natively).
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// Human-readable dump; the no-arg form prints all rows (INFINITE).
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by all mutating methods.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Left
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
 * TODO: Add an isEqual(Table) method to the public API.
 */
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class generated by the tightdb class generator
* automatically inherits from this class.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the native table object; 0 once close() has run.
protected long nativePtr;
// True while inside a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
// Debug-only bookkeeping: per-instance id and live-instance counter.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    // 0 from the native allocator signals allocation failure.
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
// Wraps an existing native table pointer (used for subtables/views).
// 'parent' keeps the owner reachable; 'immutable' marks read transactions.
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Releases the native table when the object is garbage collected.
// NOTE(review): finalize() is deprecated in modern Java; an explicit
// close()/Cleaner-based scheme would be preferable — confirm supported JDKs.
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Frees the native table exactly once. Guarded by a global mutex so that
// concurrent finalization cannot double-free; nativePtr == 0 marks "closed".
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid; otherwise ask the core.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Structural equality: two Table wrappers are equal when the native core
 * reports their underlying tables equal.
 *
 * NOTE(review): hashCode() is not overridden alongside equals(), which
 * breaks the equals/hashCode contract for hash-based collections — a
 * consistent hash cannot be derived from the native pointer alone; confirm
 * whether Table instances are ever used as map/set keys.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null, so the former explicit null check was
    // redundant. Has to work for all the typed tables as well.
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the core's current 63-character limit.
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() <= maxNameLength) {
        return;
    }
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
/**
 * Returns a definition object for the subtable column at {@code columnIndex}.
 * Only valid on a root table: subtable paths are built relative to the root.
 *
 * @throws UnsupportedOperationException if called on a subtable
 */
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
    // Idiomatic negation instead of "== false".
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the requested column (single hop).
    long[] newPath = new long[] { columnIndex };
    return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void removeColumn(long columnIndex) {
    // Fixed: this mutator was missing the write guard that every other
    // mutating method (clear, remove, setters, ...) applies.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @throws IllegalArgumentException if the new name exceeds 63 characters
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Fixed: this mutator was missing the write guard that every other
    // mutating method (clear, remove, setters, ...) applies.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates a table specification from a Table specification structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            specification of columns (names and types) to apply to this
 *            table. (Javadoc fixed: it previously described non-existent
 *            columnType/columnName parameters.)
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the current column layout (names/types) as a TableSpec.
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long count = getColumnCount();
    // Keep name.equals(...) on the left so a null name fails fast with an
    // NPE, exactly as before.
    for (long index = 0; index < count; index++) {
        if (name.equals(getColumnName(index)))
            return index;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Replaces the row at rowIndex with the last row, as implemented natively.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends a single empty row; returns the index reported by the core.
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
// Appends a row built from 'values' (one per column, in column order)
// and returns the new row's index. Validation happens in insert().
public long add(Object... values) {
    long rowIndex = size();
    insert(rowIndex, values);
    return rowIndex;
}
/**
 * Inserts a new row at {@code rowIndex} built from {@code values}
 * (one value per column, in column order). All values are type-checked
 * against the column types before anything is written, so a failure leaves
 * the table untouched.
 *
 * @param rowIndex insertion position; must be &lt;= size()
 * @param values one value per column
 * @throws IllegalArgumentException on bad index, wrong value count, or a
 *         value incompatible with its column type
 * @throws IllegalStateException if called during a read transaction
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Fixed: guard against null so the type-mismatch message is not
            // masked by an NPE from value.getClass().
            String actual = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + actual + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // Dates are stored at second precision (getTime()/1000).
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            else
                // Fixed: previously fell through silently, skipping the cell
                // and misaligning the remainder of the row insert.
                throw new IllegalArgumentException("Binary column value must be a byte[] or a ByteBuffer.");
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    // Commit the row once every cell has been inserted.
    insertDone();
}
// Recursively populates a freshly inserted subtable cell. 'value' is an
// Object[] of rows, each itself an Object[] of cell values; null means an
// empty subtable.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.insert(i, (Object[])rowArr);
        }
    }
}
// Replaces the entire row at rowIndex with 'values'. All values are
// validated first; the row is then removed and re-inserted (see TODO below),
// so partial updates never reach the table.
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
// Typed single-cell insert helpers. Each checks the write guard and then
// delegates to the native core. Note: the native declarations are interleaved
// out of order with their Java wrappers (e.g. nativeInsertFloat is declared
// next to insertLong); JNI binds by name, so this is harmless.
public void insertLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Dates are stored at second precision (getTime()/1000).
public void insertDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
    if (immutable) throwImmutable();
    nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Binary insert from a ByteBuffer; only direct buffers are supported.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
    //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell and recursively fills it from 'values'.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
    if (immutable) throwImmutable();
    nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
    insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Must be called once after all of a row's cells have been inserted.
public void insertDone() {
    if (immutable) throwImmutable();
    nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed single-cell reads, each delegating to the native core.
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Stored at second precision; converted back to milliseconds (*1000) here.
@Override
public Date getDate(long columnIndex, long rowIndex) {
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
// Same cell read as above, but the binary value is copied out as a byte[].
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Returns the Mixed value stored in the cell at (columnIndex, rowIndex).
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime ColumnType of the value held in a mixed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the subtable stored in a cell.
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// The subtable shares this table's mutability; `this` is passed as parent to keep it reachable.
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell. Mutating call: rejected on immutable tables.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// All setters first reject mutation on immutable tables (throwImmutable raises
// IllegalStateException), then delegate to the native layer.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Date is in milliseconds; native layer stores seconds (sub-second precision is truncated).
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 * @throws IllegalStateException if the table is immutable
 * @throws NullPointerException if {@code data} is null
 * @throws IllegalArgumentException if {@code data} is not a direct buffer
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    // Only direct buffers can be handed to the native layer without copying.
    // IllegalArgumentException is a RuntimeException, so existing callers that
    // caught the previous raw RuntimeException are unaffected.
    if (!data.isDirect())
        throw new IllegalArgumentException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the value to store; must not be null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value the amount to add to every cell in the column
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column. Only string columns are
 * supported by the native layer; any other column type is rejected.
 */
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    ColumnType columnType = getColumnType(columnIndex);
    if (columnType != ColumnType.ColumnTypeString)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if a search index exists on the given column (see setIndex).
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each group (integer / float / double) delegates directly to the native layer.
// Sums of float columns are returned as double, matching the native signature.

// Integers
@Override
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
// Fixed parameter-name typo: was "columnnIndex".
protected native long nativeMinimum(long nativePtr, long columnIndex);
@Override
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);

// Floats
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Fixed parameter-name typo: was "columnnIndex".
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Fixed parameter-name typo: was "columnnIndex".
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Overloads returning the number of cells in the column equal to the given value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a new query on this table; the query inherits this table's mutability flag.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match in the column.
// findAll*: return a TableView over all matching rows.
// Date comparisons use second precision (getTime() / 1000 truncates milliseconds).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
    // Equivalent guard expressed positively: column 0 must be an indexed string column.
    boolean indexedStringColumn =
            this.hasIndex(0) && this.getColumnType(0) == ColumnType.ColumnTypeString;
    if (!indexedStringColumn)
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on an integer column; presumably requires the column
// to be sorted - TODO confirm against the native implementation.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize the table's internal storage. Mutating call.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE means no row limit on the dump.
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common guard used by all mutating methods when the table is immutable.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableDefinition {
// Sentinel for "no row limit" in toString(long maxRows).
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 after close().
protected long nativePtr;
// When true, all mutating methods throw via throwImmutable().
protected boolean immutable = false;
// test:
// Debug-only instance counter and flags.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Loads the native TightDB library before any native method is called.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable); `parent` keeps the
// owner reachable, `immutable` propagates the read-transaction flag.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table when the Java object is garbage collected.
// close() may also be called explicitly; it is idempotent (see guard below).
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
private void close() {
// Serialized on a global mutex so concurrent finalization/close of native
// objects cannot race in the native layer.
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Mark as closed so a second close()/finalize() is a no-op.
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid; otherwise ask the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Content-based equality: two Tables are equal when the native layer says their
// data is equal, regardless of which Java wrapper instance holds them.
// NOTE(review): equals() is overridden without a matching hashCode(); objects that
// compare equal may have different hash codes, which breaks HashMap/HashSet usage.
// A consistent hashCode would need native support - flagging rather than fixing here.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the native layer's 63-character limit.
private void verifyColumnName(String name) {
    final int maxLength = 63;
    if (name.length() <= maxLength) {
        return;
    }
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
// Returns a definition object for the subtable column at columnIndex.
// Only allowed on a root table (not on subtables).
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
*
* @param type data type of the new column
* @param name column name (max 63 characters, see verifyColumnName)
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates the table's column layout from a TableSpec structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* specification describing the columns this table should have
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    final long rowCount = size();
    return rowCount == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the table's column specification as built by the native layer.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long count = getColumnCount();
    long index = 0;
    while (index < count) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then shrinks the table by one.
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; delegates to {@link #addEmptyRows(long)}. */
public long addEmptyRow() {
    return addEmptyRows(1);
}
/**
 * Appends {@code rows} empty rows.
 *
 * @param rows number of rows to add; must be positive
 * @return index returned by the native layer
 */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows <= 0)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row built from {@code values} and returns its index.
 * Validation of the values is done by {@link #insert(long, Object...)}.
 */
public long add(Object... values) {
    final long position = size();
    insert(position, values);
    return position;
}
/**
* Inserts a row at rowIndex built from the given values, one value per column.
* Performs full validation (index bound, arity, per-column type match) before
* any native insert call, then issues one native insert per column followed by
* insertDone(). The per-column native call order is required by the native
* layer - do not reorder.
*
* @param rowIndex insertion position; must be <= size()
* @param values one value per column, in column order
*/
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Date is milliseconds in Java, seconds in the native layer.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// Binary accepts either a byte[] or a direct ByteBuffer.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
// Subtable cell: create it natively, then recursively fill it from `value`.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
// Commits the pending row in the native layer.
insertDone();
}
// Recursively fills the subtable cell at (columnIndex, rowIndex) from a nested
// Object[][] of row values. A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    // insert rows in subtable recursively
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] rowValues = (Object[]) value;
    for (int rowNo = 0; rowNo < rowValues.length; ++rowNo) {
        subtable.insert(rowNo, (Object[]) rowValues[rowNo]);
    }
}
/**
* Replaces the row at rowIndex with the given values. All values are
* validated first; the row is then removed and re-inserted (so any
* subtables in the row are cleared as a side effect).
*
* @param rowIndex row to replace; must be < size()
* @param values one value per column, in column order
*/
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Per-type insert methods used while building a row; each rejects mutation on
// immutable tables, then delegates to the native layer. Note the native
// declarations are interleaved out of order with the wrappers (kept as-is).
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Date is milliseconds in Java, seconds in the native layer.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary value from a direct ByteBuffer while building a row.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @param data must be allocated with ByteBuffer.allocateDirect(len)
 * @throws IllegalStateException if the table is immutable
 * @throws NullPointerException if {@code data} is null
 * @throws IllegalArgumentException if {@code data} is not a direct buffer
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // Only direct buffers can be handed to the native layer without copying.
    // IllegalArgumentException is a RuntimeException, so existing callers that
    // caught the previous raw RuntimeException are unaffected.
    if (!data.isDirect())
        throw new IllegalArgumentException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// byte[] overload of insertBinary.
// NOTE(review): here the null check precedes the immutable check, the reverse of
// every other method in this class - presumably unintentional; confirm before
// normalizing, since it changes which exception wins when both conditions hold.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell and fills it from a nested Object[][] of row values.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row being built; must be called after the per-column insert calls.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter below delegates straight to its native counterpart; no Java-side
// bounds or type checking is performed here.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native layer stores date-times as seconds since the epoch; Date wants milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of a mixed cell, mapped from the native type code.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable at the given cell. Throws if the table is immutable.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter rejects calls on an immutable table, then delegates to native code.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Stored natively as whole seconds since the epoch (sub-second precision is dropped).
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column. Only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Reports whether a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Thin delegations to native aggregation over a whole column.
// FIX: corrected misspelled parameter name 'columnnIndex' -> 'columnIndex' in the
// nativeMinimum / nativeMinimumFloat / nativeMinimumDouble declarations. Parameter
// names are not part of the JNI linkage (resolution is by method name + descriptor),
// so this is purely a readability fix.
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Counts cells in the column equal to 'value'; one overload per supported type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts building a query against this table; immutability is propagated to the query.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst* return the row index of the first match (delegated entirely to native code).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Compared natively in whole seconds since the epoch.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* return a TableView over every matching row.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize internal storage. Mutating, so immutable tables reject it.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table to a JSON string (formatting is done natively).
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE is passed as maxRows, i.e. render all rows.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common failure path for every mutating method called on an immutable table.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Right
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class will automatically inherit
* from this class when generated by the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Passed as maxRows to nativeToString() to mean "no row limit" (see toString()).
public static final long INFINITE = -1;
// Pointer to the underlying native (C++) table; 0 after close().
protected long nativePtr;
// When true, every mutating method throws via throwImmutable().
protected boolean immutable = false;
// test:
// Debug-only instance counter and flags (used by the DEBUG println tracing below).
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Loads the native TightDB library before any native method can be called.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an already-existing native table pointer (used e.g. by getSubTable()).
// NOTE(review): 'parent' is accepted but not stored here — presumably the caller
// keeps the parent alive; confirm whether dropping it can let the parent be
// finalized while this subtable is still in use.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Last-resort release of the native table if close() was never called.
// Relying on finalization is best-effort; callers should prefer explicit close().
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table. Idempotent: a second call is a no-op because
// nativePtr is zeroed under the global CloseMutex after the native close.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid; otherwise ask the native layer.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Structural equality, delegated to the native comparison.
// NOTE(review): hashCode() is not overridden alongside equals(), so equal tables
// may hash differently — unsafe as HashMap/HashSet keys. A consistent hash would
// need native support; flagging rather than fixing here.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names exceeding the current native limit of 63 characters.
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() > maxNameLength)
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
/**
 * Returns a schema object addressing the subtable column at {@code columnIndex}.
 * Only valid on a root table.
 *
 * @param columnIndex index of the subtable column.
 * @throws UnsupportedOperationException if called on a subtable.
 */
public TableSchema getSubTableSchema(long columnIndex) {
// FIX: replaced the '== false' anti-idiom with logical negation.
if (!nativeIsRootTable(nativePtr))
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
// Path from the root: a single hop into the requested column.
long[] newPath = { columnIndex };
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* Verifies the name length before delegating to native code.
* @return Index of the new column.
*/
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex index of the column to remove.
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
public void removeColumn(long columnIndex)
{
// FIX: added the immutable guard that every other mutating method
// (clear, remove, set*, insert*, optimize, ...) performs.
if (immutable) throwImmutable();
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename.
 * @param newName new column name (max 63 characters).
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
public void renameColumn(long columnIndex, String newName) {
// FIX: added the immutable guard used by every other mutating method.
if (immutable) throwImmutable();
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates the table's column layout from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification (column names and types) to apply to this table.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the native-built specification of this table's columns.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
* Performs a linear scan over the column names.
*
* @param name column name
* @return the index, -1 if not found
*/
public long getColumnIndex(String name) {
final long columns = getColumnCount();
for (long index = 0; index < columns; index++)
if (name.equals(getColumnName(index)))
return index;
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
// Maps the native integer type code to the Java enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then drops the last row
* (native semantics; avoids shifting intermediate rows).
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row and returns its index.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows (must be at least 1) and returns the index reported natively.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
// Appends one row holding 'values' (one value per column, in column order)
// and returns the index of the newly added row.
public long add(Object... values) {
final long newRowIndex = size();
insert(newRowIndex, values);
return newRowIndex;
}
/**
 * Inserts a complete row at {@code rowIndex} (0 <= rowIndex <= size()).
 * 'values' must contain exactly one entry per column, each compatible with
 * the column's type; all values are validated before anything is written.
 *
 * @param rowIndex position at which to insert the row.
 * @param values one value per column, in column order.
 * @throws IllegalArgumentException on a bad index, wrong value count, or type mismatch.
 * @throws IllegalStateException if the table is immutable.
 */
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// FIX: 'value' may be null here; the original called value.getClass()
// unconditionally, throwing NullPointerException while building this
// message and masking the intended IllegalArgumentException.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " +
(value == null ? "null" : String.valueOf(value.getClass())) + ".");
}
}
// Insert values (all pre-validated above by matchObject)
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Dates are stored natively in whole seconds since the epoch.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
// Tell the native layer the row is complete.
insertDone();
}
// Fills a freshly inserted subtable cell from 'value', which (when non-null) is
// expected to be an Object[][]: one Object[] of cell values per subtable row.
// NOTE(review): the temporary 'subtable' wrapper is not explicitly closed here;
// its native handle is presumably released via finalize() — confirm.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.insert(i, (Object[])rowArr);
}
}
}
/**
 * Replaces the row at {@code rowIndex} (0 <= rowIndex < size()) with 'values'.
 * All values are validated first; the row is then removed and re-inserted.
 *
 * @param rowIndex index of the row to replace.
 * @param values one value per column, in column order.
 * @throws IllegalArgumentException on a bad index, wrong value count, or type mismatch.
 * @throws IllegalStateException if the table is immutable.
 */
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// FIX: 'value' may be null here; the original called value.getClass()
// unconditionally, throwing NullPointerException while building this
// message and masking the intended IllegalArgumentException.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " +
(value == null ? "null" : String.valueOf(value.getClass())) + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Per-type single-cell insert helpers used while building a row; each rejects
// calls on an immutable table, then delegates to native code.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Stored natively as whole seconds since the epoch (sub-second precision is dropped).
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Binary insert via ByteBuffer: only direct buffers are currently supported.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Binary insert via byte array.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell at (columnIndex, rowIndex) and recursively fills it
// with 'values' (one Object[] per subtable row). Throws if the table is immutable.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Signals the native layer that all cells of the row being inserted have been supplied.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter delegates straight to native code; indices are 0-based.
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native layer stores date-times as seconds since the epoch; java.util.Date
// expects milliseconds, hence the *1000 conversion.
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
// Byte-array variant of the binary getter.
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Returns the value of a mixed-typed cell wrapped in a Mixed object.
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type currently stored in a mixed-typed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable stored at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable stored at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter rejects mutation during a read transaction, then delegates to
// native code; indices are 0-based.
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Date.getTime() is milliseconds; the native layer stores seconds, hence /1000.
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
// Only direct buffers can be handed to native code without copying.
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Byte-array variant of the binary setter.
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the column. Only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate delegates to native code and operates over a whole column.

// Integers

/** Sum of all values in an integer column. */
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);

/** Maximum value in an integer column. */
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);

/** Minimum value in an integer column. */
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
// Parameter name fixed from the misspelled "columnnIndex" (here and in the
// float/double variants below); JNI resolves native methods by name and
// signature, so renaming a declared parameter is safe.
protected native long nativeMinimum(long nativePtr, long columnIndex);

/** Average of all values in an integer column. */
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);

// Floats

/** Sum of all values in a float column. */
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);

/** Maximum value in a float column. */
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

/** Minimum value in a float column. */
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

/** Average of all values in a float column. */
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles

/** Sum of all values in a double column. */
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);

/** Maximum value in a double column. */
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

/** Minimum value in a double column. */
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

/** Average of all values in a double column. */
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Returns the number of cells in the column equal to the given value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a new query on this table.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirstXxx() methods return the row index of the first match.
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Date.getTime() is milliseconds; the native layer stores seconds, hence /1000.
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAllXxx() methods return a TableView containing every matching row.
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
// Date.getTime() is milliseconds; the native layer stores seconds, hence /1000.
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
// Fast indexed lookup of 'value' in column 0; returns the matching row index.
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds; presumably require the column to be sorted —
// TODO(review): confirm against the native implementation.
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
/**
 * Returns a view over the distinct values of the given column.
 */
public TableView distinct(long columnIndex) {
    final long nativeViewPtr = nativeDistinct(nativePtr, columnIndex);
    return new TableView(nativeViewPtr, immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
/** Asks the native layer to optimize the table's internal storage. */
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);

/** Returns the entire table serialized as JSON. */
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);

/** Human-readable dump of all rows (INFINITE = no row limit). */
@Override  // was missing: this overrides Object.toString()
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}

/** Human-readable dump of at most {@code maxRows} rows. */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);

/** Human-readable dump of a single row. */
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);

// Single choke point for rejecting mutation during a read transaction.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high-level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents a view of the employee table, i.e., the result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Row-limit sentinel for toString(): dump all rows.
public static final long INFINITE = -1;
// Pointer to the underlying native (C++) table; 0 once close()d.
protected long nativePtr;
// True while inside a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
// Debug-only bookkeeping: per-instance id and live-instance counter.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library before any native method can be called.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
// Allocates the native table and returns its pointer (0 on failure).
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable); 'parent' keeps
// the owning object reachable while this wrapper is alive.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table when the wrapper is garbage collected.
// NOTE(review): finalize() is deprecated in modern Java; consider migrating
// to Cleaner/PhantomReference when the supported JDK allows it.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table exactly once; idempotent (nativePtr == 0 means
// already closed). Serialized on a global mutex because finalizers may run
// concurrently with explicit closes.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Structural equality: two Table wrappers are equal when the native layer
// considers the underlying tables equal.
// NOTE(review): no matching hashCode() override is visible in this chunk —
// confirm one exists elsewhere in the class, otherwise Table instances will
// violate the equals/hashCode contract in hash-based collections.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Validates a column name before it is handed to native code.
// Fails fast with a descriptive message on null input (the original would
// have thrown a bare NullPointerException from name.length()).
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name is null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema object for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @throws UnsupportedOperationException if this table is a subtable
 */
public TableSchema getSubTableSchema(long columnIndex) {
    // Idiomatic negation instead of "== false".
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root table down to the subtable column.
    long[] newPath = { columnIndex };
    return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
*
* @param type data type of the new column
* @param name column name (max 63 characters)
* @return Index of the new column.
*/
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*
* @param columnIndex 0-based index of the column to remove
*/
public void removeColumn(long columnIndex)
{
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*
* @param columnIndex 0-based index of the column to rename
* @param newName new column name (max 63 characters)
*/
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification describing the columns to apply to this table.
* Duplicate column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the table's column specification as built by the native layer.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Linear scan over the column names.
 *
 * @param name column name; must not be null
 * @return the index, -1 if not found
 * @throws NullPointerException if {@code name} is null
 */
public long getColumnIndex(String name) {
    // Fail fast with a message; previously a null name only NPE'd (without a
    // message) once the loop body ran, and silently returned -1 on an empty table.
    if (name == null)
        throw new NullPointerException("Column name is null");
    long columnCount = getColumnCount();
    for (long i = 0; i < columnCount; i++) {
        if (name.equals(getColumnName(i))) {
            return i;
        }
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
public ColumnType getColumnType(long columnIndex)
{
// Native layer reports an int code; map it to the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then shrinks the table —
* an O(1) delete that does not preserve row order.
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row and returns its index.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows and returns the index of the first one.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row holding the given values and returns its row index.
 *
 * @param values one value per column, in column order
 * @return the index of the newly appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
// Inserts a full row at rowIndex. All values are validated against the
// column types before any native insert is issued, so a type error leaves
// the table unmodified. The per-column nativeInsertXxx calls are finished
// with insertDone(), which commits the row.
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() throws NPE if a null value fails the
// type match — confirm matchObject() accepts null where intended.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Date.getTime() is milliseconds; native stores seconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
insertDone();
}
// Recursively inserts the rows of a nested Object[][] into the subtable cell
// at (columnIndex, rowIndex). A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null)
        return;
    final Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    final Object[] subRows = (Object[]) value;
    for (int r = 0; r < subRows.length; r++) {
        subtable.insert(r, (Object[]) subRows[r]);
    }
}
// Replaces the row at rowIndex with the given values. All values are
// validated up front, then the row is removed and re-inserted.
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() throws NPE if a null value fails the
// type match — confirm matchObject() accepts null where intended.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Per-type cell inserters used while building a row; the row becomes visible
// only after insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Date.getTime() is milliseconds; the native layer stores seconds, hence /1000.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary cell value during row insertion.
 *
 * @param columnIndex 0-based column index of the cell
 * @param rowIndex    0-based row index being inserted
 * @param data        the buffer; currently must be allocated with
 *                    {@code ByteBuffer.allocateDirect(len)}
 * @throws NullPointerException  if {@code data} is null
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws RuntimeException      if {@code data} is not a direct buffer
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // Only direct buffers can be handed to native code without copying.
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
/**
 * Inserts a binary cell value from a byte array during row insertion.
 *
 * @param columnIndex 0-based column index of the cell
 * @param rowIndex    0-based row index being inserted
 * @param data        the bytes to store; must not be null
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws NullPointerException  if {@code data} is null
 */
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    // Mutability is checked first, consistent with every other mutating
    // method in this class (the original checked the null argument first).
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts an (optionally pre-populated) subtable cell during row insertion.
// 'values' is a rows-by-columns matrix; null leaves the subtable empty.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row started by the preceding insertXxx() calls.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter is a thin wrapper delegating to the JNI layer; indices are 0 based.
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native layer stores epoch seconds; java.util.Date wants milliseconds.
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a mixed cell.
public ColumnType getMixedType(long columnIndex, long rowIndex)
{
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter overwrites an existing cell; all reject calls during a read
// transaction via throwImmutable().
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Converts milliseconds (java.util.Date) to the epoch seconds the native layer stores.
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex    row index of the cell
 * @param data        the new value; must not be null
 * @throws NullPointerException  if data is null
 * @throws IllegalStateException if called during a read transaction
 */
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Mixed data is null"); // was a bare NPE with no message
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value for to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value
 */
//!!!TODO: New. Support in highlevel API
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the column; native layer only supports string columns.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if a search index exists on the column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Thin wrappers over native column aggregations. One family per storage type;
// sums/averages of float columns are widened to double by the native layer.
// (Fixed: several native declarations misspelled the parameter "columnnIndex".)
// Integers
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnIndex);
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value, per type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Creates a query builder bound to this table.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: row index of the first match (native-defined not-found value otherwise).
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are compared in epoch seconds (Date.getTime() is milliseconds).
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: view over every matching row.
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds; presumably requires a sorted integer column — TODO confirm.
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// View of rows with distinct values in the given column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Lets the native layer compact internal storage (a mutation, hence the guard).
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// JSON rendering of the whole table, produced natively.
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/**
 * Returns a native-rendered string representation of the entire table.
 */
@Override // was missing: this overrides Object.toString()
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}
/**
 * Returns a native-rendered string representation of at most {@code maxRows} rows.
 *
 * @param maxRows maximum number of rows to render, or INFINITE for all
 */
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
// Native-rendered string representation of a single row.
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for every mutator invoked while in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
MergeMethods
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel row limit meaning "no limit" (see toString(long)).
public static final long INFINITE = -1;
// Handle to the native (C++) table; 0 after close().
protected long nativePtr;
// True when opened inside a read transaction; mutators then throwImmutable().
protected boolean immutable = false;
// test:
// Debug-only instance counter fields.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table handle (e.g. a subtable); does not allocate.
// parent is accepted only to keep the owner reachable by the caller.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
// NOTE(review): finalize() is deprecated in modern Java; relying on it for
// native cleanup is fragile — prefer explicit close().
public void finalize() throws Throwable {
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table handle. Idempotent: a second call sees nativePtr == 0
// and returns. Serialized on a global mutex so finalizer and user threads cannot
// double-free the native object.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Structural equality: two wrappers are equal when the native layer reports
 * the underlying tables equal. Works for all typed Table subclasses too.
 *
 * NOTE(review): equals() is overridden without a visible matching hashCode();
 * do not use Table instances as hash-map keys until that is addressed.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null, so the separate null check was redundant.
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the native layer's current 63-character limit.
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() > maxNameLength)
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @param columnIndex 0 based index of the subtable column
 * @throws UnsupportedOperationException if this table is itself a subtable
 */
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) // idiom fix: was "== false"
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the target column; length 1 since we are at the root.
    long[] newPath = { columnIndex };
    return new SubTableDefinition(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public long addColumn(ColumnType type, String name) {
    if (immutable)
        throwImmutable(); // consistency fix: schema changes are mutations, like clear()/remove()
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable)
        throwImmutable(); // consistency fix: was missing the read-transaction guard
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable)
        throwImmutable(); // consistency fix: was missing the read-transaction guard
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a TableSpec structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 * the specification (column types and names) to apply
 * @throws IllegalStateException if called during a read transaction
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the table's column layout, built natively.
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the column with the given name.
 *
 * @param name column name to look up
 * @return the column index, or -1 when no column has that name
 */
@Override
public long getColumnIndex(String name) {
    final long columns = getColumnCount();
    long index = 0;
    while (index < columns) {
        if (name.equals(getColumnName(index)))
            return index;
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIdex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Deletes the row at rowIndex by moving the last row into its place
 * (faster than remove(), but does not preserve row order).
 */
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row and returns the native call's result.
public long addEmptyRow() {
if (immutable)
throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; rows must be positive.
public long addEmptyRows(long rows) {
if (immutable)
throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row holding the given values (one per column) and returns its index.
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at {@code rowIndex} holding the given values, one per column,
 * in column order. All values are type-checked against the column types before
 * any native insertion starts, so a bad argument cannot leave a half-built row.
 *
 * @param rowIndex 0 based position for the new row; must be &lt;= size()
 * @param values   one value per column, compatible with each column's type
 * @throws IllegalArgumentException on bad index, wrong value count, or type mismatch
 * @throws IllegalStateException    if called during a read transaction
 */
public void insert(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Cache each column's type so the insert loop below doesn't re-query natively.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int) columnIndex];
        switch (colTypes[(int) columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
            break;
        case ColumnTypeDate:
            // Date.getTime() is milliseconds; native layer stores epoch seconds.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer) value);
            else
                // BUG FIX: previously this case fell through silently for any
                // other value, skipping the column and corrupting the row
                // being built (insertDone() would commit a short row).
                throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Binary columns accept byte[] or a direct ByteBuffer.");
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
        }
    }
    insertDone();
}
// Recursively inserts the rows of a subtable cell. 'value' is expected to be an
// Object[] of rows, each itself an Object[] of cell values (see insertSubTable).
// A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.insert(i, (Object[]) rowArr);
}
}
}
// Replaces an existing row with new values; validates everything first, then
// implemented as remove + insert (not atomic at the cell level).
public void set(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Typed row-building inserters: used while constructing a new row, which is
// committed by insertDone(). All reject calls during a read transaction.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Date.getTime() is milliseconds; native layer stores epoch seconds.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Only direct ByteBuffers can be passed to JNI without copying.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable)
throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell; its rows are inserted recursively.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row built with the insert*() calls above.
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Thin wrappers over the JNI layer; all indices are 0 based.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native layer stores epoch seconds; java.util.Date wants milliseconds.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Runtime type of the value stored in a mixed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter rejects writes on an immutable table (read transaction) and then
// delegates to the native core.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// Date.getTime() is milliseconds; the native core stores seconds.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell of the column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column.
 * Only string columns can currently be indexed.
 */
public void setIndex(long columnIndex) {
    if (immutable) {
        throwImmutable();
    }
    ColumnType columnType = getColumnType(columnIndex);
    if (columnType != ColumnType.ColumnTypeString) {
        throw new IllegalArgumentException("Index is only supported on string columns.");
    }
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns whether the given column carries a search index. */
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate runs over every row of one column and delegates to the core.
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Overloads counting the cells of a column that equal the given value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Starts building a query against this table. */
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirstXxx() returns the row index of the first matching row.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Milliseconds (java.util.Date) are converted to the seconds stored by the core.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAllXxx() returns a TableView of all matching rows.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/**
 * Fast lookup of {@code value} in column 0.
 * Requires that column 0 is an indexed string column.
 */
@Override
public long lookup(String value) {
    boolean indexedStringColumn =
        hasIndex(0) && getColumnType(0) == ColumnType.ColumnTypeString;
    if (!indexedStringColumn) {
        throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Lower/upper bound lookups; presumably assume the integer column is sorted - confirm against native core.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/** Returns a view containing the distinct values of the given column. */
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
/** Asks the native core to optimize the table's internal storage. */
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Returns the whole table serialized as JSON by the native core. */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE (-1) tells the native side to dump all rows.
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
/** Uniform error raised by mutating calls while the table is immutable (read transaction). */
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel row count passed to the native core meaning "no limit".
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 after close().
protected long nativePtr;
// True while the table belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
// Debug-only bookkeeping for tracking live table instances.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library once, when this class is first used.
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
/** Allocates the native table object; returns its pointer, or 0 on allocation failure. */
protected native long createNative();
/**
 * Wraps an already-allocated native table (e.g. a subtable).
 *
 * @param parent owning object. NOTE(review): the argument is not stored, so it
 *               does not actually keep the parent reachable - confirm intent.
 * @param nativePtr pointer to the native table to wrap
 * @param immutable whether mutating calls should be rejected
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
// NOTE(review): releasing the native object via GC finalization is a
// fallback; callers should invoke close() deterministically where possible.
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
/** Releases the native table object. Idempotent: later calls are ignored. */
private void close() {
// The shared mutex serializes native close calls across finalizer and user threads.
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Zeroing the pointer marks the table as closed (see isValid()).
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
// A closed table (nativePtr == 0) is never valid.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Compares table contents via the native core.
 * Has to work for all the typed tables as well, hence {@code instanceof}
 * rather than an exact class comparison.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null, so no separate null check is needed.
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
// NOTE(review): equals() is overridden without hashCode(), so tables that
// compare equal may hash differently. Confirm Table is never used as a
// hash-based key, or add a consistent hashCode().
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Verifies that a column name is within the length limit of the native core.
 *
 * @param name the proposed column name; must not be null
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name.length() > 63) {
        // Include the offending length so callers can diagnose the rejection.
        throw new IllegalArgumentException(
            "Column names are currently limited to max 63 characters. Got " + name.length() + " characters.");
    }
}
/**
 * Returns the schema handle for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 */
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    long[] path = { columnIndex };
    return new SubTableDefinition(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn(ColumnType type, String name) {
    // Schema changes mutate the table, so reject them during a read
    // transaction - consistent with updateFromSpec() and the other mutators.
    if (immutable)
        throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex index of the column to remove
 */
@Override
public void removeColumn(long columnIndex) {
    // Schema changes mutate the table; guard against read transactions,
    // consistent with the other mutators in this class.
    if (immutable)
        throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName the new column name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Schema changes mutate the table; guard against read transactions,
    // consistent with the other mutators in this class.
    if (immutable)
        throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column structure from a table specification structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification describing the columns of this table
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the table's column specification as held by the native core. */
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Performs a linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    while (index < columnCount) {
        // name.equals(...) keeps the original NPE behavior for a null name.
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
// Presumably overwrites the row at rowIndex with the last row and shrinks the
// table by one (avoiding a shift of all following rows) - confirm against the
// native core before relying on the exact semantics.
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row and returns its index. */
public long addEmptyRow() {
    return addEmptyRows(1);
}
/** Appends {@code rows} empty rows and returns the index of the first one. */
public long addEmptyRows(long rows) {
    if (immutable) {
        throwImmutable();
    }
    if (rows < 1) {
        throw new IllegalArgumentException("'rows' must be > 0.");
    }
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/** Appends a row holding the given values (one per column) and returns its row index. */
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a full row at {@code rowIndex}, validating the value count and
 * types against the table's columns before writing anything.
 *
 * @param rowIndex index to insert at; must be <= size()
 * @param values one value per column, in column order
 */
public void insert(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() would NPE if a null value fails the type
// check - confirm whether matchObject(null) can return false here.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int) columnIndex];
switch(colTypes[(int) columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
break;
case ColumnTypeDate:
// Milliseconds (java.util.Date) are converted to the seconds stored by the core.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// matchObject() above guarantees value is byte[] or ByteBuffer here.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer) value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
}
}
// The row only becomes visible once insertDone() commits it.
insertDone();
}
/** Recursively inserts the rows of {@code value} (an Object[][]) into the subtable cell currently being built. */
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.insert(i, (Object[]) rowArr);
}
}
}
/**
 * Replaces the row at {@code rowIndex} with the given values. Implemented as
 * remove + insert, so subtable cells in the row are rebuilt from scratch.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order
 */
public void set(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() would NPE if a null value fails the type
// check - confirm whether matchObject(null) can return false here.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Typed insert methods for building a row cell-by-cell; commit with insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// Milliseconds (java.util.Date) are converted to the seconds stored by the core.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary value from a direct ByteBuffer while building a row.
 *
 * @param columnIndex column of the cell
 * @param rowIndex row of the cell
 * @param data must be allocated with ByteBuffer.allocateDirect(len)
 * @throws NullPointerException if data is null
 * @throws IllegalArgumentException if data is not a direct buffer
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable)
        throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    // FIXME: support other than allocateDirect
    if (!data.isDirect())
        // IllegalArgumentException is a RuntimeException subtype, so this is
        // backward compatible while being more precise than bare RuntimeException.
        throw new IllegalArgumentException("Currently ByteBuffer must be allocateDirect().");
    nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
/**
 * Inserts a binary (byte[]) value while building a row; finish the row with insertDone().
 *
 * @param columnIndex column of the cell
 * @param rowIndex row of the cell
 * @param data the bytes to store; must not be null
 */
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    // Check immutability first, consistent with every other mutator in this
    // class (e.g. setBinaryByteArray), so misuse during a read transaction is
    // always reported as IllegalStateException regardless of the arguments.
    if (immutable)
        throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/** Inserts a subtable at (columnIndex, rowIndex) and recursively fills it from values (one Object[] per subtable row; may be null for an empty subtable). */
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
/** Finishes a row built with the insertXxx() methods; must be called once per inserted row. */
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// The getters below are thin wrappers that delegate directly to the native core.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// The native core stores date-times in seconds; java.util.Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the value stored in a mixed cell. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter overwrites an existing cell. All setters reject mutation while
// 'immutable' is set (read transaction) by throwing via throwImmutable().
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Stores whole seconds only (millis / 1000); sub-second precision is dropped.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value for to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell of the (integer) column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the column has a search index (see setIndex()).
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Aggregates are computed natively over all rows of the given column.
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Returns the number of rows whose cell in the column equals 'value';
// one overload per supported column type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Builds a new query object over this table; the query inherits the
// table's immutability flag.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirstXxx(): row index of the first match in the column.
// NOTE(review): the "not found" sentinel value is defined by the native
// layer and is not visible here — confirm against the JNI implementation.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are compared at whole-second granularity (millis / 1000).
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAllXxx(): view over all rows matching the value in the column.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing only rows with distinct values in the column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize the table's internal storage.
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table to a JSON string (produced natively).
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE (-1) means "no row limit" for the native dump.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for all mutators called while in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// NOTE(review): merge artifact ("KeepBothMethods") — everything below this
// line is a second, near-identical copy of com.tightdb.Table (its own
// package/import/class header follows). The two copies must be deduplicated
// before this file can compile.
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel meaning "no row limit" for toString(maxRows).
public static final long INFINITE = -1;
// Pointer (handle) to the native C++ table object; 0 after close().
protected long nativePtr;
// True while the table belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Load the JNI library once per class load, before any native call.
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an already-existing native table (e.g. a subtable); 'parent' is the
// owning object, 'immutable' is inherited from the read/write transaction.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table when the Java object is collected.
// NOTE(review): finalizers are deprecated in modern Java; an explicit
// close()/Cleaner-based scheme would be preferable if the API can evolve.
@Override
public void finalize() throws Throwable {
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table exactly once; guarded by a global mutex so that
// finalizer and explicit callers cannot double-free. Idempotent.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Two tables are equal when the native layer says their contents are equal.
// NOTE(review): equals() is overridden without a matching hashCode();
// equal tables may hash differently, breaking HashMap/HashSet usage.
// A hashCode() consistent with nativeEquals() should be added natively.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null (previously this
 *         surfaced as a message-less NPE from {@code name.length()})
 * @throws IllegalArgumentException if the name is longer than 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
// Returns a definition object for the subtable column; only valid on a
// root (top-level) table.
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
if (nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableDefinition(nativePtr, newPath);
}
// Schema counterpart of getSubTableDefinition(); same root-table restriction.
public TableSchema getSubTableSchema(long columnIndex) {
if (nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters, non-null)
 * @return Index of the new column.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public long addColumn(ColumnType type, String name) {
    // Consistency fix: like every other mutating operation (clear, remove,
    // updateFromSpec, setXxx...), schema mutators must reject read transactions.
    if (immutable)
        throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable)
        throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column
 * @param newName new column name (max 63 characters, non-null)
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable)
        throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a TableSpec structure.
 * Supported column types - refer to @see ColumnType.
 *
 * (Doc fix: the previous javadoc described nonexistent columnType/columnName
 * parameters.)
 *
 * @param tableSpec
 *            specification describing the columns; duplicate column names
 *            are not allowed.
 * @throws IllegalStateException if called during a read transaction
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the native column specification for this table.
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 * Performs a linear scan over the column names (O(columnCount)).
 *
 * @param name column name, must not be null
 * @return the index, -1 if not found
 * @throws NullPointerException if {@code name} is null (previously this
 *         surfaced as a message-less NPE inside {@code String.equals})
 */
@Override
public long getColumnIndex(String name) {
    if (name == null)
        throw new NullPointerException("Column name must not be null.");
    long columnCount = getColumnCount();
    for (long i = 0; i < columnCount; i++) {
        if (name.equals(getColumnName(i))) {
            return i;
        }
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIdex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Replaces the row at rowIndex with the last row, then removes the last row
 * (fast delete that does not preserve row order).
 */
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row and returns the index of the first row added.
public long addEmptyRow() {
if (immutable)
throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' (> 0) empty rows; returns the index reported natively.
public long addEmptyRows(long rows) {
if (immutable)
throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row holding the given cell values and returns its index.
 * Validation of the values is delegated to insert().
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
// Inserts a full row at 'rowIndex' (0 <= rowIndex <= size()). All cell
// values are first validated against the column types, then pushed to the
// native layer column by column, finishing with insertDone() to commit.
public void insert(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int) columnIndex];
switch(colTypes[(int) columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
break;
case ColumnTypeInt:
// Accepts any Number for integer columns...
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
break;
case ColumnTypeFloat:
// ...but requires exactly Float here — NOTE(review): inconsistent with
// the Number handling above; presumably matchObject() enforces Float.
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
break;
case ColumnTypeDate:
// Whole seconds only; sub-second precision is dropped.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// NOTE(review): a value that is neither byte[] nor ByteBuffer is
// silently skipped here, leaving the row's cell un-inserted; relies
// on matchObject() above having rejected other types — confirm.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer) value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
}
}
insertDone();
}
// Recursively fills a just-inserted subtable cell: 'value' is an Object[]
// of rows, each itself an Object[] of cell values. A null value leaves the
// subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.insert(i, (Object[]) rowArr);
}
}
}
// Replaces an existing row (0 <= rowIndex < size()) with new cell values.
// Implemented as remove + insert after full validation, so a validation
// failure leaves the table untouched.
public void set(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Typed per-cell insert wrappers used while building a row; the row becomes
// visible only after insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Stores whole seconds only (millis / 1000); sub-second precision is dropped.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Binary insert from a ByteBuffer; only direct buffers are supported.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// NOTE(review): null check precedes the immutability check here — the
// opposite order of every other insert wrapper; consider unifying.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable)
throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell and recursively fills it from 'values'.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row built by the preceding insertXxx() calls.
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Thin wrappers delegating to the native (JNI) accessors. Indices are 0-based.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native layer stores seconds since the epoch; convert to millis for Date.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a Mixed column cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Converts from milliseconds (java.util.Date) to the epoch seconds stored natively.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value for to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell of the (integer) column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/** Creates a search index on a string column; only string columns are supported. */
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the column has a search index. */
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers

/** Sum of all values in an integer column. */
@Override
public long sum(long columnIndex) {
    return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);

/** Maximum value in an integer column. */
@Override
public long maximum(long columnIndex) {
    return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);

/** Minimum value in an integer column. */
@Override
public long minimum(long columnIndex) {
    return nativeMinimum(nativePtr, columnIndex);
}
// Fixed parameter-name typo "columnnIndex" -> "columnIndex" (JNI binds by method
// name and descriptor, so parameter names are purely cosmetic).
protected native long nativeMinimum(long nativePtr, long columnIndex);

/** Average of all values in an integer column. */
@Override
public double average(long columnIndex) {
    return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);

// Floats

/** Sum of all values in a float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);

/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

/** Average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles

/** Sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);

/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

/** Average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);

//
// Count
//

/** Number of cells in the integer column equal to {@code value}. */
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);

/** Number of cells in the float column equal to {@code value}. */
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);

/** Number of cells in the double column equal to {@code value}. */
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);

/** Number of cells in the string column equal to {@code value}. */
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Starts a new query on this table. */
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Date comparisons are done on the native epoch-seconds representation.
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* return a TableView over the matching rows; the view shares this table's mutability.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/** Returns a view containing one row for each distinct value in the column. */
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
/** Asks the native core to optimize the table's internal storage (e.g. string enumeration). */
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the entire table to a JSON string. */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE (-1) means "no row limit" for the native dump.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by every mutator when the table belongs to a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 once close() has run.
protected long nativePtr;
// True when the table belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
// Debug-only instance counter and flags.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
/**
 * Wraps an existing native table pointer (e.g. a subtable or a table inside a group).
 * The {@code parent} reference keeps the owner reachable while this wrapper is alive.
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table on GC as a safety net; callers should prefer explicit close().
// NOTE(review): finalize() is deprecated in modern Java — consider Cleaner/AutoCloseable. TODO confirm.
@Override
public void finalize() throws Throwable {
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table exactly once; idempotent, guarded by a global mutex.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
// A closed table (nativePtr == 0) is never valid.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native core reports their contents equal.
 * Works for all generated typed-table subclasses as well.
 * NOTE(review): hashCode() is not overridden to match — confirm tables are not used as hash keys.
 */
@Override
public boolean equals(Object other) {
    // Fast path: same wrapper instance.
    if (this == other) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (other instanceof Table) {
        return nativeEquals(nativePtr, ((Table) other).nativePtr);
    }
    return false;
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/** Rejects column names longer than the 63-character limit of the native core. */
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() > maxNameLength) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a definition object for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @throws UnsupportedOperationException if this table is itself a subtable.
 */
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
    // Idiom fix: "!cond" instead of "cond == false".
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root table to the subtable column: a single step.
    return new SubTableDefinition(nativePtr, new long[] { columnIndex });
}

/**
 * Returns a schema object for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @throws UnsupportedOperationException if this table is itself a subtable.
 */
public TableSchema getSubTableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    return new SubTableSchema(nativePtr, new long[] { columnIndex });
}

protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 */
@Override
public long addColumn(ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 * NOTE(review): unlike other mutators this does not check {@code immutable} — confirm intended.
 */
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 * NOTE(review): does not check {@code immutable} either — confirm intended.
 */
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column layout from a TableSpec structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 * the specification describing the columns (names and types). Duplicate
 * column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the full column specification of this table. */
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the first column whose name equals {@code name}.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long columnCount = getColumnCount();
    long index = 0;
    // Linear scan; name.equals(...) keeps the original NPE behavior for a null argument.
    while (index < columnCount) {
        if (name.equals(getColumnName(index))) {
            return index;
        }
        index++;
    }
    return -1;
}
/**
 * Get the type of a column identified by the columnIdex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Replaces the row at {@code rowIndex} with the last row, then shrinks the table by one.
 */
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row and returns its index. */
public long addEmptyRow() {
if (immutable)
throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
/** Appends {@code rows} (&gt; 0) empty rows and returns the index of the first one. */
public long addEmptyRows(long rows) {
if (immutable)
throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends one row with the given column values.
 *
 * @return the index of the newly appended row.
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a new row at {@code rowIndex} (0 &lt;= rowIndex &lt;= size()), shifting later rows down.
 * {@code values} must supply exactly one value per column, each compatible with the column type.
 *
 * @throws IllegalStateException    if the table is part of a read transaction.
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid.
 */
public void insert(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: describe a null value as "null" instead of NPE-ing on value.getClass(),
            // so the caller sees the intended IllegalArgumentException.
            String actual = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + actual + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int) columnIndex];
        switch(colTypes[(int) columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
            break;
        case ColumnTypeDate:
            // java.util.Date carries milliseconds; the core stores epoch seconds.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            // matchObject has already validated the value; only these two forms are accepted.
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer) value);
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
        }
    }
    insertDone();
}
// Recursively inserts the given rows ({@code value} is an Object[] of row arrays)
// into the freshly inserted subtable cell at (columnIndex, rowIndex).
// NOTE(review): the subtable wrapper is never close()d here and is left to the GC/finalizer —
// consider closing it explicitly after the loop. TODO confirm.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.insert(i, (Object[]) rowArr);
}
}
}
/**
 * Replaces the row at {@code rowIndex} (0 &lt;= rowIndex &lt; size()) with the given values.
 * Implemented as remove + insert, so subtable cells are cleared and rebuilt.
 *
 * @throws IllegalStateException    if the table is part of a read transaction.
 * @throws IllegalArgumentException if the index, value count, or a value type is invalid.
 */
public void set(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: describe a null value as "null" instead of NPE-ing on value.getClass(),
            // so the caller sees the intended IllegalArgumentException.
            String actual = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + actual + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
// Low-level per-type row insertion. Callers must invoke insertDone() after filling every
// column of the row being inserted.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Converts from milliseconds (java.util.Date) to the epoch seconds stored natively.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary value supplied as a direct ByteBuffer.
 *
 * @param data must be allocated with ByteBuffer.allocateDirect(len); heap buffers are rejected.
 * @throws IllegalStateException if the table is part of a read transaction.
 * @throws NullPointerException  if {@code data} is null.
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable)
        throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        // FIXME: support other than allocateDirect
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);

/**
 * Inserts a binary value supplied as a byte array.
 *
 * @throws IllegalStateException if the table is part of a read transaction.
 * @throws NullPointerException  if {@code data} is null.
 */
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    // Consistency fix: check immutability first, matching every other mutator in this class.
    if (immutable)
        throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Inserts a subtable cell and recursively fills it with the given row values.
 * {@code values} is an array of rows, each row an array of column values.
 */
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
/** Commits a row started with the insert*() calls; must be called once per inserted row. */
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that, or better, call close()
* explicitly after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
// FIXME: support other than allocateDirect
throw new RuntimeException("Currently ByteBuffer must be allocateDirect().");
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/**
 * Fast indexed lookup: returns the index of the first row whose column 0
 * equals {@code value}.
 * Requires that the first column is an indexed string column.
 *
 * @param value the string to search for in column 0
 * @return the matching row index (not-found semantics are defined by the
 *         native layer - confirm against nativeLookup)
 * @throws RuntimeException if column 0 is not an indexed String column
 */
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
/* ==== variant marker: "Safe" (a second copy of Table.java follows) ==== */
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
/**
 * Wraps an existing native table handle (e.g. a subtable or a handle obtained
 * from a group/transaction).
 *
 * NOTE(review): the 'parent' argument is accepted but never stored, so this
 * object holds no reference that would keep the parent alive; the parent
 * could be garbage-collected (and its native memory closed) while this
 * wrapper is still in use - confirm whether callers rely on the parent
 * staying reachable.
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Safety net: release the native handle if the user never called close().
// NOTE(review): finalize() is deprecated since Java 9; a java.lang.ref.Cleaner
// would be the modern replacement - confirm against the supported JDK range.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
// Always chain to Object.finalize(), even if close() throws.
super.finalize();
}
}
// Releases the native table pointer exactly once. The global CloseMutex
// serializes closes across finalizer and user threads, and the nativePtr == 0
// check makes repeated calls a no-op, preventing a double free.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Mark the handle dead so isValid()/close() see a released table.
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Structural equality: two Table instances are equal when the native core
 * reports their underlying tables as equal.
 *
 * NOTE(review): equals() is overridden here but no matching hashCode()
 * override is visible in this file, which breaks the equals/hashCode
 * contract for hash-based collections. A hashCode consistent with native
 * equality cannot be derived from this method alone - confirm and address
 * at class level.
 */
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before handing it to native code.
 *
 * @param name the proposed column name
 * @throws NullPointerException     if {@code name} is null (previously this
 *                                  surfaced as a message-less NPE from
 *                                  {@code name.length()})
 * @throws IllegalArgumentException if the name is longer than 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name is null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
<<<<<<< MINE
public TableSchema getSubTableSchema(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
=======
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableDefinition(nativePtr, newPath);
}
>>>>>>> YOURS
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type the column's data type, see {@code ColumnType}
 * @param name the column name (max 63 characters, must be non-null)
 * @return Index of the new column.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Guard added for consistency: every other mutating method in this class
    // (clear, remove, updateFromSpec, ...) rejects writes while immutable.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column from the table dynamically.
 *
 * @param columnIndex index of the column to remove
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void removeColumn(long columnIndex) {
    // Guard added for consistency with the class's other mutating methods.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName the new column name (max 63 characters, must be non-null)
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Guard added for consistency with the class's other mutating methods.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a {@code TableSpec} structure.
 * Supported column types - refer to {@code ColumnType}.
 *
 * (Javadoc fixed: the previous text documented nonexistent
 * {@code columnType}/{@code columnName} parameters.)
 *
 * @param tableSpec
 *            the specification describing the columns to apply to this table
 * @throws IllegalStateException if called during a read transaction
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of the column with the given name, using a
 * linear scan over the column names.
 *
 * @param name column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String name) {
    final long total = getColumnCount();
    long idx = 0;
    while (idx < total) {
        if (name.equals(getColumnName(idx)))
            return idx;
        ++idx;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends a row holding the given values at the end of the table.
 *
 * @param values one value per column, in column order
 * @return the index of the newly added row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    insert(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at {@code rowIndex}, shifting later rows down.
 *
 * Runs in two phases: (1) validate the row index, the value count and every
 * value's type; (2) only then push the values into native storage column by
 * column and finish with insertDone(). Phase 1 completing before any native
 * write means a validation failure leaves the table untouched.
 *
 * @param rowIndex insertion position, 0 <= rowIndex <= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, value count or a value type
 *         is invalid
 * @throws IllegalStateException if called during a read transaction
 */
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Column types are cached here so phase 2 does not re-query them.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() throws NPE if value is null and
// matchObject rejects null for this column - confirm matchObject's
// null handling.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Native layer stores seconds; sub-second precision is truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// NOTE(review): if value is neither byte[] nor ByteBuffer nothing is
// inserted for this column, leaving the row incomplete - presumably
// matchObject() above rules that out; confirm.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
insertDone();
}
// Recursively inserts the rows of a nested Object[][] into the subtable cell
// at (columnIndex, rowIndex). Runs during insert(), before insertDone(), which
// is why getSubTableDuringInsert() is used instead of getSubTable().
// A null value means "leave the subtable empty".
// NOTE(review): the subtable wrapper is never explicitly closed here; its
// native handle is reclaimed only via finalize() - confirm this is intended.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.insert(i, (Object[])rowArr);
}
}
}
/**
 * Replaces the values of an existing row.
 *
 * Validates the row index, the value count and every value's type before
 * modifying anything, then rewrites the row as a remove followed by an
 * insert. Consequently the operation is not atomic at this level and any
 * subtable content in the row is rebuilt rather than updated in place.
 *
 * @param rowIndex index of the row to overwrite, 0 <= rowIndex < size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, value count or a value type
 *         is invalid
 * @throws IllegalStateException if called during a read transaction
 */
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Inserts a binary cell from a byte array during row construction.
// Fix: check the immutable flag BEFORE the null check, so misuse inside a
// read transaction is reported as such — consistent with every other
// insertXxx() method (including the ByteBuffer overload above).
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts a subtable cell and recursively fills it from the given array of
// row-value arrays (may be null for an empty subtable).
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row assembled by the preceding insertXxx() calls.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Single-cell readers: each delegates straight to the native layer; no
// bounds or type checking is done on the Java side.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Dates are stored natively as epoch seconds, so the returned Date only has
// second resolution.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Maps the native type code of a mixed cell to the ColumnType enum.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Deletes all rows of the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Single-cell writers for existing rows (contrast with the insertXxx() row
// builders above). All refuse to run during a read transaction.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Stored as epoch seconds; millisecond precision is truncated.
// NOTE(review): throws NullPointerException if date is null.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Builds a search index on the column. Only string columns are supported,
// enforced here before touching the native layer.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Reports whether the column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Column-wide aggregates, grouped by element type. Each delegates to the
// native layer; behavior on an empty table is defined by the native side.
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// count(column, value) overloads: number of cells in the column equal to
// the given value, dispatched by value type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// where() starts a typed query; findFirstXxx() returns the row index of the
// first match; findAllXxx() returns a TableView of all matching rows.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Matches on epoch seconds, consistent with how dates are stored.
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize the table's internal storage layout.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table to a JSON string.
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// Human-readable dump of the table; INFINITE means "all rows".
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Central guard used by all mutating methods when in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their own choice will automatically
* inherit from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel row limit for toString(): -1 means "print all rows".
public static final long INFINITE = -1;
// Handle to the underlying native (C++) table; 0 after close().
protected long nativePtr;
// True while the table is part of a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
// Debug-only instrumentation: per-instance id and live-instance counter.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library once, before any native method can be called.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table handle (e.g. a subtable returned by
// getSubTable()). NOTE(review): 'parent' is accepted but not stored here —
// presumably intended to keep the owning table reachable; verify callers.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Safety net: releases the native handle if the object is garbage collected
// without an explicit close(). Finalizers are deprecated in modern Java;
// callers should not rely on this for timely release.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table. Idempotent: a second call is a no-op because
// nativePtr is zeroed. Serialized on a global mutex so finalizer-thread and
// user-thread closes cannot race.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
// A closed table (nativePtr == 0) is never valid; otherwise ask native.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Equality is delegated to the native layer (content comparison), so any
// Table subclass with equal native content compares equal.
// NOTE(review): equals() is overridden without hashCode(); a content-based
// hashCode cannot be computed from Java here, so avoid using Table as a
// hash-map key — confirm whether this is acceptable for callers.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Enforces the native layer's 63-character limit on column names.
private void verifyColumnName(String name) {
if (name.length() > 63) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
<<<<<<< MINE
public TableSchema getSubTableSchema(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
=======
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableDefinition(nativePtr, newPath);
}
>>>>>>> YOURS
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates the table's columns from a TableSpec specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification describing the desired columns; duplicate
* column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the table's column specification as built by the native layer.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Looks up the 0-based index of the column with the given name.
 *
 * @param name column name to search for
 * @return the column index, or -1 if no column has that name
 */
@Override
public long getColumnIndex(String name) {
    // Simple linear scan over the column names; tables have few columns.
    final long count = getColumnCount();
    long idx = 0;
    while (idx < count) {
        if (name.equals(getColumnName(idx))) {
            return idx;
        }
        idx++;
    }
    return -1;
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
// Map the native integer type code onto the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
// Replaces the row at rowIndex with the last row, then shrinks the table.
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row; returns the index of the new row.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows (must be > 0); returns the native result.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
// Appends a row built from 'values' and returns its index.
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
// Inserts a full row at rowIndex. Validates the index, the value count and
// each value's type BEFORE any native insert, then writes every cell via
// the low-level insertXxx natives and commits with insertDone().
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): if 'value' is null and rejected by matchObject, the
// value.getClass() call below throws NullPointerException instead of
// this IllegalArgumentException.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Dates are stored as epoch seconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
// matchObject() has already ensured the value is one of these two.
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
insertDone();
}
// Recursively fills a just-inserted subtable cell from an Object[][] of row
// values; a null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.insert(i, (Object[])rowArr);
}
}
}
// Replaces the entire row at rowIndex. After validating the index, value
// count and value types, the row is removed and re-inserted (see TODO).
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): same null-value NPE hazard as in insert() — if value is
// null and rejected, value.getClass() throws NullPointerException.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Low-level row-building API: each insertXxx() writes one cell of the row
// under construction and must eventually be followed by insertDone().
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Dates are stored natively as epoch seconds, truncating milliseconds.
// NOTE(review): throws NullPointerException if date is null.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Inserts a binary cell from a ByteBuffer. Only direct buffers
// (ByteBuffer.allocateDirect) are supported by the native layer.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed cell readers. Each forwards straight to the JNI layer; no index or
// type validation is done on the Java side (the native layer is trusted).
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native value is seconds since epoch; convert back to Java milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Resolves the runtime type of a Mixed cell from its native type code.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that, or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Typed cell writers. Each rejects writes on an immutable table, then
// forwards to the matching JNI call.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Stored with second resolution (see getDate / insertDate).
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell of the (integer) column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the given column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if the given column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Column-wide aggregates, one family per numeric column type. All of them
// delegate directly to the native layer.
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value (overloaded per type).
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Entry point for building a query against this table.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match (per-type overloads).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Dates are compared with second resolution (see insertDate/getDate).
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView holding every matching row.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature: binary-search style bounds on an integer column.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
// View of the distinct values in the given column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize: asks the native layer to compact internal storage.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE (-1) means "no row limit" in the native formatter.
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by every mutating method while in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Unstructured
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class will automatically inherit from this class
* when processed by the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table handle (used for subtables and views).
// 'parent' keeps the owning object reachable; 'immutable' marks read-transaction handles.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Finalizer releases the native handle if the user did not call close().
// NOTE(review): relying on finalization delays native memory reclamation;
// callers should close() explicitly where possible.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table. Idempotent: a second call sees nativePtr == 0
// and returns. Serialized through the global CloseMutex so concurrent
// finalizer/explicit-close calls cannot double-free the native object.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid(){
// A closed handle (nativePtr == 0) can never be valid; otherwise the
// native layer decides (subtables are invalidated when their parent changes).
return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
// Content-based equality, delegated to the native layer so it also works
// for generated typed-table subclasses.
// NOTE(review): equals() is overridden without hashCode(), so two equal
// tables may hash differently - confirm Table is never used as a hash key.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Guard shared by addColumn()/renameColumn(): the native layer limits
// column-name length, so reject over-long names up front.
private void verifyColumnName(String name) {
final int MAX_COLUMN_NAME_LENGTH = 63;
if (name.length() > MAX_COLUMN_NAME_LENGTH) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
<<<<<<< MINE
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
=======
public TableSchema getSubTableSchema(long columnIndex) {
>>>>>>> YOURS
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
*
* @param type data type of the new column
* @param name column name (max 63 characters, enforced by verifyColumnName)
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*
* NOTE(review): unlike most mutators this does not check 'immutable' -
* verify whether schema changes are guarded elsewhere.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's specification from a TableSpec structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
*            the specification (column types and names) to apply;
*            duplicate column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the table's column specification (types and names).
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * Linear scan over the column names; tables typically have few columns.
 *
 * @param name column name; must not be null
 * @return the index, -1 if not found
 * @throws IllegalArgumentException if {@code name} is null
 */
@Override
public long getColumnIndex(String name) {
// BUGFIX: a null name previously surfaced as a bare NullPointerException
// from name.equals() inside the loop; fail fast with a clear message.
if (name == null)
throw new IllegalArgumentException("Column name can not be null.");
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
// Native layer reports an int type code; map it to the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*
* Replaces the row at rowIndex with the last row, then shrinks the table by
* one (faster than remove() because no rows are shifted; row order changes).
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; equivalent to addEmptyRows(1). */
public long addEmptyRow() {
return addEmptyRows(1);
}
/**
 * Appends {@code rows} empty rows.
 *
 * @param rows number of rows to append; must be &gt; 0
 */
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/** Appends a row built from 'values' and returns its index. */
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a full row at rowIndex. Validates the argument count and the type
 * of every value BEFORE any native insert call, so a failed validation
 * leaves the table untouched. The per-column nativeInsert* calls followed by
 * insertDone() form the native row-building protocol; their order matters,
 * so keep the two loops (verify, then insert) separate.
 *
 * @param rowIndex insertion position, 0 <= rowIndex <= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong arity, or type mismatch
 */
public void insert(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Cache each column's type so the insert loop below does not re-query it.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// NOTE(review): value.getClass() throws NPE here if a null value fails
// the type check - assumes matchObject() accepts null; verify.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + value.getClass() + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case ColumnTypeBool:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case ColumnTypeInt:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case ColumnTypeFloat:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case ColumnTypeDouble:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case ColumnTypeString:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case ColumnTypeDate:
// Stored with second resolution; milliseconds are truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case ColumnTypeMixed:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case ColumnTypeBinary:
if (value instanceof byte[])
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
else if (value instanceof ByteBuffer)
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
break;
case ColumnTypeTable:
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
// Commit the assembled row.
insertDone();
}
// Recursively fills a freshly inserted subtable cell from a 2-D value array.
// A null value means "leave the subtable empty".
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value == null)
return;
// The subtable row count is not final until insertDone(), so use the
// insert-time accessor rather than getSubTable().
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
Object[] subtableRows = (Object[]) value;
for (int i = 0; i < subtableRows.length; ++i) {
subtable.insert(i, (Object[]) subtableRows[i]);
}
}
/**
 * Replaces the row at rowIndex with the given values. All values are
 * validated (arity and type) before any mutation, then the row is removed
 * and re-inserted.
 *
 * @param rowIndex index of the row to replace, 0 <= rowIndex < size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong arity, or type mismatch
 */
public void set(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex >= size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be < table.size() " + String.valueOf(size) + ".");
}
// Verify number of 'values'
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Verify type of 'values'
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// BUGFIX: 'value.getClass()' previously threw NullPointerException when a
// null value failed the type check, masking the real validation error.
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " +
(value == null ? "null" : value.getClass()) + ".");
}
}
// Now that all values are verified, we can remove the row and insert it again.
// TODO: Can be optimized to only set the values (but clear any subtables)
remove(rowIndex);
insert(rowIndex, values);
}
// Low-level single-cell insert helpers. Each call stages one cell of the row
// being built; the row is committed by a final insertDone() call.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Dates are passed to the native layer with second precision (epoch millis / 1000).
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
// Only direct (ByteBuffer.allocateDirect) buffers can cross the JNI boundary here.
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("ByteBuffer is null");
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
// Inserts an (initially empty) subtable cell, then fills it row-by-row from 'values'.
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Commits the row staged by the insert* calls above.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed cell getters. Each simply delegates to the native core; no bounds
// checking is done on the Java side.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Native layer stores seconds; convert back to millisecond-based java.util.Date.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Reports the runtime type currently held by a mixed-typed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the row count of the subtable at the given cell without materializing it.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Typed cell setters. Unlike the insert* helpers these overwrite an existing
// cell in place; all of them reject mutation during a read transaction.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Stored with second precision, matching insertDate/getDate.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the new value; must not be null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds 'value' to every cell in an integer column.
*
* @param columnIndex column index of the cell
* @param value amount to add to each cell
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column; currently only string columns are indexable.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
// Column-wide aggregates over integer columns; computed natively.
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Aggregates over float columns (sum/average widen to double).
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Aggregates over double columns.
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
// Counts the rows whose cell in the given column equals 'value'.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Starts a new query builder over this table.
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst* return the row index of the first match (semantics of "no match"
// are defined by the native core).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* return a TableView over every matching row.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the given column.
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Asks the native core to optimize internal storage (e.g. string enumeration).
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table to a JSON string (produced natively).
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// Human-readable dump; INFINITE (-1) means "no row limit".
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared guard used by every mutating method when the table is read-only.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* Table classes generated by the tightdb class generator for user-defined
* tables automatically inherit from this class.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString meaning "no row limit".
public static final long INFINITE = -1;
// Handle to the native (C++) table object; 0 once close() has run.
protected long nativePtr;
// True while the table is part of a read transaction; mutators then throw.
protected boolean immutable = false;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Ensure the JNI library is loaded before any native method is touched.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table pointer (used for subtables and internal views).
// NOTE(review): 'parent' is not stored anywhere — presumably intended to keep
// the owning object reachable while this wrapper is alive; confirm whether the
// missing reference can let the parent be garbage-collected too early.
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Last-resort cleanup: releases the native table if close() was never called.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the native table object. Idempotent: a second call is a no-op.
// Serialized on a global mutex so finalizer and user threads cannot race.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
// Zeroing the pointer marks this handle closed for isValid() and re-close.
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all its subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
// Reports whether this handle can still be used: a closed handle
// (nativePtr == 0) is never valid; otherwise the native core decides.
public boolean isValid(){
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Content-based equality, delegated to the native core so it also works
 * across all generated typed-table subclasses.
 */
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // instanceof rejects null and non-Table arguments in one step, and
    // accepts typed-table subclasses just like the original check did.
    if (!(other instanceof Table))
        return false;
    // NOTE(review): hashCode() is not overridden alongside equals(), so equal
    // tables may hash differently — confirm Table is never used as a hash key.
    Table that = (Table) other;
    return nativeEquals(nativePtr, that.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the native layer's 63-character limit.
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() > maxNameLength) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
<<<<<<< MINE
@Override
public TableDefinition getSubTableDefinition(long columnIndex) {
=======
public TableSchema getSubTableSchema(long columnIndex) {
>>>>>>> YOURS
if(nativeIsRootTable(nativePtr) == false)
throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
long[] newPath = new long[1];
newPath[0] = columnIndex;
return new SubTableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
// NOTE(review): unlike updateFromSpec(), the add/remove/rename column methods
// do not check 'immutable' — confirm whether that guard is intentional here.
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification structure describing the columns to create.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the native column specification (types and names) of this table.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param name column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String name) {
// Linear scan over column names; column counts are expected to be small.
long columnCount = getColumnCount();
for (long i = 0; i < columnCount; i++) {
if (name.equals(getColumnName(i))) {
return i;
}
}
return -1;
}
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
// Replaces the row at rowIndex with the last row, then drops the last row.
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one default-initialized row; returns the index reported natively.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
// Appends a full row of values and returns its index.
public long add(Object... values) {
long rowIndex = size();
insert(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a full row at {@code rowIndex}, shifting later rows down. All values
 * are validated against the table's column count and column types before any
 * cell is written; the row is committed with a final insertDone().
 *
 * @param rowIndex 0 based insertion position (may equal size() to append).
 * @param values   one value per column, in column order.
 * @throws IllegalArgumentException if the index is out of range, the number of
 *         values does not match the column count, or a value's type does not
 *         match its column type.
 */
public void insert(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Cache column types so the switch below doesn't re-query the native layer.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: guard against value == null — the original called
            // value.getClass() unconditionally, throwing an uninformative
            // NullPointerException instead of this IllegalArgumentException.
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " +
                    (value == null ? "null" : value.getClass().toString()) + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case ColumnTypeBool:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case ColumnTypeInt:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case ColumnTypeFloat:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case ColumnTypeDouble:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case ColumnTypeString:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case ColumnTypeDate:
            // Native layer stores seconds, not milliseconds.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case ColumnTypeMixed:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case ColumnTypeBinary:
            // Binary cells accept either a byte[] or a (direct) ByteBuffer.
            if (value instanceof byte[])
                nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            else if (value instanceof ByteBuffer)
                nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, (ByteBuffer)value);
            break;
        case ColumnTypeTable:
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    insertDone();
}
// Recursively populates the subtable cell at (columnIndex, rowIndex) from a
// nested Object[][]; null means "leave the new subtable empty".
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null)
        return;
    Object[] nestedRows = (Object[]) value;
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    int subRowIndex = 0;
    for (Object nestedRow : nestedRows) {
        // Every nested element is one row (an Object[]) of the subtable.
        subtable.insert(subRowIndex++, (Object[]) nestedRow);
    }
}
/**
 * Replaces the row at {@code rowIndex} with the given values.
 *
 * Validates the index, the value count and each value's type first, then
 * performs the update as remove + insert (so any subtables are cleared).
 *
 * @param rowIndex 0-based row index; must be &lt; size()
 * @param values   one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong arity or type mismatch
 * @throws IllegalStateException    if the table is immutable
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Null-safe description of the offending value's type: a null that
            // fails matchObject() must not NPE while building the message.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    insert(rowIndex, values);
}
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// Inserts a Date cell. The native layer stores whole seconds since the epoch,
// so getTime() (milliseconds) is divided by 1000 — sub-second precision is
// truncated away. NOTE(review): a null 'date' throws a bare NPE here, unlike
// insertBinary which null-checks explicitly — confirm whether a guard is wanted.
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/**
 * Inserts a binary cell from a ByteBuffer.
 *
 * The buffer must be direct (ByteBuffer.allocateDirect) so the native layer
 * can read it without copying.
 *
 * @throws NullPointerException  if data is null
 * @throws IllegalStateException if the table is immutable
 * @throws RuntimeException      if the buffer is not direct
 */
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("ByteBuffer is null");
    if (data.isDirect())
        nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeInsertByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
// Inserts a binary cell from a heap byte array; unlike the ByteBuffer
// overload there is no allocateDirect requirement. NOTE(review): here the
// null check precedes the immutability check — the reverse of the ByteBuffer
// overload — so a null on an immutable table throws NPE, not IllegalStateException.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void addLong(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.ColumnTypeString)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
@Override
public long sum(long columnIndex) {
return nativeSum(nativePtr, columnIndex);
}
protected native long nativeSum(long nativePtr, long columnIndex);
@Override
public long maximum(long columnIndex) {
return nativeMaximum(nativePtr, columnIndex);
}
protected native long nativeMaximum(long nativePtr, long columnIndex);
@Override
public long minimum(long columnIndex) {
return nativeMinimum(nativePtr, columnIndex);
}
protected native long nativeMinimum(long nativePtr, long columnnIndex);
@Override
public double average(long columnIndex) {
return nativeAverage(nativePtr, columnIndex);
}
protected native double nativeAverage(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with index
@Override
public long lookup(String value) {
// Indexed fast-path lookup: only valid when column 0 is a String column
// that has an index set (see setIndex/hasIndex).
if (!this.hasIndex(0) || this.getColumnType(0) != ColumnType.ColumnTypeString)
throw new RuntimeException("lookup() requires index on column 0 which must be a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
public TableView distinct(long columnIndex) {
return new TableView(nativeDistinct(nativePtr, columnIndex), immutable);
}
protected native long nativeDistinct(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Diff Result
No diff
Case 86 - java_realmjava.rev_7ec56_c3440..Table.java
Base
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
public static final long INFINITE = -1;
protected long nativePtr;
protected boolean immutable = false;
protected Object parent = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
this.parent = parent;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Releases the underlying native table. Serialized on a global mutex so a
// finalizer and an explicit close cannot race into a double-free; nativePtr
// is zeroed afterwards, which both marks the table invalid and makes this
// method idempotent (a second call returns early).
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
// True while the native table is still usable. A zero pointer means the
// table was closed; otherwise defer to the native validity check.
public boolean isValid() {
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
// Structural equality is decided by the native core, not by Java fields.
// NOTE(review): no matching hashCode() is visible in this chunk — confirm
// one exists elsewhere, otherwise the equals/hashCode contract is broken.
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the core's current hard limit.
private void verifyColumnName(String name) {
    final int MAX_COLUMN_NAME_LENGTH = 63;
    if (name.length() > MAX_COLUMN_NAME_LENGTH) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 *
 * Only valid on a root table; subtables cannot start a schema path.
 *
 * @throws UnsupportedOperationException if called on a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the subtable column (single hop).
    return new SubtableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
// NOTE(review): unlike the other mutators (clear, remove, set...), there is
// no 'if (immutable) throwImmutable();' guard here or in removeColumn /
// renameColumn — confirm whether schema changes are rejected natively.
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param columnType
* data type of the column @see <code>ColumnType</code>
* @param columnName
* name of the column. Duplicate column name is not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
// Null is rejected eagerly so the native layer never sees a null string;
// a name that simply doesn't exist yields -1 (per the javadoc above).
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table
* @param values
* @return The row index of the appended row
*/
// Appends one row to the end of the table and returns its index.
// Delegates all validation and the actual insertion to addAt().
public long add(Object... values) {
    final long appendIndex = size(); // append position == current row count
    addAt(appendIndex, values);
    return appendIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
* @param rowIndex
* @param values
*/
// Inserts a full row at rowIndex in two phases: (1) validate index, arity and
// every value's type up front so a failure leaves the table untouched, then
// (2) push each value to the native layer column by column and commit with
// nativeInsertDone. The phase ordering is load-bearing: no native insert
// happens until all values have passed matchObject().
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Column types are cached here so phase 2 does not re-query the native layer.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
// Any Number subtype is accepted; stored as its longValue().
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Native storage is whole seconds; millisecond precision is truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Create the subtable cell, then fill it recursively from a nested Object[][].
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
/**
 * Recursively populates a subtable cell from a nested Object[][] value.
 * A null value leaves the freshly inserted subtable empty.
 */
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    // Fetch the subtable first (mirrors the pre-insertDone access path),
    // then add each nested row in order.
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] subtableRows = (Object[]) value;
    for (int row = 0; row < subtableRows.length; ++row) {
        subtable.addAt(row, (Object[]) subtableRows[row]);
    }
}
/**
 * Returns a view of this table sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order       Order.ascending or Order.descending
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex, Order order){
    return new TableView(nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending)), immutable);
}

/**
 * Returns a view of this table sorted by the specified column in the
 * default (ascending) order.
 *
 * @param columnIndex column to sort by
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex){
    return new TableView(nativeGetSortedView(nativePtr, columnIndex, true), immutable);
}

protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified values.
 * Implemented as remove + insert, so any subtables in the row are rebuilt.
 *
 * @param rowIndex index of the row to replace; must be &lt; size()
 * @param values   one value per column, each compatible with its column type
 * @throws IllegalArgumentException if the index is out of range, the number
 *         of values does not match the column count, or a value's type does
 *         not match its column's type
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + rowIndex +
                " must be < table.size() " + size + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                values.length +
                ") does not match the number of columns in the table (" +
                columns + ").");
    }
    // Verify type of 'values'
    ColumnType[] colTypes = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: report "null" instead of dereferencing a null value,
            // which previously threw NullPointerException while building
            // the error message (addAt already handled this case).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + (1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType +
                    ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
// Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();

// Returns the InternalMethods instance with public internal methods.
// Should only be called by AbstractTable.
public InternalMethods getInternalMethods(){
    return this.internal;
}

// Holds methods that must be publicly available for AbstractClass.
// Should not be called when using the dynamic interface; the methods can be
// accessed by calling getInternalMethods() in the Table class.
// Each insert* call writes one cell of a pending row; insertDone() must be
// called once after the last cell.
public class InternalMethods{

    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }

    // Dates are stored natively with second precision (millis / 1000).
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }

    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }

    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }

    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */

    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }

    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }

    // Must be called once after all cells of a pending row are inserted.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
// Native insert primitives backing both addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);

/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/

protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//

/** Returns the long value of the cell at (columnIndex, rowIndex). */
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the boolean value of the cell at (columnIndex, rowIndex). */
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the float value of the cell at (columnIndex, rowIndex). */
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the double value of the cell at (columnIndex, rowIndex). */
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 * Returns the date value of the cell. The native layer stores seconds
 * since the epoch; java.util.Date expects milliseconds, hence the * 1000.
 */
@Override
public Date getDate(long columnIndex, long rowIndex) {
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);

/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/

/** Returns the binary value of the cell as a byte array. */
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);

/** Returns the mixed-typed value of the cell. */
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}

/** Returns the runtime type of the value stored in a MIXED cell. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);

// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the number of rows in the subtable at the given cell. */
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);

/** Removes all rows from the subtable at the given cell. */
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//

/** Sets the long value of the cell at (columnIndex, rowIndex). */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);

/** Sets the boolean value of the cell at (columnIndex, rowIndex). */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);

/** Sets the float value of the cell at (columnIndex, rowIndex). */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);

/** Sets the double value of the cell at (columnIndex, rowIndex). */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);

/** Sets the date value of the cell; stored natively with second precision. */
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);

/** Sets the string value of the cell; null is not allowed. */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (value == null)
        throw new NullPointerException("Null String is not allowed.");
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/

/** Sets the binary value of the cell; null is not allowed. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);

/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data the new value; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds {@code value} to all cells in the given (integer) column.
 *
 * @param columnIndex column whose cells are adjusted
 * @param value amount added to every cell
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);

/**
 * Creates a search index on the given column.
 * Only string columns are currently supported.
 *
 * @throws IllegalArgumentException if the column is not a string column
 */
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);

/** Returns true if the given column has a search index. */
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//

// Integers

/** Returns the sum of all values in an integer column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);

/** Returns the maximum value in an integer column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);

/** Returns the minimum value in an integer column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Parameter name fixed from misspelled "columnnIndex"; JNI binds by method
// signature, so native declarations' parameter names are not behavioral.
protected native long nativeMinimumInt(long nativePtr, long columnIndex);

/** Returns the average of all values in an integer column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);

// Floats

/** Returns the sum of all values in a float column (as a double). */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);

/** Returns the maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

/** Returns the minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Parameter name fixed from misspelled "columnnIndex".
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

/** Returns the average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles

/** Returns the sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);

/** Returns the maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

/** Returns the minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Parameter name fixed from misspelled "columnnIndex".
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

/** Returns the average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//

/** Counts the cells in an integer column that equal {@code value}. */
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);

/** Counts the cells in a float column that equal {@code value}. */
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);

/** Counts the cells in a double column that equal {@code value}. */
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);

/** Counts the cells in a string column that equal {@code value}. */
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//

/** Returns a new query object on this table. */
@Override
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);

/** Returns the index of the first row whose cell equals {@code value}. */
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);

@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);

@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);

@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);

// Dates are compared with second precision (millis / 1000).
@Override
public long findFirstDate(long columnIndex, Date date) {
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);

@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);

/** Returns a view of all rows whose cell equals {@code value}. */
@Override
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);

@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);

@Override
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);

@Override
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);

// Dates are compared with second precision (millis / 1000).
@Override
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);

@Override
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/**
 * Finds the row whose first-column string equals {@code value}.
 * Requires that column 0 is a string column with unique values; a search
 * index on it is not required but improves performance.
 *
 * @param value the string to look up; must not be null
 * @throws NullPointerException if {@code value} is null
 * @throws UnsupportedOperationException if column 0 is not a string column
 */
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    ColumnType firstColumnType = this.getColumnType(0);
    if (firstColumnType != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
// Experimental feature

/** Lower bound of {@code value} in a sorted integer column (experimental). */
protected native long nativeLookup(long nativeTablePtr, String value);

@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}

/** Upper bound of {@code value} in a sorted integer column (experimental). */
@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);

//

/** Returns a view holding one row per distinct value in the given column. */
public TableView getDistinctView(long columnIndex) {
    return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);

// Optimize

/** Asks the native layer to optimize the table's internal storage. */
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);

/** Returns the whole table serialized as JSON. */
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);

/** Returns a human-readable dump of the whole table (no row limit). */
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}

/** Returns a human-readable dump of at most {@code maxRows} rows. */
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);

/** Returns a human-readable dump of a single row. */
@Override
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);

// Shared failure path for all mutating methods called on an immutable table.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class is generated by the tightdb class generator and
* automatically inherits from this class.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 once close() has run.
protected long nativePtr;
// True while the table belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Owning object (group/parent table); kept alive to prevent premature GC.
protected Object parent = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;

static {
    // Ensure the native library is loaded before any native method is called.
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}

protected native long createNative();
/**
 * Wraps an existing native table pointer (e.g. a subtable or a table owned
 * by a group). {@code parent} is retained so the owner outlives this wrapper.
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    this.parent = parent;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}

// NOTE(review): relies on finalization for native cleanup; finalize() is
// deprecated in modern Java — callers should prefer explicit close().
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}

// Releases the native table. Idempotent: a second call is ignored because
// nativePtr is zeroed. Serialized via CloseMutex so finalizer and explicit
// close cannot race.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;
    }
}

protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed table (nativePtr == 0) is never valid.
    if (nativePtr == 0)
        return false;
    return nativeIsValid(nativePtr);
}

protected native boolean nativeIsValid(long nativeTablePtr);
// Content-based equality delegated to the native layer.
// NOTE(review): equals() is overridden without a matching hashCode(), which
// breaks the Object contract for hash-based collections. A consistent hash
// would need native content hashing — flagged rather than fixed here.
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    if (other == null)
        return false;
    // Has to work for all the typed tables as well
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}

protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is passed to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null (previously surfaced
 *         as an unexplained NPE from {@code name.length()}; explicit check is
 *         consistent with getColumnIndex)
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name can not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema object for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @param columnIndex index of the subtable column
 * @throws UnsupportedOperationException when called on a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}

protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type the column's data type
 * @param name the column name (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Schema changes mutate the table: guard like every other mutator
    // (updateFromSpec, clear, remove, ...) — previously missing here.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}

protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column from the table dynamically.
 *
 * @param columnIndex index of the column to remove
 */
@Override
public void removeColumn(long columnIndex) {
    // Schema changes mutate the table: guard like every other mutator
    // (updateFromSpec, clear, remove, ...) — previously missing here.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}

protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName the new name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Schema changes mutate the table: guard like every other mutator
    // (updateFromSpec, clear, remove, ...) — previously missing here.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}

protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a TableSpec structure.
 * Supported column types — refer to {@code ColumnType}.
 *
 * @param tableSpec
 *            the specification describing the columns of this table;
 *            duplicate column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}

protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.

/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}

protected native long nativeSize(long nativeTablePtr);

/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}

/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}

protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/**
 * Returns the table's column specification as maintained by the native core.
 */
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name; must not be null
 * @return the index, -1 if not found
 * @throws NullPointerException if columnName is null
 */
@Override
public long getColumnIndex(String columnName) {
    if (columnName == null)
        throw new NullPointerException("Column name can not be null.");
    return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    // The native layer reports an int code which is mapped back to the enum.
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/**
 * Removes the last row of the table.
 *
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/**
 * Appends one empty (default-valued) row to the table.
 *
 * @return the row index of the added row.
 * @throws IllegalStateException if the table is immutable (read transaction).
 */
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}
/**
 * Appends 'rows' empty rows to the table.
 *
 * @param rows number of rows to add; must be &gt; 0.
 * @return the row index of the first added row.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if rows &lt; 1.
 */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * Delegates to {@link #addAt(long, Object...)} with the current row count as
 * the insertion position, so all index/type validation happens there.
 *
 * @param values one value per column, in column order.
 * @return The row index of the appended row.
 */
public long add(Object... values) {
    final long appendedRowIndex = size();
    addAt(appendedRowIndex, values);
    return appendedRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * The insert happens in three phases: (1) validate the index, (2) validate
 * that every value matches its column's type, (3) push each value to the
 * native core column by column, finishing with nativeInsertDone().
 *
 * @param rowIndex position to insert at; must be &lt;= size().
 * @param values one value per column, in column order.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if the index, value count, or any value
 *         type is invalid.
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify all types up front so no partial row is pushed to the native
    // layer; the resolved types are cached for the insert loop below.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            // Accepts any Number (Integer, Long, ...) and widens to long.
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            // NOTE(review): unlike INTEGER, this casts to Float directly —
            // a Double value passing matchObject would throw CCE; confirm
            // matchObject only accepts Float here.
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // Dates are stored with second precision (millis truncated).
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            // Create the subtable cell first, then recursively fill it.
            nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively fills a freshly inserted subtable cell with rows.
// 'value' is expected to be an Object[] of rows, each itself an Object[] of
// cell values; a null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.addAt(i, (Object[])rowArr);
        }
    }
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex column to sort by.
 * @param order Order.ascending or Order.descending.
 * @return a sorted TableView sharing this table's immutability flag.
 */
public TableView getSortedView(long columnIndex, Order order){
    return new TableView(nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending)), immutable);
}
/**
 * Returns a view sorted by the specified column in the default (ascending)
 * order.
 *
 * @param columnIndex column to sort by.
 * @return a sorted TableView.
 */
public TableView getSortedView(long columnIndex){
    return new TableView(nativeGetSortedView(nativePtr, columnIndex, true), immutable);
}
// NOTE(review): the first parameter name says 'nativeTableViewPtr' but the
// callers above pass the table's nativePtr — the name looks mislabeled.
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * Validation mirrors {@link #addAt(long, Object...)}: the index must be in
 * range, the value count must equal the column count, and every value must
 * match its column's type before any mutation happens.
 *
 * @param rowIndex index of the row to replace; must be &lt; size().
 * @param values one value per column, in column order.
 * @throws IllegalStateException if the table is immutable (read transaction).
 * @throws IllegalArgumentException if the index, value count, or any value
 *         type is invalid.
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Bug fix: describe the offending value without dereferencing it —
            // the previous code called value.getClass() even when value was
            // null, turning the intended IllegalArgumentException into an NPE.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
    return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods{
    // Each insert* method checks the enclosing table's immutability flag and
    // then forwards directly to the corresponding native insert call. An
    // insert sequence must be finished with insertDone().
    public void insertLong(long columnIndex, long rowIndex, long value) {
        if (immutable) throwImmutable();
        nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDouble(long columnIndex, long rowIndex, double value) {
        if (immutable) throwImmutable();
        nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertFloat(long columnIndex, long rowIndex, float value) {
        if (immutable) throwImmutable();
        nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
        if (immutable) throwImmutable();
        nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertDate(long columnIndex, long rowIndex, Date date) {
        if (immutable) throwImmutable();
        // Stored with second precision (millis truncated).
        nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
    }
    public void insertString(long columnIndex, long rowIndex, String value) {
        if (immutable) throwImmutable();
        nativeInsertString(nativePtr, columnIndex, rowIndex, value);
    }
    public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
        if (immutable) throwImmutable();
        nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
    }
    /*
    public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
        if (immutable) throwImmutable();
        //System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
        //System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
        if (data.isDirect())
            nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
        else
            throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
    }
    */
    public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
        if (immutable) throwImmutable();
        if(data != null)
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
        else
            throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
    }
    public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
        if (immutable) throwImmutable();
        nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
        insertSubtableValues(rowIndex, columnIndex, values);
    }
    // Commits the pending inserted row to the native core.
    public void insertDone() {
        if (immutable) throwImmutable();
        nativeInsertDone(nativePtr);
    }
}
// Native insert primitives backing addAt() and InternalMethods above.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
    if (data == null)
        throw new NullPointerException("Null Array");
    if (immutable) throwImmutable();
    nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter reads one cell identified by (columnIndex, rowIndex), both
// zero-based, straight from the native core.
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
    // The core stores seconds; convert back to Java's millisecond epoch.
    return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
    return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
    return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
    return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
    return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type currently stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
    return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
    // 'this' is passed as parent to keep this table reachable while the
    // subtable is alive.
    return new Table(this, nativeGetSubTable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
    return new Table(this, nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
    return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
    if (immutable) throwImmutable();
    nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter overwrites one existing cell (columnIndex, rowIndex), rejecting
// the call first if the table is immutable (read transaction).
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // Stored with second precision (millis truncated).
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (value == null)
        throw new NullPointerException("Null String is not allowed.");
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount to add to every cell of the column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
// NOTE(review): first parameter is named 'nativeViewPtr' but the caller passes
// the table pointer — the name looks mislabeled.
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column. Only STRING columns are
 * supported.
 *
 * @throws IllegalArgumentException if the column is not a string column.
 */
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Whether a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Column-wide sum/max/min/average, delegated to the native core, one family
// per numeric column type.
// Integers
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value, per value type.
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// where() starts a query builder; findFirst* returns the row index of the
// first match; findAll* returns a TableView of all matches.
@Override
public TableQuery where() {
    return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
    // Dates are compared with second precision (millis truncated).
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
    return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
    return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
    return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
    return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
    return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
/**
 * Looks up the row whose first-column string equals 'value'.
 *
 * @param value the key to look up; must not be null.
 * @return the row index of the match (native-defined result for no match).
 * @throws NullPointerException if value is null.
 * @throws UnsupportedOperationException if column 0 is not a STRING column.
 */
@Override
public long lookup(String value) {
    if (value == null)
        throw new NullPointerException("String must not be null.");
    if (this.getColumnType(0) != ColumnType.STRING)
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on an integer column; semantics are defined by
// the native core (presumably requires the column to be sorted — confirm).
@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// View containing one row per distinct value in the given column.
public TableView getDistinctView(long columnIndex) {
    return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native core to optimize the table's internal representation.
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the entire table to a JSON string (native implementation).
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
    // INFINITE (-1) tells the native layer to render all rows.
    return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for all mutators called while in a read transaction.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Left
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/* TODO: Add an isEqual(Table) method. */
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "render all rows".
public static final long INFINITE = -1;
// Handle to the underlying native (C++) table; 0 once close() has run.
protected long nativePtr;
// True while inside a read transaction; mutators then call throwImmutable().
protected boolean immutable = false;
// Owner object kept reachable so the native parent outlives this subtable.
protected Object parent = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
    // Ensure the JNI library is loaded before any native method is touched.
    TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated.
 */
public Table() {
    // Native methods work will be initialized here. Generated classes will
    // have nothing to do with the native functions. Generated Java Table
    // classes will work as a wrapper on top of table.
    nativePtr = createNative();
    if (nativePtr == 0)
        throw new OutOfMemoryError("Out of native memory.");
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
    }
}
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable); 'parent' is held
// only to keep the owning object reachable while this wrapper is alive.
protected Table(Object parent, long nativePtr, boolean immutable) {
    this.immutable = immutable;
    this.nativePtr = nativePtr;
    this.parent = parent;
    if (DEBUG) {
        tableNo = ++TableCount;
        System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
    }
}
// Releases the native table when the wrapper is garbage collected. close()
// is also safe to call explicitly (and preferable, per getSubTable's docs).
@Override
public void finalize() throws Throwable {
    if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
    try {
        close();
    } finally {
        super.finalize();
    }
}
// Idempotent: frees the native table exactly once. Serialized through the
// global CloseMutex so concurrent finalizers can't double-free.
private void close() {
    synchronized (CloseMutex.getInstance()) {
        if (nativePtr == 0) {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
            return;
        }
        if (DEBUG) {
            TableCount--;
            System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
        }
        nativeClose(nativePtr);
        nativePtr = 0;   // marks this wrapper as closed
    }
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
    // A closed wrapper (nativePtr == 0) is never valid; otherwise ask the core.
    if (nativePtr == 0)
        return false;
    return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native core reports their contents equal.
 * Works for all generated typed-table subclasses as well, since any
 * {@code Table} subtype is accepted.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    // instanceof is null-safe and also admits typed subclasses.
    if (!(other instanceof Table)) {
        return false;
    }
    // NOTE(review): hashCode() is not overridden alongside equals(); confirm
    // Table instances are never used as hash-based collection keys.
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native layer.
 *
 * @param name the proposed column name; must be non-null and at most 63 characters
 * @throws NullPointerException if {@code name} is null
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        // Fail fast with a clear message instead of an anonymous NPE from name.length().
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema object for the subtable column at {@code columnIndex}.
 * May only be called on a root (top-level) table.
 *
 * @param columnIndex 0-based index of a TABLE-typed column
 * @return a TableSchema addressing that subtable column
 * @throws UnsupportedOperationException if this table is itself a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root table down to the subtable column; for a root table
    // this is a single-element path.
    long[] newPath = { columnIndex };
    return new SubtableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column, see {@code ColumnType}
 * @param name column name (non-null, max 63 characters)
 * @return Index of the new column.
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public long addColumn (ColumnType type, String name) {
    // A schema change is a mutation: reject it inside a read transaction,
    // consistent with the other mutating methods (clear, updateFromSpec, ...).
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void removeColumn(long columnIndex) {
    // Schema change - disallowed on an immutable (read-transaction) table,
    // consistent with the other mutating methods.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column
 * @param newName the new name (non-null, max 63 characters)
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Schema change - disallowed on an immutable (read-transaction) table,
    // consistent with the other mutating methods.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column structure from a TableSpec specification.
 * Supported column types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            specification describing the columns (names and types) the
 *            table should have. Duplicate column names are not allowed.
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table size and deletion. Auto-generated subclasses have nothing to do with
// this class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/**
 * Appends one empty row (all columns take their default values).
 *
 * @return the value returned by the native core
 *         (NOTE(review): presumably the new row's index - confirm)
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}
/**
 * Appends {@code rows} empty rows.
 *
 * @param rows number of rows to append; must be &gt; 0
 * @return the value returned by the native core
 *         (NOTE(review): presumably the index of the first appended row - confirm)
 * @throws IllegalArgumentException if {@code rows} is less than 1
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return the row index of the appended row
 */
public long add(Object... values) {
    long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that
 * row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex 0-based position for the new row; must be &lt;= size()
 * @param values one value per column, in column order; each value must be
 *               compatible with the corresponding column's type
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index is out of range, the number
 *         of values does not match the column count, or a value's type does
 *         not match its column
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Validate every value against its column type *before* inserting
    // anything, so a type error cannot leave a half-written row behind.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // The core stores dates as seconds since the epoch; Date uses millis.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
            // A subtable value is an array of rows; insert them recursively.
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively fills a freshly inserted subtable cell with the given rows.
// 'value' is expected to be an Object[] of rows (each row itself an
// Object[] of cell values); a null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    // Obtain the subtable via the insert-time accessor (row count of the
    // parent is not updated until insertDone()).
    Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
    Object[] subtableRows = (Object[]) value;
    for (int row = 0; row < subtableRows.length; row++) {
        subtable.addAt(row, (Object[]) subtableRows[row]);
    }
}
/**
* Returns a view sorted by the specified column and order
* @param columnIndex
* @param order
* @return
*/
public TableView getSortedView(long columnIndex, Order order){
return new TableView(nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending)), immutable);
}
/**
* Returns a view sorted by the specified column by the default order
* @param columnIndex
* @return
*/
public TableView getSortedView(long columnIndex){
return new TableView(nativeGetSortedView(nativePtr, columnIndex, true), immutable);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 * All values are validated against the column types before any change is
 * made, so a type error leaves the table untouched.
 *
 * @param rowIndex 0-based index of the row to replace; must be &lt; size()
 * @param values one value per column, in column order
 * @throws IllegalStateException if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index is out of range, the number
 *         of values does not match the column count, or a value's type does
 *         not match its column
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Describe the offending value without NPE-ing when it is null
            // (mirrors the error reporting in addAt()).
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods{
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
@Override
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. An
// index on the column is not required, but will give better performance.
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    // lookup() only works against column 0, and only when it holds strings.
    ColumnType firstColumnType = getColumnType(0);
    if (firstColumnType != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Central guard used by every mutating method: invoked when 'immutable' is
// set, i.e. this table accessor was obtained inside a read transaction.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.nio.ByteBuffer;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.internal.CloseMutex;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class inherits from the Table class described above.
* It has all the high-level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the underlying native (C++) table; 0 once close() has run.
protected long nativePtr;
// True when obtained inside a read transaction; mutating methods then throw
// via throwImmutable().
protected boolean immutable = false;
// Owning object (presumably kept to tie native lifetimes together and keep
// the owner reachable -- TODO confirm against Group/subtable callers).
protected Object parent = null;
// test:
// Debug-only bookkeeping for tracking live native table instances.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native TightDB library once, before any native method is called.
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 *
 * @throws OutOfMemoryError if the native table could not be allocated
 */
public Table() {
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
// Allocates the native table; returns its pointer, or 0 on allocation failure.
protected native long createNative();
/**
 * Wraps an already-allocated native table.
 *
 * @param parent    owning object (subtable parent, group, ...); stored so it
 *                  stays reachable while this wrapper is alive
 * @param nativePtr pointer to the existing native table
 * @param immutable true when obtained inside a read transaction
 */
protected Table(Object parent, long nativePtr, boolean immutable) {
this.immutable = immutable;
this.nativePtr = nativePtr;
this.parent = parent;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table when this wrapper is garbage collected.
@Override
public void finalize() throws Throwable {
if (DEBUG) System.err.println("==== FINALIZE " + tableNo + "...");
try {
close();
} finally {
super.finalize();
}
}
// Frees the native table. Idempotent: nativePtr is zeroed so a second call
// (e.g. finalize after an explicit close) is a no-op. Serialized on a global
// mutex so native close calls never race across threads.
private void close() {
synchronized (CloseMutex.getInstance()) {
if (nativePtr == 0) {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
return;
}
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
nativeClose(nativePtr);
nativePtr = 0;
}
}
protected native void nativeClose(long nativeTablePtr);
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * Has to work for all the typed (generated) subclasses as well, hence the
 * instanceof test rather than a getClass() comparison.
 */
@Override
public boolean equals(Object other) {
    if (other == this) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (other instanceof Table) {
        return nativeEquals(nativePtr, ((Table) other).nativePtr);
    }
    return false;
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is handed to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException     if {@code name} is null (previously this
 *                                  surfaced as a message-less NPE from
 *                                  {@code name.length()})
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at the given index.
 * Only valid on a root table; subtables cannot hand out nested schemas.
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    // Path from the root to the subtable column: a single step.
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type the type of the new column, see {@link ColumnType}
 * @param name the column name (non-null, max 63 characters)
 * @return Index of the new column.
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Schema changes are mutations: guard like every other mutating method.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName     new column name (non-null, max 63 characters)
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates this table's column layout from a {@link TableSpec} structure.
 * Supported column types: see {@link ColumnType}.
 *
 * @param tableSpec specification describing the columns (names and types);
 *                  duplicate column names are not allowed
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 *
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the table's column specification (names/types) from the native layer.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name (must not be null)
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 * Removes the row at rowIndex by moving the last row into its place
 * (presumably an O(1) delete that does not preserve row order --
 * TODO confirm against the native implementation).
 */
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row; returns the value reported by the native layer
// (presumably the index of the new row -- TODO confirm).
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex 0-based position; must be &lt;= size()
 * @param values   one value per column, in column order
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index is out of range, the number of
 *                                  values differs from the column count, or a
 *                                  value is incompatible with its column type
 */
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Validate every value BEFORE inserting anything, so a bad value cannot
// leave a half-inserted row behind. Column types are cached for the
// insert loop below.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
// NOTE(review): INTEGER accepts any Number, but FLOAT/DOUBLE cast to the
// exact box type (Float/Double) -- presumably matchObject() enforces the
// same asymmetry; confirm before widening.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Dates are stored natively in whole seconds (getTime() is ms).
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Fills a just-inserted subtable cell. 'value' is expected to be an
// Object[][] (rows of cell values); null leaves the subtable empty; a
// differently-shaped array surfaces as a ClassCastException. Recurses via
// addAt() for nested subtables.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex 0-based index of the column to sort by
 * @param order       ascending or descending
 * @return a sorted view over this table
 */
public TableView getSortedView(long columnIndex, Order order){
return new TableView(nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending)), immutable);
}
/**
 * Returns a view sorted by the specified column by the default order
 * (ascending).
 *
 * @param columnIndex 0-based index of the column to sort by
 * @return a sorted view over this table
 */
public TableView getSortedView(long columnIndex){
return new TableView(nativeGetSortedView(nativePtr, columnIndex, true), immutable);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified values.
 * Implemented as remove + re-insert, so any subtables in the row are rebuilt.
 *
 * @param rowIndex 0-based index of the row to replace; must be &lt; size()
 * @param values   one value per column, in column order
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index is out of range, the number of
 *                                  values differs from the column count, or a
 *                                  value is incompatible with its column type
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values' before touching the row, so a bad value cannot
    // destroy the existing row. (The previously-built colTypes array was
    // never read afterwards and has been dropped.)
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Describe the offending value without NPE'ing when it is null
            // (matchObject() can legitimately reject null) -- mirrors addAt().
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
// Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
// Returns the InternalMethods instance exposing low-level insert operations.
// Should only be called by AbstractTable (generated typed tables).
public InternalMethods getInternalMethods(){
return this.internal;
}
// Holds methods that must be publicly available for AbstractClass.
// Should not be called when using the dynamic interface. The methods can be
// accessed by calling getInternalMethods() in the Table class.
// Each insert* delegates straight to the corresponding native call after the
// immutability guard; callers must finish a row with insertDone().
public class InternalMethods{
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Dates are stored natively in whole seconds (getTime() is ms).
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native insert primitives backing addAt()/InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Dates are stored natively in whole seconds; convert back to milliseconds.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Reports the runtime type currently stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the row count of the subtable in the given cell.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable in the given cell.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter guards against mutation during a read transaction, then
// delegates to the corresponding native call.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Dates are stored natively in whole seconds (getTime() is ms).
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the value to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell of the column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Builds a search index on a string column; only STRING columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate delegates to a native scan over the whole column.
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Creates a query builder rooted at this table.
@Override
public TableQuery where() {
return new TableQuery(nativeWhere(nativePtr), immutable);
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: index of the first matching row, as reported by native.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are compared natively in whole seconds (getTime() is ms).
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: view over every matching row.
@Override
public TableView findAllLong(long columnIndex, long value) {
return new TableView(nativeFindAllInt(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
return new TableView(nativeFindAllBool(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
return new TableView(nativeFindAllFloat(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
return new TableView(nativeFindAllDouble(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
return new TableView(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000), immutable);
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
return new TableView(nativeFindAllString(nativePtr, columnIndex, value), immutable);
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
/**
 * Looks up a row by the value of column 0, which must be a String column.
 *
 * @param value the string to look up; must not be null
 * @return the row index reported by the native lookup
 */
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    ColumnType firstColumnType = this.getColumnType(0);
    if (firstColumnType != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// lowerBound/upperBound compute bounds on an integer column.
// NOTE(review): presumably requires the column to be sorted, as with
// classic lower_bound/upper_bound - confirm against the native docs.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view over the distinct values of the given column.
public TableView getDistinctView(long columnIndex) {
return new TableView(nativeGetDistinctView(nativePtr, columnIndex), immutable);
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize internal storage; a mutating operation.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the entire table to a JSON string.
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE (-1) means "no row limit" in the native string conversion.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Uniform error for any mutating call made while the table is immutable
// (i.e. accessed during a read transaction).
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Right
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to define a table of their choice will automatically get a
* generated class that inherits from this class, produced by the tightdb class generator.
*
* As an example, let's create a table to keep records of the employees in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the underlying native (C++) table; 0 once closed.
protected long nativePtr;
// True when the table is accessed inside a read transaction; mutators then throw.
protected boolean immutable = false;
// Owning object (parent table for subtables); null for root tables.
protected Object parent = null;
// Tracks native resources for delayed/asynchronous disposal.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table pointer; used internally, e.g. for subtables.
// The caller supplies the shared Context used for delayed native disposal.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// If close() is called, no penalty is paid for delayed disposal
// via the context
// Releases the native table immediately and marks this wrapper closed
// (nativePtr = 0). Safe to call more than once; repeated calls are ignored.
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Fallback cleanup: if close() was never called, hand the native pointer to
// the context for asynchronous disposal rather than freeing it on the
// finalizer thread.
@Override
protected void finalize() {
// Accessing `nativePointer` without synchronization as we
// assume that close() is never called on behalf of a
// finalizer thread
if (this.nativePtr != 0) {
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
// A closed table (nativePtr == 0) is never valid; otherwise ask the native layer.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
// Content-based equality, delegated to the native comparison.
// NOTE(review): hashCode() is not overridden, so two equal tables may have
// different hash codes - do not use Table as a hash-map/set key. A consistent
// hashCode() would need native support to match nativeEquals().
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before passing it to the native layer.
 *
 * @param name the proposed column name
 * @throws NullPointerException if {@code name} is null
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    // Explicit null check with a message, consistent with getColumnIndex().
    // (Previously a null name caused a bare NPE from name.length().)
    if (name == null) {
        throw new NullPointerException("Column name can not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema object for modifying the subtable column at {@code columnIndex}.
 * Only valid on a root table.
 *
 * @param columnIndex index of the subtable column
 * @throws UnsupportedOperationException if called on a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    // Idiomatic negation instead of '== false'.
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    // Path from the root to the subtable column being described.
    long[] newPath = new long[] { columnIndex };
    return new SubtableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters, non-null)
 * @return Index of the new column.
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Schema changes are mutations; guard like the other mutators
    // (clear(), remove(), updateFromSpec(), ...).
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex index of the column to remove
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void removeColumn(long columnIndex) {
    // Guard added for consistency with the other mutating methods.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex index of the column to rename
 * @param newName new column name (max 63 characters, non-null)
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Guard added for consistency with the other mutating methods.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column structure from a table specification.
 * Supported column types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification (column types and names) to apply to this
 *            table. Duplicate column names are not allowed.
 * @throws IllegalStateException if called during a read transaction
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the table's column structure (types and names).
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name; must not be null
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
// Translate the native type code into the Java-side enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
// NOTE(review): presumably moves the last row into rowIndex's slot and
// shrinks the table - confirm against the native implementation.
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends a single empty row; returns the value reported by the native call.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * @param rowIndex index at which to insert; must be <= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, arity, or a value type is wrong
 * @throws IllegalStateException if called during a read transaction
 */
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Validate every value against its column type BEFORE inserting anything,
// so a type error cannot leave a half-inserted row behind.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
// Accepts any Number subtype (Integer, Long, ...).
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Dates are stored with second precision.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// The empty subtable cell must exist before its rows can be filled in.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively inserts the rows of a nested Object[][] into the subtable cell
// at (rowIndex, columnIndex). A null value leaves the subtable empty.
// NOTE(review): the subtable accessor is never explicitly closed here;
// disposal relies on the context/finalizer path.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex, Order order){
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Avoid leaking the native view if the Java wrapper cannot be constructed.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
/**
 * Returns a view sorted by the specified column by the default order
 * @param columnIndex column to sort by
 * @return the sorted view (ascending order)
 */
public TableView getSortedView(long columnIndex){
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Default order is ascending.
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, true);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException if the index, arity, or a value type is wrong
 * @throws IllegalStateException if called during a read transaction
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUG FIX: 'value' may legitimately be null here; the old code called
            // value.getClass() and threw NullPointerException instead of the
            // intended IllegalArgumentException. addAt() already handles this.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods{
// Each insert* method checks the immutable flag and then forwards to the
// corresponding native insert primitive of the enclosing Table.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
// Second precision (getTime() / 1000), consistent with addAt().
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Must be called once after all cells of a row have been inserted.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native insert primitives used by addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Cell getters delegate straight to the native layer; no bounds checking
// is done on the Java side.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Converts the stored seconds-since-epoch back into a java.util.Date
// (milliseconds), mirroring the /1000 done on insertion.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
// Avoid leaking the native subtable if wrapper construction fails.
nativeClose(nativeSubtablePtr);
throw e;
}
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// All setters reject calls while the table is immutable (read transaction).
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Second precision (getTime() / 1000), consistent with the insert path.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
// Null strings are rejected explicitly before the immutability check.
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Stores a byte[] value; null is rejected explicitly (use an empty array instead).
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 *            column index of the cell
 * @param rowIndex
 *            row index of the cell
 * @param data
 *            the new value; must not be null
 * @throws NullPointerException if {@code data} is null
 * @throws IllegalStateException if called during a read transaction
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    // Give the NPE a message, consistent with the other null checks in this class.
    if (data == null)
        throw new NullPointerException("Mixed data must not be null.");
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in an integer column.
 *
 * @param columnIndex column whose cells are adjusted
 * @param value amount added to every cell
 * @throws IllegalStateException if called during a read transaction
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a string column; only STRING columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// All aggregates are computed natively over the full column.
// Integers
/** Sum of all values in an integer column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
/** Maximum value in an integer column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
/** Minimum value in an integer column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex" -> "columnIndex") in the native
// declarations below; JNI binds by method name and signature, so Java-side
// parameter names have no effect on the binding.
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
/** Average of all values in an integer column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
/** Sum of all values in a float column (returned as double). */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
/** Average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
/** Sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
/** Average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// count(): number of cells in the column equal to 'value' (computed natively).
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
// Only the String overload carries @Override; the numeric overloads are not
// part of the implemented interface.
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/**
* Creates a query object for this table. If wrapper construction fails, the
* freshly created native query pointer is closed to avoid a native leak.
*/
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
// Release the native query if the Java wrapper could not be constructed.
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: native search returning the index of the first matching row.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native float nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Dates cross the JNI boundary with one-second resolution (getTime() is ms).
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: native search returning a TableView of all matching rows. Each
// method closes the freshly created native view pointer if the Java wrapper
// cannot be constructed, to avoid a native leak.
@Override
public TableView findAllLong(long columnIndex, long value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Dates cross the JNI boundary with one-second resolution (getTime() is ms).
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
@Override
public long lookup(String value) {
if (value == null)
throw new NullPointerException("String must not be null.");
if (this.getColumnType(0) != ColumnType.STRING)
throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Lower/upper bound searches on an integer column (delegated to native core).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view of distinct values in the given column; closes the native
// view pointer if the Java wrapper cannot be constructed.
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE (-1) means no row limit in the native dump.
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Uniform error for mutating calls made while in a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employees, i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel for "no row limit" in toString(); passed through to native code.
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 after close().
protected long nativePtr;
// True while inside a read transaction; mutators must throw.
protected boolean immutable = false;
// Owning object (e.g. parent table for subtables); null for root tables.
protected Object parent = null;
// Shared disposal context used to release abandoned native objects.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Ensure the native library is loaded before any native method is used.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Internal constructor wrapping an existing native table pointer (used for
// subtables and transaction-scoped tables); does not allocate native memory.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// If close() is called, no penalty is paid for delayed disposal
// via the context
/**
* Releases the native table immediately. Safe to call more than once;
* subsequent calls are no-ops because nativePtr is zeroed.
*/
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Fallback: if close() was never called, hand the native pointer to the
// context for asynchronous disposal on a safe thread.
@Override
protected void finalize() {
// Accessing `nativePointer` without synchronization as we
// assume that close() is never called on behalf of a
// finalizer thread
if (this.nativePtr != 0) {
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all its subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
    // A closed table (nativePtr == 0) is never valid; otherwise ask the core.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
/**
* Content comparison delegated to the native core.
*
* NOTE(review): equals() is overridden without a matching hashCode();
* Tables used as keys in hash-based collections will misbehave. A
* content-based hash would need native support -- confirm intended
* usage before adding one.
*/
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null, so a separate null check is redundant.
    // Has to work for all the typed tables as well.
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Validates a column name before it is handed to the native layer.
// Throws NullPointerException (with a message, matching getColumnIndex)
// for null, and IllegalArgumentException for over-long names.
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name can not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
* Returns a schema handle for the subtable column at the given index.
* Schema navigation must start at the root table.
*/
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // The path to the subtable column starts with just this column index.
    return new SubtableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
    // Consistency fix: schema mutation must be rejected in a read
    // transaction, like every other mutator in this class.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
    // Consistency fix: guard against mutation during a read transaction.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
    // Consistency fix: guard against mutation during a read transaction.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates the table schema from a TableSpec specification structure.
* Supported column types - refer to @see ColumnType.
* (Previous javadoc documented nonexistent columnType/columnName parameters.)
*
* @param tableSpec
* specification describing the columns of the table; duplicate
* column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns a snapshot of this table's schema as a TableSpec structure.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends a single empty row; returns the index of the new row.
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; returns the index reported by the native call.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table
* @param values one value per column, in column order
* @return The row index of the appended row
*/
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
* All values are validated against the column types BEFORE any native insert
* is issued, so a type error leaves the table unchanged.
* @param rowIndex insertion position; must be <= size()
* @param values one value per column, in column order
*/
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values. The per-column nativeInsert* calls plus the trailing
// nativeInsertDone() form the native row-insertion protocol; the order of
// these calls must not be changed.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Stored with one-second resolution (getTime() is milliseconds).
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Create the subtable cell, then recursively fill it row by row.
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively populates a subtable cell during row insertion. 'value' is
// expected to be an Object[] of rows (each row itself an Object[]);
// null leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null)
        return;
    // insert rows in subtable recursively
    Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
    Object[] subtableRows = (Object[]) value;
    for (int i = 0; i < subtableRows.length; ++i) {
        subtable.addAt(i, (Object[]) subtableRows[i]);
    }
}
/**
* Returns a view sorted by the specified column and order.
* Closes the freshly created native view pointer if the Java wrapper
* cannot be constructed, to avoid a native leak.
* @param columnIndex column to sort by
* @param order Order.ascending or Order.descending
* @return sorted TableView over this table
*/
public TableView getSortedView(long columnIndex, Order order){
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
/**
* Returns a view sorted by the specified column using the default
* (ascending) order.
* @param columnIndex column to sort by
* @return sorted TableView over this table
*/
public TableView getSortedView(long columnIndex){
    // Delegate to the two-argument overload, which performs the same
    // delayed disposal and native-pointer error handling.
    return getSortedView(columnIndex, Order.ascending);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
* Replaces the row at the specified position with the specified row.
* All values are validated before the row is touched, so a type error
* leaves the table unchanged.
* @param rowIndex index of the row to replace; must be < size()
* @param values one value per column, in column order
*/
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
            " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
            String.valueOf(values.length) +
            ") does not match the number of columns in the table (" +
            String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Bug fix: the old message used value.getClass() directly, which
            // threw NullPointerException when value was null. Describe null
            // values the same way addAt() does.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
//NOTE: deliberately a non-static inner class - every method reads the outer
//Table's nativePtr/immutable state.
public class InternalMethods{
// Each insert* method is one step of the native row-insertion protocol;
// a row is committed only after insertDone() is called.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Stored with one-second resolution (getTime() is milliseconds).
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubTable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubTable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the row assembled by the preceding insert* calls.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native insert primitives backing addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Thin wrappers over native cell accessors; all indices are 0-based.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native value is in seconds; Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Reports the runtime type stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
// The Java wrapper could not be constructed: free the native subtable
// immediately so the pointer does not leak.
nativeClose(nativeSubtablePtr);
throw e;
}
}
protected native long nativeGetSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
// Same leak-avoidance as in getSubTable() above.
nativeClose(nativeSubtablePtr);
throw e;
}
}
private native long nativeGetSubTableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable at the given cell.
public long getSubTableSize(long columnIndex, long rowIndex) {
return nativeGetSubTableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubTableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable at the given cell. Mutating call, hence the immutable guard.
public void clearSubTable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubTable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubTable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Every setter guards against mutation during a read transaction (immutable flag)
// and then delegates to the corresponding JNI call.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Date is milliseconds-based; the native layer stores seconds (see getDate).
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
// Null strings are rejected up front; the native layer expects a non-null value.
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
// NOTE: The direct-ByteBuffer variant below is intentionally disabled; use the
// byte[] based setBinaryByteArray() instead.
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data the value to store; must not be null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value amount added to every cell of the column
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the given column. Only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if a search index exists on the given column.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
// Each aggregate delegates to the native implementation over the whole column.
/** Sum of all values in an integer column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
/** Maximum value in an integer column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
/** Minimum value in an integer column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); JNI binds by method name and
// descriptor only, so renaming a parameter is linkage-safe.
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
/** Average of all values in an integer column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
// Aggregates over a float column; sum/average widen to double on the native side.
/** Sum of all values in a float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); parameter names do not affect JNI linkage.
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
/** Average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
// Aggregates over a double column.
/** Sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); parameter names do not affect JNI linkage.
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
/** Average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the given column equal to 'value'; overloaded per value type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/**
* Creates a query object bound to this table. The caller owns the returned
* TableQuery and its native resources.
*/
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native query so the pointer does not leak.
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first match in the given column.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Convert milliseconds (java.util.Date) to the seconds stored natively.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView of all matching rows. Each variant allocates a
// native view and closes it again if the Java wrapper cannot be constructed.
@Override
public TableView findAllLong(long columnIndex, long value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Convert milliseconds (java.util.Date) to the seconds stored natively.
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
/**
 * Finds the row whose first-column string equals {@code value}.
 *
 * @param value the string to look up; must not be null
 * @return the row index reported by the native lookup
 * @throws NullPointerException if {@code value} is null
 * @throws UnsupportedOperationException if column 0 is not a string column
 */
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    // lookup() is only defined when the first column holds strings.
    ColumnType firstColumnType = getColumnType(0);
    if (firstColumnType != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds over a column; semantics are defined by the native
// implementation (presumably requires a sorted column -- TODO confirm).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/**
* Returns a view containing one row per distinct value in the given column.
* The caller owns the returned TableView and its native resources.
*/
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native view so the pointer does not leak.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize internal storage. Mutating call.
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the whole table to a JSON string (native implementation). */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Human-readable dump of the table; INFINITE means no row limit. */
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Human-readable dump of a single row. */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common guard used by all mutating methods when the table is opened read-only.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// ===== MergeMethods: file-concatenation marker between two revisions of Table.java (not Java code) =====
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString meaning "no row limit".
public static final long INFINITE = -1;
// Handle to the native table object; 0 once close() has been called.
protected long nativePtr;
// True when the table was opened inside a read transaction; mutating calls then throw.
protected boolean immutable = false;
// Owning object (group/parent table); null when this is a root table.
protected Object parent = null;
// Shared disposal context used to free abandoned native objects.
private Context context = null;
// test:
// Debug-only bookkeeping of live Table instances (see DEBUG flag below).
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Ensure the JNI library is loaded before any native method is called.
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
/**
* Wraps an existing native table pointer (used for subtables and tables
* handed out by a group). The caller supplies the shared disposal context
* and the parent that keeps the native object alive.
*/
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
/**
* Releases the native table immediately. Safe to call more than once:
* subsequent calls are ignored because nativePtr is zeroed.
*/
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
@Override
protected void finalize() {
// finalizer thread
// Disposal is deferred to the shared context so the native free happens on a
// safe thread; root tables are distinguished from subtables by having no parent.
// NOTE(review): does not call super.finalize() -- harmless if the superclass is
// Object, but confirm the class hierarchy.
if (this.nativePtr != 0) {
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
// A closed table (nativePtr == 0) is never valid; otherwise ask the native layer.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two Table wrappers are equal when the native layer reports their underlying
 * tables as equal. Works for generated typed-table subclasses as well, since
 * any {@code Table} instance is accepted.
 *
 * NOTE(review): hashCode() is not overridden alongside equals(); equal tables
 * may land in different hash buckets -- confirm whether Table is ever used as
 * a hash key.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(other instanceof Table)) {
        return false;
    }
    Table that = (Table) other;
    return nativeEquals(nativePtr, that.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Rejects column names longer than the native layer's 63-character limit.
 *
 * @throws IllegalArgumentException if the name is too long
 */
private void verifyColumnName(String name) {
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table.
 *
 * @throws UnsupportedOperationException if called on a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    // Idiomatic negation instead of "== false"; behavior unchanged.
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the subtable column, as expected by SubtableSchema.
    long[] newPath = { columnIndex };
    return new SubtableSchema(nativePtr, newPath);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn(ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's columns from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification describing the columns to create.
* Duplicate column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the native table's column specification. */
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name; must not be null
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
// Native int type code is mapped back to the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. Mutating call. */
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then removes the last row
* (native "move last over" delete -- avoids shifting subsequent rows).
*/
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; returns the index reported by the native layer. */
public long addEmptyRow() {
if (immutable)
throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
/** Appends 'rows' empty rows; 'rows' must be positive. */
public long addEmptyRows(long rows) {
if (immutable)
throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table
* @param values one value per column (validated by addAt)
* @return The row index of the appended row
*/
public long add(Object... values) {
// Appending is inserting at the current size.
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex  0-based insertion position; must be &lt;= size()
 * @param values    one value per column, each compatible with the column type
 * @throws IllegalArgumentException if the index is out of range, the number of
 *         values does not match the column count, or a value is incompatible
 *         with its column type
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
    }
    // Check values types
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // String representation of the provided value type, for the error message.
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Insert values
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int) columnIndex];
        switch(colTypes[(int) columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
            break;
        case FLOAT:
            // Accept any Number, consistent with the INTEGER case; previously a
            // hard (Float) cast threw ClassCastException for other Number types.
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Number) value).floatValue());
            break;
        case DOUBLE:
            // Same Number-based widening as FLOAT above.
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Number) value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
            break;
        case DATE:
            // java.util.Date is milliseconds-based; native storage is seconds.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
            break;
        case TABLE:
            nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
        }
    }
    // Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}
// Recursively populates a freshly inserted subtable cell. 'value' is expected to
// be an Object[] of rows, each row itself an Object[] of column values; a null
// value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
// Uses getSubTableDuringInsert because row counts are not final until insertDone().
Table subtable = getSubTableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.addAt(i, (Object[]) rowArr);
}
}
}
/**
* Returns a view sorted by the specified column and order
* @param columnIndex column to sort by
* @param order ascending or descending
* @return sorted TableView; caller owns its native resources
*/
public TableView getSortedView(long columnIndex, Order order) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native view so the pointer does not leak.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
/**
* Returns a view sorted by the specified column by the default order (ascending)
* @param columnIndex column to sort by
* @return sorted TableView; caller owns its native resources
*/
public TableView getSortedView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, true);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex 0-based index of the row to replace; must be &lt; size()
 * @param values   one value per column, each compatible with the column type
 * @throws IllegalArgumentException if the index is out of range, the number of
 *         values does not match the column count, or a value is incompatible
 *         with its column type
 */
public void set(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // BUGFIX: previously called value.getClass() unconditionally, throwing
            // NullPointerException instead of the intended message when value was
            // null. Mirrors the null-safe handling in addAt().
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
// Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
// Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable.
public InternalMethods getInternalMethods() {
return this.internal;
}
// Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class.
// Non-static inner class on purpose: each method reads the enclosing Table's
// nativePtr/immutable state and delegates to the raw native insert calls, which
// must be followed by insertDone() to commit the row.
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// java.util.Date is milliseconds-based; native storage is seconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
// NOTE: The direct-ByteBuffer variant below is intentionally disabled; use the
// byte[] based insertBinary() instead.
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
// Populate the new subtable cell row-by-row (may recurse for nested subtables).
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the pending row started by the insert* calls above.
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
}
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Typed cell accessors. Each delegates straight to the native core; no
// Java-side bounds or type checking is performed here.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
// Core stores seconds since epoch; Date expects milliseconds — hence * 1000.
@Override
public Date getDate(long columnIndex, long rowIndex) {
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string) cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value held in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable stored at the given cell.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable stored at the given cell.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Returns the subtable stored at the given cell.
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// BUGFIX: call the declared native nativeGetSubtable(...). The previous call
// used nativeGetSubTable(...) (capital 'T'), which matches no native method
// declared in this class and would not resolve.
long nativeSubtablePtr = nativeGetSubtable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
// Don't leak the native subtable if Java-side wrapper construction fails.
nativeClose(nativeSubtablePtr);
throw e;
}
}
// Below version will allow to get a subtable when the number of available rows
// is not updated yet - which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// BUGFIX: call the declared native nativeGetSubtableDuringInsert(...). The
// previous call used a capital 'T' ("...GetSubTableDuringInsert"), which
// matches no native method declared in this class.
long nativeSubtablePtr = nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(this.context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
// Don't leak the native subtable if Java-side wrapper construction fails.
nativeClose(nativeSubtablePtr);
throw e;
}
}
//
// Setters
//
// Typed cell mutators. Each rejects calls during a read transaction
// (immutable) and otherwise delegates straight to the native core.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
// Date is stored as whole seconds since epoch — sub-second precision is lost.
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Null is rejected; store an empty array to represent "no data".
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the value to store; must not be null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value the delta added to every cell in the column
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
// NOTE(review): first parameter is named nativeViewPtr but receives nativePtr
// of this table — presumably a copy/paste from the TableView variant; confirm.
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on a column. Only STRING columns are supported.
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if the column has a search index (see setIndex()).
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Whole-column aggregates, delegating to the native core. One set of
// sum/max/min/average per numeric column type.
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value, per value type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Creates a new query on this table. Wraps a freshly allocated native query;
// on wrapper-construction failure the native object is closed to avoid a leak.
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: returns the row index of the first match (per the native core).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
// Dates are compared in whole seconds since epoch (see the / 1000 conversion).
@Override
public long findFirstDate(long columnIndex, Date date) {
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: returns a TableView of all matching rows. Each method wraps a
// freshly allocated native view and closes it if wrapper construction fails.
@Override
public TableView findAllLong(long columnIndex, long value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
@Override
public long lookup(String value) {
if (value == null)
throw new NullPointerException("String must not be null.");
if (this.getColumnType(0) != ColumnType.STRING)
throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Lower/upper bound of the given value in an (assumed sorted) integer column
// — NOTE(review): the sortedness precondition is enforced by core, not here.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the given column.
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize internal storage (a mutating operation, hence the immutable check).
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table to a JSON string.
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// Human-readable dump; INFINITE means "all rows".
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for every mutator invoked during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the native core table; 0 once close() has been called.
protected long nativePtr;
// True while inside a read transaction: every mutator then throws.
protected boolean immutable = false;
// Owning Table (for subtables); null for a root table.
protected Object parent = null;
// Shared disposal context, used to free abandoned native objects.
private Context context = null;
// test / debug bookkeeping:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library before any native method can be called.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table (e.g. a subtable), inheriting the parent's
// disposal context and read-transaction flag.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table. Idempotent: a second call is a no-op because
// nativePtr is zeroed after the first close.
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Safety net: if close() was never called, hand the native pointer to the
// context for asynchronous disposal (we are on the finalizer thread here).
@Override
protected void finalize() {
// finalizer thread
if (this.nativePtr != 0) {
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
    // A closed table (nativePtr == 0) is never valid; otherwise ask the core.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
// Structural equality, delegated to the native core.
// NOTE(review): hashCode() is not overridden alongside equals() — equal tables
// may land in different hash buckets; confirm whether Table is used as a map key.
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // instanceof is false for null; has to work for all the typed tables as well.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Rejects column names longer than the limit currently imposed by the core.
private void verifyColumnName(String name) {
    final int maxNameLength = 63;
    if (name.length() <= maxNameLength)
        return;
    throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
// Returns a schema handle for the subtable column at columnIndex.
// Navigation of subtable schemas must start from the root table.
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    return new SubtableSchema(nativePtr, new long[] { columnIndex });
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the new column
 * @param name column name (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn(ColumnType type, String name) {
    // Consistency fix: schema changes are mutations — fail fast during a read
    // transaction, like every other mutator in this class (e.g. updateFromSpec()).
    if (immutable)
        throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 */
@Override
public void removeColumn(long columnIndex) {
    // Consistency fix: reject schema mutation during a read transaction,
    // matching the other mutators in this class.
    if (immutable)
        throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName the new column name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Consistency fix: reject schema mutation during a read transaction,
    // matching the other mutators in this class.
    if (immutable)
        throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's column structure from a TableSpec specification
* structure. Supported types - refer to @see ColumnType.
*
* @param tableSpec
*            specification of the columns to apply to this table.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Snapshot of the table's column specification (types and names).
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIndex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
*/
// Replaces the row at rowIndex with the last row (order-destroying delete).
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; returns the index of the new row. */
public long addEmptyRow() {
    // Delegate to the general version: identical immutable check, same native call.
    return addEmptyRows(1);
}
/** Appends {@code rows} empty rows; returns the index of the first new row. */
public long addEmptyRows(long rows) {
    if (immutable)
        throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table.
* @param values one value per column, in column order
* @return The row index of the appended row
*/
public long add(Object... values) {
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
*
* Validates the index and every value against the column types before any
* native insert happens, so a failure leaves the table unmodified; then runs
* the low-level insert protocol (one insert per column + nativeInsertDone).
* @param rowIndex position to insert at; must be <= size()
* @param values one value per column, in column order
*/
public void addAt(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// Cache the column types so the insert loop below doesn't re-query them.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int) columnIndex];
switch(colTypes[(int) columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
break;
// INTEGER accepts any Number; NOTE(review): FLOAT/DOUBLE below cast to the
// exact boxed type instead — presumably matchObject() only admits exact
// matches there; confirm against ColumnType.matchObject().
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
break;
// Dates are stored as whole seconds since epoch.
case DATE:
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
break;
// Subtable cells are created empty, then filled row-by-row recursively.
case TABLE:
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills a just-inserted subtable cell. 'value' is expected to be
// an Object[] of rows, each row itself an Object[] of column values; a null
// value leaves the subtable empty. Uses getSubtableDuringInsert because row
// counts are not updated until insertDone().
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.addAt(i, (Object[]) rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order       sort order
 * @return a sorted TableView over this table
 */
public TableView getSortedView(long columnIndex, Order order) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: free the native view so it does not leak.
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}

/**
 * Returns a view sorted by the specified column in the default (ascending) order.
 *
 * @param columnIndex column to sort by
 * @return a sorted TableView over this table
 */
public TableView getSortedView(long columnIndex) {
    // Delegate to the two-argument overload so view creation and native
    // cleanup-on-failure live in exactly one place.
    return getSortedView(columnIndex, Order.ascending);
}

// Param renamed from 'nativeTableViewPtr': this native takes the TABLE pointer.
protected native long nativeGetSortedView(long nativeTablePtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex index of the row to replace; 0 &lt;= rowIndex &lt; size()
 * @param values   one value per column, each compatible with its column type
 * @throws IllegalStateException    if the table is immutable (read transaction)
 * @throws IllegalArgumentException if the index is out of range, the number of
 *                                  values does not match the column count, or a
 *                                  value is incompatible with its column type
 */
public void set(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index (negative indices rejected explicitly).
    long size = size();
    if (rowIndex < 0 || rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Bug fix: the original built the message with value.getClass(),
            // which threw NullPointerException when a null value was rejected.
            // Use the same null-safe description addAt() uses.
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods() {
return this.internal;
}
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
/**
 * Thin, typed wrappers around the native per-cell insert calls. Each method
 * checks the immutable flag and forwards to the corresponding native insert;
 * callers are expected to finish a row with {@link #insertDone()}.
 */
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// Native layer stores dates as epoch seconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native per-cell insert primitives backing addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
/** Returns the long value of the (integer) cell at (columnIndex, rowIndex). */
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the boolean value of the cell at (columnIndex, rowIndex). */
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the float value of the cell at (columnIndex, rowIndex). */
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the double value of the cell at (columnIndex, rowIndex). */
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the date cell as a java.util.Date. */
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native layer stores epoch seconds; Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
/** Returns the Mixed value of the cell at (columnIndex, rowIndex). */
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the value stored in a MIXED cell. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the number of rows in the subtable at (columnIndex, rowIndex). */
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable at (columnIndex, rowIndex). */
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Returns the subtable at the requested cell.
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    // Fix: call the native declared in this class, nativeGetSubtable — the
    // original called nativeGetSubTable (capital T), which matches no
    // declaration here.
    long nativeSubtablePtr = nativeGetSubtable(nativePtr, columnIndex, rowIndex);
    try {
        // Copy context reference from parent
        return new Table(context, this, nativeSubtablePtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: free the native subtable to avoid a leak.
        nativeClose(nativeSubtablePtr);
        throw e;
    }
}

// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    // Fix: match the declared native nativeGetSubtableDuringInsert (the
    // original used nativeGetSubTableDuringInsert, capital T).
    long nativeSubtablePtr = nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex);
    try {
        return new Table(this.context, this, nativeSubtablePtr, immutable);
    } catch (RuntimeException e) {
        nativeClose(nativeSubtablePtr);
        throw e;
    }
}
//
// Setters
//
/** Sets the (integer) cell at (columnIndex, rowIndex) to {@code value}. */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
/** Sets the boolean cell at (columnIndex, rowIndex) to {@code value}. */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
/** Sets the float cell at (columnIndex, rowIndex) to {@code value}. */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
/** Sets the double cell at (columnIndex, rowIndex) to {@code value}. */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
/** Sets the date cell at (columnIndex, rowIndex); stored as epoch seconds. */
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
/** Sets the string cell at (columnIndex, rowIndex); null is rejected. */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
/** Sets the binary cell at (columnIndex, rowIndex); null is rejected. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
// NOTE(review): first parameter name 'nativeViewPtr' looks copy-pasted from
// TableView — the caller passes the table pointer. Name only; confirm and rename.
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/** Creates a search index on a column; only STRING columns are supported. */
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the column has a search index. */
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
/** Sum of all values in an integer column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
/** Maximum value in an integer column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
/** Minimum value in an integer column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Parameter name fixed ('columnnIndex' typo).
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
/** Average of all values in an integer column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
/** Sum of all values in a float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Parameter name fixed ('columnnIndex' typo).
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
/** Average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
/** Sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Parameter name fixed ('columnnIndex' typo).
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
/** Average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
/** Counts rows whose (integer) column equals {@code value}. */
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
/** Counts rows whose float column equals {@code value}. */
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
/** Counts rows whose double column equals {@code value}. */
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
/** Counts rows whose string column equals {@code value}. */
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/** Creates a new query on this table. */
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native query to avoid a leak.
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the index of the first matching row (native semantics).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Compared in epoch seconds, matching the storage format.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: return a TableView of all matching rows; on wrapper-construction
// failure the native view is closed to avoid a leak.
@Override
public TableView findAllLong(long columnIndex, long value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
@Override
public long lookup(String value) {
if (value == null)
throw new NullPointerException("String must not be null.");
if (this.getColumnType(0) != ColumnType.STRING)
throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/** Returns a view containing one row per distinct value in the column. */
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
/** Asks the native layer to optimize the table's internal storage. */
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Returns the table contents serialized as JSON by the native layer. */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Returns a textual dump of the whole table (INFINITE = no row limit). */
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
/** Returns a textual dump of at most {@code maxRows} rows. */
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Returns a textual dump of a single row. */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common failure path for every mutating method called on an immutable table.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// NOTE(review): merge-tool marker "KeepBothMethods" commented out — the bare
// identifier is not valid Java. This file appears to concatenate two versions
// of Table.java; the sections should be reconciled into one compilation unit.
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the underlying native table; 0 once closed.
protected long nativePtr;
// True while inside a read transaction; mutating methods then throw.
protected boolean immutable = false;
// Owning object (e.g. parent table for a subtable); null for a root table.
protected Object parent = null;
// Shared disposal context for native resources.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Wraps an existing native table pointer (e.g. a subtable), sharing the
// parent's disposal context.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
/**
* Releases the native table. Safe to call more than once: subsequent calls
* are ignored because nativePtr is zeroed on the first close.
*/
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Safety net: if the table was never close()d, hand the native pointer to the
// context for asynchronous disposal on the finalizer thread.
// NOTE(review): does not call super.finalize(); harmless if the direct
// superclass is Object, but confirm no intermediate superclass needs it.
@Override
protected void finalize() {
// finalizer thread
if (this.nativePtr != 0) {
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
// Equality is delegated to the native layer (content comparison).
// NOTE(review): hashCode() is not overridden in this chunk; if it is not
// defined elsewhere in the class, equal tables may hash differently — confirm.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is passed to the native layer.
 *
 * @param name proposed column name
 * @throws NullPointerException     if {@code name} is null (previously this
 *                                  surfaced as a bare NPE from name.length())
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * May only be called on a root table.
 *
 * @throws UnsupportedOperationException if this table is a subtable
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root down to the subtable column: a single step.
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @return Index of the new column.
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public long addColumn(ColumnType type, String name) {
    // Consistency fix: schema mutations now check the immutable flag the same
    // way other mutating methods (e.g. clear(), updateFromSpec()) do.
    if (immutable)
        throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable)
        throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable)
        throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's columns from a TableSpec structure.
* Supported column types - refer to @see ColumnType.
*
* @param tableSpec
*            specification describing the columns to create; duplicate
*            column names are not allowed.
* @throws IllegalStateException if the table is immutable (read transaction)
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table size and deletion. AutoGenerated subclasses have nothing to do with
// this class.

/**
 * Get the number of entries/rows of this table.
 *
 * @return the number of rows
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}

protected native long nativeSize(long nativeTablePtr);

/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}

/**
 * Clears the table, i.e. deletes all rows in the table.
 */
@Override
public void clear() {
    if (immutable) {
        throwImmutable();
    }
    nativeClear(nativePtr);
}

protected native void nativeClear(long nativeTablePtr);
// Column information.

/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}

protected native long nativeGetColumnCount(long nativeTablePtr);

/**
 * Returns the specification of this table.
 */
public TableSpec getTableSpec() {
    return nativeGetTableSpec(nativePtr);
}

protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}

protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);

/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName the column name; must not be null
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
    if (columnName == null) {
        throw new NullPointerException("Column name can not be null.");
    }
    return nativeGetColumnIndex(nativePtr, columnName);
}

protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);

/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column
 * @return type of that column
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    int nativeColumnType = nativeGetColumnType(nativePtr, columnIndex);
    return ColumnType.fromNativeValue(nativeColumnType);
}

protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes the row at the specified index.
 *
 * @param rowIndex the row index (starting with 0)
 */
@Override
public void remove(long rowIndex) {
    if (immutable) {
        throwImmutable();
    }
    nativeRemove(nativePtr, rowIndex);
}

protected native void nativeRemove(long nativeTablePtr, long rowIndex);

/**
 * Removes the last row of the table.
 */
@Override
public void removeLast() {
    if (immutable) {
        throwImmutable();
    }
    nativeRemoveLast(nativePtr);
}

protected native void nativeRemoveLast(long nativeTablePtr);

/**
 * EXPERIMENTAL function. Presumably replaces the row at rowIndex with the
 * last row — confirm against the native implementation before relying on it.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) {
        throwImmutable();
    }
    nativeMoveLastOver(nativePtr, rowIndex);
}

protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row handling methods.

/**
 * Appends a single empty row.
 *
 * @return value reported by the native layer for the added row
 */
public long addEmptyRow() {
    if (immutable) {
        throwImmutable();
    }
    return nativeAddEmptyRow(nativePtr, 1);
}

/**
 * Appends the given number of empty rows.
 *
 * @param rows number of rows to add; must be &gt; 0
 * @return value reported by the native layer for the added rows
 */
public long addEmptyRows(long rows) {
    if (immutable) {
        throwImmutable();
    }
    if (rows < 1) {
        throw new IllegalArgumentException("'rows' must be > 0.");
    }
    return nativeAddEmptyRow(nativePtr, rows);
}

protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);

/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column
 * @return the row index of the appended row
 */
public long add(Object... values) {
    long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 *
 * @param rowIndex index at which to insert; must be &lt;= size()
 * @param values one value per column, each compatible with the column's type
 * @throws IllegalArgumentException if rowIndex is out of range, the number of
 *         values does not match the column count, or a value is incompatible
 *         with its column's type
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void addAt(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
}
// Check value count against the column count
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// Validate every value against its column type BEFORE any native insert call,
// so a bad argument cannot leave a partially-written row behind.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
// String representation of the provided value type (null-safe)
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert the values, dispatching on the column types cached above.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int) columnIndex];
switch(colTypes[(int) columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
break;
case DATE:
// Date cells are passed to the native layer with second precision.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
break;
case TABLE:
// Insert the subtable cell first, then recursively fill its rows.
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills a just-inserted subtable cell. 'value' is treated as an
// Object[] of row arrays; null leaves the subtable empty. Must run between
// nativeInsertSubtable() and nativeInsertDone() (see getSubtableDuringInsert).
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.addAt(i, (Object[]) rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order.
 *
 * @param columnIndex index of the column to sort by
 * @param order       Order.ascending or Order.descending
 * @return a sorted TableView over this table
 */
public TableView getSortedView(long columnIndex, Order order) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        // Never leak the native view if the Java wrapper could not be built.
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}

/**
 * Returns a view sorted by the specified column in the default (ascending) order.
 *
 * @param columnIndex index of the column to sort by
 * @return a sorted TableView over this table
 */
public TableView getSortedView(long columnIndex) {
    // Delegate to the two-argument overload instead of duplicating the
    // native-pointer lifecycle handling (fixes copy-paste duplication).
    return getSortedView(columnIndex, Order.ascending);
}

protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified values.
 *
 * @param rowIndex index of the row to replace; must be &lt; size()
 * @param values one value per column, each compatible with the column's type
 * @throws IllegalArgumentException if rowIndex is out of range, the number of
 *         values does not match the column count, or a value is incompatible
 *         with its column's type
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void set(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Verify type of 'values' before mutating anything.
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Bug fix: value.getClass() threw NullPointerException when a null
            // value failed the type check; report "null" instead (same as addAt).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
// Single instance of the inner class InternalMethods, handed out below.
private InternalMethods internal = new InternalMethods();
// Returns the InternalMethods instance exposing the low-level insert calls.
// Should only be called by AbstractTable.
public InternalMethods getInternalMethods() {
return this.internal;
}
// Should not be called when using the dynamic interface. The methods can be
// accessed by calling getInternalMethods() in the Table class.
// Each insert* call writes one cell of a row being built; the sequence must be
// terminated with insertDone() (mirrors the native insert protocol in addAt()).
public class InternalMethods {
// Inserts a long cell; table must be mutable.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
// Inserts a double cell; table must be mutable.
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
// Inserts a float cell; table must be mutable.
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
// Inserts a boolean cell; table must be mutable.
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
// Inserts a date cell; stored with second precision (Date is milliseconds).
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
// Inserts a string cell; table must be mutable.
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
// Inserts a mixed-typed cell; table must be mutable.
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
// Inserts a binary cell from a byte array; null is rejected.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
// Inserts a subtable cell and recursively fills it from 'values' (rows of cells).
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Terminates the row-building sequence started by the insert* calls above.
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native row-insertion primitives used by addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//

/** Returns the long value of the cell at (columnIndex, rowIndex). */
@Override
public long getLong(long columnIndex, long rowIndex) {
    return nativeGetLong(nativePtr, columnIndex, rowIndex);
}

protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the boolean value of the cell at (columnIndex, rowIndex). */
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
    return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}

protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the float value of the cell at (columnIndex, rowIndex). */
@Override
public float getFloat(long columnIndex, long rowIndex) {
    return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}

protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);

/** Returns the double value of the cell at (columnIndex, rowIndex). */
@Override
public double getDouble(long columnIndex, long rowIndex) {
    return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}

protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);

/**
 * Returns the date value of the cell at (columnIndex, rowIndex). The native
 * layer stores seconds; java.util.Date expects milliseconds.
 */
@Override
public Date getDate(long columnIndex, long rowIndex) {
    long seconds = nativeGetDateTime(nativePtr, columnIndex, rowIndex);
    return new Date(seconds * 1000);
}

protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
// Returns the value of a MIXED cell wrapped in a Mixed object.
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Returns the subtable at the requested cell.
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that, or better yet call
 * close() after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows of the subtable at the given cell; table must be mutable.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Returns the subtable at the requested cell, tracked by this table's context.
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let Java's garbage collector handle that, or better yet call
 * close() after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
// Close the native subtable so it does not leak if wrapper construction fails.
nativeClose(nativeSubtablePtr);
throw e;
}
}
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
//
// Setters
//

/** Sets the long value of the cell at (columnIndex, rowIndex); table must be mutable. */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) {
        throwImmutable();
    }
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}

protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);

/** Sets the boolean value of the cell at (columnIndex, rowIndex); table must be mutable. */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) {
        throwImmutable();
    }
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}

protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);

/** Sets the float value of the cell at (columnIndex, rowIndex); table must be mutable. */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) {
        throwImmutable();
    }
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}

protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);

/** Sets the double value of the cell at (columnIndex, rowIndex); table must be mutable. */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) {
        throwImmutable();
    }
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}

protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
/**
 * Sets the date value of the cell at (columnIndex, rowIndex).
 * Stored with second precision (java.util.Date carries milliseconds).
 *
 * @param date the value to store; must not be null
 * @throws NullPointerException if date is null
 * @throws IllegalStateException if the table is immutable
 */
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    // Explicit null check with a message, consistent with setString(); previously
    // a null date produced a bare NPE from date.getTime().
    if (date == null)
        throw new NullPointerException("Null Date is not allowed.");
    if (immutable)
        throwImmutable();
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
/** Sets the string value of the cell at (columnIndex, rowIndex); null is rejected. */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (value == null) {
        throw new NullPointerException("Null String is not allowed.");
    }
    if (immutable) {
        throwImmutable();
    }
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}

protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Sets the value of a binary cell from a byte array; null is rejected.
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the value to store; must not be null
 * @throws NullPointerException if data is null
 * @throws IllegalStateException if the table is immutable
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable)
        throwImmutable();
    if (data == null)
        // Fix: give the NPE a message, consistent with the other setters.
        throw new NullPointerException("Mixed data must not be null.");
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Adds the given value to all cells in the column.
 *
 * @param columnIndex column index
 * @param value amount to add to each cell
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the given column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the given column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//

// Integers

/** Sum of all cells in a long column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}

protected native long nativeSumInt(long nativePtr, long columnIndex);

/** Maximum of all cells in a long column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}

protected native long nativeMaximumInt(long nativePtr, long columnIndex);

/** Minimum of all cells in a long column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}

protected native long nativeMinimumInt(long nativePtr, long columnIndex);

/** Average of all cells in a long column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}

protected native double nativeAverageInt(long nativePtr, long columnIndex);

// Floats

/** Sum of all cells in a float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}

protected native double nativeSumFloat(long nativePtr, long columnIndex);

/** Maximum of all cells in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}

protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

/** Minimum of all cells in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}

protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

/** Average of all cells in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}

protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles

/** Sum of all cells in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}

protected native double nativeSumDouble(long nativePtr, long columnIndex);

/** Maximum of all cells in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}

protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

/** Minimum of all cells in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}

protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

/** Average of all cells in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}

protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//

/** Counts rows whose cell in the given long column equals value. */
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}

protected native long nativeCountLong(long nativePtr, long columnIndex, long value);

/** Counts rows whose cell in the given float column equals value. */
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}

protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);

/** Counts rows whose cell in the given double column equals value. */
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}

protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);

/** Counts rows whose cell in the given string column equals value. */
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}

protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//

/**
 * Creates a new query bound to this table.
 *
 * @return a TableQuery over this table
 */
@Override
public TableQuery where() {
    // Dispose of abandoned tightdb objects before allocating a new one.
    context.executeDelayedDisposal();
    long nativeQueryPtr = nativeWhere(nativePtr);
    try {
        // The query inherits this table's context and mutability.
        return new TableQuery(this.context, nativeQueryPtr, immutable);
    } catch (RuntimeException e) {
        // Never leak the native query if the Java wrapper could not be built.
        TableQuery.nativeClose(nativeQueryPtr);
        throw e;
    }
}

protected native long nativeWhere(long nativeTablePtr);
/** Finds the first row whose long cell in the given column equals value. */
@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}

protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);

/** Finds the first row whose boolean cell in the given column equals value. */
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}

protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);

/** Finds the first row whose float cell in the given column equals value. */
@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}

protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);

/** Finds the first row whose double cell in the given column equals value. */
@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}

protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);

/** Finds the first row whose date cell equals date (compared with second precision). */
@Override
public long findFirstDate(long columnIndex, Date date) {
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}

protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);

/** Finds the first row whose string cell in the given column equals value. */
@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}

protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// Wraps a freshly created native view pointer in a TableView, closing the
// native view if the Java wrapper cannot be constructed, so it never leaks.
private TableView createViewFromNative(long nativeViewPtr) {
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}

/** Returns a view of all rows whose long cell in the given column equals value. */
@Override
public TableView findAllLong(long columnIndex, long value) {
    // Dispose of abandoned tightdb objects before allocating a new one.
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllInt(nativePtr, columnIndex, value));
}

protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);

/** Returns a view of all rows whose boolean cell in the given column equals value. */
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllBool(nativePtr, columnIndex, value));
}

protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);

/** Returns a view of all rows whose float cell in the given column equals value. */
@Override
public TableView findAllFloat(long columnIndex, float value) {
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllFloat(nativePtr, columnIndex, value));
}

protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);

/** Returns a view of all rows whose double cell in the given column equals value. */
@Override
public TableView findAllDouble(long columnIndex, double value) {
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllDouble(nativePtr, columnIndex, value));
}

protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);

/** Returns a view of all rows whose date cell equals date (compared with second precision). */
@Override
public TableView findAllDate(long columnIndex, Date date) {
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000));
}

protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);

/** Returns a view of all rows whose string cell in the given column equals value. */
@Override
public TableView findAllString(long columnIndex, String value) {
    context.executeDelayedDisposal();
    return createViewFromNative(nativeFindAllString(nativePtr, columnIndex, value));
}

protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
/**
 * Looks up a row by the value of its first column. Requires that column 0 is
 * a string column with unique values; an index is not required but improves
 * performance.
 *
 * @param value the string to look up; must not be null
 */
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    if (this.getColumnType(0) != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}

protected native long nativeLookup(long nativeTablePtr, String value);

// Experimental feature.
@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}

@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}

protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/**
 * Returns a view containing one row per distinct value of the given column.
 */
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native view to avoid a leak.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
/**
 * Asks the native core to optimize the table's internal storage.
 * Mutating operation: disallowed on immutable (read-transaction) tables.
 */
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the whole table to a JSON string (produced natively). */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
/** Human-readable dump of all rows (INFINITE = no row limit). */
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
/** Human-readable dump limited to {@code maxRows} rows. */
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Human-readable dump of a single row. */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common failure path for all mutating methods invoked while the table
// is bound to a read transaction (immutable == true).
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
public class Table implements TableOrView, TableSchema {
// Sentinel meaning "no row limit" for toString(maxRows).
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 once closed.
protected long nativePtr;
// True while the table belongs to a read transaction; mutators then throw.
protected boolean immutable = false;
// Owning object (group/subtable parent); null for a root/free-standing table.
protected Object parent = null;
// Tracks native objects pending disposal; shared with child views/queries.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
// Load the JNI library before any native method can be called.
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
// createNative() returns 0 when native allocation fails.
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Package-private constructor wrapping an already-existing native table
// (e.g. a subtable or a table obtained from a group). The caller supplies
// the shared disposal context and the parent that keeps it alive.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
/**
 * Releases the native table. Safe to call more than once: subsequent
 * calls are no-ops because nativePtr is reset to 0.
 */
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Safety net for tables never close()d explicitly. Disposal is deferred to
// the shared Context (executed later on a user thread) because native
// resources must not be freed directly on the finalizer thread.
@Override
protected void finalize() {
// finalizer thread
if (this.nativePtr != 0) {
// Root tables own their native object; subtables are freed differently.
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid() {
    // A zero pointer means the table was closed; otherwise ask the native
    // core whether this (sub)table is still attached to live data.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native core considers their contents equal.
 * NOTE(review): no hashCode() override is visible in this chunk — confirm one
 * exists elsewhere in the class to keep the equals/hashCode contract.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    // 'instanceof' is false for null, so this also covers the null case.
    // Has to work for all the typed tables as well, hence the Table check.
    if (!(other instanceof Table)) {
        return false;
    }
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
/**
 * Validates a column name before it is passed to the native layer.
 *
 * @throws NullPointerException if {@code name} is null (previously this
 *         surfaced as an anonymous NPE from {@code name.length()})
 * @throws IllegalArgumentException if the name exceeds 63 characters
 */
private void verifyColumnName(String name) {
    if (name == null) {
        throw new NullPointerException("Column name must not be null.");
    }
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table.
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the target column: a single step.
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 */
// NOTE(review): unlike most mutators, addColumn/removeColumn do not check
// 'immutable' before calling into native code — confirm whether the native
// layer rejects schema changes during a read transaction.
@Override
public long addColumn(ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 */
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 */
@Override
public void renameColumn(long columnIndex, String newName) {
// Enforces the same 63-character limit as addColumn.
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column layout from a TableSpec structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification describing the columns of this table
 * @throws IllegalStateException if the table is immutable (read transaction)
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable)
throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable)
throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
/** Returns the native-built specification of this table's columns. */
public TableSpec getTableSpec() {
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
 * Get the type of a column identified by the columnIdex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable)
throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
/** Removes the last row of the table. */
@Override
public void removeLast() {
if (immutable)
throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
// Replaces the row at rowIndex with the last row, then shrinks the table
// by one (delegated to native code); faster than remove() but reorders rows.
public void moveLastOver(long rowIndex) {
if (immutable)
throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/** Appends one empty row; returns the index of the new row. */
public long addEmptyRow() {
if (immutable)
throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
/** Appends {@code rows} (&gt; 0) empty rows; returns the index of the first new row. */
public long addEmptyRows(long rows) {
if (immutable)
throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
// Appending == inserting at the current end of the table.
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * All values are validated against the column types BEFORE any insert is
 * performed, so a type error leaves the table unchanged.
 * @param rowIndex insertion position; must be &lt;= size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong arity, or type mismatch
 */
public void addAt(long rowIndex, Object... values) {
if (immutable)
throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int) getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
}
// First pass: validate every value and cache the column types so the
// insert pass below cannot fail halfway through.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
// Second pass: one native insert per column, dispatched on column type.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int) columnIndex];
switch(colTypes[(int) columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean) value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number) value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float) value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double) value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String) value);
break;
case DATE:
// Dates are stored with one-second resolution.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date) value).getTime() / 1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[]) value);
break;
case TABLE:
// Insert an empty subtable cell, then fill it recursively.
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int) columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Recursively fills a freshly inserted subtable cell. 'value' is expected
// to be an Object[] of rows, each row itself an Object[] of column values.
// A null value leaves the subtable empty.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[]) value).length;
for (int i = 0; i < rows; ++i) {
Object rowArr = ((Object[]) value)[i];
subtable.addAt(i, (Object[]) rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @return a new TableView over this table's rows in sorted order
 */
public TableView getSortedView(long columnIndex, Order order) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native view to avoid a leak.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
/**
 * Returns a view sorted by the specified column in the default (ascending) order.
 * Delegates to {@link #getSortedView(long, Order)} to avoid duplicating the
 * native-view construction and error-cleanup logic.
 * @param columnIndex column to sort by
 * @return a new TableView over this table's rows in ascending order
 */
public TableView getSortedView(long columnIndex) {
    return getSortedView(columnIndex, Order.ascending);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 * All values are validated before the row is touched, so a type error
 * leaves the table unchanged.
 * @param rowIndex index of the row to replace; must be &lt; size()
 * @param values one value per column, in column order
 * @throws IllegalArgumentException on bad index, wrong arity, or type mismatch
 */
public void set(long rowIndex, Object... values) {
    if (immutable)
        throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) + " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int) getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" + String.valueOf(values.length) + ") does not match the number of columns in the table (" + String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Guard against null before calling getClass() — previously a
            // null value that failed the type check crashed with an NPE
            // while building this error message (mirrors addAt()).
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) + ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods() {
return this.internal;
}
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
// Raw per-cell insert operations used by generated (typed) table classes.
// Each method checks the immutable flag, then forwards straight to native
// code; callers are responsible for finishing a row with insertDone().
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// Dates are stored with one-second resolution.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable)
throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable)
throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the pending row started by the insert* calls above.
public void insertDone() {
if (immutable)
throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native per-cell insert primitives backing addAt() and InternalMethods.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Thin cell accessors: each forwards (columnIndex, rowIndex) to native code.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Stored value is in seconds; Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex) * 1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string )cell.
 *
 * @param columnIndex
 * 0 based index value of the column
 * @param rowIndex
 * 0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 * 0 based index value of the cell column
 * @param rowIndex
 * 0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
/** Returns the Mixed value stored in the given cell. */
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
/** Returns the runtime type of the Mixed value in the given cell. */
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
// NOTE(review): this uses a 3-arg Table constructor not visible in this
// chunk (the visible ones take 0 or 4 args) and, unlike getSubTable below,
// does not propagate the disposal context — confirm this overload exists
// and is intentional.
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the row count of the subtable in the given cell without opening it. */
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable in the given cell. */
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable)
throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: free the native subtable to avoid a leak.
nativeClose(nativeSubtablePtr);
throw e;
}
}
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this, nativeSubtablePtr, immutable);
} catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
//
// Setters
//
// Thin cell mutators: each checks the immutable flag, then forwards to
// native code.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable)
throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable)
throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable)
throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable)
throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable)
throwImmutable();
// Dates are stored with one-second resolution.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable)
throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
/** Sets a binary cell from a byte array; null is rejected. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable)
throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
 * Add the value for to all cells in the column.
 *
 * @param columnIndex column index of the cell
 * @param value amount added to every cell in the (integer) column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable)
throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
 * Creates a search index on the given column. Only string columns are
 * currently supported.
 */
public void setIndex(long columnIndex) {
if (immutable)
throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the given column has a search index. */
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Integers
// Whole-column aggregates over an integer column, computed natively.
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); JNI binds by signature, not
// by parameter name, so this is purely cosmetic.
protected native long nativeMinimumInt(long nativePtr, long columnIndex);
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
// Whole-column aggregates over a float column, computed natively.
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); cosmetic only.
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
// Whole-column aggregates over a double column, computed natively.
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Fixed parameter-name typo ("columnnIndex"); cosmetic only.
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to 'value', per value type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
// Creates a new query rooted at this table. If constructing the Java wrapper
// fails, the freshly created native query is closed here so the native
// pointer cannot leak.
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst*: return the row index of the first row whose cell in
// 'columnIndex' equals the given value. The index convention for "not found"
// is defined by the native layer (not visible here).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Date is stored natively with second precision: ms-since-epoch / 1000.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: build a TableView containing every row whose cell in
// 'columnIndex' equals the given value. All variants share the same
// leak-guard pattern: if the Java TableView wrapper cannot be constructed,
// the just-created native view is closed before rethrowing.
@Override
public TableView findAllLong(long columnIndex, long value) {
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Dates are matched with second precision (ms-since-epoch / 1000).
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Looks up 'value' in column 0 and returns the matching row index.
// Requires column 0 to be a string column with unique values. A search index
// is not required, but gives better performance.
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    if (getColumnType(0) != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on an integer column; forwards directly to the
// native layer. NOTE(review): presumably requires the column to be sorted —
// confirm against the core documentation.
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view holding the distinct values of the given column. Uses the
// same leak-guard pattern as the findAll* methods above.
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Asks the native layer to optimize internal storage; mutating call, so it
// is rejected while the table is immutable (inside a read transaction).
public void optimize() {
if (immutable)
throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// toString() renders all rows (INFINITE sentinel); the overload caps output
// at maxRows.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common guard used by all mutating methods.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Safe
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user-defined table class produced by the tightdb class generator
* automatically inherits from this class.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() meaning "render all rows".
public static final long INFINITE = -1;
// Handle to the native (C++) table; 0 means closed/disposed.
protected long nativePtr;
// True while inside a read transaction: all mutating calls throw.
protected boolean immutable = false;
// Owning object (group/subtable parent); null for a root, standalone table.
protected Object parent = null;
// Tracks native objects for delayed disposal; shared with child objects.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
static {
TightDB.loadLibrary();
}
/**
 * Construct a Table base object. It can be used to register columns in this
 * table. Registering into table is allowed only for empty tables. It
 * creates a native reference of the object and keeps a reference to it.
 */
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Package-private constructor used when wrapping an existing native table
// (e.g. a subtable); the context is shared with the parent so disposal of
// abandoned native objects is coordinated.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// Releases the native table immediately and marks this wrapper closed
// (nativePtr == 0). Safe to call more than once.
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
@Override
protected void finalize() {
// Accessing `nativePtr` without synchronization as we
// assume that close() is never called on behalf of a
// finalizer thread
if (this.nativePtr != 0) {
// Disposal is deferred to the context rather than done inline: the
// finalizer thread must not touch the native layer directly.
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
 * Check if the Table is valid.
 * Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
 * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
 * The only method you can call is 'isValid()'.
 */
public boolean isValid(){
// A closed wrapper (nativePtr == 0) is never valid; otherwise ask native.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
// Equality is content-based and delegated to the native layer, so any two
// Table wrappers (including generated typed subclasses) compare by data.
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// Has to work for all the typed tables as well
if (!(other instanceof Table))
return false;
Table otherTable = (Table) other;
return nativeEquals(nativePtr, otherTable.nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Shared precondition for addColumn/renameColumn.
// NOTE(review): no matching hashCode() override is visible in this chunk —
// confirm one exists elsewhere, since equals() is content-based.
private void verifyColumnName(String name) {
if (name.length() > 63) {
throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
}
}
// Returns a schema handle for the subtable column at 'columnIndex'.
// Only valid on a root table; subtables cannot create schema paths.
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 */
// NOTE(review): unlike updateFromSpec(), the schema mutators below do not
// check 'immutable' — presumably the native layer rejects the call inside a
// read transaction; confirm.
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 */
@Override
public void removeColumn(long columnIndex) {
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 */
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates a table specification from a Table specification structure.
 * Supported types - refer to @see ColumnType.
 *
 * @param tableSpec
 *            the specification describing the columns of this table.
 *            Duplicate column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the full column specification of this table as built by native.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
 * EXPERIMENTAL function
 */
// Replaces the row at rowIndex with the last row, then removes the last row
// (constant-time delete that does not preserve row order).
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row and returns its index (native return value).
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return the row index of the appended row
 */
public long add(Object... values) {
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * @param rowIndex index at which to insert; must be <= size()
 * @param values one value per column, in column order; each must be
 *               compatible with the column's type (see ColumnType.matchObject)
 */
// Validation happens in two passes (all-or-nothing before any native
// insert), then every cell is inserted via the per-type native calls and the
// row is committed with nativeInsertDone(). The per-column insert order must
// be preserved: native expects the cells of the pending row in column order.
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Stored with second precision: ms-since-epoch / 1000.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Insert the subtable cell first, then fill it recursively.
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Fills a just-inserted subtable cell with rows. 'value' is expected to be
// an Object[][] (rows of cell values); null means "leave the subtable
// empty". Must run before nativeInsertDone(), hence the special accessor.
// NOTE(review): the subtable wrapper obtained here is not explicitly
// closed — presumably reclaimed via the context/finalizer; confirm.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
 * Returns a view sorted by the specified column and order.
 * If constructing the Java TableView wrapper fails, the freshly created
 * native view is closed so the native pointer cannot leak.
 *
 * @param columnIndex the column to sort by
 * @param order ascending or descending
 * @return a sorted view over this table
 */
public TableView getSortedView(long columnIndex, Order order){
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
/**
 * Returns a view sorted by the specified column in the default (ascending)
 * order.
 *
 * @param columnIndex the column to sort by
 * @return a sorted view over this table
 */
public TableView getSortedView(long columnIndex){
    // Delegate to the two-argument overload instead of duplicating the
    // native-view construction and leak-guard logic.
    return getSortedView(columnIndex, Order.ascending);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order; each must be
 *               compatible with the column's type
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'. (The previous version also collected the
    // column types into an array, but never used it afterwards.)
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        if (!colType.matchObject(value)) {
            // Fix: guard against null before calling value.getClass() —
            // previously a null value failing the type check threw an
            // unhelpful NullPointerException here instead of the intended
            // IllegalArgumentException (addAt() already handled this case).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
// Each insert* writes one cell of the row currently being built; the row is
// committed by insertDone(). Every method rejects calls while immutable.
public class InternalMethods {
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Stored with second precision: ms-since-epoch / 1000.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// Native per-type cell inserts used by addAt() and InternalMethods above.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter reads one cell (columnIndex, rowIndex), both 0-based.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native stores seconds since epoch; Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (string) cell.
 *
 * @param columnIndex
 *            0 based index value of the column
 * @param rowIndex
 *            0 based index of the row.
 * @return value of the particular cell
 */
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
 * Get the value of a (binary) cell.
 *
 * @param columnIndex
 *            0 based index value of the cell column
 * @param rowIndex
 *            0 based index value of the cell row
 * @return value of the particular cell.
 */
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type held by a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
// NOTE(review): this uses a Table(Object, long, boolean) constructor that is
// not visible in this chunk (only Table() and Table(Context, Object, long,
// boolean) are); also a near-duplicate context-based getSubTable() exists
// below — likely a merge leftover; confirm which accessor callers should use.
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Number of rows in the subtable cell, without materializing a wrapper.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable cell.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
 *
 * Note: The subtable returned will have to be closed again after use.
 * You can let javas garbage collector handle that or better yet call close()
 * after use.
 *
 * @param columnIndex column index of the cell
 * @param rowIndex row index of the cell
 * @return TableBase the subtable at the requested cell
 */
// NOTE(review): calls nativeGetSubTable (capital T), which is not declared
// anywhere in this chunk — only nativeGetSubtable is; confirm the
// declaration exists elsewhere or this will fail to link.
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
<<<<<<< MINE
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
=======
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
>>>>>>> YOURS
//
// Setters
//

/** Sets a long value; table must be mutable (not in a read transaction). */
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
    if (immutable) throwImmutable();
    nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);

/** Sets a boolean value; table must be mutable. */
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
    if (immutable) throwImmutable();
    nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);

/** Sets a float value; table must be mutable. */
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
    if (immutable) throwImmutable();
    nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);

/** Sets a double value; table must be mutable. */
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
    if (immutable) throwImmutable();
    nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);

/** Sets a date value. Millisecond precision is truncated to whole seconds. */
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
    if (immutable) throwImmutable();
    // The native layer takes seconds since the epoch.
    nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);

/** Sets a String value; null is rejected explicitly. */
@Override
public void setString(long columnIndex, long rowIndex, String value) {
    if (value == null)
        throw new NullPointerException("Null String is not allowed.");
    if (immutable) throwImmutable();
    nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);

/**
 * Sets the value for a (binary) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data
 * the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
 */
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null array");
    if (data.isDirect())
        nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
    else
        throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/

/** Sets a binary (byte[]) value; null is rejected explicitly. */
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException("Null Array");
    nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);

/**
 * Sets the value for a (mixed typed) cell.
 *
 * @param columnIndex
 * column index of the cell
 * @param rowIndex
 * row index of the cell
 * @param data the mixed value to store; must not be null
 */
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
    if (immutable) throwImmutable();
    if (data == null)
        throw new NullPointerException();
    nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);

/**
 * Adds the value to all cells in the column.
 *
 * @param columnIndex column index
 * @param value amount to add to every cell in the column
 */
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
    if (immutable) throwImmutable();
    nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);

// Creates a search index on the column. Only string columns are supported.
public void setIndex(long columnIndex) {
    if (immutable) throwImmutable();
    if (getColumnType(columnIndex) != ColumnType.STRING)
        throw new IllegalArgumentException("Index is only supported on string columns.");
    nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);

// Returns true if the column has a search index.
public boolean hasIndex(long columnIndex) {
    return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// All aggregates delegate straight to the native core; each group covers one
// column type (integer, float, double).

// Integers

/** Sum of all values in an integer column. */
@Override
public long sumInt(long columnIndex) {
    return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);

/** Maximum value in an integer column. */
@Override
public long maximumInt(long columnIndex) {
    return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);

/** Minimum value in an integer column. */
@Override
public long minimumInt(long columnIndex) {
    return nativeMinimumInt(nativePtr, columnIndex);
}
// Fixed parameter-name typo: "columnnIndex" -> "columnIndex".
protected native long nativeMinimumInt(long nativePtr, long columnIndex);

/** Average of all values in an integer column. */
@Override
public double averageInt(long columnIndex) {
    return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);

// Floats

/** Sum of all values in a float column. */
@Override
public double sumFloat(long columnIndex) {
    return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);

/** Maximum value in a float column. */
@Override
public float maximumFloat(long columnIndex) {
    return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);

/** Minimum value in a float column. */
@Override
public float minimumFloat(long columnIndex) {
    return nativeMinimumFloat(nativePtr, columnIndex);
}
// Fixed parameter-name typo: "columnnIndex" -> "columnIndex".
protected native float nativeMinimumFloat(long nativePtr, long columnIndex);

/** Average of all values in a float column. */
@Override
public double averageFloat(long columnIndex) {
    return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);

// Doubles

/** Sum of all values in a double column. */
@Override
public double sumDouble(long columnIndex) {
    return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);

/** Maximum value in a double column. */
@Override
public double maximumDouble(long columnIndex) {
    return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);

/** Minimum value in a double column. */
@Override
public double minimumDouble(long columnIndex) {
    return nativeMinimumDouble(nativePtr, columnIndex);
}
// Fixed parameter-name typo: "columnnIndex" -> "columnIndex".
protected native double nativeMinimumDouble(long nativePtr, long columnIndex);

/** Average of all values in a double column. */
@Override
public double averageDouble(long columnIndex) {
    return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//

/** Number of cells in the integer column equal to value. */
public long count(long columnIndex, long value) {
    return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);

/** Number of cells in the float column equal to value. */
public long count(long columnIndex, float value) {
    return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);

/** Number of cells in the double column equal to value. */
public long count(long columnIndex, double value) {
    return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);

/** Number of cells in the string column equal to value. */
@Override
public long count(long columnIndex, String value) {
    return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);

//
// Searching methods.
//

/** Creates a new query rooted at this table. */
@Override
public TableQuery where() {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeQueryPtr = nativeWhere(nativePtr);
    try {
        // Copy context reference from parent
        return new TableQuery(this.context, nativeQueryPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native query to avoid a leak.
        TableQuery.nativeClose(nativeQueryPtr);
        throw e;
    }
}
protected native long nativeWhere(long nativeTablePtr);

// findFirst*: single-column search delegated to the native core, one overload
// per column type.

@Override
public long findFirstLong(long columnIndex, long value) {
    return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);

@Override
public long findFirstBoolean(long columnIndex, boolean value) {
    return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);

@Override
public long findFirstFloat(long columnIndex, float value) {
    return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);

@Override
public long findFirstDouble(long columnIndex, double value) {
    return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);

@Override
public long findFirstDate(long columnIndex, Date date) {
    // Dates are passed to the native layer as seconds since the epoch.
    return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);

@Override
public long findFirstString(long columnIndex, String value) {
    return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll*: build a TableView over the native search result, one overload per
// column type. Each method releases the native view if the Java wrapper cannot
// be constructed, to avoid leaking the native object.

@Override
public TableView findAllLong(long columnIndex, long value) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);

@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);

@Override
public TableView findAllFloat(long columnIndex, float value) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);

@Override
public TableView findAllDouble(long columnIndex, double value) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);

@Override
public TableView findAllDate(long columnIndex, Date date) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    // Dates are passed to the native layer as seconds since the epoch.
    long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);

@Override
public TableView findAllString(long columnIndex, String value) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
@Override
public long lookup(String value) {
    if (value == null)
        throw new NullPointerException("String must not be null.");
    if (this.getColumnType(0) != ColumnType.STRING)
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);

// Experimental feature
// lower/upperBoundLong: delegate to native binary-search bounds on an integer
// column (NOTE(review): presumably requires the column to be sorted - confirm
// against the native implementation).
@Override
public long lowerBoundLong(long columnIndex, long value) {
    return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
    return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);

//
// Builds a view over the column via nativeGetDistinctView; the native view is
// released if the Java wrapper cannot be constructed.
public TableView getDistinctView(long columnIndex) {
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    this.context.executeDelayedDisposal();
    long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);

// Optimize
// Asks the native core to optimize the table's internal representation.
public void optimize() {
    if (immutable) throwImmutable();
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);

/** JSON representation of the whole table, produced by the native core. */
@Override
public String toJson() {
    return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);

/** Human-readable dump of the table (all rows; INFINITE == no limit). */
@Override
public String toString() {
    return nativeToString(nativePtr, INFINITE);
}

/** Human-readable dump of the table, limited to maxRows rows. */
@Override
public String toString(long maxRows) {
    return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);

/** Human-readable dump of a single row. */
@Override
public String rowToString(long rowIndex) {
    return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);

// Shared guard used by all mutating methods when this wrapper is immutable.
private void throwImmutable() {
    throw new IllegalStateException("Mutable method call during read transaction.");
}
}
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {

    // Sentinel passed to nativeToString to request all rows.
    public static final long INFINITE = -1;

    // Pointer to the native (C++) table object; 0 once closed.
    protected long nativePtr;
    // When true, every mutating call throws (read transaction).
    protected boolean immutable = false;
    // Owning object; null means this is a root (free-standing) table.
    protected Object parent = null;
    // Coordinates delayed disposal of abandoned native objects.
    private Context context = null;

    // test:
    protected int tableNo;
    protected boolean DEBUG = false;
    protected static int TableCount = 0;

    static {
        // Make sure the native library is loaded before any native call.
        TightDB.loadLibrary();
    }

    /**
     * Construct a Table base object. It can be used to register columns in this
     * table. Registering into table is allowed only for empty tables. It
     * creates a native reference of the object and keeps a reference to it.
     */
    public Table() {
        context = new Context();
        // Native methods work will be initialized here. Generated classes will
        // have nothing to do with the native functions. Generated Java Table
        // classes will work as a wrapper on top of table.
        nativePtr = createNative();
        if (nativePtr == 0)
            throw new OutOfMemoryError("Out of native memory.");
        if (DEBUG) {
            tableNo = ++TableCount;
            System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
        }
    }
    protected native long createNative();

    // Wraps an existing native table (e.g. a subtable), sharing the parent's
    // context so disposal is coordinated.
    Table(Context context, Object parent, long nativePointer, boolean immutable) {
        this.immutable = immutable;
        this.context = context;
        this.parent = parent;
        this.nativePtr = nativePointer;
        if (DEBUG) {
            tableNo = ++TableCount;
            System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
        }
    }

    /** Releases the native table. Safe to call more than once. */
    @Override
    public void close() {
        if (nativePtr != 0) {
            nativeClose(nativePtr);
            nativePtr = 0; // mark as closed so a second close() is a no-op
            if (DEBUG) {
                TableCount--;
                System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
            }
        } else {
            if (DEBUG)
                System.err.println(".... CLOSE ignored.");
        }
    }
    protected static native void nativeClose(long nativeTablePtr);

    @Override
    protected void finalize() {
        // Accessing `nativePointer` without synchronization as we
        // assume that close() is never called on behalf of a
        // finalizer thread
        if (this.nativePtr != 0) {
            boolean isRoot = (parent == null);
            // Disposal is deferred to the context so it can run on a safe thread.
            context.asyncDisposeTable(this.nativePtr, isRoot);
        }
        if (DEBUG)
            System.err.println("==== FINALIZE " + tableNo + "...");
    }

    /*
     * Check if the Table is valid.
     * Whenever a Table/subtable is changed/updated all its subtables are invalidated.
     * You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
     * The only method you can call is 'isValid()'.
     */
    public boolean isValid(){
        if (nativePtr == 0)
            return false;
        return nativeIsValid(nativePtr);
    }
    protected static native boolean nativeIsValid(long nativeTablePtr);
@Override
public boolean equals(Object other) {
    // Identity fast path.
    if (other == this)
        return true;
    // instanceof is false for null, so this check also rejects null.
    // Has to work for all the typed tables as well, hence instanceof
    // rather than an exact class comparison.
    if (!(other instanceof Table))
        return false;
    // Equality is decided by the native core, not by Java field state.
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Guard used by addColumn/renameColumn: column names are capped at 63 characters.
private void verifyColumnName(String name) {
    if (name.length() > 63) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at columnIndex.
 * May only be called on a root table.
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    // Subtables cannot hand out schema editors; only the root table can.
    if (!nativeIsRootTable(nativePtr)) {
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    }
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Consistency fix: every other mutating method in this class rejects
    // calls during a read transaction; schema changes must do the same.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);

/**
 * Remove a column in the table dynamically.
 */
@Override
public void removeColumn(long columnIndex) {
    if (immutable) throwImmutable(); // was missing: mutating call must be rejected when immutable
    nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);

/**
 * Rename a column in the table.
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    if (immutable) throwImmutable(); // was missing: mutating call must be rejected when immutable
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
 * Updates the table's column layout from a TableSpec structure.
 * For the supported column types, refer to @see ColumnType.
 *
 * @param tableSpec
 * specification describing the columns of this table. Duplicate
 * column names are not allowed.
 */
public void updateFromSpec(TableSpec tableSpec) {
    if (immutable) throwImmutable();
    nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.

/**
 * Get the number of entries/rows of this table.
 *
 * @return The number of rows.
 */
@Override
public long size() {
    return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);

/**
 * Checks whether this table is empty or not.
 *
 * @return true if empty, otherwise false.
 */
@Override
public boolean isEmpty() {
    return size() == 0;
}

/**
 * Clears the table i.e., deleting all rows in the table.
 */
@Override
public void clear() {
    if (immutable) throwImmutable();
    nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);

// Column Information.

/**
 * Returns the number of columns in the table.
 *
 * @return the number of columns.
 */
@Override
public long getColumnCount() {
    return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);

/** Returns the table's column layout as a TableSpec. */
public TableSpec getTableSpec(){
    return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);

/**
 * Returns the name of a column identified by columnIndex. Notice that the
 * index is zero based.
 *
 * @param columnIndex the column index
 * @return the name of the column
 */
@Override
public String getColumnName(long columnIndex) {
    return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);

/**
 * Returns the 0-based index of a column based on the name.
 *
 * @param columnName column name
 * @return the index, -1 if not found
 */
@Override
public long getColumnIndex(String columnName) {
    if (columnName == null)
        throw new NullPointerException("Column name can not be null.");
    return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);

/**
 * Get the type of a column identified by the columnIndex.
 *
 * @param columnIndex index of the column.
 * @return Type of the particular column.
 */
@Override
public ColumnType getColumnType(long columnIndex) {
    return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);

/**
 * Removes a row from the specific index. As of now the entry is simply
 * removed from the table.
 *
 * @param rowIndex the row index (starting with 0)
 *
 */
@Override
public void remove(long rowIndex) {
    if (immutable) throwImmutable();
    nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);

/** Removes the last row; table must be mutable. */
@Override
public void removeLast() {
    if (immutable) throwImmutable();
    nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);

/**
 * EXPERIMENTAL function
 * NOTE(review): presumably moves the last row into rowIndex's slot and
 * shrinks the table - confirm against the native implementation.
 */
public void moveLastOver(long rowIndex) {
    if (immutable) throwImmutable();
    nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);

// Row Handling methods.

/** Appends one empty row. */
public long addEmptyRow() {
    if (immutable) throwImmutable();
    return nativeAddEmptyRow(nativePtr, 1);
}

/** Appends 'rows' empty rows; 'rows' must be positive. */
public long addEmptyRows(long rows) {
    if (immutable) throwImmutable();
    if (rows < 1)
        throw new IllegalArgumentException("'rows' must be > 0.");
    return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);

/**
 * Appends the specified row to the end of the table
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    long rowIndex = size();
    addAt(rowIndex, values);
    return rowIndex;
}
/**
 * Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
 * @param rowIndex position to insert at; must be <= size()
 * @param values one value per column, in column order
 */
public void addAt(long rowIndex, Object... values) {
    if (immutable) throwImmutable();

    // Check index
    long size = size();
    if (rowIndex > size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be <= table.size() " + String.valueOf(size) + ".");
    }

    // Check values types
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Validate every value against its column type BEFORE any native insert
    // call, so a type error cannot leave a half-inserted row behind.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            //String representation of the provided value type
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }

    // Insert values: one native insert call per column, completed by a single
    // nativeInsertDone() below.
    for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[(int)columnIndex];
        switch (colTypes[(int)columnIndex]) {
        case BOOLEAN:
            nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
            break;
        case INTEGER:
            nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
            break;
        case FLOAT:
            nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
            break;
        case DOUBLE:
            nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
            break;
        case STRING:
            nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
            break;
        case DATE:
            // The native layer takes seconds since the epoch.
            nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
            break;
        case MIXED:
            nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
            break;
        case BINARY:
            nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
            break;
        case TABLE:
            nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
            // Fill the subtable recursively while the insert is still open.
            insertSubtableValues(rowIndex, columnIndex, value);
            break;
        default:
            throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
        }
    }
    //Insert done. Use native, no need to check for immutable again here
    nativeInsertDone(nativePtr);
}

// Recursively inserts the rows of 'value' (an array of per-row Object[] cell
// arrays) into the subtable cell at (rowIndex, columnIndex). Intended to run
// between nativeInsertSubtable() and nativeInsertDone() - see the comment on
// the *DuringInsert subtable accessor.
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value != null) {
        // insert rows in subtable recursively
        Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
        int rows = ((Object[])value).length;
        for (int i=0; i<rows; ++i) {
            Object rowArr = ((Object[])value)[i];
            subtable.addAt(i, (Object[])rowArr);
        }
    }
}
/**
 * Returns a view sorted by the specified column and order.
 * @param columnIndex column to sort by
 * @param order ascending or descending
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex, Order order){
    // Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
    context.executeDelayedDisposal();
    long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
    try {
        return new TableView(this.context, nativeViewPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native view to avoid a leak.
        TableView.nativeClose(nativeViewPtr);
        throw e;
    }
}

/**
 * Returns a view sorted by the specified column in the default (ascending) order.
 * @param columnIndex column to sort by
 * @return the sorted view
 */
public TableView getSortedView(long columnIndex){
    // Deduplicated: delegate to the two-argument overload instead of repeating
    // the native call and error handling. Behavior unchanged - the original
    // passed ascending == true, which is exactly Order.ascending here.
    return getSortedView(columnIndex, Order.ascending);
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 * @param rowIndex index of the row to replace; must be < size()
 * @param values one value per column, in column order
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();

    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }

    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Fixed: 'value' may legitimately be null here; calling
            // value.getClass() used to throw NullPointerException instead of
            // the intended IllegalArgumentException (addAt() already handled
            // the null case - this makes the two methods consistent).
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }

    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
public class InternalMethods {
// Typed insert helpers: each one rejects mutation during a read transaction
// (immutable flag) and then delegates to the matching native insert function.
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Dates are stored natively with second precision; the millisecond part is truncated.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
// NOTE(review): arguments are passed as (rowIndex, columnIndex) here, reversed relative
// to every other call in this class - confirm insertSubtableValues expects that order.
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the current row; must be called after all column values of a row are inserted.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// JNI declarations backing the InternalMethods insert helpers above.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter reads a single cell value; all index pairs are 0-based (column, row).
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native layer stores seconds since epoch; java.util.Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value stored in a Mixed cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubtable(long columnIndex, long rowIndex) {
// NOTE(review): uses the old Table(parent, ptr, immutable) constructor; the context-based
// getSubTable below supersedes this - confirm this overload still exists elsewhere.
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// NOTE(review): nativeGetSubTable (capital T) is not declared in this chunk - confirm the
// declaration exists elsewhere; only nativeGetSubtable is visible here.
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
// Close the native pointer so a failed wrapper construction does not leak it.
nativeClose(nativeSubtablePtr);
throw e;
}
}
// Below version will allow to getSubTable when number of available rows are not updated yet -
// which happens before an insertDone().
<<<<<<< MINE
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
}
=======
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
}
>>>>>>> YOURS
//
// Setters
//
// Each setter overwrites an existing cell; mutation is rejected during a read transaction.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Second precision in the native layer; milliseconds are truncated.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value for to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Creates a search index on the column; only STRING columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// True if the column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Column-wide aggregates, grouped by element type. All delegate directly to native code.
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Number of cells in the column equal to the given value; overloaded per type.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/**
* Creates a new query on this table. The returned TableQuery owns a native
* pointer; it is closed if wrapper construction fails.
*/
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst* return the row index of the first match (typed per column type).
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Compare at second precision, matching how dates are stored.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* return a TableView of all matching rows. Each follows the same
// pattern: run delayed disposal, obtain a native view pointer, and close that
// pointer if TableView construction fails so it cannot leak.
@Override
public TableView findAllLong(long columnIndex, long value) {
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Compare at second precision, matching how dates are stored.
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
@Override
public long lookup(String value) {
if (value == null)
throw new NullPointerException("String must not be null.");
if (this.getColumnType(0) != ColumnType.STRING)
throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on a sorted integer column (delegated to native code).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
// Returns a view containing one row per distinct value in the column; the native
// pointer is closed if TableView construction fails, mirroring the findAll* methods.
public TableView getDistinctView(long columnIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
this.context.executeDelayedDisposal();
long nativeViewPtr = nativeGetDistinctView(nativePtr, columnIndex);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
// Lets the native layer compact internal storage (e.g. string columns).
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// Serializes the whole table as JSON (produced natively).
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
@Override
public String toString() {
// INFINITE (-1) means no row limit.
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for all mutating methods called during a read transaction.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
// ===== Unstructured: second variant of Table.java follows (concatenation artifact) =====
package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of his choice will automatically inherit
* from this class by the tightdb-class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString meaning "no row limit".
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 once close() has been called.
protected long nativePtr;
// True while inside a read transaction; all mutators then throw.
protected boolean immutable = false;
// Owning object (e.g. parent table for subtables); null for root tables.
protected Object parent = null;
// Tracks native objects for delayed disposal; set by every constructor.
private Context context = null;
// test:
// Debug-only bookkeeping for counting live table instances.
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library once, before any native method can be called.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
// createNative() returns 0 when native allocation failed.
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
// Package-private constructor wrapping an already-existing native table
// (subtables, views, group members). The context is shared with the parent
// so disposal of abandoned native objects is coordinated.
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// If close() is called, no penalty is paid for delayed disposal
// via the context
// Frees the native table immediately and marks this wrapper dead (nativePtr = 0).
// Safe to call more than once; subsequent calls are ignored.
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
// Safety net: if the wrapper is garbage collected without close(), hand the
// native pointer to the context for asynchronous disposal.
@Override
protected void finalize() {
// Accessing `nativePointer` without synchronization as we
// assume that close() is never called on behalf of a
// finalizer thread
if (this.nativePtr != 0) {
// Root tables (no parent) own their native object outright.
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
// A closed wrapper (nativePtr == 0) is never valid; otherwise ask native code.
if (nativePtr == 0)
return false;
return nativeIsValid(nativePtr);
}
protected static native boolean nativeIsValid(long nativeTablePtr);
/**
 * Two tables are equal when the native layer reports their contents equal.
 * Works for generated typed table subclasses as well, since they all wrap Table.
 */
@Override
public boolean equals(Object other) {
    if (other == this)
        return true;
    // instanceof is false for null, so this also covers the null check.
    if (!(other instanceof Table))
        return false;
    return nativeEquals(nativePtr, ((Table) other).nativePtr);
}
protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Guard shared by addColumn/renameColumn: the native layer caps column names.
private void verifyColumnName(String name) {
    final int maxColumnNameLength = 63;
    if (name.length() > maxColumnNameLength) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex}.
 * Only valid on a root table; subtables must navigate via their root.
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path from the root to the requested column; deeper levels are appended by SubtableSchema.
    long[] path = { columnIndex };
    return new SubtableSchema(nativePtr, path);
}
protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
* Add a column to the table dynamically.
* @return Index of the new column.
*/
@Override
public long addColumn (ColumnType type, String name) {
verifyColumnName(name);
return nativeAddColumn(nativePtr, type.getValue(), name);
}
protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
* Remove a column in the table dynamically.
*/
@Override
public void removeColumn(long columnIndex) {
removeColumn:
nativeRemoveColumn(nativePtr, columnIndex);
}
protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
* Rename a column in the table.
*/
@Override
public void renameColumn(long columnIndex, String newName) {
verifyColumnName(newName);
nativeRenameColumn(nativePtr, columnIndex, newName);
}
protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates a table specification from a Table specification structure.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
* the specification (column names and types) to apply to this table.
* Duplicate column names are not allowed.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns the native table specification (column names/types).
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function
* Replaces the row at rowIndex with the last row, then removes the last row
* (fast unordered delete, performed natively).
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
// Appends one empty row; returns the index of the new row (as reported natively).
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
// Appends 'rows' empty rows; 'rows' must be positive.
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
* Appends the specified row to the end of the table
* @param values one value per column, in column order
* @return The row index of the appended row
*/
public long add(Object... values) {
// Appending is inserting at index size(); addAt validates types and mutability.
long rowIndex = size();
addAt(rowIndex, values);
return rowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
*
* @param rowIndex 0-based insertion position; must be &lt;= size()
* @param values one value per column, each compatible with its column's type
* @throws IllegalStateException if called during a read transaction
* @throws IllegalArgumentException if the index is out of range, the number of
*         values does not match the column count, or a value's type does not
*         match its column's type
*/
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// Validate ALL values against their column types before inserting anything,
// so a type error cannot leave a partially-inserted row behind.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Insert values
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// The native layer stores dates as whole seconds, not milliseconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
// Create the subtable cell first, then fill it row by row.
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
/**
 * Fills the subtable cell at (rowIndex, columnIndex) from a nested
 * {@code Object[][]} value, inserting each nested row recursively via
 * {@link #addAt}. A null value leaves the subtable empty.
 */
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
    if (value == null) {
        return;
    }
    Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
    Object[] nestedRows = (Object[]) value;
    for (int r = 0; r < nestedRows.length; r++) {
        subtable.addAt(r, (Object[]) nestedRows[r]);
    }
}
/**
 * Returns a view of this table sorted by the specified column and order.
 *
 * @param columnIndex column to sort by
 * @param order sort direction
 * @return a {@code TableView} over this table's rows in sorted order
 */
public TableView getSortedView(long columnIndex, Order order) {
    // Let the context dispose of abandoned tightdb objects before creating a new one.
    context.executeDelayedDisposal();
    final boolean ascending = (order == Order.ascending);
    final long viewPtr = nativeGetSortedView(nativePtr, columnIndex, ascending);
    try {
        return new TableView(this.context, viewPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native view to avoid a leak.
        TableView.nativeClose(viewPtr);
        throw e;
    }
}
/**
 * Returns a view of this table sorted by the specified column, using the
 * default (ascending) order.
 *
 * @param columnIndex column to sort by
 * @return a {@code TableView} over this table's rows in ascending order
 */
public TableView getSortedView(long columnIndex) {
    // Let the context dispose of abandoned tightdb objects before creating a new one.
    context.executeDelayedDisposal();
    final long viewPtr = nativeGetSortedView(nativePtr, columnIndex, true);
    try {
        return new TableView(this.context, viewPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native view to avoid a leak.
        TableView.nativeClose(viewPtr);
        throw e;
    }
}
protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified values.
 *
 * Implemented by removing the existing row and re-inserting the new one,
 * so any subtable contents of the old row are discarded.
 *
 * @param rowIndex 0-based index of the row to replace; must be &lt; size()
 * @param values one value per column, each compatible with its column's type
 * @throws IllegalStateException if called during a read transaction
 * @throws IllegalArgumentException if the index is out of range, the number
 *         of values does not match the column count, or a value's type does
 *         not match its column's type
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values'
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Bug fix: a mismatching null value used to NPE on value.getClass();
            // report "null" instead, consistent with addAt().
            String providedType = (value == null) ? "null" : value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
//
// Each insert* method below performs the immutability check and then delegates
// straight to the corresponding native insert function on the enclosing table.
public class InternalMethods{
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Native layer stores dates as whole seconds, not milliseconds.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
// Rejects null explicitly: the native layer is not expected to handle a null array.
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Must be called once after a sequence of insert* calls to commit the row.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// JNI entry points for per-cell row insertion. All take the native table
// pointer first, then column and row indices.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
// dateTimeValue is in whole seconds (callers divide Date.getTime() by 1000).
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter delegates to its native counterpart; no mutability check is
// needed for reads.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native layer stores seconds; Date expects milliseconds.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
* 0 based index value of the column
* @param rowIndex
* 0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
* 0 based index value of the cell column
* @param rowIndex
* 0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value currently stored in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let Java's garbage collector handle that, or better yet, call close()
* explicitly after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
<<<<<<< MINE
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
=======
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
>>>>>>> YOURS
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
<<<<<<< MINE
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
=======
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
>>>>>>> YOURS
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
/** Returns the number of rows in the subtable stored at the given cell. */
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
/** Removes all rows from the subtable stored at the given cell. */
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter checks mutability, then delegates to its native counterpart.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Native layer stores dates as whole seconds, not milliseconds.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
// NOTE(review): unlike the other setters, the null check here precedes the
// immutability check — callers may see NPE before IllegalStateException.
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data
* the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
/**
* Sets the value of a binary cell from a byte array.
*
* @param data the bytes to store; must not be null
*/
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
* column index of the cell
* @param rowIndex
* row index of the cell
* @param data the value to store; must not be null
* @throws NullPointerException if data is null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Adds the given value to all cells in the column.
*
* @param columnIndex column index of the cell
* @param value amount to add to every cell in the column
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
/**
* Creates a search index on the given column.
* Only string columns are supported.
*/
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
/** Returns true if the given column has a search index. */
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// All aggregates delegate directly to the native core.
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
// NOTE(review): parameter name "columnnIndex" is a typo in several native
// declarations below; harmless to JNI linkage, which binds by method name.
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Count the cells in a column equal to the given value.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/**
 * Creates a new query on this table.
 *
 * @return a {@code TableQuery} bound to this table's native handle
 */
@Override
public TableQuery where() {
    // Let the context dispose of abandoned tightdb objects before creating a new one.
    this.context.executeDelayedDisposal();
    final long queryPtr = nativeWhere(nativePtr);
    try {
        // Copy context reference from parent
        return new TableQuery(context, queryPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native query to avoid a leak.
        TableQuery.nativeClose(queryPtr);
        throw e;
    }
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst* methods: return the index of the first matching row.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Native layer compares dates in whole seconds.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* methods: return a TableView of all matching rows. Each one closes
// the native view if the Java wrapper cannot be constructed, to avoid leaks.
@Override
public TableView findAllLong(long columnIndex, long value) {
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Native layer compares dates in whole seconds.
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. Index is not required, but will result in better performance
/**
 * Looks up the row whose first (string) column equals {@code value}.
 *
 * @param value the key string; must not be null
 * @return the row index reported by the native lookup
 */
@Override
public long lookup(String value) {
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    if (getColumnType(0) != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Binary-search style bounds on an integer column (delegated to native core).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
//
/**
 * Returns a view containing only distinct values of the given column.
 *
 * @param columnIndex column whose distinct values are requested
 * @return a {@code TableView} over the distinct rows
 */
public TableView getDistinctView(long columnIndex) {
    // Let the context dispose of abandoned tightdb objects before creating a new one.
    this.context.executeDelayedDisposal();
    final long viewPtr = nativeGetDistinctView(nativePtr, columnIndex);
    try {
        return new TableView(this.context, viewPtr, immutable);
    } catch (RuntimeException e) {
        // Wrapper construction failed: release the native view to avoid a leak.
        TableView.nativeClose(viewPtr);
        throw e;
    }
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
// Optimize
/** Asks the native core to optimize the table's internal storage. */
public void optimize() {
if (immutable) throwImmutable();
nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
/** Serializes the whole table to a JSON string via the native core. */
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// INFINITE (-1) tells the native layer to render all rows.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
/** Renders a single row as a string via the native core. */
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Common failure path for all mutating methods called while 'immutable' is set.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}package com.tightdb;
import java.util.Date;
import com.tightdb.TableView.Order;
import com.tightdb.typed.TightDB;
/*
Add isEqual(Table)
*/
/**
* This class is a base class for all TightDB tables. The class supports all low
* level methods (define/insert/delete/update) a table has. All the native
* communications to the TightDB C++ library are also handled by this class.
*
* A user who wants to create a table of their choice will automatically inherit
* from this class via the tightdb class generator.
*
* As an example, let's create a table to keep records of an employee in a
* company.
*
* For this purpose we will create a class named "employee" with an Entity
* annotation as follows.
*
* @DefineTable
* public class employee {
* String name;
* long age;
* boolean hired;
* byte[] imageData;
* }
*
* The tightdb class generator will generate classes relevant to the employee:
*
* 1. Employee.java: Represents one employee of the employee table i.e., a single row. Getter/setter
* methods are declared from which you will be able to set/get values
* for a particular employee.
* 2. EmployeeTable.java: Represents the class for storing a collection of employee i.e., a table
* of rows. The class is inherited from the TableBase class as described above.
* It has all the high level methods to manipulate Employee objects from the table.
* 3. EmployeeView.java: Represents view of the employee table i.e., result set of queries.
*
*
*/
public class Table implements TableOrView, TableSchema {
// Sentinel passed to nativeToString() to mean "render all rows".
public static final long INFINITE = -1;
// Pointer to the native (C++) table object; 0 once closed.
protected long nativePtr;
// When true, all mutating methods throw (see throwImmutable()).
protected boolean immutable = false;
// Owning object (e.g. parent table for subtables); null for a root table.
protected Object parent = null;
// Shared disposal context; set by every constructor.
private Context context = null;
// test:
protected int tableNo;
protected boolean DEBUG = false;
protected static int TableCount = 0;
// Load the native library once, before any native method can be called.
static {
TightDB.loadLibrary();
}
/**
* Construct a Table base object. It can be used to register columns in this
* table. Registering into table is allowed only for empty tables. It
* creates a native reference of the object and keeps a reference to it.
*
* @throws OutOfMemoryError if the native table could not be allocated
*/
public Table() {
context = new Context();
// Native methods work will be initialized here. Generated classes will
// have nothing to do with the native functions. Generated Java Table
// classes will work as a wrapper on top of table.
nativePtr = createNative();
if (nativePtr == 0)
throw new OutOfMemoryError("Out of native memory.");
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("====== New Tablebase " + tableNo + " : ptr = " + nativePtr);
}
}
protected native long createNative();
Table(Context context, Object parent, long nativePointer, boolean immutable) {
this.immutable = immutable;
this.context = context;
this.parent = parent;
this.nativePtr = nativePointer;
if (DEBUG) {
tableNo = ++TableCount;
System.err.println("===== New Tablebase(ptr) " + tableNo + " : ptr = " + nativePtr);
}
}
// If close() is called, no penalty is paid for delayed disposal
// via the context
/**
* Releases the native table immediately. Safe to call more than once:
* subsequent calls are ignored because nativePtr is reset to 0.
*/
@Override
public void close() {
if (nativePtr != 0) {
nativeClose(nativePtr);
nativePtr = 0;
if (DEBUG) {
TableCount--;
System.err.println("==== CLOSE " + tableNo + " ptr= " + nativePtr + " remaining " + TableCount);
}
} else {
if (DEBUG)
System.err.println(".... CLOSE ignored.");
}
}
protected static native void nativeClose(long nativeTablePtr);
@Override
protected void finalize() {
// Accessing `nativePointer` without synchronization as we
// assume that close() is never called on behalf of a
// finalizer thread
if (this.nativePtr != 0) {
// Root tables and subtables are disposed differently on the
// native side, hence the isRoot flag.
boolean isRoot = (parent == null);
context.asyncDisposeTable(this.nativePtr, isRoot);
}
if (DEBUG)
System.err.println("==== FINALIZE " + tableNo + "...");
}
/*
* Check if the Table is valid.
* Whenever a Table/subtable is changed/updated all it's subtables are invalidated.
* You can no longer perform any actions on the table, and if done anyway, an exception is thrown.
* The only method you can call is 'isValid()'.
*/
public boolean isValid() {
    // A closed table (pointer reset to 0) is never valid; otherwise
    // delegate the liveness check to the native layer.
    return nativePtr != 0 && nativeIsValid(nativePtr);
}

protected static native boolean nativeIsValid(long nativeTablePtr);
/**
 * Compares this table with another object. Two tables are equal when the
 * native layer reports their contents equal; this also works for the
 * generated typed-table subclasses.
 */
@Override
public boolean equals(Object other) {
    if (this == other)
        return true;
    // `instanceof` is false for null, so no separate null check is needed.
    // Has to work for all the typed tables as well.
    if (!(other instanceof Table))
        return false;
    Table otherTable = (Table) other;
    return nativeEquals(nativePtr, otherTable.nativePtr);
}

/**
 * Overridden together with equals() to honor the Object contract
 * (equal objects must have equal hash codes). Equality is decided by
 * native table contents, which cannot be hashed cheaply from Java, so a
 * constant is returned: always correct, though tables make poor keys in
 * hash-based collections.
 */
@Override
public int hashCode() {
    return 31;
}

protected native boolean nativeEquals(long nativeTablePtr, long nativeTableToComparePtr);
// Guard shared by column-creating/renaming operations: the native layer
// only supports column names up to 63 characters.
private void verifyColumnName(String name) {
    final int maxLength = 63;
    if (name.length() > maxLength) {
        throw new IllegalArgumentException("Column names are currently limited to max 63 characters.");
    }
}
/**
 * Returns a schema handle for the subtable column at {@code columnIndex},
 * allowing nested columns to be defined dynamically.
 *
 * @throws UnsupportedOperationException if called on a subtable rather
 *         than a root table.
 */
@Override
public TableSchema getSubtableSchema(long columnIndex) {
    // Idiomatic negation instead of `== false`.
    if (!nativeIsRootTable(nativePtr))
        throw new UnsupportedOperationException("This is a subtable. Can only be called on root table.");
    // Path of column indices from the root down to the target subtable;
    // a single hop here, SubtableSchema extends it for deeper nesting.
    long[] newPath = new long[1];
    newPath[0] = columnIndex;
    return new SubtableSchema(nativePtr, newPath);
}

protected native boolean nativeIsRootTable(long nativeTablePtr);
/**
 * Add a column to the table dynamically.
 *
 * @param type data type of the column, see {@code ColumnType}
 * @param name name of the column (max 63 characters)
 * @return Index of the new column.
 */
@Override
public long addColumn (ColumnType type, String name) {
    // Schema changes mutate the table: enforce the same immutability guard
    // applied by every other mutating method in this class.
    if (immutable) throwImmutable();
    verifyColumnName(name);
    return nativeAddColumn(nativePtr, type.getValue(), name);
}

protected native long nativeAddColumn(long nativeTablePtr, int type, String name);
/**
 * Remove a column in the table dynamically.
 *
 * @param columnIndex 0-based index of the column to remove
 */
@Override
public void removeColumn(long columnIndex) {
    // Schema changes mutate the table: enforce the same immutability guard
    // applied by every other mutating method in this class.
    if (immutable) throwImmutable();
    nativeRemoveColumn(nativePtr, columnIndex);
}

protected native void nativeRemoveColumn(long nativeTablePtr, long columnIndex);
/**
 * Rename a column in the table.
 *
 * @param columnIndex 0-based index of the column to rename
 * @param newName new column name (max 63 characters)
 */
@Override
public void renameColumn(long columnIndex, String newName) {
    // Schema changes mutate the table: enforce the same immutability guard
    // applied by every other mutating method in this class.
    if (immutable) throwImmutable();
    verifyColumnName(newName);
    nativeRenameColumn(nativePtr, columnIndex, newName);
}

protected native void nativeRenameColumn(long nativeTablePtr, long columnIndex, String name);
/**
* Updates this table's specification (its set of columns) from a
* {@code TableSpec} structure. Only allowed on mutable tables.
* Supported types - refer to @see ColumnType.
*
* @param tableSpec
*            the specification describing the columns to create.
*/
public void updateFromSpec(TableSpec tableSpec) {
if (immutable) throwImmutable();
nativeUpdateFromSpec(nativePtr, tableSpec);
}
protected native void nativeUpdateFromSpec(long nativeTablePtr, TableSpec tableSpec);
// Table Size and deletion. AutoGenerated subclasses are nothing to do with this
// class.
/**
* Get the number of entries/rows of this table.
*
* @return The number of rows.
*/
@Override
public long size() {
return nativeSize(nativePtr);
}
protected native long nativeSize(long nativeTablePtr);
/**
* Checks whether this table is empty or not.
*
* @return true if empty, otherwise false.
*/
@Override
public boolean isEmpty() {
return size() == 0;
}
/**
* Clears the table i.e., deleting all rows in the table.
* Throws if the table is immutable.
*/
@Override
public void clear() {
if (immutable) throwImmutable();
nativeClear(nativePtr);
}
protected native void nativeClear(long nativeTablePtr);
// Column Information.
/**
* Returns the number of columns in the table.
*
* @return the number of columns.
*/
@Override
public long getColumnCount() {
return nativeGetColumnCount(nativePtr);
}
protected native long nativeGetColumnCount(long nativeTablePtr);
// Returns a snapshot of this table's column specification.
public TableSpec getTableSpec(){
return nativeGetTableSpec(nativePtr);
}
protected native TableSpec nativeGetTableSpec(long nativeTablePtr);
/**
* Returns the name of a column identified by columnIndex. Notice that the
* index is zero based.
*
* @param columnIndex the column index
* @return the name of the column
*/
@Override
public String getColumnName(long columnIndex) {
return nativeGetColumnName(nativePtr, columnIndex);
}
protected native String nativeGetColumnName(long nativeTablePtr, long columnIndex);
/**
* Returns the 0-based index of a column based on the name.
*
* @param columnName column name, must not be null
* @return the index, -1 if not found
*/
@Override
public long getColumnIndex(String columnName) {
if (columnName == null)
throw new NullPointerException("Column name can not be null.");
return nativeGetColumnIndex(nativePtr, columnName);
}
protected native long nativeGetColumnIndex(long nativeTablePtr, String columnName);
/**
* Get the type of a column identified by the columnIdex.
*
* @param columnIndex index of the column.
* @return Type of the particular column.
*/
@Override
public ColumnType getColumnType(long columnIndex) {
// Translate the native int type code into the ColumnType enum.
return ColumnType.fromNativeValue(nativeGetColumnType(nativePtr, columnIndex));
}
protected native int nativeGetColumnType(long nativeTablePtr, long columnIndex);
/**
* Removes a row from the specific index. As of now the entry is simply
* removed from the table. Throws if the table is immutable.
*
* @param rowIndex the row index (starting with 0)
*
*/
@Override
public void remove(long rowIndex) {
if (immutable) throwImmutable();
nativeRemove(nativePtr, rowIndex);
}
protected native void nativeRemove(long nativeTablePtr, long rowIndex);
// Removes the last row of the table. Throws if the table is immutable.
@Override
public void removeLast() {
if (immutable) throwImmutable();
nativeRemoveLast(nativePtr);
}
protected native void nativeRemoveLast(long nativeTablePtr);
/**
* EXPERIMENTAL function.
* Removes the row at rowIndex by moving the last row into its place
* (faster than remove(), but does not preserve row order).
*/
public void moveLastOver(long rowIndex) {
if (immutable) throwImmutable();
nativeMoveLastOver(nativePtr, rowIndex);
}
protected native void nativeMoveLastOver(long nativeTablePtr, long rowIndex);
// Row Handling methods.
/**
* Appends one empty row (all cells set to their default values).
* @return index of the new row.
*/
public long addEmptyRow() {
if (immutable) throwImmutable();
return nativeAddEmptyRow(nativePtr, 1);
}
/**
* Appends 'rows' empty rows.
* @return index of the first new row.
*/
public long addEmptyRows(long rows) {
if (immutable) throwImmutable();
if (rows < 1)
throw new IllegalArgumentException("'rows' must be > 0.");
return nativeAddEmptyRow(nativePtr, rows);
}
protected native long nativeAddEmptyRow(long nativeTablePtr, long rows);
/**
 * Appends the specified row to the end of the table.
 *
 * @param values one value per column, in column order
 * @return The row index of the appended row
 */
public long add(Object... values) {
    // Appending is simply inserting at the current end of the table.
    final long newRowIndex = size();
    addAt(newRowIndex, values);
    return newRowIndex;
}
/**
* Inserts a row at the specified row index. Shifts the row currently at that row index and any subsequent rows down (adds one to their row index).
* @param rowIndex index to insert at; must be &lt;= size()
* @param values one value per column, each compatible with its column type
*/
public void addAt(long rowIndex, Object... values) {
if (immutable) throwImmutable();
// Check index
long size = size();
if (rowIndex > size) {
throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
" must be <= table.size() " + String.valueOf(size) + ".");
}
// Check values types
int columns = (int)getColumnCount();
if (columns != values.length) {
throw new IllegalArgumentException("The number of value parameters (" +
String.valueOf(values.length) +
") does not match the number of columns in the table (" +
String.valueOf(columns) + ").");
}
// First pass: validate every value against its column type BEFORE any
// native insert, so a bad argument cannot leave a half-inserted row.
ColumnType colTypes[] = new ColumnType[columns];
for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[columnIndex];
ColumnType colType = getColumnType(columnIndex);
colTypes[columnIndex] = colType;
if (!colType.matchObject(value)) {
//String representation of the provided value type
String providedType;
if (value == null)
providedType = "null";
else
providedType = value.getClass().toString();
throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
}
}
// Second pass: insert each cell via the matching native call. The native
// insert protocol requires one insert per column followed by insertDone().
// NOTE(review): non-null is assumed here for DATE/FLOAT/DOUBLE etc. —
// presumably matchObject() rejects null for those types; verify.
for (long columnIndex = 0; columnIndex < columns; columnIndex++) {
Object value = values[(int)columnIndex];
switch (colTypes[(int)columnIndex]) {
case BOOLEAN:
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, (Boolean)value);
break;
case INTEGER:
nativeInsertLong(nativePtr, columnIndex, rowIndex, ((Number)value).longValue());
break;
case FLOAT:
nativeInsertFloat(nativePtr, columnIndex, rowIndex, ((Float)value).floatValue());
break;
case DOUBLE:
nativeInsertDouble(nativePtr, columnIndex, rowIndex, ((Double)value).doubleValue());
break;
case STRING:
nativeInsertString(nativePtr, columnIndex, rowIndex, (String)value);
break;
case DATE:
// Native layer stores dates as seconds since the epoch.
nativeInsertDate(nativePtr, columnIndex, rowIndex, ((Date)value).getTime()/1000);
break;
case MIXED:
nativeInsertMixed(nativePtr, columnIndex, rowIndex, Mixed.mixedValue(value));
break;
case BINARY:
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, (byte[])value);
break;
case TABLE:
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, value);
break;
default:
throw new RuntimeException("Unexpected columnType: " + String.valueOf(colTypes[(int)columnIndex]));
}
}
//Insert done. Use native, no need to check for immutable again here
nativeInsertDone(nativePtr);
}
// Fills a just-inserted subtable cell from a nested Object[][] value.
// A null value leaves the subtable empty. Each element of the outer array
// is one row of the subtable; nesting recurses via addAt().
private void insertSubtableValues(long rowIndex, long columnIndex, Object value) {
if (value != null) {
// insert rows in subtable recursively
Table subtable = getSubtableDuringInsert(columnIndex, rowIndex);
int rows = ((Object[])value).length;
for (int i=0; i<rows; ++i) {
Object rowArr = ((Object[])value)[i];
subtable.addAt(i, (Object[])rowArr);
}
}
}
/**
* Returns a view sorted by the specified column and order.
* @param columnIndex column to sort by
* @param order Order.ascending or Order.descending
* @return a new TableView over this table's rows, sorted
*/
public TableView getSortedView(long columnIndex, Order order){
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeGetSortedView(nativePtr, columnIndex, (order == Order.ascending));
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: release the native view to avoid a leak.
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
/**
 * Returns a view sorted by the specified column using the default
 * (ascending) order.
 *
 * @param columnIndex column to sort by
 * @return a new TableView over this table's rows, sorted ascending
 */
public TableView getSortedView(long columnIndex){
    // Delegate to the two-argument overload so the view creation and
    // native-pointer error handling live in exactly one place.
    return getSortedView(columnIndex, Order.ascending);
}

protected native long nativeGetSortedView(long nativeTableViewPtr, long columnIndex, boolean ascending);
/**
 * Replaces the row at the specified position with the specified row.
 *
 * @param rowIndex index of the row to replace; must be &lt; size()
 * @param values one value per column, each compatible with its column type
 */
public void set(long rowIndex, Object... values) {
    if (immutable) throwImmutable();
    // Check index
    long size = size();
    if (rowIndex >= size) {
        throw new IllegalArgumentException("rowIndex " + String.valueOf(rowIndex) +
                " must be < table.size() " + String.valueOf(size) + ".");
    }
    // Verify number of 'values'
    int columns = (int)getColumnCount();
    if (columns != values.length) {
        throw new IllegalArgumentException("The number of value parameters (" +
                String.valueOf(values.length) +
                ") does not match the number of columns in the table (" +
                String.valueOf(columns) + ").");
    }
    // Verify type of 'values' before mutating anything, so a bad argument
    // cannot leave the row removed but not re-inserted.
    ColumnType colTypes[] = new ColumnType[columns];
    for (int columnIndex = 0; columnIndex < columns; columnIndex++) {
        Object value = values[columnIndex];
        ColumnType colType = getColumnType(columnIndex);
        colTypes[columnIndex] = colType;
        if (!colType.matchObject(value)) {
            // Guard against NPE when value is null (mirrors addAt()).
            String providedType;
            if (value == null)
                providedType = "null";
            else
                providedType = value.getClass().toString();
            throw new IllegalArgumentException("Invalid argument no " + String.valueOf(1 + columnIndex) +
                    ". Expected a value compatible with column type " + colType + ", but got " + providedType + ".");
        }
    }
    // Now that all values are verified, we can remove the row and insert it again.
    // TODO: Can be optimized to only set the values (but clear any subtables)
    remove(rowIndex);
    addAt(rowIndex, values);
}
//Instance of the inner class InternalMethods.
private InternalMethods internal = new InternalMethods();
//Returns InternalMethods instance with public internal methods. Should only be called by AbstractTable
public InternalMethods getInternalMethods(){
return this.internal;
}
//Holds methods that must be publicly available for AbstractClass.
//Should not be called when using the dynamic interface. The methods can be accessed by calling getInternalMethods() in Table class
//Each insert* call writes one cell of a row being built; the row becomes
//visible only after insertDone(). All methods reject immutable tables.
public class InternalMethods{
public void insertLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeInsertLong(nativePtr, columnIndex, rowIndex, value);
}
public void insertDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeInsertDouble(nativePtr, columnIndex, rowIndex, value);
}
public void insertFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeInsertFloat(nativePtr, columnIndex, rowIndex, value);
}
public void insertBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeInsertBoolean(nativePtr, columnIndex, rowIndex, value);
}
public void insertDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Native layer stores dates as seconds since the epoch.
nativeInsertDate(nativePtr, columnIndex, rowIndex, date.getTime()/1000);
}
public void insertString(long columnIndex, long rowIndex, String value) {
if (immutable) throwImmutable();
nativeInsertString(nativePtr, columnIndex, rowIndex, value);
}
public void insertMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
nativeInsertMixed(nativePtr, columnIndex, rowIndex, data);
}
/*
public void insertBinary(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
//System.err.printf("\ninsertBinary(col %d, row %d, ByteBuffer)\n", columnIndex, rowIndex);
//System.err.println("-- HasArray: " + (data.hasArray() ? "yes":"no") + " len= " + data.array().length);
if (data.isDirect())
nativeInsertByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
*/
public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if(data != null)
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
else
throw new NullPointerException("byte[] must not be null. Alternatively insert empty array.");
}
public void insertSubtable(long columnIndex, long rowIndex, Object[][] values) {
if (immutable) throwImmutable();
nativeInsertSubtable(nativePtr, columnIndex, rowIndex);
insertSubtableValues(rowIndex, columnIndex, values);
}
// Commits the row started by the preceding insert* calls.
public void insertDone() {
if (immutable) throwImmutable();
nativeInsertDone(nativePtr);
}
}
// JNI declarations backing the insert protocol used by addAt() and
// InternalMethods. Each writes one cell; nativeInsertDone() commits the row.
protected native void nativeInsertFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
protected native void nativeInsertDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
protected native void nativeInsertLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
protected native void nativeInsertBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
protected native void nativeInsertDate(long nativePtr, long columnIndex, long rowIndex, long dateTimeValue);
protected native void nativeInsertString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
protected native void nativeInsertMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed mixed);
/* public void insertBinary(long columnIndex, long rowIndex, byte[] data) {
if (data == null)
throw new NullPointerException("Null Array");
if (immutable) throwImmutable();
nativeInsertByteArray(nativePtr, columnIndex, rowIndex, data);
}*/
protected native void nativeInsertByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
protected native void nativeInsertSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
protected native void nativeInsertDone(long nativeTablePtr);
//
// Getters
//
// Each getter reads one cell, identified by (columnIndex, rowIndex),
// and delegates directly to the native layer.
@Override
public long getLong(long columnIndex, long rowIndex) {
return nativeGetLong(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetLong(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public boolean getBoolean(long columnIndex, long rowIndex) {
return nativeGetBoolean(nativePtr, columnIndex, rowIndex);
}
protected native boolean nativeGetBoolean(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public float getFloat(long columnIndex, long rowIndex) {
return nativeGetFloat(nativePtr, columnIndex, rowIndex);
}
protected native float nativeGetFloat(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public double getDouble(long columnIndex, long rowIndex) {
return nativeGetDouble(nativePtr, columnIndex, rowIndex);
}
protected native double nativeGetDouble(long nativeTablePtr, long columnIndex, long rowIndex);
@Override
public Date getDate(long columnIndex, long rowIndex) {
// Native layer stores dates as seconds since the epoch; Date wants millis.
return new Date(nativeGetDateTime(nativePtr, columnIndex, rowIndex)*1000);
}
protected native long nativeGetDateTime(long nativeTablePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (string )cell.
*
* @param columnIndex
*            0 based index value of the column
* @param rowIndex
*            0 based index of the row.
* @return value of the particular cell
*/
@Override
public String getString(long columnIndex, long rowIndex) {
return nativeGetString(nativePtr, columnIndex, rowIndex);
}
protected native String nativeGetString(long nativePtr, long columnIndex, long rowIndex);
/**
* Get the value of a (binary) cell.
*
* @param columnIndex
*            0 based index value of the cell column
* @param rowIndex
*            0 based index value of the cell row
* @return value of the particular cell.
*/
/*
@Override
public ByteBuffer getBinaryByteBuffer(long columnIndex, long rowIndex) {
return nativeGetByteBuffer(nativePtr, columnIndex, rowIndex);
}
protected native ByteBuffer nativeGetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex);
*/
@Override
public byte[] getBinaryByteArray(long columnIndex, long rowIndex) {
return nativeGetByteArray(nativePtr, columnIndex, rowIndex);
}
protected native byte[] nativeGetByteArray(long nativePtr, long columnIndex, long rowIndex);
@Override
public Mixed getMixed(long columnIndex, long rowIndex) {
return nativeGetMixed(nativePtr, columnIndex, rowIndex);
}
// Returns the runtime type of the value held in a MIXED cell.
@Override
public ColumnType getMixedType(long columnIndex, long rowIndex) {
return ColumnType.fromNativeValue(nativeGetMixedType(nativePtr, columnIndex, rowIndex));
}
protected native int nativeGetMixedType(long nativePtr, long columnIndex, long rowIndex);
protected native Mixed nativeGetMixed(long nativeTablePtr, long columnIndex, long rowIndex);
/**
*
* Note: The subtable returned will have to be closed again after use.
* You can let javas garbage collector handle that or better yet call close()
* after use.
*
* @param columnIndex column index of the cell
* @param rowIndex row index of the cell
* @return TableBase the subtable at the requested cell
*/
@Override
<<<<<<< MINE
public Table getSubtable(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtable(nativePtr, columnIndex, rowIndex), immutable);
=======
public Table getSubTable(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTable(nativePtr, columnIndex, rowIndex);
try {
// Copy context reference from parent
return new Table(context, this, nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
>>>>>>> YOURS
}
protected native long nativeGetSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
// Below version will allow to getSubtable when number of available rows are not updated yet -
// which happens before an insertDone().
<<<<<<< MINE
private Table getSubtableDuringInsert(long columnIndex, long rowIndex) {
return new Table(this, nativeGetSubtableDuringInsert(nativePtr, columnIndex, rowIndex), immutable);
=======
private Table getSubTableDuringInsert(long columnIndex, long rowIndex) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeSubtablePtr = nativeGetSubTableDuringInsert(nativePtr, columnIndex, rowIndex);
try {
return new Table(this.context, this,nativeSubtablePtr, immutable);
}
catch (RuntimeException e) {
nativeClose(nativeSubtablePtr);
throw e;
}
>>>>>>> YOURS
}
private native long nativeGetSubtableDuringInsert(long nativeTablePtr, long columnIndex, long rowIndex);
// Returns the number of rows in the subtable at the given cell without
// instantiating a Java wrapper for it.
public long getSubtableSize(long columnIndex, long rowIndex) {
return nativeGetSubtableSize(nativePtr, columnIndex, rowIndex);
}
protected native long nativeGetSubtableSize(long nativeTablePtr, long columnIndex, long rowIndex);
// Removes all rows from the subtable at the given cell.
public void clearSubtable(long columnIndex, long rowIndex) {
if (immutable) throwImmutable();
nativeClearSubtable(nativePtr, columnIndex, rowIndex);
}
protected native void nativeClearSubtable(long nativeTablePtr, long columnIndex, long rowIndex);
//
// Setters
//
// Each setter overwrites one existing cell; all reject immutable tables.
@Override
public void setLong(long columnIndex, long rowIndex, long value) {
if (immutable) throwImmutable();
nativeSetLong(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetLong(long nativeTablePtr, long columnIndex, long rowIndex, long value);
@Override
public void setBoolean(long columnIndex, long rowIndex, boolean value) {
if (immutable) throwImmutable();
nativeSetBoolean(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetBoolean(long nativeTablePtr, long columnIndex, long rowIndex, boolean value);
@Override
public void setFloat(long columnIndex, long rowIndex, float value) {
if (immutable) throwImmutable();
nativeSetFloat(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetFloat(long nativeTablePtr, long columnIndex, long rowIndex, float value);
@Override
public void setDouble(long columnIndex, long rowIndex, double value) {
if (immutable) throwImmutable();
nativeSetDouble(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetDouble(long nativeTablePtr, long columnIndex, long rowIndex, double value);
@Override
public void setDate(long columnIndex, long rowIndex, Date date) {
if (immutable) throwImmutable();
// Native layer stores dates as seconds since the epoch.
nativeSetDate(nativePtr, columnIndex, rowIndex, date.getTime() / 1000);
}
protected native void nativeSetDate(long nativeTablePtr, long columnIndex, long rowIndex, long dateTimeValue);
@Override
public void setString(long columnIndex, long rowIndex, String value) {
if (value == null)
throw new NullPointerException("Null String is not allowed.");
if (immutable) throwImmutable();
nativeSetString(nativePtr, columnIndex, rowIndex, value);
}
protected native void nativeSetString(long nativeTablePtr, long columnIndex, long rowIndex, String value);
/**
* Sets the value for a (binary) cell.
*
* @param columnIndex
*            column index of the cell
* @param rowIndex
*            row index of the cell
* @param data
*            the ByteBuffer must be allocated with ByteBuffer.allocateDirect(len)
*/
/*
@Override
public void setBinaryByteBuffer(long columnIndex, long rowIndex, ByteBuffer data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null array");
if (data.isDirect())
nativeSetByteBuffer(nativePtr, columnIndex, rowIndex, data);
else
throw new RuntimeException("Currently ByteBuffer must be allocateDirect()."); // FIXME: support other than allocateDirect
}
protected native void nativeSetByteBuffer(long nativeTablePtr, long columnIndex, long rowIndex, ByteBuffer data);
*/
// Sets the value for a (binary) cell from a byte array; data must be non-null.
@Override
public void setBinaryByteArray(long columnIndex, long rowIndex, byte[] data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException("Null Array");
nativeSetByteArray(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetByteArray(long nativePtr, long columnIndex, long rowIndex, byte[] data);
/**
* Sets the value for a (mixed typed) cell.
*
* @param columnIndex
*            column index of the cell
* @param rowIndex
*            row index of the cell
* @param data the Mixed value to store; must be non-null
*/
@Override
public void setMixed(long columnIndex, long rowIndex, Mixed data) {
if (immutable) throwImmutable();
if (data == null)
throw new NullPointerException();
nativeSetMixed(nativePtr, columnIndex, rowIndex, data);
}
protected native void nativeSetMixed(long nativeTablePtr, long columnIndex, long rowIndex, Mixed data);
/**
* Add the value to all cells in the column (in-place integer adjustment).
*
* @param columnIndex column index of the cell
* @param value amount to add to every cell in the column
*/
//!!!TODO: New. Support in highlevel API
@Override
public void adjust(long columnIndex, long value) {
if (immutable) throwImmutable();
nativeAddInt(nativePtr, columnIndex, value);
}
// NOTE(review): first parameter is a table pointer despite the
// `nativeViewPtr` name — looks copy-pasted from TableView; confirm.
protected native void nativeAddInt(long nativeViewPtr, long columnIndex, long value);
// Builds a search index on the column; only string columns are supported.
public void setIndex(long columnIndex) {
if (immutable) throwImmutable();
if (getColumnType(columnIndex) != ColumnType.STRING)
throw new IllegalArgumentException("Index is only supported on string columns.");
nativeSetIndex(nativePtr, columnIndex);
}
protected native void nativeSetIndex(long nativePtr, long columnIndex);
// Returns true if the column has a search index.
public boolean hasIndex(long columnIndex) {
return nativeHasIndex(nativePtr, columnIndex);
}
protected native boolean nativeHasIndex(long nativePtr, long columnIndex);
//
// Aggregate functions
//
// Each aggregate operates over every row of one column and delegates
// directly to the native layer.
// Integers
@Override
public long sumInt(long columnIndex) {
return nativeSumInt(nativePtr, columnIndex);
}
protected native long nativeSumInt(long nativePtr, long columnIndex);
@Override
public long maximumInt(long columnIndex) {
return nativeMaximumInt(nativePtr, columnIndex);
}
protected native long nativeMaximumInt(long nativePtr, long columnIndex);
@Override
public long minimumInt(long columnIndex) {
return nativeMinimumInt(nativePtr, columnIndex);
}
protected native long nativeMinimumInt(long nativePtr, long columnnIndex);
@Override
public double averageInt(long columnIndex) {
return nativeAverageInt(nativePtr, columnIndex);
}
protected native double nativeAverageInt(long nativePtr, long columnIndex);
// Floats
@Override
public double sumFloat(long columnIndex) {
return nativeSumFloat(nativePtr, columnIndex);
}
protected native double nativeSumFloat(long nativePtr, long columnIndex);
@Override
public float maximumFloat(long columnIndex) {
return nativeMaximumFloat(nativePtr, columnIndex);
}
protected native float nativeMaximumFloat(long nativePtr, long columnIndex);
@Override
public float minimumFloat(long columnIndex) {
return nativeMinimumFloat(nativePtr, columnIndex);
}
protected native float nativeMinimumFloat(long nativePtr, long columnnIndex);
@Override
public double averageFloat(long columnIndex) {
return nativeAverageFloat(nativePtr, columnIndex);
}
protected native double nativeAverageFloat(long nativePtr, long columnIndex);
// Doubles
@Override
public double sumDouble(long columnIndex) {
return nativeSumDouble(nativePtr, columnIndex);
}
protected native double nativeSumDouble(long nativePtr, long columnIndex);
@Override
public double maximumDouble(long columnIndex) {
return nativeMaximumDouble(nativePtr, columnIndex);
}
protected native double nativeMaximumDouble(long nativePtr, long columnIndex);
@Override
public double minimumDouble(long columnIndex) {
return nativeMinimumDouble(nativePtr, columnIndex);
}
protected native double nativeMinimumDouble(long nativePtr, long columnnIndex);
@Override
public double averageDouble(long columnIndex) {
return nativeAverageDouble(nativePtr, columnIndex);
}
protected native double nativeAverageDouble(long nativePtr, long columnIndex);
//
// Count
//
// Returns the number of rows whose cell in the column equals `value`.
public long count(long columnIndex, long value) {
return nativeCountLong(nativePtr, columnIndex, value);
}
protected native long nativeCountLong(long nativePtr, long columnIndex, long value);
public long count(long columnIndex, float value) {
return nativeCountFloat(nativePtr, columnIndex, value);
}
protected native long nativeCountFloat(long nativePtr, long columnIndex, float value);
public long count(long columnIndex, double value) {
return nativeCountDouble(nativePtr, columnIndex, value);
}
protected native long nativeCountDouble(long nativePtr, long columnIndex, double value);
@Override
public long count(long columnIndex, String value) {
return nativeCountString(nativePtr, columnIndex, value);
}
protected native long nativeCountString(long nativePtr, long columnIndex, String value);
//
// Searching methods.
//
/**
* Starts a new query against this table.
* @return a TableQuery builder bound to this table.
*/
@Override
public TableQuery where() {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeQueryPtr = nativeWhere(nativePtr);
try {
// Copy context reference from parent
return new TableQuery(this.context, nativeQueryPtr, immutable);
} catch (RuntimeException e) {
// Wrapper construction failed: release the native query to avoid a leak.
TableQuery.nativeClose(nativeQueryPtr);
throw e;
}
}
protected native long nativeWhere(long nativeTablePtr);
// findFirst* methods: return the row index of the first match in the
// column, delegating directly to the native layer.
@Override
public long findFirstLong(long columnIndex, long value) {
return nativeFindFirstInt(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstInt(long nativeTablePtr, long columnIndex, long value);
@Override
public long findFirstBoolean(long columnIndex, boolean value) {
return nativeFindFirstBool(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstBool(long nativePtr, long columnIndex, boolean value);
@Override
public long findFirstFloat(long columnIndex, float value) {
return nativeFindFirstFloat(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstFloat(long nativePtr, long columnIndex, float value);
@Override
public long findFirstDouble(long columnIndex, double value) {
return nativeFindFirstDouble(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstDouble(long nativePtr, long columnIndex, double value);
@Override
public long findFirstDate(long columnIndex, Date date) {
// Native layer stores dates as seconds since the epoch.
return nativeFindFirstDate(nativePtr, columnIndex, date.getTime() / 1000);
}
protected native long nativeFindFirstDate(long nativeTablePtr, long columnIndex, long dateTimeValue);
@Override
public long findFirstString(long columnIndex, String value) {
return nativeFindFirstString(nativePtr, columnIndex, value);
}
protected native long nativeFindFirstString(long nativeTablePtr, long columnIndex, String value);
// findAll* methods: build a native TableView of every row whose column
// `columnIndex` equals `value`. All variants follow the same pattern:
//  1. flush delayed disposals of abandoned native objects,
//  2. allocate the native view,
//  3. wrap it in a TableView -- closing the native view if wrapping fails,
//     so the native allocation never leaks.
@Override
public TableView findAllLong(long columnIndex, long value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllInt(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllInt(long nativePtr, long columnIndex, long value);
@Override
public TableView findAllBoolean(long columnIndex, boolean value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllBool(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllBool(long nativePtr, long columnIndex, boolean value);
@Override
public TableView findAllFloat(long columnIndex, float value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllFloat(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllFloat(long nativePtr, long columnIndex, float value);
@Override
public TableView findAllDouble(long columnIndex, double value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllDouble(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDouble(long nativePtr, long columnIndex, double value);
@Override
public TableView findAllDate(long columnIndex, Date date) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
// Dates are passed to the native layer with second precision (ms / 1000).
long nativeViewPtr = nativeFindAllDate(nativePtr, columnIndex, date.getTime() / 1000);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllDate(long nativePtr, long columnIndex, long dateTimeValue);
@Override
public TableView findAllString(long columnIndex, String value) {
// Execute the disposal of abandoned tightdb objects each time a new tightdb object is created
context.executeDelayedDisposal();
long nativeViewPtr = nativeFindAllString(nativePtr, columnIndex, value);
try {
return new TableView(this.context, nativeViewPtr, immutable);
} catch (RuntimeException e) {
TableView.nativeClose(nativeViewPtr);
throw e;
}
}
protected native long nativeFindAllString(long nativePtr, long columnIndex, String value);
// Requires that the first column is a string column with unique values. An index
// is not required, but searches run faster when one is present.
@Override
public long lookup(String value) {
    // Guard clauses: reject a null key and a non-string first column up front.
    if (value == null) {
        throw new NullPointerException("String must not be null.");
    }
    if (getColumnType(0) != ColumnType.STRING) {
        throw new UnsupportedOperationException("lookup() requires column 0 is a String column.");
    }
    return nativeLookup(nativePtr, value);
}
protected native long nativeLookup(long nativeTablePtr, String value);
// Experimental feature
// Bound searches delegated to the native core; semantics follow the underlying
// lower_bound/upper_bound operations -- presumably they require the column to be
// sorted, as with C++ std::lower_bound/upper_bound (TODO confirm in native code).
@Override
public long lowerBoundLong(long columnIndex, long value) {
return nativeLowerBoundInt(nativePtr, columnIndex, value);
}
@Override
public long upperBoundLong(long columnIndex, long value) {
return nativeUpperBoundInt(nativePtr, columnIndex, value);
}
protected native long nativeLowerBoundInt(long nativePtr, long columnIndex, long value);
protected native long nativeUpperBoundInt(long nativePtr, long columnIndex, long value);
/**
 * Returns a view containing one row per distinct value of the given column.
 * The native view is closed if wrapping it in a TableView fails, so the
 * native allocation never leaks.
 */
public TableView getDistinctView(long columnIndex) {
    // Dispose of abandoned native objects before allocating a new one.
    context.executeDelayedDisposal();
    final long viewPtr = nativeGetDistinctView(nativePtr, columnIndex);
    try {
        return new TableView(context, viewPtr, immutable);
    } catch (RuntimeException e) {
        TableView.nativeClose(viewPtr);
        throw e;
    }
}
protected native long nativeGetDistinctView(long nativePtr, long columnIndex);
/** Asks the native core to optimize the table's internal storage; rejected on immutable tables. */
public void optimize() {
    if (immutable) {
        throwImmutable();
    }
    nativeOptimize(nativePtr);
}
protected native void nativeOptimize(long nativeTablePtr);
// String renderings of the table, all produced by the native core.
@Override
public String toJson() {
return nativeToJson(nativePtr);
}
protected native String nativeToJson(long nativeTablePtr);
// Renders the whole table; INFINITE is the "no row limit" sentinel.
@Override
public String toString() {
return nativeToString(nativePtr, INFINITE);
}
// Renders at most maxRows rows.
@Override
public String toString(long maxRows) {
return nativeToString(nativePtr, maxRows);
}
protected native String nativeToString(long nativeTablePtr, long maxRows);
@Override
public String rowToString(long rowIndex) {
return nativeRowToString(nativePtr, rowIndex);
}
protected native String nativeRowToString(long nativeTablePtr, long rowIndex);
// Shared failure path for mutating calls made while the table is immutable.
private void throwImmutable() {
throw new IllegalStateException("Mutable method call during read transaction.");
}
}
Diff Result
No diff
Case 87 - java_roboguice.rev_bee33_b6d1a..ContextScope.java
Base
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.application.RoboApplication;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author Mike Burton
*/
// Guice custom scope backed by a per-thread map of scoped instances. A single
// scope instance serves the whole application; enter()/exit() bracket a
// context's lifetime on the current thread.
public class ContextScope implements Scope {
// Per-thread storage for scoped objects; null on a thread until enter() is
// called there (getScopedObjectMap() then falls back to a fresh seeded map).
protected final ThreadLocal<Map<Key<Context>, Object>> values = new ThreadLocal<Map<Key<Context>, Object>>();
// Deferred view injections, executed later by injectViews().
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
// Application used as the default Context when no context has been entered.
protected RoboApplication app;
public ContextScope( RoboApplication app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
// The current context is itself a scoped value, keyed by Key.get(Context.class).
map.put(Key.get(Context.class), context);
}
// Discards ALL scoped values for the current thread, not just this context.
@SuppressWarnings({"UnusedParameters"})
public void exit(Context ignored) {
values.remove();
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
// Runs and clears every pending view injection.
public void injectViews() {
// Iterate backwards so remove(i) never shifts an element we still have to visit.
for (int i = viewsForInjection.size() - 1; i >= 0; --i) {
viewsForInjection.remove(i).reallyInjectMembers();
}
}
// Standard Guice scoping: cache the unscoped provider's result in the
// current thread's scope map.
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
@SuppressWarnings({"SuspiciousMethodCalls", "unchecked"})
public T get() {
final Map<Key<Context>, Object> scopedObjects = getScopedObjectMap(key);
@SuppressWarnings("unchecked")
T current = (T) scopedObjects.get(key);
// containsKey distinguishes "not cached yet" from an explicitly seeded null.
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put((Key<Context>) key, current);
}
return current;
}
};
}
@SuppressWarnings({"UnusedParameters"})
protected <T> Map<Key<Context>, Object> getScopedObjectMap(Key<T> key) {
final Map<Key<Context>,Object> map = values.get();
// NOTE(review): when no scope has been entered on this thread, the fallback
// map is NOT stored back into the ThreadLocal, so anything cached into it is
// discarded after each call -- confirm this is intentional.
return map!=null ? map : initialScopedObjectMap();
}
// Fresh fallback map seeded with the application as the current Context.
protected Map<Key<Context>,Object> initialScopedObjectMap() {
final HashMap<Key<Context>,Object> map = new HashMap<Key<Context>,Object>();
map.put(Key.get(Context.class),app);
return map;
}
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.application.RoboApplication;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author Mike Burton
*/
// Guice custom scope backed by a per-thread map of scoped instances. A single
// scope instance serves the whole application; enter()/exit() bracket a
// context's lifetime on the current thread.
public class ContextScope implements Scope {
// Per-thread storage for scoped objects; null on a thread until enter() is
// called there (getScopedObjectMap() then falls back to a fresh seeded map).
protected final ThreadLocal<Map<Key<Context>, Object>> values = new ThreadLocal<Map<Key<Context>, Object>>();
// Deferred view injections, executed later by injectViews().
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
// Application used as the default Context when no context has been entered.
protected RoboApplication app;
public ContextScope( RoboApplication app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
// The current context is itself a scoped value, keyed by Key.get(Context.class).
map.put(Key.get(Context.class), context);
}
// Discards ALL scoped values for the current thread, not just this context.
@SuppressWarnings({"UnusedParameters"})
public void exit(Context ignored) {
values.remove();
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
// Runs and clears every pending view injection.
public void injectViews() {
// Iterate backwards so remove(i) never shifts an element we still have to visit.
for (int i = viewsForInjection.size() - 1; i >= 0; --i) {
viewsForInjection.remove(i).reallyInjectMembers();
}
}
// Standard Guice scoping: cache the unscoped provider's result in the
// current thread's scope map.
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
@SuppressWarnings({"SuspiciousMethodCalls", "unchecked"})
public T get() {
final Map<Key<Context>, Object> scopedObjects = getScopedObjectMap(key);
@SuppressWarnings("unchecked")
T current = (T) scopedObjects.get(key);
// containsKey distinguishes "not cached yet" from an explicitly seeded null.
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put((Key<Context>) key, current);
}
return current;
}
};
}
@SuppressWarnings({"UnusedParameters"})
protected <T> Map<Key<Context>, Object> getScopedObjectMap(Key<T> key) {
final Map<Key<Context>,Object> map = values.get();
// NOTE(review): when no scope has been entered on this thread, the fallback
// map is NOT stored back into the ThreadLocal, so anything cached into it is
// discarded after each call -- confirm this is intentional.
return map!=null ? map : initialScopedObjectMap();
}
// Fresh fallback map seeded with the application as the current Context.
protected Map<Key<Context>,Object> initialScopedObjectMap() {
final HashMap<Key<Context>,Object> map = new HashMap<Key<Context>,Object>();
map.put(Key.get(Context.class),app);
return map;
}
}
Left
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.application.RoboApplication;
import roboguice.util.Ln;
import roboguice.util.Strings;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
// Guice custom scope that keys scoped instances by the current Android Context.
// A per-thread WeakActiveStack tracks which context is "current"; instances are
// cached in a WeakHashMap so a collected context releases its scoped objects.
public class ContextScope implements Scope {

    // Scoped instances keyed by owning context. WeakHashMap lets a context
    // (and its scoped objects) be collected once nothing else references it.
    protected WeakHashMap<Context, Map<Key<?>, Object>> values = new WeakHashMap<Context, Map<Key<?>, Object>>();

    // Per-thread stack of contexts; the top entry is the "current" context.
    protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();

    // Deferred injections performed once the view/preference hierarchy exists.
    protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
    protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();

    public ContextScope(RoboApplication app) {
        // The application sits at the bottom of the stack and is always in scope.
        enter(app);
    }

    public void registerViewForInjection(ViewMembersInjector<?> injector) {
        viewsForInjection.add(injector);
    }

    public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
        preferencesForInjection.add(injector);
    }

    // Runs and clears every pending view injection.
    public void injectViews() {
        // Iterate backwards so remove(i) never shifts an element still to visit.
        for (int i = viewsForInjection.size() - 1; i >= 0; --i)
            viewsForInjection.remove(i).reallyInjectMembers();
    }

    // Runs and clears every pending preference injection.
    public void injectPreferenceViews() {
        for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
            preferencesForInjection.remove(i).reallyInjectMembers();
    }

    // Makes `context` the current context on this thread and seeds its scope map.
    public void enter(Context context) {
        ensureContextStack();
        contextStack.get().push(context);
        // The context itself is a scoped value, keyed by Key.get(Context.class).
        final Key<Context> key = Key.get(Context.class);
        getScopedObjectMap(key).put(key, context);
        if (Ln.isVerboseEnabled()) {
            final WeakHashMap<Context, Map<Key<?>, Object>> map = values;
            if (map != null)
                Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join(", ", map.keySet()));
        }
    }

    // Removes `context` from this thread's stack; its cached values survive
    // until dispose() is called or the context is garbage collected.
    public void exit(Context context) {
        ensureContextStack();
        contextStack.get().remove(context);
    }

    // Drops and clears all scoped values owned by `context`.
    public void dispose(Context context) {
        final WeakHashMap<Context, Map<Key<?>, Object>> map = values;
        if (map != null) {
            final Map<Key<?>, Object> scopedObjects = map.remove(context);
            if (scopedObjects != null)
                scopedObjects.clear();
            if (Ln.isVerboseEnabled())
                Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join(", ", map.keySet()));
        }
    }

    // Standard Guice scoping: cache the unscoped provider's result in the
    // current context's scope map.
    public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
        return new Provider<T>() {
            public T get() {
                Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
                @SuppressWarnings("unchecked")
                T current = (T) scopedObjects.get(key);
                // containsKey distinguishes "absent" from an explicitly seeded null.
                if (current == null && !scopedObjects.containsKey(key)) {
                    current = unscoped.get();
                    scopedObjects.put(key, current);
                }
                return current;
            }
        };
    }

    // Lazily creates this thread's context stack.
    protected void ensureContextStack() {
        if (contextStack.get() == null) {
            contextStack.set(new WeakActiveStack<Context>());
        }
    }

    // Returns (creating if needed) the scope map of the current context.
    protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
        final Context context = contextStack.get().peek();
        Map<Key<?>, Object> scopedObjects = values.get(context);
        if (scopedObjects == null) {
            scopedObjects = Maps.newHashMap();
            values.put(context, scopedObjects);
        }
        return scopedObjects;
    }

    /**
     * A circular, weakly-referencing stack. push() moves an existing value to
     * the top instead of duplicating it; pop() rotates the top to the bottom
     * rather than discarding it. Entries whose referents have been garbage
     * collected are pruned lazily as they are encountered, so callers may see
     * null results even when the stack "looks" non-empty.
     *
     * @param <T> the referent type
     */
    public static class WeakActiveStack<T> {

        // Doubly linked node holding a weak reference to its value.
        static class Node<T> {
            Node<T> previous;
            Node<T> next;
            WeakReference<T> value;

            public Node(T value) {
                this.value = new WeakReference<T>(value);
            }
        }

        private Node<T> head;
        private Node<T> tail;

        /**
         * Pushes the value onto the top of the stack. If the value already
         * exists in the stack it is simply brought to the top.
         */
        public void push(T value) {
            if (head == null) {
                head = new Node<T>(value);
                tail = head;
            } else {
                Node<T> existingNode = findNode(value);
                if (existingNode == null) {
                    // New value: link a fresh node in front of the current head.
                    Node<T> newNode = new Node<T>(value);
                    newNode.next = head;
                    head.previous = newNode;
                    head = newNode;
                } else {
                    if (existingNode == head) return;
                    // Unlink the node from its current position...
                    if (existingNode == tail) {
                        tail = existingNode.previous;
                        tail.next = null;
                    }
                    if (existingNode.previous != null) {
                        existingNode.previous.next = existingNode.next;
                    }
                    if (existingNode.next != null) {
                        existingNode.next.previous = existingNode.previous;
                    }
                    // ...and relink it as the new head.
                    existingNode.next = head;
                    head.previous = existingNode;
                    head = existingNode;
                    head.previous = null;
                }
            }
        }

        /**
         * Returns the top value and rotates it to the bottom of the stack,
         * disposing of collected entries encountered on the way.
         *
         * @return the value at the top of the stack, or null if none remain.
         */
        public T pop() {
            Node<T> node = head;
            while (node != null) {
                final T value = node.value.get();
                if (value == null) {
                    node = disposeOfNode(node);
                } else {
                    if (node.next != null) {
                        // Rotate: the current head becomes the new tail.
                        head = node.next;
                        node.previous = tail;
                        tail.next = node;
                        node.next = null;
                        head.previous = null;
                        tail = node;
                    }
                    return value;
                }
            }
            return null;
        }

        /**
         * Non-destructive read of the item at the top of the stack.
         *
         * @return the first non-collected referent, or null if nothing is available.
         */
        public T peek() {
            Node<T> node = head;
            while (node != null) {
                final T value = node.value.get();
                if (value == null) {
                    node = disposeOfNode(node);
                } else {
                    return value;
                }
            }
            return null;
        }

        /**
         * Removes the value from the stack. Removing a value that is not
         * present (or whose referent was already collected) is a no-op.
         */
        public void remove(T value) {
            final Node<T> node = findNode(value);
            // FIX: findNode() returns null for absent values; previously this
            // fell straight into disposeOfNode(null) and threw an NPE (e.g.
            // exit() called for a context that was never entered).
            if (node != null) {
                disposeOfNode(node);
            }
        }

        /**
         * Unlinks a node, keeping head/tail and neighbor links consistent.
         *
         * @return the next node after the removed one.
         */
        protected Node<T> disposeOfNode(Node<T> node) {
            if (node == head) {
                head = node.next;
                if (head == null) {
                    tail = null;
                } else {
                    head.previous = null;
                }
            }
            if (node.previous != null) {
                node.previous.next = node.next;
            }
            if (node.next != null) {
                node.next.previous = node.previous;
            }
            if (node == tail) {
                tail = node.previous;
                tail.next = null;
            }
            return node.next;
        }

        /**
         * Finds the node holding the given value, pruning collected entries
         * along the way.
         *
         * @return the node if found, or null.
         */
        protected Node<T> findNode(T value) {
            Node<T> node = head;
            while (node != null) {
                final T nodeValue = node.value.get();
                if (nodeValue == null) {
                    node = disposeOfNode(node);
                } else if (nodeValue.equals(value)) {
                    return node;
                } else {
                    node = node.next;
                }
            }
            return null;
        }
    }
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.application.RoboApplication;
import roboguice.util.Ln;
import roboguice.util.Strings;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
// Guice custom scope that keys scoped instances by the current Android Context.
// A per-thread WeakActiveStack tracks which context is "current"; instances are
// cached in a WeakHashMap so a collected context releases its scoped objects.
public class ContextScope implements Scope {

    // Scoped instances keyed by owning context. WeakHashMap lets a context
    // (and its scoped objects) be collected once nothing else references it.
    protected WeakHashMap<Context, Map<Key<?>, Object>> values = new WeakHashMap<Context, Map<Key<?>, Object>>();

    // Per-thread stack of contexts; the top entry is the "current" context.
    protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();

    // Deferred injections performed once the view/preference hierarchy exists.
    protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
    protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();

    public ContextScope(RoboApplication app) {
        // The application sits at the bottom of the stack and is always in scope.
        enter(app);
    }

    public void registerViewForInjection(ViewMembersInjector<?> injector) {
        viewsForInjection.add(injector);
    }

    public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
        preferencesForInjection.add(injector);
    }

    // Runs and clears every pending view injection.
    public void injectViews() {
        // Iterate backwards so remove(i) never shifts an element still to visit.
        for (int i = viewsForInjection.size() - 1; i >= 0; --i)
            viewsForInjection.remove(i).reallyInjectMembers();
    }

    // Runs and clears every pending preference injection.
    public void injectPreferenceViews() {
        for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
            preferencesForInjection.remove(i).reallyInjectMembers();
    }

    // Makes `context` the current context on this thread and seeds its scope map.
    public void enter(Context context) {
        ensureContextStack();
        contextStack.get().push(context);
        // The context itself is a scoped value, keyed by Key.get(Context.class).
        final Key<Context> key = Key.get(Context.class);
        getScopedObjectMap(key).put(key, context);
        if (Ln.isVerboseEnabled()) {
            final WeakHashMap<Context, Map<Key<?>, Object>> map = values;
            if (map != null)
                Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join(", ", map.keySet()));
        }
    }

    // Removes `context` from this thread's stack; its cached values survive
    // until dispose() is called or the context is garbage collected.
    public void exit(Context context) {
        ensureContextStack();
        contextStack.get().remove(context);
    }

    // Drops and clears all scoped values owned by `context`.
    public void dispose(Context context) {
        final WeakHashMap<Context, Map<Key<?>, Object>> map = values;
        if (map != null) {
            final Map<Key<?>, Object> scopedObjects = map.remove(context);
            if (scopedObjects != null)
                scopedObjects.clear();
            if (Ln.isVerboseEnabled())
                Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join(", ", map.keySet()));
        }
    }

    // Standard Guice scoping: cache the unscoped provider's result in the
    // current context's scope map.
    public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
        return new Provider<T>() {
            public T get() {
                Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
                @SuppressWarnings("unchecked")
                T current = (T) scopedObjects.get(key);
                // containsKey distinguishes "absent" from an explicitly seeded null.
                if (current == null && !scopedObjects.containsKey(key)) {
                    current = unscoped.get();
                    scopedObjects.put(key, current);
                }
                return current;
            }
        };
    }

    // Lazily creates this thread's context stack.
    protected void ensureContextStack() {
        if (contextStack.get() == null) {
            contextStack.set(new WeakActiveStack<Context>());
        }
    }

    // Returns (creating if needed) the scope map of the current context.
    protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
        final Context context = contextStack.get().peek();
        Map<Key<?>, Object> scopedObjects = values.get(context);
        if (scopedObjects == null) {
            scopedObjects = Maps.newHashMap();
            values.put(context, scopedObjects);
        }
        return scopedObjects;
    }

    /**
     * A circular, weakly-referencing stack. push() moves an existing value to
     * the top instead of duplicating it; pop() rotates the top to the bottom
     * rather than discarding it. Entries whose referents have been garbage
     * collected are pruned lazily as they are encountered, so callers may see
     * null results even when the stack "looks" non-empty.
     *
     * @param <T> the referent type
     */
    public static class WeakActiveStack<T> {

        // Doubly linked node holding a weak reference to its value.
        static class Node<T> {
            Node<T> previous;
            Node<T> next;
            WeakReference<T> value;

            public Node(T value) {
                this.value = new WeakReference<T>(value);
            }
        }

        private Node<T> head;
        private Node<T> tail;

        /**
         * Pushes the value onto the top of the stack. If the value already
         * exists in the stack it is simply brought to the top.
         */
        public void push(T value) {
            if (head == null) {
                head = new Node<T>(value);
                tail = head;
            } else {
                Node<T> existingNode = findNode(value);
                if (existingNode == null) {
                    // New value: link a fresh node in front of the current head.
                    Node<T> newNode = new Node<T>(value);
                    newNode.next = head;
                    head.previous = newNode;
                    head = newNode;
                } else {
                    if (existingNode == head) return;
                    // Unlink the node from its current position...
                    if (existingNode == tail) {
                        tail = existingNode.previous;
                        tail.next = null;
                    }
                    if (existingNode.previous != null) {
                        existingNode.previous.next = existingNode.next;
                    }
                    if (existingNode.next != null) {
                        existingNode.next.previous = existingNode.previous;
                    }
                    // ...and relink it as the new head.
                    existingNode.next = head;
                    head.previous = existingNode;
                    head = existingNode;
                    head.previous = null;
                }
            }
        }

        /**
         * Returns the top value and rotates it to the bottom of the stack,
         * disposing of collected entries encountered on the way.
         *
         * @return the value at the top of the stack, or null if none remain.
         */
        public T pop() {
            Node<T> node = head;
            while (node != null) {
                final T value = node.value.get();
                if (value == null) {
                    node = disposeOfNode(node);
                } else {
                    if (node.next != null) {
                        // Rotate: the current head becomes the new tail.
                        head = node.next;
                        node.previous = tail;
                        tail.next = node;
                        node.next = null;
                        head.previous = null;
                        tail = node;
                    }
                    return value;
                }
            }
            return null;
        }

        /**
         * Non-destructive read of the item at the top of the stack.
         *
         * @return the first non-collected referent, or null if nothing is available.
         */
        public T peek() {
            Node<T> node = head;
            while (node != null) {
                final T value = node.value.get();
                if (value == null) {
                    node = disposeOfNode(node);
                } else {
                    return value;
                }
            }
            return null;
        }

        /**
         * Removes the value from the stack. Removing a value that is not
         * present (or whose referent was already collected) is a no-op.
         */
        public void remove(T value) {
            final Node<T> node = findNode(value);
            // FIX: findNode() returns null for absent values; previously this
            // fell straight into disposeOfNode(null) and threw an NPE (e.g.
            // exit() called for a context that was never entered).
            if (node != null) {
                disposeOfNode(node);
            }
        }

        /**
         * Unlinks a node, keeping head/tail and neighbor links consistent.
         *
         * @return the next node after the removed one.
         */
        protected Node<T> disposeOfNode(Node<T> node) {
            if (node == head) {
                head = node.next;
                if (head == null) {
                    tail = null;
                } else {
                    head.previous = null;
                }
            }
            if (node.previous != null) {
                node.previous.next = node.next;
            }
            if (node.next != null) {
                node.next.previous = node.previous;
            }
            if (node == tail) {
                tail = node.previous;
                tail.next = null;
            }
            return node.next;
        }

        /**
         * Finds the node holding the given value, pruning collected entries
         * along the way.
         *
         * @return the node if found, or null.
         */
        protected Node<T> findNode(T value) {
            Node<T> node = head;
            while (node != null) {
                final T nodeValue = node.value.get();
                if (nodeValue == null) {
                    node = disposeOfNode(node);
                } else if (nodeValue.equals(value)) {
                    return node;
                } else {
                    node = node.next;
                }
            }
            return null;
        }
    }
}
Right
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected final ThreadLocal<Map<Key<Context>, Object>> values = new ThreadLocal<Map<Key<Context>, Object>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected Application app;
public ContextScope( Application app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
map.put(Key.get(Context.class), context);
}
public void exit(Context ignored) {
values.remove();
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i) {
viewsForInjection.remove(i).reallyInjectMembers();
}
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
/**
* @param <T> is only allowed to be Context
*/
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put((Key<Context>) key, current);
}
return current;
}
};
}
protected Map<Key<Context>,Object> initialScopedObjectMap() {
final HashMap<Key<Context>,Object> map = new HashMap<Key<Context>,Object>();
map.put(Key.get(Context.class),app);
return map;
}
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected final ThreadLocal<Map<Key<Context>, Object>> values = new ThreadLocal<Map<Key<Context>, Object>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected Application app;
public ContextScope( Application app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
map.put(Key.get(Context.class), context);
}
public void exit(Context ignored) {
values.remove();
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i) {
viewsForInjection.remove(i).reallyInjectMembers();
}
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
/**
* @param <T> is only allowed to be Context
*/
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put((Key<Context>) key, current);
}
return current;
}
};
}
protected Map<Key<Context>,Object> initialScopedObjectMap() {
final HashMap<Key<Context>,Object> map = new HashMap<Key<Context>,Object>();
map.put(Key.get(Context.class),app);
return map;
}
}
MergeMethods
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
public ContextScope( Application app ) {
enter(app);
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack <T> {
static class Node <T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
public ContextScope( Application app ) {
enter(app);
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack <T> {
static class Node <T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
KeepBothMethods
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
public ContextScope(RoboApplication app) {
enter(app);
}
public ContextScope( Application app ) {
this.app = app;
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack <T> {
static class Node <T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
public ContextScope(RoboApplication app) {
enter(app);
}
public ContextScope( Application app ) {
this.app = app;
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack <T> {
static class Node <T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
// Safe
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
<<<<<<< MINE
public ContextScope( Application app ) {
this.app = app;
}
=======
public ContextScope(RoboApplication app) {
enter(app);
}
>>>>>>> YOURS
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack <T> {
static class Node <T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import roboguice.util.Ln;
import roboguice.util.Strings;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
<<<<<<< MINE
=======
protected Application app;
>>>>>>> YOURS
<<<<<<< MINE
public ContextScope( Application app ) {
this.app = app;
}
=======
public ContextScope(RoboApplication app) {
enter(app);
}
>>>>>>> YOURS
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
<<<<<<< MINE
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
=======
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
/**
 * Returns a provider for the current {@link Context}, delegating to
 * {@link #scope(Key, Provider)} with an unscoped fallback provider.
 */
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
// Fallback when nothing is scoped yet: the application instance.
return app;
}
});
}
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
}
/** Lazily installs this thread's context stack on first use. */
protected void ensureContextStack() {
    final WeakActiveStack<Context> stack = contextStack.get();
    if (stack == null)
        contextStack.set(new WeakActiveStack<Context>());
}
/**
 * Returns the scoped-object map belonging to the context currently on top of
 * this thread's stack, creating and registering an empty map on first access.
 *
 * NOTE(review): the {@code key} parameter is unused here — the map is chosen
 * purely by the active Context.
 */
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
/**
 * A circular, stack-like structure of weak references.
 *
 * push() never adds a duplicate: if the item already exists it is simply
 * brought to the top. pop() never discards the top item; it rotates it to the
 * bottom, producing a circular-list effect. Use remove() to take an item out
 * explicitly.
 *
 * Values are held via WeakReference; entries whose referents have been
 * collected are pruned lazily as traversal encounters them, so peek()/pop()
 * may return null even though nodes appeared to remain.
 *
 * @param <T> the element type
 */
public static class WeakActiveStack<T> {

    /** Doubly-linked node wrapping its value in a WeakReference. */
    static class Node<T> {
        Node<T> previous;
        Node<T> next;
        WeakReference<T> value;

        public Node(T value) {
            this.value = new WeakReference<T>(value);
        }
    }

    private Node<T> head;
    private Node<T> tail;

    /**
     * Pushes the value onto the top of the stack. If the value is already
     * present, its existing node is moved to the top instead.
     *
     * @param value the value to push
     */
    public void push(T value) {
        if (head == null) {
            // First element: head and tail are the same node.
            head = new Node<T>(value);
            tail = head;
        } else {
            final Node<T> existingNode = findNode(value);
            if (existingNode == null) {
                // Not present: link a fresh node in front of the current head.
                final Node<T> newNode = new Node<T>(value);
                newNode.next = head;
                head.previous = newNode;
                head = newNode;
            } else {
                if (existingNode == head) return; // already on top
                // Unlink the node from its current position...
                if (existingNode == tail) {
                    tail = existingNode.previous;
                    tail.next = null;
                }
                if (existingNode.previous != null) {
                    existingNode.previous.next = existingNode.next;
                }
                if (existingNode.next != null) {
                    existingNode.next.previous = existingNode.previous;
                }
                // ...and splice it in as the new head.
                existingNode.next = head;
                head.previous = existingNode;
                head = existingNode;
                head.previous = null;
            }
        }
    }

    /**
     * Returns the value at the top of the stack and rotates its node to the
     * bottom (pop never shrinks the stack; it cycles it). Nodes whose weak
     * referents were collected are disposed of along the way.
     *
     * @return the top value, or null if nothing live remains
     */
    public T pop() {
        Node<T> node = head;
        while (node != null) {
            final T value = node.value.get();
            if (value == null) {
                node = disposeOfNode(node); // referent collected: prune, keep walking
            } else {
                if (node.next != null) {
                    // Rotate: the current head becomes the new tail.
                    head = node.next;
                    node.previous = tail;
                    tail.next = node;
                    node.next = null;
                    head.previous = null;
                    tail = node;
                }
                return value;
            }
        }
        return null;
    }

    /**
     * Non-destructive read of the item at the top of the stack.
     *
     * @return the first non-collected referent, or null if none is available
     */
    public T peek() {
        Node<T> node = head;
        while (node != null) {
            final T value = node.value.get();
            if (value == null) {
                node = disposeOfNode(node);
            } else {
                return value;
            }
        }
        return null;
    }

    /**
     * Removes the item from the stack. A value that is not present (or whose
     * referent has already been collected) is ignored.
     *
     * @param value the value to remove
     */
    public void remove(T value) {
        final Node<T> node = findNode(value);
        // Fix: findNode() returns null for absent values; the previous code
        // passed that null straight to disposeOfNode(), which dereferences it
        // and threw a NullPointerException.
        if (node != null)
            disposeOfNode(node);
    }

    /**
     * Unlinks a node, keeping head/tail and neighbour links consistent.
     *
     * @param node the node to dispose of (must be non-null)
     * @return the next node in the stack, or null if none
     */
    protected Node<T> disposeOfNode(Node<T> node) {
        if (node == head) {
            head = node.next;
            if (head == null) {
                tail = null; // stack is now empty
            } else {
                head.previous = null;
            }
        }
        if (node.previous != null) {
            node.previous.next = node.next;
        }
        if (node.next != null) {
            node.next.previous = node.previous;
        }
        if (node == tail) {
            tail = node.previous;
            tail.next = null;
        }
        return node.next;
    }

    /**
     * Finds the node holding {@code value}, pruning collected entries as it
     * walks the list.
     *
     * @param value the value to look for
     * @return the node, or null if not found
     */
    protected Node<T> findNode(T value) {
        Node<T> node = head;
        while (node != null) {
            final T nodeValue = node.value.get();
            if (nodeValue == null) {
                node = disposeOfNode(node);
            } else if (nodeValue.equals(value)) {
                return node;
            } else {
                node = node.next;
            }
        }
        return null;
    }
}
}
Unstructured
/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
<<<<<<< MINE
import roboguice.application.RoboApplication;
import roboguice.util.Ln;
import roboguice.util.Strings;
=======
>>>>>>> YOURS
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
<<<<<<< MINE
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
=======
protected Application app;
>>>>>>> YOURS
<<<<<<< MINE
public ContextScope(RoboApplication app) {
enter(app);
=======
public ContextScope( Application app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
map.put(Key.get(Context.class), context);
}
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
/**
* @param <T> is only allowed to be Context
*/
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
<<<<<<< MINE
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
=======
>>>>>>> YOURS
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack<T> {
static class Node<T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}/*
* Copyright 2009 Michael Burton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package roboguice.inject;
/**
* Scopes a single execution of a block of code. Apply this scope with a
* try/finally block: <pre> {@code
*
* scope.enter();
* try {
* // explicitly seed some seed objects...
* scope.seed(Key.get(SomeObject.class), someObject);
* // create and access scoped objects
* } finally {
* scope.exit();
* }
* }</pre>
*
* The scope can be initialized with one or more seed values by calling
* <code>seed(key, value)</code> before the injector will be called upon to
* provide for this key. A typical use is for a servlet filter to enter/exit the
* scope, representing a Request Scope, and seed HttpServletRequest and
* HttpServletResponse. For each key inserted with seed(), it's good practice
* (since you have to provide <i>some</i> binding anyhow) to include a
* corresponding binding that will throw an exception if Guice is asked to
* provide for that key if it was not yet seeded: <pre> {@code
*
* bind(key)
* .toProvider(ContextScope.<KeyClass>seededKeyProvider())
* .in(ScopeAnnotation.class);
* }</pre>
*
* @author Jesse Wilson
* @author Fedor Karpelevitch
*
*
* From http://code.google.com/p/google-guice/wiki/CustomScopes
*/
<<<<<<< MINE
import roboguice.application.RoboApplication;
import roboguice.util.Ln;
import roboguice.util.Strings;
=======
>>>>>>> YOURS
import android.app.Application;
import android.content.Context;
import com.google.inject.Key;
import com.google.inject.Provider;
import com.google.inject.Scope;
import com.google.inject.internal.Maps;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Map;
import java.util.WeakHashMap;
/**
* @author Mike Burton
*/
public class ContextScope implements Scope {
protected WeakHashMap<Context,Map<Key<?>, Object>> values = new WeakHashMap<Context,Map<Key<?>, Object>>();
protected ThreadLocal<WeakActiveStack<Context>> contextStack = new ThreadLocal<WeakActiveStack<Context>>();
protected ArrayList<ViewMembersInjector<?>> viewsForInjection = new ArrayList<ViewMembersInjector<?>>();
<<<<<<< MINE
protected ArrayList<PreferenceMembersInjector<?>> preferencesForInjection = new ArrayList<PreferenceMembersInjector<?>>();
=======
protected Application app;
>>>>>>> YOURS
<<<<<<< MINE
public ContextScope(RoboApplication app) {
enter(app);
=======
public ContextScope( Application app ) {
this.app = app;
}
/**
* Scopes can be entered multiple times with no problems (eg. from
* onCreate(), onStart(), etc). However, once they're closed, all their
* previous values are gone forever until the scope is reinitialized again
* via enter().
*/
public void enter(Context context) {
Map<Key<Context>,Object> map = values.get();
if( map==null ) {
map = new HashMap<Key<Context>,Object>();
values.set(map);
}
map.put(Key.get(Context.class), context);
}
public void exit(Context ignored) {
values.remove();
>>>>>>> YOURS
}
public void registerViewForInjection(ViewMembersInjector<?> injector) {
viewsForInjection.add(injector);
}
public void registerPreferenceForInjection(PreferenceMembersInjector<?> injector) {
preferencesForInjection.add(injector);
}
public void injectViews() {
for (int i = viewsForInjection.size() - 1; i >= 0; --i)
viewsForInjection.remove(i).reallyInjectMembers();
}
public void injectPreferenceViews() {
for (int i = preferencesForInjection.size() - 1; i >= 0; --i)
preferencesForInjection.remove(i).reallyInjectMembers();
}
public void enter(Context context) {
ensureContextStack();
contextStack.get().push(context);
final Key<Context> key = Key.get(Context.class);
getScopedObjectMap(key).put(key, context);
if( Ln.isVerboseEnabled() ) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null )
Ln.v("Contexts in the %s scope map after inserting %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public void exit(Context context) {
ensureContextStack();
contextStack.get().remove(context);
}
public void dispose(Context context) {
final WeakHashMap<Context,Map<Key<?>,Object>> map = values;
if( map!=null ) {
final Map<Key<?>,Object> scopedObjects = map.remove(context);
if( scopedObjects!=null )
scopedObjects.clear();
if( Ln.isVerboseEnabled() )
Ln.v("Contexts in the %s scope map after removing %s: %s", Thread.currentThread().getName(), context, Strings.join( ", ", map.keySet()));
}
}
public Provider<Context> scope() {
return scope(Key.get(Context.class), new Provider<Context>() {
public Context get() {
return app;
}
});
}
/**
* @param <T> is only allowed to be Context
*/
@SuppressWarnings({"SuspiciousMethodCalls","unchecked"})
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
public T get() {
<<<<<<< MINE
Map<Key<?>, Object> scopedObjects = getScopedObjectMap(key);
=======
final Map<Key<Context>,Object> map = values.get();
final Map<Key<Context>, Object> scopedObjects = map != null ? map : initialScopedObjectMap();
>>>>>>> YOURS
T current = (T) scopedObjects.get(key);
if (current == null && !scopedObjects.containsKey(key)) {
current = unscoped.get();
scopedObjects.put(key, current);
}
return current;
}
};
<<<<<<< MINE
}
protected void ensureContextStack() {
if (contextStack.get() == null) {
contextStack.set(new WeakActiveStack<Context>());
}
=======
>>>>>>> YOURS
}
protected <T> Map<Key<?>, Object> getScopedObjectMap(Key<T> key) {
final Context context = contextStack.get().peek();
Map<Key<?>,Object> scopedObjects = values.get(context);
if (scopedObjects == null) {
scopedObjects = Maps.newHashMap();
values.put(context, scopedObjects);
}
return scopedObjects;
}
/**
* A circular stack like structure of weak references.
* Calls to push while not add any new items to stack if the item already exists,
* it will simply bring the item to the top of the stack.
*
* Likewise, pop will not remove the item from the stack, it will simply make the next item
* the top, move the current top to the bottom. Thus creating a circular linked list type effect.
*
* To remove an item explicitly call the remove method.
*
* The stack also holds WeakReferences of T, these references will automatically be removed
* anytime the stack accessed. For performance they are only removed as they are encountered.
*
* So it is possible to get a null value back, even though you thought the stack had items in it.
* @param <T>
*/
public static class WeakActiveStack<T> {
static class Node<T> {
Node<T> previous;
Node<T> next;
WeakReference<T> value;
public Node(T value) {
this.value = new WeakReference<T>(value);
}
}
private Node<T> head;
private Node<T> tail;
/**
* Pushes the value onto the top of the stack.
* If the value exists in the stack it is simply brought to the top.
* @param value
*/
public void push(T value) {
if (head == null) {
head = new Node<T>(value);
tail = head;
} else {
Node<T> existingNode = findNode(value);
if (existingNode == null) {
Node<T> newNode = new Node<T>(value);
newNode.next = head;
head.previous = newNode;
head = newNode;
} else {
if (existingNode == head) return;
if (existingNode == tail) {
tail = existingNode.previous;
tail.next= null;
}
if (existingNode.previous != null) {
existingNode.previous.next = existingNode.next;
}
if (existingNode.next != null) {
existingNode.next.previous = existingNode.previous;
}
existingNode.next = head;
head.previous = existingNode;
head = existingNode;
head.previous = null;
}
}
}
/**
* Pops the first item off the stack, then moves it to the bottom.
* Popping is an infinite operation that will never end, it just keeps moving the top item to the bottom.
* Popping will also dispose of items whose weak references have been collected.
* @return The value of the item at the top of the stack.
*/
public T pop() {
WeakActiveStack.Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
if (node.next != null) {
head = node.next;
node.previous = tail;
tail.next = node;
node.next = null;
head.previous = null;
tail = node;
}
return value;
}
}
return null;
}
/**
* Non destructive read of the item at the top of stack.
* @return the first non collected referent held, or null if nothing is available.
*/
public T peek() {
Node<T> node = head;
while (node != null) {
final T value = node.value.get();
if (value == null) {
node = disposeOfNode(node);
} else {
return value;
}
}
return null;
}
/**
* Removes the item from the stack.
* @param value
*/
public void remove(T value) {
Node<T> node = findNode(value);
disposeOfNode(node);
}
/**
* Removes a node ensuring all links are properly updated.
* @param node
* @return The next node in the stack.
*/
protected Node<T> disposeOfNode(Node<T> node) {
if (node == head) {
head = node.next;
if (head == null) {
tail = null;
} else {
head.previous = null;
}
}
if (node.previous != null) {
node.previous.next = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
}
if (node == tail) {
tail = node.previous;
tail.next = null;
}
return node.next;
}
/**
* Finds a node given a value
* Will dispose of nodes if needed as it iterates the stack.
* @param value
* @return The node if found or null
*/
protected Node<T> findNode(T value) {
Node<T> node = head;
while (node != null) {
final T nodeValue = node.value.get();
if (nodeValue == null) {
node = disposeOfNode(node);
} else if (nodeValue.equals(value)) {
return node;
} else {
node = node.next;
}
}
return null;
}
}
}
Diff Result
No diff
Case 88 - java_roboguice.rev_bee33_b6d1a..EventManager.java
Base
package roboguice.event;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<ObserverReference<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<ObserverReference<?>>>>();
public boolean isEnabled() {
return true;
}
/**
* Registers given method with provided context and event.
*/
/**
 * Registers {@code method} on {@code instance} as an observer of events of
 * class {@code event}, scoped to the given context.
 *
 * @throws RuntimeException if {@code context} is the Application context
 */
public void registerObserver(Context context, Object instance, Method method, Class event) {
if (!isEnabled()) return;
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
// Lazily create the per-context map of event class -> observer set.
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
// The observer holds only a weak reference to the instance (see ObserverReference).
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters all methods observing the given event from the provided context.
*/
/**
 * Unregisters the first observer of {@code event} that was registered by
 * {@code instance} for the given context.
 *
 * Only one matching registration is removed per call (the loop breaks on the
 * first identity match). Observers whose weak referents have been collected
 * are left in place for other code paths to prune.
 */
public void unregisterObserver(Context context, Object instance, Class event) {
    if (!isEnabled()) return;

    final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
    if (methods == null) return;

    final Set<ObserverReference<?>> observers = methods.get(event);
    if (observers == null) return;

    for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
        // Parameterized wildcard type instead of the raw ObserverReference,
        // which triggered an unchecked-conversion warning.
        final ObserverReference<?> observer = iterator.next();
        if (observer != null) {
            // Identity comparison: only the exact registering instance matches.
            final Object registeredInstance = observer.instanceReference.get();
            if (registeredInstance == instance) {
                iterator.remove();
                break;
            }
        }
    }
}
/**
* Clears all observers of the given context.
*/
/**
 * Clears all observers of the given context.
 */
public void clear( Context context ) {
    // Detach the context's observer map in one step; Map.remove() returns the
    // previous mapping, or null if the context was never registered.
    final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.remove(context);
    if (methods != null)
        methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*/
/**
 * Raises the event's class' event on the context supplied by the injected
 * provider (the current context). This event object is passed (if configured)
 * to the registered observer's method.
 */
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context
* @param event
*/
/**
 * Raises the event on the given context, invoking every observer method
 * registered for the event's concrete runtime class.
 *
 * An InvocationTargetException (the observer itself threw) is logged and
 * iteration continues with the remaining observers; IllegalAccessException is
 * treated as a programming error and rethrown as RuntimeException.
 *
 * @param context the context whose observers should be notified
 * @param event the event object passed to each observer
 */
public void fire(Context context, Object event) {
if (!isEnabled()) return;
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
// Observers are looked up by the event's exact class; no superclass or
// interface dispatch happens here.
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (ObserverReference observer : observers) {
try {
observer.invoke(event,null);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
/**
 * No-op EventManager: isEnabled() returns false, which short-circuits
 * registration and event delivery in the enclosing class.
 */
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
/**
 * Pairs an observer method with a weak reference to its receiver, so a
 * registration never prevents the receiver from being garbage collected.
 *
 * @param <ResultType> the observer method's return type
 */
public static class ObserverReference<ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
// Allow invoking non-public observer methods.
method.setAccessible(true);
}
/**
 * Invokes the observer method with {@code event}, or returns
 * {@code defaultValue} if the receiver has already been collected.
 */
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
}
package roboguice.event;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
// Base revision of EventManager (pre-merge snapshot; compare the "Left"/"Right"
// sections below). Dispatches event objects to reflectively registered observer
// methods, with registrations scoped per Android Context.
public class EventManager {
@Inject protected Provider<Context> contextProvider;
// WeakHashMap: a Context key is held weakly, so all of its registrations
// become collectible once the Context itself is garbage.
protected Map<Context, Map<Class<?>, Set<ObserverReference<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<ObserverReference<?>>>>();
// Overridden by NullEventManager to turn every mutator/fire into a no-op.
public boolean isEnabled() {
return true;
}
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if (!isEnabled()) return;
// The Application context outlives activities; registering on it would
// keep observers alive past their intended lifetime.
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
// NOTE(review): raw ObserverReference construction (and raw Class parameter
// above) -- unchecked, masked by the class-level @SuppressWarnings.
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters all methods observing the given event from the provided context.
*/
public void unregisterObserver(Context context, Object instance, Class event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
// NOTE(review): only the FIRST matching observer is removed (break),
// despite the javadoc saying "all methods" -- confirm intent.
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*/
public void clear( Context context ) {
// NOTE(review): unlike registerObserver/unregisterObserver/fire, clear()
// does not consult isEnabled() -- likely harmless, but inconsistent.
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context
* @param event
*/
public void fire(Context context, Object event) {
if (!isEnabled()) return;
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
// Dispatch only to observers registered for the event's exact runtime class;
// supertype registrations are not matched.
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (ObserverReference observer : observers) {
try {
// Observer exceptions are logged and dispatch continues with the
// remaining observers; access failures are programming errors and rethrow.
observer.invoke(event,null);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
// Null-object variant: disables all eventing via the isEnabled() hook.
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
// One reflectively-invokable observer method plus a weak reference to its
// receiver, so registration alone never keeps the receiver alive.
public static class ObserverReference<ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
// Accessibility is forced once at registration so invoke() never fails
// visibility checks later.
method.setAccessible(true);
}
// Returns defaultValue when the receiver has already been garbage-collected.
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
}
Left
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
// "Left" revision of EventManager: introduces the EventListener abstraction
// (ObserverMethodListener wraps the reflective method case) instead of the
// earlier ObserverReference-only design.
public class EventManager {
@Inject protected Provider<Context> contextProvider;
// WeakHashMap: registrations die with their Context key.
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
// Overridden by NullEventManager to turn unregister/fire into no-ops.
public boolean isEnabled() {
return true;
}
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(), event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
// NOTE(review): unlike unregisterObserver/fire, this mutator does not check
// isEnabled() -- a NullEventManager would still accept registrations.
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
// Identity comparison on purpose: remove the exact listener object, not
// one that merely equals() it.
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Unregister all methods observing the given event from the provided context.
*
* @param context associated with event
* @param instance to be unregistered
* @param event observed
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
// Only method-based listeners carry an instance; plain EventListeners are skipped.
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
// NOTE(review): removes only the first matching listener (break),
// despite the javadoc saying "all methods" -- confirm intent.
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
// NOTE(review): no isEnabled() gate here, unlike the other mutators.
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
// Matches on the event's exact runtime class only; supertype registrations
// do not receive the event.
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
// Null-object variant: disables eventing via the isEnabled() hook.
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
// EventListener that invokes a single observer method reflectively, holding
// its receiver only weakly.
public static class ObserverMethodListener<T> implements EventListener<T> {
// Method name + javassist-style descriptor; used as the stable part of
// equals()/hashCode() so the same method registered twice dedupes in a Set.
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
// Accessibility forced once at construction so onEvent() never fails
// visibility checks later.
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
// Receiver was collected: warn rather than throw, since the stale
// registration is a caller-side cleanup problem.
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
// NOTE(review): hashCode depends on the weakly-referenced instance, so the
// hash CHANGES once the referent is collected -- a listener stored in a
// HashSet can then become unfindable/unremovable. Consider hashing on an
// identity snapshot taken at construction instead.
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
Right
package roboguice.event;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
// "Right" revision of EventManager. Unlike the base revision, this one has NO
// isEnabled() hook and no NullEventManager: registration/firing cannot be
// globally disabled (the commented-out notifyWithResult below still references
// isEnabled(), a leftover from the other revision).
public class EventManager {
@Inject protected Provider<Context> contextProvider;
// WeakHashMap: a Context's registrations become collectible with the Context.
protected Map<Context, Map<Class<?>, Set<ObserverReference<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<ObserverReference<?>>>>();
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
// The long-lived Application context would leak observers.
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
// NOTE(review): raw ObserverReference construction, masked by the
// class-level @SuppressWarnings.
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters all methods observing the given event from the provided context.
*/
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
// NOTE(review): removes only the first match (break) despite the
// javadoc saying "all methods" -- confirm intent.
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*/
public void clear( Context context ) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context
* @param event
*/
public void fire(Context context, Object event) {
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
// Exact runtime class match only; supertype registrations are not fired.
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (ObserverReference observer : observers) {
try {
// Observer failures are logged and dispatch continues; access errors
// are programming errors and rethrow.
observer.invoke(event,null);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
// One reflectively-invokable observer method plus a weak reference to its
// receiver, so registration alone never keeps the receiver alive.
public static class ObserverReference<ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
// Accessibility forced once here so invoke() never fails visibility checks.
method.setAccessible(true);
}
// Returns defaultValue when the receiver has been garbage-collected.
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
}
package roboguice.event;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<ObserverReference<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<ObserverReference<?>>>>();
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters all methods observing the given event from the provided context.
*/
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*/
public void clear( Context context ) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context
* @param event
*/
public void fire(Context context, Object event) {
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (ObserverReference observer : observers) {
try {
observer.invoke(event,null);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
/**
 * Holds a weak reference to an observer instance together with the observer
 * method to reflectively invoke when an event fires. The weak reference lets
 * the observing object (typically an Android component) be garbage collected
 * without an explicit unregister call.
 */
public static class ObserverReference<ResultType> {
    protected Method method;
    protected WeakReference<Object> instanceReference;

    public ObserverReference(Object instance, Method method) {
        this.instanceReference = new WeakReference<Object>(instance);
        this.method = method;
        // Allow invocation of non-public observer methods.
        method.setAccessible(true);
    }

    /**
     * Invokes the observer method with the event as its single argument.
     * Returns {@code defaultValue} when the weakly-referenced instance has
     * already been garbage collected.
     */
    public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
        final Object target = instanceReference.get();
        if (target == null) {
            return defaultValue;
        }
        return (ResultType) method.invoke(target, event);
    }
}
}
MergeMethods
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
public static class ObserverReference <ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
method.setAccessible(true);
}
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
>>>>>>> YOURS
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Registers given method with provided context and event.
*
* @param context the context to associate the observer with; must not be the
*                Application context
* @param instance the object whose method is invoked (held weakly)
* @param method the method to invoke when the event fires
* @param event the event class observed
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
// NOTE(review): merge artifact — the `registrations` field in this copy is
// declared with Set<EventListener<?>> values, so this ObserverReference-typed
// assignment does not type-check; also clashes (same erasure) with the
// registerObserver(Context, Object, Method, Class<T>) overload below. Needs
// a proper merge resolution.
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters the first observer found whose (weakly referenced) instance is
* identical ({@code ==}) to the given instance, for the given event on the
* provided context. Note: despite unregistering methods in bulk being implied
* elsewhere, the {@code break} below removes at most ONE matching observer.
*
* @param context the context the observer was registered under
* @param instance the observing instance to remove
* @param event the observed event class
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
// NOTE(review): merge artifact — `registrations` in this copy stores
// Set<EventListener<?>> values, so this ObserverReference-typed lookup does
// not type-check; needs a proper merge resolution.
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
// Identity comparison: only the exact registered instance matches.
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* Lazily creates the per-context and per-event collections on first use.
*
* @param context associated with event; must not be the Application context
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
    if( context instanceof Application )
        throw new RuntimeException("You may not register event handlers on the Application context");
    Map<Class<?>, Set<EventListener<?>>> eventsToListeners = registrations.get(context);
    if (eventsToListeners == null) {
        eventsToListeners = new HashMap<Class<?>, Set<EventListener<?>>>();
        registrations.put(context, eventsToListeners);
    }
    Set<EventListener<?>> listeners = eventsToListeners.get(event);
    if (listeners == null) {
        listeners = new HashSet<EventListener<?>>();
        eventsToListeners.put(event, listeners);
    }
    listeners.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* Removes at most one listener: the first element identical ({@code ==}) to
* the given listener.
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
// NOTE(review): isEnabled() is not defined in this copy of the class outside
// an unresolved conflict block — presumably lost in the merge; confirm.
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
/**
* EventListener adapter that reflectively invokes a method on a weakly
* referenced observer instance. The weak reference lets the observing object
* be garbage collected without an explicit unregister call.
*/
public static class ObserverMethodListener <T> implements EventListener<T> {
// Method name plus JVM-style descriptor; used as the equality key.
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
// Allow invocation of non-public observer methods.
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
// Referent already collected: warn instead of failing the dispatch.
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
// Observer exceptions are logged and swallowed.
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
// NOTE(review): hashCode depends on the weak referent, so it changes once the
// referent is collected — a listener stored in a HashSet may then become
// unreachable by lookup; confirm this is acceptable here.
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
public static class ObserverReference <ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
method.setAccessible(true);
}
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
>>>>>>> YOURS
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters the first observer whose (weakly referenced) instance is
* identical ({@code ==}) to the given instance for the given event on the
* provided context. The {@code break} below means at most ONE matching
* observer is removed per call.
*
* @param context the context the observer was registered under
* @param instance the observing instance to remove
* @param event the observed event class
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
// NOTE(review): merge artifact — `registrations` in this copy stores
// Set<EventListener<?>> values, so this ObserverReference-typed lookup does
// not type-check; needs a proper merge resolution.
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
public static class ObserverMethodListener <T> implements EventListener<T> {
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
KeepBothMethods
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
public static class ObserverReference <ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
method.setAccessible(true);
}
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
>>>>>>> YOURS
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters the first observer whose (weakly referenced) instance is
* identical ({@code ==}) to the given instance for the given event on the
* provided context; the {@code break} removes at most one observer.
*
* @param context the context the observer was registered under
* @param instance the observing instance to remove
* @param event the observed event class
*/
public void unregisterObserver(Context context, Object instance, Class event) {
// NOTE(review): merge artifact — `registrations` in this copy stores
// Set<EventListener<?>> values, so this ObserverReference-typed lookup does
// not type-check; also this raw-Class overload clashes (same erasure) with
// the Class<T> overload later in this copy. Needs a proper merge resolution.
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
 * Removes the given listener's registration for the event class, if present.
 * Matching is by identity: only the exact listener instance that was
 * registered is removed. No-op when eventing is disabled.
 *
 * @param context associated with event
 * @param event observed
 * @param listener to be unregistered
 * @param <T> event type
 */
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
    if (!isEnabled()) return;

    final Map<Class<?>, Set<EventListener<?>>> byEventClass = registrations.get(context);
    if (byEventClass == null) return;

    final Set<EventListener<?>> listeners = byEventClass.get(event);
    if (listeners == null) return;

    final Iterator<EventListener<?>> it = listeners.iterator();
    while (it.hasNext()) {
        if (it.next() == listener) {
            it.remove();
            break;
        }
    }
}
/**
 * Removes the first method observer registered for the given event whose target
 * is the given instance (identity comparison). Plain EventListeners are left
 * untouched. No-op when eventing is disabled.
 *
 * @param context associated with event
 * @param instance to be unregistered
 * @param event observed
 */
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
    if (!isEnabled()) return;

    final Map<Class<?>, Set<EventListener<?>>> byEventClass = registrations.get(context);
    if (byEventClass == null) return;

    final Set<EventListener<?>> listeners = byEventClass.get(event);
    if (listeners == null) return;

    final Iterator<EventListener<?>> it = listeners.iterator();
    while (it.hasNext()) {
        final EventListener candidate = it.next();
        // Only reflective observers carry a target instance to compare against.
        if (!(candidate instanceof ObserverMethodListener)) continue;
        if (((ObserverMethodListener) candidate).instanceReference.get() == instance) {
            it.remove();
            break;
        }
    }
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
/**
 * EventListener that reflectively invokes one observer method on a target instance.
 * The target is held only through a WeakReference, so a context that has been
 * destroyed can still be garbage-collected even if its observers were never
 * explicitly unregistered.
 */
public static class ObserverMethodListener <T> implements EventListener<T> {
// Identity of the observed method: name plus JVM-style descriptor (built below).
protected String descriptor;
// The observer method to invoke when an event fires.
protected Method method;
// Weak reference to the object the method is invoked on.
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
// Allow invoking non-public observer methods.
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
// Target was collected: the context is gone but the listener was never unregistered.
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
// Observer threw: log and continue so one bad observer does not break dispatch.
Ln.e(e);
} catch (IllegalAccessException e) {
// Should be impossible after setAccessible(true); treat as a programming error.
throw new RuntimeException(e);
}
}
// Two listeners are equal when they wrap the same method descriptor AND the same
// (still-reachable) target instance. Note: if the weak target is collected, the
// result can change over time — a known hazard of weak-keyed equality.
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
public static class ObserverReference <ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
method.setAccessible(true);
}
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
>>>>>>> YOURS
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Registers given method with provided context and event.
*/
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
/**
* Unregisters all methods observing the given event from the provided context.
*/
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Unregister all methods observing the given event from the provided context.
*
* @param context associated with event
* @param instance to be unregistered
* @param event observed
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
public static class ObserverMethodListener <T> implements EventListener<T> {
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
// ===== "Safe" (resolved) version of roboguice.event.EventManager follows =====
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
<<<<<<< MINE
=======
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* A result handler can be provided to deal with the return values from the invoked observer methods.
*
* @param context
* @param event
*/
/*
// Disabled for now until we can figure out best way to proceed
public <ResultType> ResultType notifyWithResult(Context context, Object event, ResultType defaultValue ) {
if (!isEnabled()) return defaultValue;
if( event.getClass().getAnnotation(Returns.class)==null )
throw new RuntimeException("You must use fire with events that do not expect return values");
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return defaultValue;
final Set<ObserverReference<?>> observers = methods.get(event.getClass());
if (observers == null) return defaultValue;
for (ObserverReference<?> o : observers) {
final ObserverReference<ResultType> observer = (ObserverReference<ResultType>) o;
try {
return observer.invoke( event, defaultValue);
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return defaultValue;
}
*/
public static class ObserverReference <ResultType> {
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverReference(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
method.setAccessible(true);
}
public ResultType invoke(Object event, ResultType defaultValue ) throws InvocationTargetException, IllegalAccessException {
final Object instance = instanceReference.get();
return instance == null ? defaultValue : (ResultType) method.invoke(instance, event);
}
}
>>>>>>> YOURS
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Registers given method with provided context and event.
*/
<<<<<<< MINE
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
}
=======
>>>>>>> YOURS
/**
* Unregisters all methods observing the given event from the provided context.
*/
<<<<<<< MINE
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
=======
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
>>>>>>> YOURS
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
public static class ObserverMethodListener <T> implements EventListener<T> {
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
=======
>>>>>>> YOURS
// Merge conflict resolved: the ObserverReference helper class and the disabled
// notifyWithResult() experiment were superseded by the EventListener /
// ObserverMethodListener design used throughout this class; the dead code is
// intentionally removed rather than kept as commented-out history.
// Supplies the "current" Context used by the context-less convenience overloads.
@Inject protected Provider<Context> contextProvider;
// Listener registry per context, keyed by event class; weak keys let a destroyed
// Context be garbage-collected along with its observer sets.
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
 * Registers {@code listener} for {@code event} on the current context, i.e. the
 * one supplied by the injected context provider.
 *
 * @param event to observe
 * @param listener to be triggered
 * @param <T> event type
 */
public <T> void registerObserver( Class<T> event, EventListener listener ) {
    final Context currentContext = contextProvider.get();
    registerObserver(currentContext, event, listener);
}
/**
 * Registers a reflective method observer on the current context, i.e. the one
 * supplied by the injected context provider.
 *
 * @param instance to be called
 * @param method to be called
 * @param event observed
 * @param <T> event type
 */
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
    final Context currentContext = contextProvider.get();
    registerObserver(currentContext, instance, method, event);
}
/**
 * Unregisters {@code listener} from {@code event} on the current context, i.e.
 * the one supplied by the injected context provider.
 *
 * @param event observed
 * @param listener to be unregistered
 * @param <T> event type
 */
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
    final Context currentContext = contextProvider.get();
    unregisterObserver(currentContext, event, listener);
}
/**
 * Unregisters the given instance's method observer for {@code event} on the
 * current context, i.e. the one supplied by the injected context provider.
 *
 * @param instance to be unregistered
 * @param event observed
 * @param <T> event type
 */
public <T> void unregisterObserver(Object instance, Class<T> event) {
    final Context currentContext = contextProvider.get();
    unregisterObserver(currentContext, instance, event);
}
// Merge conflict resolved: the raw-Class, ObserverReference-based
// registerObserver(Context, Object, Method, Class) was removed.  It would clash
// (identical erasure) with the generic EventListener-based
// registerObserver(Context, Object, Method, Class<T>) overload this class declares,
// and the ObserverReference design it depended on has been superseded.
/**
* Unregisters all methods observing the given event from the provided context.
*/
<<<<<<< MINE
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<ObserverReference<?>> iterator = observers.iterator(); iterator.hasNext();) {
ObserverReference observer = iterator.next();
if (observer != null) {
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
=======
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
>>>>>>> YOURS
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(),event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
public static class ObserverMethodListener <T> implements EventListener<T> {
protected String descriptor;
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
Unstructured
package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
<<<<<<< MINE
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
=======
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
>>>>>>> YOURS
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
<<<<<<< MINE
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(), event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Unregister all methods observing the given event from the provided context.
*
* @param context associated with event
* @param instance to be unregistered
* @param event observed
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
public static class ObserverMethodListener<T> implements EventListener<T> {
protected String descriptor;
=======
public static class ObserverReference<ResultType> {
>>>>>>> YOURS
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}package roboguice.event;
import roboguice.event.javaassist.RuntimeSupport;
import roboguice.util.Ln;
import android.app.Application;
import android.content.Context;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
/**
* Manager class handling the following:
*
* Registration of event observing methods:
* registerObserver()
* unregisterObserver()
* clear()
* Raising Events:
* fire()
* notifyWithResult()
*
* @author Adam Tybor
* @author John Ericksen
*/
@SuppressWarnings({"unchecked"})
@Singleton
public class EventManager {
@Inject protected Provider<Context> contextProvider;
protected Map<Context, Map<Class<?>, Set<EventListener<?>>>> registrations = new WeakHashMap<Context, Map<Class<?>, Set<EventListener<?>>>>();
/**
* Register an observer EventListener with the current context (provided).
*
* @param event to observe
* @param listener to be triggered
* @param <T> event type
*/
<<<<<<< MINE
public <T> void registerObserver( Class<T> event, EventListener listener ) {
registerObserver(contextProvider.get(),event,listener);
=======
public void registerObserver(Context context, Object instance, Method method, Class event) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<ObserverReference<?>>>();
registrations.put(context, methods);
}
Set<ObserverReference<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<ObserverReference<?>>();
methods.put(event, observers);
}
/*
final Returns returns = (Returns) event.getAnnotation(Returns.class);
if( returns!=null ) {
if( !returns.value().isAssignableFrom(method.getReturnType()) )
throw new RuntimeException( String.format("Method %s.%s does not return a value that is assignable to %s",method.getDeclaringClass().getName(),method.getName(),returns.value().getName()) );
if( !observers.isEmpty() ) {
final ObserverReference observer = observers.iterator().next();
throw new RuntimeException( String.format("Only one observer allowed for event types that return a value annotation. Previously registered observer is %s.%s", observer.method.getDeclaringClass().getName(), observer.method.getName()));
}
}
*/
observers.add(new ObserverReference(instance, method));
>>>>>>> YOURS
}
/**
* Register a method observer with the current context (provided).
*
* @param instance to be called
* @param method to be called
* @param event observed
* @param <T> event type
*/
<<<<<<< MINE
public <T> void registerObserver(Object instance, Method method, Class<T> event) {
registerObserver(contextProvider.get(),instance,method,event);
}
/**
* Unregister the given EventListener with the current context (provided).
*
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Class<T> event, EventListener<T> listener ) {
unregisterObserver(contextProvider.get(),event,listener);
}
/**
* Unregister the given event from the current context (provided).
*
* @param instance to be unregistered
* @param event observed
* @param <T> event type
*/
public <T> void unregisterObserver(Object instance, Class<T> event) {
unregisterObserver(contextProvider.get(),instance,event);
}
/**
* Raises the event's class' event on the current context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param event observed
*/
public void fire( Object event ) {
fire(contextProvider.get(), event);
}
/**
* Register the given EventListener to the contest and event class.
*
* @param context associated with event
* @param event observed
* @param listener to be triggered
* @param <T> event type
*/
public <T> void registerObserver( Context context, Class<T> event, EventListener listener ) {
if( context instanceof Application )
throw new RuntimeException("You may not register event handlers on the Application context");
Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) {
methods = new HashMap<Class<?>, Set<EventListener<?>>>();
registrations.put(context, methods);
}
Set<EventListener<?>> observers = methods.get(event);
if (observers == null) {
observers = new HashSet<EventListener<?>>();
methods.put(event, observers);
}
observers.add(listener);
}
/**
* Registers given method with provided context and event.
*
* @param context associated with event
* @param instance to be called
* @param method to be called
* @param event observed
*/
public <T> void registerObserver(Context context, Object instance, Method method, Class<T> event) {
registerObserver(context, event, new ObserverMethodListener<T>(instance, method));
}
/**
* Unregisters the provided event listener from the given event
*
* @param context associated with event
* @param event observed
* @param listener to be unregistered
* @param <T> event type
*/
public <T> void unregisterObserver(Context context, Class<T> event, EventListener<T> listener ) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
public void unregisterObserver(Context context, Object instance, Class event) {
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener registeredListener = iterator.next();
if (registeredListener == listener) {
iterator.remove();
break;
}
}
}
/**
* Unregister all methods observing the given event from the provided context.
*
* @param context associated with event
* @param instance to be unregistered
* @param event observed
*/
public <T> void unregisterObserver(Context context, Object instance, Class<T> event) {
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event);
if (observers == null) return;
for (Iterator<EventListener<?>> iterator = observers.iterator(); iterator.hasNext();) {
final EventListener listener = iterator.next();
if( listener instanceof ObserverMethodListener ) {
final ObserverMethodListener observer = ((ObserverMethodListener)listener);
final Object registeredInstance = observer.instanceReference.get();
if (registeredInstance == instance) {
iterator.remove();
break;
}
}
}
}
/**
* Clears all observers of the given context.
*
* @param context associated with event
*/
public void clear( Context context ) {
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
if (methods == null) return;
registrations.remove(context);
methods.clear();
}
/**
* Raises the event's class' event on the given context. This event object is passed (if configured) to the
* registered observer's method.
*
* @param context associated with event
* @param event observed
*/
public void fire(Context context, Object event) {
<<<<<<< MINE
if (!isEnabled()) return;
final Map<Class<?>, Set<EventListener<?>>> methods = registrations.get(context);
=======
/*
if( event.getClass().getAnnotation(Returns.class)!=null )
throw new RuntimeException("You must use notifyWithResult for events that expect return values");
*/
final Map<Class<?>, Set<ObserverReference<?>>> methods = registrations.get(context);
>>>>>>> YOURS
if (methods == null) return;
final Set<EventListener<?>> observers = methods.get(event.getClass());
if (observers == null) return;
for (EventListener observer : observers)
observer.onEvent(event);
}
<<<<<<< MINE
public static class NullEventManager extends EventManager {
@Override
public boolean isEnabled() {
return false;
}
}
public static class ObserverMethodListener<T> implements EventListener<T> {
protected String descriptor;
=======
public static class ObserverReference<ResultType> {
>>>>>>> YOURS
protected Method method;
protected WeakReference<Object> instanceReference;
public ObserverMethodListener(Object instance, Method method) {
this.instanceReference = new WeakReference<Object>(instance);
this.method = method;
this.descriptor = method.getName() + ':' + RuntimeSupport.makeDescriptor(method);
method.setAccessible(true);
}
public void onEvent(T event) {
try {
final Object instance = instanceReference.get();
if (instance != null) {
method.invoke(instance, event);
} else {
Ln.w("trying to observe event %1$s on disposed context, consider explicitly calling EventManager.unregisterObserver", method.getName());
}
} catch (InvocationTargetException e) {
Ln.e(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final ObserverMethodListener that = (ObserverMethodListener) o;
if (descriptor != null ? !descriptor.equals(that.descriptor) : that.descriptor != null) return false;
final Object thisInstance = instanceReference.get();
final Object thatInstance = that.instanceReference.get();
return !(thisInstance != null ? !thisInstance.equals(thatInstance) : thatInstance != null);
}
@Override
public int hashCode() {
int result = descriptor != null ? descriptor.hashCode() : 0;
final Object thisInstance = instanceReference.get();
result = 31 * result + (thisInstance != null ? thisInstance.hashCode() : 0);
return result;
}
}
}
Diff Result
No diff
Case 89 - java_rxjava.rev_af383_8a388..Subject.java
Base
package rx.subjects;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import junit.framework.Assert;
import org.junit.Test;
import rx.Notification;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.AtomicObservableSubscription;
import rx.util.SynchronizedObserver;
import rx.util.functions.Action1;
import rx.util.functions.Func1;
public class Subject<T> extends Observable<T> implements Observer<T> {
public static <T> Subject<T> create() {
final ConcurrentHashMap<Subscription, Observer<T>> observers = new ConcurrentHashMap<Subscription, Observer<T>>();
Func1<Observer<T>, Subscription> onSubscribe = new Func1<Observer<T>, Subscription>() {
@Override
public Subscription call(Observer<T> observer) {
final AtomicObservableSubscription subscription = new AtomicObservableSubscription();
subscription.wrap(new Subscription() {
@Override
public void unsubscribe() {
// on unsubscribe remove it from the map of outbound observers to notify
observers.remove(subscription);
}
});
// on subscribe add it to the map of outbound observers to notify
observers.put(subscription, new SynchronizedObserver<T>(observer, subscription));
return subscription;
}
};
return new Subject<T>(onSubscribe, observers);
}
private final ConcurrentHashMap<Subscription, Observer<T>> observers;
protected Subject(Func1<Observer<T>, Subscription> onSubscribe, ConcurrentHashMap<Subscription, Observer<T>> observers) {
super(onSubscribe);
this.observers = observers;
}
@Override
public void onCompleted() {
for (Observer<T> observer : observers.values()) {
observer.onCompleted();
}
}
@Override
public void onError(Exception e) {
for (Observer<T> observer : observers.values()) {
observer.onError(e);
}
}
@Override
public void onNext(T args) {
for (Observer<T> observer : observers.values()) {
observer.onNext(args);
}
}
public static class UnitTest {
@Test
public void test() {
Subject<Integer> subject = Subject.create();
final AtomicReference<List<Notification<String>>> actualRef = new AtomicReference<List<Notification<String>>>();
Observable<List<Notification<Integer>>> wNotificationsList = subject.materialize().toList();
wNotificationsList.subscribe(new Action1<List<Notification<String>>>() {
@Override
public void call(List<Notification<String>> actual) {
actualRef.set(actual);
}
});
Subscription sub = Observable.create(new Func1<Observer<Integer>, Subscription>() {
@Override
public Subscription call(final Observer<Integer> observer) {
final AtomicBoolean stop = new AtomicBoolean(false);
new Thread() {
@Override
public void run() {
int i = 1;
while (!stop.get()) {
observer.onNext(i++);
}
observer.onCompleted();
}
}.start();
return new Subscription() {
@Override
public void unsubscribe() {
stop.set(true);
}
};
}
}).subscribe(subject);
// the subject has received an onComplete from the first subscribe because
// it is synchronous and the next subscribe won't do anything.
Observable.toObservable(-1, -2, -3).subscribe(subject);
List<Notification<Integer>> expected = new ArrayList<Notification<Integer>>();
expected.add(new Notification<Integer>(-1));
expected.add(new Notification<Integer>(-2));
expected.add(new Notification<Integer>(-3));
expected.add(new Notification<Integer>());
Assert.assertTrue(actualRef.get().containsAll(expected));
sub.unsubscribe();
}
}
}
package rx.subjects;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import junit.framework.Assert;
import org.junit.Test;
import rx.Notification;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.AtomicObservableSubscription;
import rx.util.SynchronizedObserver;
import rx.util.functions.Action1;
import rx.util.functions.Func1;
public class Subject<T> extends Observable<T> implements Observer<T> {
public static <T> Subject<T> create() {
final ConcurrentHashMap<Subscription, Observer<T>> observers = new ConcurrentHashMap<Subscription, Observer<T>>();
Func1<Observer<T>, Subscription> onSubscribe = new Func1<Observer<T>, Subscription>() {
@Override
public Subscription call(Observer<T> observer) {
final AtomicObservableSubscription subscription = new AtomicObservableSubscription();
subscription.wrap(new Subscription() {
@Override
public void unsubscribe() {
// on unsubscribe remove it from the map of outbound observers to notify
observers.remove(subscription);
}
});
// on subscribe add it to the map of outbound observers to notify
observers.put(subscription, new SynchronizedObserver<T>(observer, subscription));
return subscription;
}
};
return new Subject<T>(onSubscribe, observers);
}
private final ConcurrentHashMap<Subscription, Observer<T>> observers;
protected Subject(Func1<Observer<T>, Subscription> onSubscribe, ConcurrentHashMap<Subscription, Observer<T>> observers) {
super(onSubscribe);
this.observers = observers;
}
@Override
public void onCompleted() {
for (Observer<T> observer : observers.values()) {
observer.onCompleted();
}
}
@Override
public void onError(Exception e) {
for (Observer<T> observer : observers.values()) {
observer.onError(e);
}
}
@Override
public void onNext(T args) {
for (Observer<T> observer : observers.values()) {
observer.onNext(args);
}
}
public static class UnitTest {
@Test
public void test() {
Subject<Integer> subject = Subject.create();
final AtomicReference<List<Notification<String>>> actualRef = new AtomicReference<List<Notification<String>>>();
Observable<List<Notification<Integer>>> wNotificationsList = subject.materialize().toList();
wNotificationsList.subscribe(new Action1<List<Notification<String>>>() {
@Override
public void call(List<Notification<String>> actual) {
actualRef.set(actual);
}
});
Subscription sub = Observable.create(new Func1<Observer<Integer>, Subscription>() {
@Override
public Subscription call(final Observer<Integer> observer) {
final AtomicBoolean stop = new AtomicBoolean(false);
new Thread() {
@Override
public void run() {
int i = 1;
while (!stop.get()) {
observer.onNext(i++);
}
observer.onCompleted();
}
}.start();
return new Subscription() {
@Override
public void unsubscribe() {
stop.set(true);
}
};
}
}).subscribe(subject);
// the subject has received an onComplete from the first subscribe because
// it is synchronous and the next subscribe won't do anything.
Observable.toObservable(-1, -2, -3).subscribe(subject);
List<Notification<Integer>> expected = new ArrayList<Notification<Integer>>();
expected.add(new Notification<Integer>(-1));
expected.add(new Notification<Integer>(-2));
expected.add(new Notification<Integer>(-3));
expected.add(new Notification<Integer>());
Assert.assertTrue(actualRef.get().containsAll(expected));
sub.unsubscribe();
}
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.subjects;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.functions.Func1;
public abstract class Subject<T, R> extends Observable<R> implements Observer<T> {
protected Subject(Func1<Observer<R>, Subscription> onSubscribe) {
super(onSubscribe);
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.subjects;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.functions.Func1;
public abstract class Subject<T, R> extends Observable<R> implements Observer<T> {
protected Subject(Func1<Observer<R>, Subscription> onSubscribe) {
super(onSubscribe);
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.subjects;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.functions.Func1;
<<<<<<< MINE
public abstract class Subject<I, O> extends Observable<O> implements Observer<I> {
protected Subject()
{
super();
}
protected Subject(Func1<Observer<O>, Subscription> onSubscribe)
{
=======
public abstract class Subject<T, R> extends Observable<R> implements Observer<T> {
protected Subject(Func1<Observer<R>, Subscription> onSubscribe) {
>>>>>>> YOURS
super(onSubscribe);
}
}/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.subjects;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.util.functions.Func1;
<<<<<<< MINE
public abstract class Subject<I, O> extends Observable<O> implements Observer<I> {
protected Subject()
{
super();
}
protected Subject(Func1<Observer<O>, Subscription> onSubscribe)
{
=======
public abstract class Subject<T, R> extends Observable<R> implements Observer<T> {
protected Subject(Func1<Observer<R>, Subscription> onSubscribe) {
>>>>>>> YOURS
super(onSubscribe);
}
}
Diff Result
No diff
Case 90 - java_voldemort.rev_6efd9_d55c7..ForceFailStore.java
Base
package voldemort.store;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
public ForceFailStore(Store<K, V> innerStore) {
super(innerStore);
}
}
package voldemort.store;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
public ForceFailStore(Store<K, V> innerStore) {
super(innerStore);
}
}
Left
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}
Right
package voldemort.store;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
}
package voldemort.store;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
}
MergeMethods
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V, T> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if (fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().get(key);
}
}
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V, T> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if (fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().get(key);
}
}
KeepBothMethods
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if (fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().get(key);
}
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
}
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if (fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if (fail)
throw e;
return getInnerStore().get(key);
}
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
}
Safe
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore <K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
<<<<<<< MINE
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
=======
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
>>>>>>> YOURS
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}
package voldemort.store;
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore <K, V, T> extends DelegatingStore<K, V, T> {
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
<<<<<<< MINE
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
}
=======
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
}
>>>>>>> YOURS
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}
Unstructured
package voldemort.store;
<<<<<<< MINE
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
=======
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
>>>>>>> YOURS
<<<<<<< MINE
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
=======
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
>>>>>>> YOURS
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}package voldemort.store;
<<<<<<< MINE
import voldemort.VoldemortException;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import java.util.List;
import java.util.Map;
public class ForceFailStore<K, V> extends DelegatingStore<K, V> {
=======
public class ForceFailStore<K, V, T> extends DelegatingStore<K, V, T> {
>>>>>>> YOURS
<<<<<<< MINE
private final VoldemortException e;
private final Object identifier;
private volatile boolean fail = false;
public ForceFailStore(Store<K, V> innerStore) {
this(innerStore, new VoldemortException("Operation failed!"));
=======
public ForceFailStore(Store<K, V, T> innerStore) {
super(innerStore);
>>>>>>> YOURS
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e) {
this(innerStore, e, "unknown");
}
public ForceFailStore(Store<K, V> innerStore, VoldemortException e, Object identifier) {
super(innerStore);
this.e = e;
this.identifier = identifier;
}
public void setFail(boolean fail) {
this.fail = fail;
}
public Object getIdentifier() {
return identifier;
}
@Override
public void put(K key, Versioned<V> value) throws VoldemortException {
if(fail)
throw e;
getInnerStore().put(key, value);
}
@Override
public boolean delete(K key, Version version) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().delete(key, version);
}
@Override
public Map<K, List<Versioned<V>>> getAll(Iterable<K> keys) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().getAll(keys);
}
@Override
public List<Versioned<V>> get(K key) throws VoldemortException {
if(fail)
throw e;
return getInnerStore().get(key);
}
}
Diff Result
No diff
Case 91 - java_voldemort.rev_6efd9_d55c7..NonblockingStore.java
Base
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys, NonblockingStoreCallback callback);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys, NonblockingStoreCallback callback);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
Left
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
public void close() throws VoldemortException;
}/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
public void close() throws VoldemortException;
}
Right
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
MergeMethods
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
KeepBothMethods
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetAllRequest(Iterable<ByteArray> keys, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitPutRequest(ByteArray key, Versioned<byte[]> value, NonblockingStoreCallback callback, long timeoutMs);
public void submitDeleteRequest(ByteArray key, Version version, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys, Map<ByteArray, byte[]> transforms, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key, Versioned<byte[]> value, byte[] transforms, NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
public interface NonblockingStore {
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetAllRequest(Iterable<ByteArray> keys, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetVersionsRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
public void submitPutRequest(ByteArray key, Versioned<byte[]> value, NonblockingStoreCallback callback, long timeoutMs);
public void submitDeleteRequest(ByteArray key, Version version, NonblockingStoreCallback callback, long timeoutMs);
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
public void submitGetAllRequest(Iterable<ByteArray> keys, Map<ByteArray, byte[]> transforms, NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key, Versioned<byte[]> value, byte[] transforms, NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
Safe
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetAllRequest(Iterable<ByteArray> keys,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
<<<<<<< MINE
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
byte[] transforms,
NonblockingStoreCallback callback);
public void close() throws VoldemortException;
}
Unstructured
/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
<<<<<<< MINE
NonblockingStoreCallback callback,
long timeoutMs);
=======
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
<<<<<<< MINE
NonblockingStoreCallback callback,
long timeoutMs);
=======
byte[] transforms,
NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
public void close() throws VoldemortException;
}/*
* Copyright 2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.nonblockingstore;
import java.util.Map;
import voldemort.VoldemortException;
import voldemort.store.Store;
import voldemort.store.routed.RoutedStore;
import voldemort.store.socket.SocketStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A NonblockingStore mimics the {@link Store} interface but instead of blocking
* for the request to complete, it simply submits it for later processing and
* returns immediately. When the request is processed, the provided
* {@link NonblockingStoreCallback callback} is invoked in order to provide
* interested parties with the results of the request.
*
* <p/>
*
* At this point, the NonblockingStore is used from within the
* {@link RoutedStore} in order to provide asynchronous processing against
* multiple remote stores in parallel using a single thread approach.
*
* <p/>
*
* There are two main implementations:
*
* <ol>
* <li>{@link ThreadPoolBasedNonblockingStoreImpl} wraps a "blocking"
* {@link Store} inside a thread pool to provide a submit-and-return style of
* asynchronous request/response. This is useful for the case where the Store
* implementation is not based on {@link SocketStore} and as such cannot use
* NIO-based non-blocking networking.
* <li>{@link SocketStore} uses NIO to submit a request to the networking layer
* which will process <b>all</b> requests to remote servers in a dedicated
* thread.
* </ol>
*
* @see Store
* @see ThreadPoolBasedNonblockingStoreImpl
* @see SocketStore
* @see NonblockingStoreCallback
* @see RoutedStore
*/
public interface NonblockingStore {
<<<<<<< MINE
public void submitGetRequest(ByteArray key, NonblockingStoreCallback callback, long timeoutMs);
=======
public void submitGetRequest(ByteArray key, byte[] transforms, NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetAllRequest(Iterable<ByteArray> keys,
<<<<<<< MINE
NonblockingStoreCallback callback,
long timeoutMs);
=======
Map<ByteArray, byte[]> transforms,
NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitGetVersionsRequest(ByteArray key,
NonblockingStoreCallback callback,
long timeoutMs);
public void submitPutRequest(ByteArray key,
Versioned<byte[]> value,
<<<<<<< MINE
NonblockingStoreCallback callback,
long timeoutMs);
=======
byte[] transforms,
NonblockingStoreCallback callback);
>>>>>>> YOURS
public void submitDeleteRequest(ByteArray key,
Version version,
NonblockingStoreCallback callback,
long timeoutMs);
public void close() throws VoldemortException;
}
Diff Result
No diff
Case 92 - java_voldemort.rev_6efd9_d55c7..PipelineRoutedStore.java
Base
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
/**
 * A Store which multiplexes requests to different internal Stores.
 *
 * Each public operation (get, getAll, getVersions, put, delete) is run as an
 * event-driven {@link Pipeline}: actions are registered against events and the
 * pipeline executes until it reaches Event.COMPLETED (or the routing timeout
 * fires). Parallel phases go through the per-node non-blocking stores; serial
 * fallback phases use the blocking innerStores inherited from RoutedStore.
 */
public class PipelineRoutedStore extends RoutedStore {

    /** Non-blocking (NIO-backed) client per node, keyed by node id. */
    private final Map<Integer, NonblockingStore> nonblockingStores;

    /** Zone this client lives in; handed to the node-configuration actions. */
    private Zone clientZone;

    /** True iff the store definition uses RoutingStrategyType.ZONE_STRATEGY. */
    private boolean zoneRoutingEnabled;

    /**
     * Create a PipelineRoutedStore
     *
     * @param name The name of the store
     * @param innerStores The mapping of node to blocking client
     * @param nonblockingStores The mapping of node to non-blocking client
     * @param cluster Cluster metadata
     * @param storeDef Store definition
     * @param repairReads Is read repair enabled?
     * @param clientZoneId Zone id the client is in
     * @param timeoutMs Routing timeout in milliseconds
     * @param failureDetector Failure detector to record node availability
     */
    public PipelineRoutedStore(String name,
                               Map<Integer, Store<ByteArray, byte[]>> innerStores,
                               Map<Integer, NonblockingStore> nonblockingStores,
                               Cluster cluster,
                               StoreDefinition storeDef,
                               boolean repairReads,
                               int clientZoneId,
                               long timeoutMs,
                               FailureDetector failureDetector) {
        super(name,
              innerStores,
              cluster,
              storeDef,
              repairReads,
              timeoutMs,
              failureDetector,
              SystemTime.INSTANCE);
        this.clientZone = cluster.getZoneById(clientZoneId);
        // Zone routing is enabled purely by the configured strategy type.
        zoneRoutingEnabled = storeDef.getRoutingStrategyType()
                                     .compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0;
        // Defensive concurrent copy: the map is read from pipeline actions.
        this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    }

    /**
     * Fetch the versioned values for a key, first in parallel against the
     * preferred nodes and then serially (and, if zone routing is on, via the
     * zone-serial fallback) until required reads are satisfied.
     */
    public List<Versioned<byte[]>> get(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
        // Request object used by the parallel (non-blocking) phase.
        NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitGetRequest(key, callback);
            }
        };
        // Request object used by the serial (blocking) fallback phases.
        StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

            public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
                return store.get(key);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                        Event.CONFIGURED,
                                                                                                                        failureDetector,
                                                                                                                        storeDef.getRequiredReads(),
                                                                                                                        routingStrategy,
                                                                                                                        key,
                                                                                                                        clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                 repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                            : Event.COMPLETED,
                                                                                                                                 key,
                                                                                                                                 failureDetector,
                                                                                                                                 storeDef.getPreferredReads(),
                                                                                                                                 storeDef.getRequiredReads(),
                                                                                                                                 timeoutMs,
                                                                                                                                 nonblockingStores,
                                                                                                                                 nonblockingStoreRequest,
                                                                                                                                 Event.INSUFFICIENT_SUCCESSES,
                                                                                                                                 Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                               repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                          : Event.COMPLETED,
                                                                                                                               key,
                                                                                                                               failureDetector,
                                                                                                                               innerStores,
                                                                                                                               storeDef.getPreferredReads(),
                                                                                                                               storeDef.getRequiredReads(),
                                                                                                                               blockingStoreRequest,
                                                                                                                               null));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                    new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                               Event.COMPLETED,
                                                                                               storeDef.getPreferredReads(),
                                                                                               nonblockingStores,
                                                                                               readRepairer));
        if(zoneRoutingEnabled)
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                    new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                       repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                                  : Event.COMPLETED,
                                                                                                                                       key,
                                                                                                                                       failureDetector,
                                                                                                                                       innerStores,
                                                                                                                                       blockingStoreRequest));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        // Flatten all non-null node responses into one result list.
        List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
        for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
            List<Versioned<byte[]>> value = response.getValue();
            if(value != null)
                results.addAll(value);
        }
        return results;
    }

    /**
     * Batched get: parallel phase first, then a serial pass over any keys
     * that did not reach their required read count.
     */
    public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
            throws VoldemortException {
        StoreUtils.assertValidKeys(keys);
        GetAllPipelineData pipelineData = new GetAllPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                                new GetAllConfigureNodes(pipelineData,
                                                         Event.CONFIGURED,
                                                         failureDetector,
                                                         storeDef.getPreferredReads(),
                                                         storeDef.getRequiredReads(),
                                                         routingStrategy,
                                                         keys,
                                                         clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelGetAllRequests(pipelineData,
                                                                  Event.INSUFFICIENT_SUCCESSES,
                                                                  failureDetector,
                                                                  timeoutMs,
                                                                  nonblockingStores));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialGetAllRequests(pipelineData,
                                                                repairReads ? Event.RESPONSES_RECEIVED
                                                                           : Event.COMPLETED,
                                                                keys,
                                                                failureDetector,
                                                                innerStores,
                                                                storeDef.getPreferredReads(),
                                                                storeDef.getRequiredReads()));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                    new GetAllReadRepair(pipelineData,
                                                         Event.COMPLETED,
                                                         storeDef.getPreferredReads(),
                                                         nonblockingStores,
                                                         readRepairer));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        return pipelineData.getResult();
    }

    /**
     * Fetch only the versions stored under a key (no values); purely a
     * parallel phase with no serial fallback.
     */
    public List<Version> getVersions(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS,
                                               timeoutMs,
                                               TimeUnit.MILLISECONDS);
        NonblockingStoreRequest storeRequest = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitGetVersionsRequest(key, callback);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                    Event.CONFIGURED,
                                                                                                    failureDetector,
                                                                                                    storeDef.getRequiredReads(),
                                                                                                    routingStrategy,
                                                                                                    key,
                                                                                                    clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                             Event.COMPLETED,
                                                                                                             key,
                                                                                                             failureDetector,
                                                                                                             storeDef.getPreferredReads(),
                                                                                                             storeDef.getRequiredReads(),
                                                                                                             timeoutMs,
                                                                                                             nonblockingStores,
                                                                                                             storeRequest,
                                                                                                             null,
                                                                                                             null));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        List<Version> results = new ArrayList<Version>();
        for(Response<ByteArray, List<Version>> response: pipelineData.getResponses()) {
            List<Version> value = response.getValue();
            // FIX: guard against null payloads, consistent with get(); the
            // original called results.addAll(response.getValue()) and would
            // NPE on a null response value.
            if(value != null)
                results.addAll(value);
        }
        return results;
    }

    /**
     * Delete the given version of a key. Returns true if any node reported a
     * successful delete.
     */
    public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
        NonblockingStoreRequest nonblockingDelete = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitDeleteRequest(key, version, callback);
            }
        };
        StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {

            public Boolean request(Store<ByteArray, byte[]> store) {
                return store.delete(key, version);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                        Event.CONFIGURED,
                                                                                        failureDetector,
                                                                                        storeDef.getRequiredWrites(),
                                                                                        routingStrategy,
                                                                                        key,
                                                                                        clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                                 Event.COMPLETED,
                                                                                                 key,
                                                                                                 failureDetector,
                                                                                                 storeDef.getPreferredWrites(),
                                                                                                 storeDef.getRequiredWrites(),
                                                                                                 timeoutMs,
                                                                                                 nonblockingStores,
                                                                                                 nonblockingDelete,
                                                                                                 Event.INSUFFICIENT_SUCCESSES,
                                                                                                 Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                               Event.COMPLETED,
                                                                                               key,
                                                                                               failureDetector,
                                                                                               innerStores,
                                                                                               storeDef.getPreferredWrites(),
                                                                                               storeDef.getRequiredWrites(),
                                                                                               blockingDelete,
                                                                                               null));
        if(zoneRoutingEnabled)
            // BUG FIX: the original completed this action with
            // "repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED"
            // (copy-pasted from get()), but the delete pipeline registers no
            // action for RESPONSES_RECEIVED — there is no read repair on
            // deletes — so with read repair enabled the pipeline would stall
            // until the routing timeout. Complete with COMPLETED
            // unconditionally.
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                    new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                                       Event.COMPLETED,
                                                                                                       key,
                                                                                                       failureDetector,
                                                                                                       innerStores,
                                                                                                       blockingDelete));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
            if(response.getValue().booleanValue())
                return true;
        }
        return false;
    }

    /**
     * Write a value: a serial phase determines the master and writes to it,
     * then the remaining writes are issued in parallel, and finally the
     * vector clock is incremented.
     */
    public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        PutPipelineData pipelineData = new PutPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        pipelineData.setStartTimeNs(System.nanoTime());
        Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                          Event.CONFIGURED,
                                                                          failureDetector,
                                                                          storeDef.getRequiredWrites(),
                                                                          routingStrategy,
                                                                          key,
                                                                          clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformSerialPutRequests(pipelineData,
                                                             Event.COMPLETED,
                                                             key,
                                                             failureDetector,
                                                             innerStores,
                                                             storeDef.getRequiredWrites(),
                                                             versioned,
                                                             time,
                                                             Event.MASTER_DETERMINED));
        pipeline.addEventAction(Event.MASTER_DETERMINED,
                                new PerformParallelPutRequests(pipelineData,
                                                               Event.RESPONSES_RECEIVED,
                                                               key,
                                                               failureDetector,
                                                               storeDef.getPreferredWrites(),
                                                               storeDef.getRequiredWrites(),
                                                               timeoutMs,
                                                               nonblockingStores));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                             Event.COMPLETED,
                                                                             versioned,
                                                                             time));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
    }

    /**
     * Close all non-blocking stores (remembering the last failure, if any)
     * and then the parent's resources; rethrows the remembered failure.
     */
    @Override
    public void close() {
        VoldemortException exception = null;
        for(NonblockingStore store: nonblockingStores.values()) {
            try {
                store.close();
            } catch(VoldemortException e) {
                // Keep closing the remaining stores; surface the last error.
                exception = e;
            }
        }
        if(exception != null)
            throw exception;
        super.close();
    }
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
/**
 * A Store which multiplexes requests to different internal Stores.
 *
 * Each public operation (get, getAll, getVersions, put, delete) runs as an
 * event-driven Pipeline: actions are registered against events and the
 * pipeline executes until Event.COMPLETED (or the routing timeout fires).
 * Parallel phases use the per-node non-blocking stores; serial fallback
 * phases use the blocking innerStores inherited from RoutedStore.
 */
public class PipelineRoutedStore extends RoutedStore {

    // Non-blocking (NIO-backed) client per node, keyed by node id.
    private final Map<Integer, NonblockingStore> nonblockingStores;

    // Zone this client lives in; handed to the node-configuration actions.
    private Zone clientZone;

    // True iff the store definition uses RoutingStrategyType.ZONE_STRATEGY.
    private boolean zoneRoutingEnabled;

    /**
     * Create a PipelineRoutedStore
     *
     * @param name The name of the store
     * @param innerStores The mapping of node to client
     * @param routingStrategy The strategy for choosing a node given a key
     * @param requiredReads The minimum number of reads that must complete
     *        before the operation will return
     * @param requiredWrites The minimum number of writes that must complete
     *        before the operation will return
     * @param threadPool The threadpool to use
     */
    public PipelineRoutedStore(String name,
                               Map<Integer, Store<ByteArray, byte[]>> innerStores,
                               Map<Integer, NonblockingStore> nonblockingStores,
                               Cluster cluster,
                               StoreDefinition storeDef,
                               boolean repairReads,
                               int clientZoneId,
                               long timeoutMs,
                               FailureDetector failureDetector) {
        super(name,
              innerStores,
              cluster,
              storeDef,
              repairReads,
              timeoutMs,
              failureDetector,
              SystemTime.INSTANCE);
        this.clientZone = cluster.getZoneById(clientZoneId);
        // Zone routing is enabled purely by the configured strategy type.
        if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
            zoneRoutingEnabled = true;
        } else {
            zoneRoutingEnabled = false;
        }
        // Defensive concurrent copy: the map is read from pipeline actions.
        this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    }

    /**
     * Fetch the versioned values for a key: parallel phase first, then a
     * serial fallback (and a zone-serial fallback when zone routing is on)
     * until the required read count is satisfied.
     */
    public List<Versioned<byte[]>> get(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
        // Request object used by the parallel (non-blocking) phase.
        NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitGetRequest(key, callback);
            }
        };
        // Request object used by the serial (blocking) fallback phases.
        StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

            public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
                return store.get(key);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                        Event.CONFIGURED,
                                                                                                                        failureDetector,
                                                                                                                        storeDef.getRequiredReads(),
                                                                                                                        routingStrategy,
                                                                                                                        key,
                                                                                                                        clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                 repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                            : Event.COMPLETED,
                                                                                                                                 key,
                                                                                                                                 failureDetector,
                                                                                                                                 storeDef.getPreferredReads(),
                                                                                                                                 storeDef.getRequiredReads(),
                                                                                                                                 timeoutMs,
                                                                                                                                 nonblockingStores,
                                                                                                                                 nonblockingStoreRequest,
                                                                                                                                 Event.INSUFFICIENT_SUCCESSES,
                                                                                                                                 Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                               repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                          : Event.COMPLETED,
                                                                                                                               key,
                                                                                                                               failureDetector,
                                                                                                                               innerStores,
                                                                                                                               storeDef.getPreferredReads(),
                                                                                                                               storeDef.getRequiredReads(),
                                                                                                                               blockingStoreRequest,
                                                                                                                               null));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                    new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                               Event.COMPLETED,
                                                                                               storeDef.getPreferredReads(),
                                                                                               nonblockingStores,
                                                                                               readRepairer));
        if(zoneRoutingEnabled)
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                    new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                       repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                                  : Event.COMPLETED,
                                                                                                                                       key,
                                                                                                                                       failureDetector,
                                                                                                                                       innerStores,
                                                                                                                                       blockingStoreRequest));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        // Flatten all non-null node responses into a single result list.
        List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
        for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
            List<Versioned<byte[]>> value = response.getValue();
            if(value != null)
                results.addAll(value);
        }
        return results;
    }

    /**
     * Batched get: parallel phase first, then a serial pass over any keys
     * that did not reach their required read count.
     */
    public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
            throws VoldemortException {
        StoreUtils.assertValidKeys(keys);
        GetAllPipelineData pipelineData = new GetAllPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                                new GetAllConfigureNodes(pipelineData,
                                                         Event.CONFIGURED,
                                                         failureDetector,
                                                         storeDef.getPreferredReads(),
                                                         storeDef.getRequiredReads(),
                                                         routingStrategy,
                                                         keys,
                                                         clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelGetAllRequests(pipelineData,
                                                                  Event.INSUFFICIENT_SUCCESSES,
                                                                  failureDetector,
                                                                  timeoutMs,
                                                                  nonblockingStores));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialGetAllRequests(pipelineData,
                                                                repairReads ? Event.RESPONSES_RECEIVED
                                                                           : Event.COMPLETED,
                                                                keys,
                                                                failureDetector,
                                                                innerStores,
                                                                storeDef.getPreferredReads(),
                                                                storeDef.getRequiredReads()));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                    new GetAllReadRepair(pipelineData,
                                                         Event.COMPLETED,
                                                         storeDef.getPreferredReads(),
                                                         nonblockingStores,
                                                         readRepairer));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        return pipelineData.getResult();
    }

    /**
     * Fetch only the versions stored under a key (no values); a single
     * parallel phase with no serial fallback.
     */
    public List<Version> getVersions(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS,
                                               timeoutMs,
                                               TimeUnit.MILLISECONDS);
        NonblockingStoreRequest storeRequest = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitGetVersionsRequest(key, callback);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                    Event.CONFIGURED,
                                                                                                    failureDetector,
                                                                                                    storeDef.getRequiredReads(),
                                                                                                    routingStrategy,
                                                                                                    key,
                                                                                                    clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                             Event.COMPLETED,
                                                                                                             key,
                                                                                                             failureDetector,
                                                                                                             storeDef.getPreferredReads(),
                                                                                                             storeDef.getRequiredReads(),
                                                                                                             timeoutMs,
                                                                                                             nonblockingStores,
                                                                                                             storeRequest,
                                                                                                             null,
                                                                                                             null));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        List<Version> results = new ArrayList<Version>();
        // NOTE(review): unlike get(), response.getValue() is not null-checked
        // here — looks NPE-prone if a response can carry a null value; verify
        // against PerformParallelRequests' contract.
        for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
            results.addAll(response.getValue());
        return results;
    }

    /**
     * Delete the given version of a key. Returns true if any node reported a
     * successful delete.
     */
    public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
        NonblockingStoreRequest nonblockingDelete = new NonblockingStoreRequest() {

            public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
                store.submitDeleteRequest(key, version, callback);
            }
        };
        StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {

            public Boolean request(Store<ByteArray, byte[]> store) {
                return store.delete(key, version);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                        Event.CONFIGURED,
                                                                                        failureDetector,
                                                                                        storeDef.getRequiredWrites(),
                                                                                        routingStrategy,
                                                                                        key,
                                                                                        clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                                 Event.COMPLETED,
                                                                                                 key,
                                                                                                 failureDetector,
                                                                                                 storeDef.getPreferredWrites(),
                                                                                                 storeDef.getRequiredWrites(),
                                                                                                 timeoutMs,
                                                                                                 nonblockingStores,
                                                                                                 nonblockingDelete,
                                                                                                 Event.INSUFFICIENT_SUCCESSES,
                                                                                                 Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                                new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                               Event.COMPLETED,
                                                                                               key,
                                                                                               failureDetector,
                                                                                               innerStores,
                                                                                               storeDef.getPreferredWrites(),
                                                                                               storeDef.getRequiredWrites(),
                                                                                               blockingDelete,
                                                                                               null));
        if(zoneRoutingEnabled)
            // NOTE(review): this completes with RESPONSES_RECEIVED when
            // repairReads is set, but the delete pipeline registers no action
            // for RESPONSES_RECEIVED (no read repair on deletes) — suspect
            // this should be Event.COMPLETED unconditionally; confirm against
            // Pipeline's handling of unregistered events.
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                    new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                                                                                                       repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                  : Event.COMPLETED,
                                                                                                       key,
                                                                                                       failureDetector,
                                                                                                       innerStores,
                                                                                                       blockingDelete));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
            if(response.getValue().booleanValue())
                return true;
        }
        return false;
    }

    /**
     * Write a value: a serial phase determines the master and writes to it,
     * then the remaining writes go out in parallel, and finally the vector
     * clock is incremented.
     */
    public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        PutPipelineData pipelineData = new PutPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        pipelineData.setStartTimeNs(System.nanoTime());
        Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                                new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                          Event.CONFIGURED,
                                                                          failureDetector,
                                                                          storeDef.getRequiredWrites(),
                                                                          routingStrategy,
                                                                          key,
                                                                          clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                                new PerformSerialPutRequests(pipelineData,
                                                             Event.COMPLETED,
                                                             key,
                                                             failureDetector,
                                                             innerStores,
                                                             storeDef.getRequiredWrites(),
                                                             versioned,
                                                             time,
                                                             Event.MASTER_DETERMINED));
        pipeline.addEventAction(Event.MASTER_DETERMINED,
                                new PerformParallelPutRequests(pipelineData,
                                                               Event.RESPONSES_RECEIVED,
                                                               key,
                                                               failureDetector,
                                                               storeDef.getPreferredWrites(),
                                                               storeDef.getRequiredWrites(),
                                                               timeoutMs,
                                                               nonblockingStores));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                             Event.COMPLETED,
                                                                             versioned,
                                                                             time));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
    }

    /**
     * Close every non-blocking store (continuing past failures, remembering
     * the last one) and then the parent's resources; rethrows the remembered
     * failure, if any.
     */
    @Override
    public void close() {
        VoldemortException exception = null;
        for(NonblockingStore store: nonblockingStores.values()) {
            try {
                store.close();
            } catch(VoldemortException e) {
                // Keep closing the remaining stores; surface the last error.
                exception = e;
            }
        }
        if(exception != null)
            throw exception;
        super.close();
    }
}
Left
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
private final HintedHandoffStrategy handoffStrategy;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
public List<Versioned<byte[]>> get(final ByteArray key) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
return store.get(key);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(repairReads)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
/**
 * Routed multi-key read. Builds a GET_ALL pipeline
 * (STARTED -> CONFIGURED -> INSUFFICIENT_SUCCESSES [-> RESPONSES_RECEIVED when
 * read repair is enabled] -> COMPLETED) and returns the per-key results it
 * accumulated.
 *
 * @param keys the keys to fetch; validated by StoreUtils.assertValidKeys
 * @return map from key to the versioned values that were read
 * @throws VoldemortException rethrown if the pipeline recorded a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    // A zone-count requirement only applies when zone routing is configured.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    // STARTED: map each key to the nodes responsible for it.
    pipeline.addEventAction(Event.STARTED,
            new GetAllConfigureNodes(pipelineData, Event.CONFIGURED, failureDetector,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    routingStrategy, keys, clientZone));
    // CONFIGURED: fire the parallel fetches; always fall through to the
    // serial stage, which decides whether more requests are actually needed.
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelGetAllRequests(pipelineData, Event.INSUFFICIENT_SUCCESSES,
                    failureDetector, timeoutMs, nonblockingStores));
    // INSUFFICIENT_SUCCESSES: top up with blocking requests until the
    // required-reads quota is met.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
            new PerformSerialGetAllRequests(pipelineData,
                    repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    keys, failureDetector, innerStores,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads()));
    if(repairReads)
        // RESPONSES_RECEIVED: push back repaired values to stale replicas.
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new GetAllReadRepair(pipelineData, Event.COMPLETED,
                        storeDef.getPreferredReads(), timeoutMs, nonblockingStores,
                        readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Fetch the known versions of {@code key} from enough replicas to satisfy
 * the store's required-reads setting, via a GET_VERSIONS pipeline
 * (STARTED -> CONFIGURED -> COMPLETED, no read repair, no serial fallback).
 *
 * @param key the key whose versions are requested; must be a valid key
 * @return the concatenation of the version lists returned by the replicas
 *         (may contain duplicates across replicas)
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                    Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(),
                    routingStrategy, key, clientZone));
    // The four trailing nulls disable the per-node request override, the
    // hinted-handoff hook and the insufficient-successes/zones transitions.
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                    Event.COMPLETED, key, failureDetector,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    timeoutMs, nonblockingStores, null, null, null, null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses()) {
        List<Version> value = response.getValue();
        // Guard against a null response value, mirroring get(); previously a
        // node returning null would trigger a NullPointerException here.
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Routed delete of {@code key} at {@code version}. Runs a DELETE pipeline
 * with parallel writes, a serial fallback, an optional zone-aware fallback
 * and — when slop stores are configured — hinted handoff on both the
 * normal and the aborted path.
 *
 * @param key the key to delete; must be a valid key
 * @param version the version to delete
 * @return true if any replica reported that something was deleted
 * @throws VoldemortException rethrown if the pipeline recorded a fatal error
 */
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStoreName(name);
    Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
    HintedHandoff hintedHandoff = null;
    if(isHintedHandoffEnabled())
        hintedHandoff = new HintedHandoff(failureDetector, slopStores, handoffStrategy,
                pipelineData.getFailedNodes());
    // Blocking form of the delete, used by the serial fallback stages.
    StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
        public Boolean request(Store<ByteArray, byte[]> store) {
            return store.delete(key, version);
        }
    };
    pipeline.addEventAction(Event.STARTED,
            new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                    Event.CONFIGURED, failureDetector, storeDef.getRequiredWrites(),
                    routingStrategy, key, clientZone));
    // When handoff is enabled the pipeline detours through RESPONSES_RECEIVED
    // so that hints can be written before completing.
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                    isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    key, failureDetector,
                    storeDef.getPreferredWrites(), storeDef.getRequiredWrites(),
                    timeoutMs, nonblockingStores, hintedHandoff, version,
                    Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
            new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                    isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    key, failureDetector, innerStores,
                    storeDef.getPreferredWrites(), storeDef.getRequiredWrites(),
                    blockingDelete, null));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                        isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector, innerStores, blockingDelete));
    if(isHintedHandoffEnabled()) {
        // Normal completion and abort both record hints for failed nodes.
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new PerformDeleteHintedHandoff(pipelineData, Event.COMPLETED, key,
                        version, hintedHandoff));
        pipeline.addEventAction(Event.ABORTED,
                new PerformDeleteHintedHandoff(pipelineData, Event.ERROR, key,
                        version, hintedHandoff));
    }
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // The delete succeeded if at least one replica deleted something.
    for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
        if(response.getValue().booleanValue())
            return true;
    }
    return false;
}
/**
 * Whether hinted handoff is active for this store.
 *
 * @return true exactly when slop stores were supplied at construction time
 */
public boolean isHintedHandoffEnabled() {
    // Handoff requires somewhere to park the hints.
    boolean hasSlopStores = (slopStores != null);
    return hasSlopStores;
}
/**
 * Routed write of {@code versioned} under {@code key}. The PUT pipeline
 * first performs a serial "master" write, then parallel writes to the
 * remaining replicas, then increments the clock; with hinted handoff
 * enabled, hints are recorded before the clock increment and on abort.
 *
 * @param key the key to write; must be a valid key
 * @param versioned the versioned value to store
 * @throws VoldemortException rethrown if the pipeline recorded a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    // Start time is recorded for latency accounting by later actions.
    pipelineData.setStartTimeNs(System.nanoTime());
    pipelineData.setStoreName(name);
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
    HintedHandoff hintedHandoff = null;
    if(isHintedHandoffEnabled())
        hintedHandoff = new HintedHandoff(failureDetector, slopStores, handoffStrategy,
                pipelineData.getFailedNodes());
    pipeline.addEventAction(Event.STARTED,
            new ConfigureNodes<Void, PutPipelineData>(pipelineData, Event.CONFIGURED,
                    failureDetector, storeDef.getRequiredWrites(), routingStrategy,
                    key, clientZone));
    // CONFIGURED: write serially until a master replica accepts the put,
    // then signal MASTER_DETERMINED.
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformSerialPutRequests(pipelineData,
                    isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    key, failureDetector, innerStores, storeDef.getRequiredWrites(),
                    versioned, time, Event.MASTER_DETERMINED));
    // MASTER_DETERMINED: replicate to the remaining nodes in parallel.
    pipeline.addEventAction(Event.MASTER_DETERMINED,
            new PerformParallelPutRequests(pipelineData, Event.RESPONSES_RECEIVED, key,
                    failureDetector, storeDef.getPreferredWrites(),
                    storeDef.getRequiredWrites(), timeoutMs, nonblockingStores,
                    hintedHandoff));
    if(isHintedHandoffEnabled()) {
        pipeline.addEventAction(Event.ABORTED,
                new PerformPutHintedHandoff(pipelineData, Event.ERROR, key, versioned,
                        hintedHandoff, time));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new PerformPutHintedHandoff(pipelineData, Event.HANDOFF_FINISHED, key,
                        versioned, hintedHandoff, time));
        pipeline.addEventAction(Event.HANDOFF_FINISHED,
                new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));
    } else
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Close every non-blocking store, then close the underlying routed store.
 * All stores are attempted even if some fail; the last store failure is
 * rethrown after cleanup.
 *
 * Fix: previously super.close() was skipped entirely whenever a store close
 * had failed, leaking whatever resources the superclass holds.
 */
@Override
public void close() {
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            exception = e;
        }
    }
    // Always release the superclass's resources, even after a store failure.
    super.close();
    if(exception != null)
        throw exception;
}
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
 * A Store which multiplexes requests to different internal Stores, driving
 * each operation through an event-based Pipeline of routing actions.
 */
public class PipelineRoutedStore extends RoutedStore {

    // Node id -> non-blocking client, used by the parallel pipeline actions.
    private final Map<Integer, NonblockingStore> nonblockingStores;
    // Node id -> slop store for hints; null disables hinted handoff.
    private final Map<Integer, Store<ByteArray, Slop>> slopStores;
    // Picks handoff targets; null when hinted handoff is disabled.
    private final HintedHandoffStrategy handoffStrategy;
    // Zone this client runs in, used for zone-aware node configuration.
    private Zone clientZone;
    // True when the store definition uses the zone routing strategy.
    private boolean zoneRoutingEnabled;

    /**
     * Create a PipelineRoutedStore
     *
     * @param name The name of the store
     * @param innerStores The mapping of node to client
     * @param nonblockingStores The mapping of node to non-blocking client
     * @param slopStores The stores for hints
     * @param cluster Cluster definition
     * @param storeDef Store definition
     * @param repairReads Is read repair enabled?
     * @param clientZoneId Zone the client is in
     * @param timeoutMs Routing timeout
     * @param failureDetector Failure detector object
     */
    public PipelineRoutedStore(String name,
                               Map<Integer, Store<ByteArray, byte[]>> innerStores,
                               Map<Integer, NonblockingStore> nonblockingStores,
                               Map<Integer, Store<ByteArray, Slop>> slopStores,
                               Cluster cluster,
                               StoreDefinition storeDef,
                               boolean repairReads,
                               int clientZoneId,
                               long timeoutMs,
                               FailureDetector failureDetector) {
        super(name,
              innerStores,
              cluster,
              storeDef,
              repairReads,
              timeoutMs,
              failureDetector,
              SystemTime.INSTANCE);
        this.clientZone = cluster.getZoneById(clientZoneId);
        // Zone routing is determined solely by the configured strategy type.
        if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
            zoneRoutingEnabled = true;
        } else {
            zoneRoutingEnabled = false;
        }
        // Defensive concurrent copy; shared with pipeline actions.
        this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
        this.slopStores = slopStores;
        if(storeDef.isHintedHandoffEnabled()) {
            HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
                    clientZone.getId());
            this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
        } else {
            this.handoffStrategy = null;
        }
    }

    /**
     * Routed single-key read via a GET pipeline: parallel reads, serial
     * fallback on insufficient successes, optional zone fallback, optional
     * read repair.
     *
     * @param key the key to read; must be a valid key
     * @return all versioned values gathered from the responding replicas
     */
    public List<Versioned<byte[]>> get(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
        // Blocking form of the read, used by the serial fallback stages.
        StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
            public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
                return store.get(key);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                        Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(),
                        routingStrategy, key, clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                        repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector,
                        storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                        timeoutMs, nonblockingStores, null, null,
                        Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                        repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector, innerStores,
                        storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                        blockingStoreRequest, null));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                    new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                            Event.COMPLETED, storeDef.getPreferredReads(), timeoutMs,
                            nonblockingStores, readRepairer));
        if(zoneRoutingEnabled)
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                    new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                            repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                            key, failureDetector, innerStores, blockingStoreRequest));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
        for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
            List<Versioned<byte[]>> value = response.getValue();
            // A node may return null rather than an empty list.
            if(value != null)
                results.addAll(value);
        }
        return results;
    }

    /**
     * Routed multi-key read via a GET_ALL pipeline; see get() for the
     * stage structure.
     *
     * @param keys the keys to fetch
     * @return map from key to versioned values
     * @throws VoldemortException rethrown pipeline fatal error
     */
    public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
            throws VoldemortException {
        StoreUtils.assertValidKeys(keys);
        GetAllPipelineData pipelineData = new GetAllPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                new GetAllConfigureNodes(pipelineData, Event.CONFIGURED, failureDetector,
                        storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                        routingStrategy, keys, clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                new PerformParallelGetAllRequests(pipelineData, Event.INSUFFICIENT_SUCCESSES,
                        failureDetector, timeoutMs, nonblockingStores));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                new PerformSerialGetAllRequests(pipelineData,
                        repairReads ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        keys, failureDetector, innerStores,
                        storeDef.getPreferredReads(), storeDef.getRequiredReads()));
        if(repairReads)
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                    new GetAllReadRepair(pipelineData, Event.COMPLETED,
                            storeDef.getPreferredReads(), timeoutMs, nonblockingStores,
                            readRepairer));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        return pipelineData.getResult();
    }

    /**
     * Fetch known versions of {@code key} via a GET_VERSIONS pipeline
     * (parallel reads only; no fallback, no read repair).
     *
     * @param key the key whose versions are requested
     * @return concatenation of the replicas' version lists
     */
    public List<Version> getVersions(final ByteArray key) {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountReads());
        else
            pipelineData.setZonesRequired(null);
        Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.addEventAction(Event.STARTED,
                new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                        Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(),
                        routingStrategy, key, clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                        Event.COMPLETED, key, failureDetector,
                        storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                        timeoutMs, nonblockingStores, null, null, null, null));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        List<Version> results = new ArrayList<Version>();
        // NOTE(review): unlike get(), response.getValue() is not null-checked
        // here; a null value list would raise a NullPointerException. Confirm
        // whether GET_VERSIONS responses can carry null values.
        for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
            results.addAll(response.getValue());
        return results;
    }

    /**
     * Routed delete via a DELETE pipeline with parallel writes, serial and
     * zone fallbacks, and hinted handoff on both success and abort paths
     * when slop stores are configured.
     *
     * @param key the key to delete
     * @param version the version to delete
     * @return true if any replica reported a deletion
     * @throws VoldemortException rethrown pipeline fatal error
     */
    public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        pipelineData.setStoreName(name);
        Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
        HintedHandoff hintedHandoff = null;
        if(isHintedHandoffEnabled())
            hintedHandoff = new HintedHandoff(failureDetector, slopStores, handoffStrategy,
                    pipelineData.getFailedNodes());
        // Blocking delete used by the serial fallback stages.
        StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
            public Boolean request(Store<ByteArray, byte[]> store) {
                return store.delete(key, version);
            }
        };
        pipeline.addEventAction(Event.STARTED,
                new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                        Event.CONFIGURED, failureDetector, storeDef.getRequiredWrites(),
                        routingStrategy, key, clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                        isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector,
                        storeDef.getPreferredWrites(), storeDef.getRequiredWrites(),
                        timeoutMs, nonblockingStores, hintedHandoff, version,
                        Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES));
        pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                        isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector, innerStores,
                        storeDef.getPreferredWrites(), storeDef.getRequiredWrites(),
                        blockingDelete, null));
        if(zoneRoutingEnabled)
            pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                    new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
                            isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                            key, failureDetector, innerStores, blockingDelete));
        if(isHintedHandoffEnabled()) {
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                    new PerformDeleteHintedHandoff(pipelineData, Event.COMPLETED, key,
                            version, hintedHandoff));
            pipeline.addEventAction(Event.ABORTED,
                    new PerformDeleteHintedHandoff(pipelineData, Event.ERROR, key,
                            version, hintedHandoff));
        }
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
        for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
            if(response.getValue().booleanValue())
                return true;
        }
        return false;
    }

    /**
     * @return true exactly when slop stores were supplied, i.e. hinted
     *         handoff is active
     */
    public boolean isHintedHandoffEnabled() {
        return slopStores != null;
    }

    /**
     * Routed write via a PUT pipeline: serial master write, parallel
     * replication, clock increment; with hinted handoff enabled, hints are
     * recorded before the increment and on abort.
     *
     * @param key the key to write
     * @param versioned the versioned value to store
     * @throws VoldemortException rethrown pipeline fatal error
     */
    public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
        StoreUtils.assertValidKey(key);
        PutPipelineData pipelineData = new PutPipelineData();
        if(zoneRoutingEnabled)
            pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
        else
            pipelineData.setZonesRequired(null);
        pipelineData.setStartTimeNs(System.nanoTime());
        pipelineData.setStoreName(name);
        Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
        pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
        HintedHandoff hintedHandoff = null;
        if(isHintedHandoffEnabled())
            hintedHandoff = new HintedHandoff(failureDetector, slopStores, handoffStrategy,
                    pipelineData.getFailedNodes());
        pipeline.addEventAction(Event.STARTED,
                new ConfigureNodes<Void, PutPipelineData>(pipelineData, Event.CONFIGURED,
                        failureDetector, storeDef.getRequiredWrites(), routingStrategy,
                        key, clientZone));
        pipeline.addEventAction(Event.CONFIGURED,
                new PerformSerialPutRequests(pipelineData,
                        isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector, innerStores, storeDef.getRequiredWrites(),
                        versioned, time, Event.MASTER_DETERMINED));
        pipeline.addEventAction(Event.MASTER_DETERMINED,
                new PerformParallelPutRequests(pipelineData, Event.RESPONSES_RECEIVED, key,
                        failureDetector, storeDef.getPreferredWrites(),
                        storeDef.getRequiredWrites(), timeoutMs, nonblockingStores,
                        hintedHandoff));
        if(isHintedHandoffEnabled()) {
            pipeline.addEventAction(Event.ABORTED,
                    new PerformPutHintedHandoff(pipelineData, Event.ERROR, key, versioned,
                            hintedHandoff, time));
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                    new PerformPutHintedHandoff(pipelineData, Event.HANDOFF_FINISHED, key,
                            versioned, hintedHandoff, time));
            pipeline.addEventAction(Event.HANDOFF_FINISHED,
                    new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));
        } else
            pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                    new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));
        pipeline.addEvent(Event.STARTED);
        pipeline.execute();
        if(pipelineData.getFatalError() != null)
            throw pipelineData.getFatalError();
    }

    /**
     * Close all non-blocking stores, then the superclass.
     * NOTE(review): if any store close throws, only the last exception is
     * kept and super.close() is never reached — possible resource leak.
     */
    @Override
    public void close() {
        VoldemortException exception = null;
        for(NonblockingStore store: nonblockingStores.values()) {
            try {
                store.close();
            } catch(VoldemortException e) {
                exception = e;
            }
        }
        if(exception != null)
            throw exception;
        super.close();
    }
}
Right
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
 * A Store which multiplexes requests to different internal Stores, driving
 * each operation through an event-based Pipeline of routing actions.
 */
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
 * Create a PipelineRoutedStore.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node id to blocking client store
 * @param nonblockingStores The mapping of node id to non-blocking client store
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout in milliseconds
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone routing is determined solely by the configured strategy type.
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    // Defensive concurrent copy; shared with the pipeline actions.
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
}
/**
 * Routed single-key read with an optional server-side transform. Read
 * repair is suppressed whenever a transform is supplied, since transformed
 * values are not the stored values.
 *
 * @param key the key to read; must be a valid key
 * @param transforms opaque transform bytes passed to the server, or null
 * @return all versioned values gathered from the responding replicas
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Repair only untransformed reads: transformed values must not be
    // written back to replicas.
    boolean allowReadRepair = repairReads && transforms == null;
    // Non-blocking form of the read for the parallel stage.
    NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetRequest(key, transforms, callback);
        }
    };
    // Blocking form of the read for the serial fallback stages.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
        public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
            return store.get(key, transforms);
        }
    };
    pipeline.addEventAction(Event.STARTED,
            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                    Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(),
                    routingStrategy, key, clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                    allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    key, failureDetector,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    timeoutMs, nonblockingStores, nonblockingStoreRequest,
                    Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                    allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    key, failureDetector, innerStores,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    blockingStoreRequest, null));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                        Event.COMPLETED, storeDef.getPreferredReads(),
                        nonblockingStores, readRepairer));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                        allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                        key, failureDetector, innerStores, blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        // A node may return null rather than an empty list.
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Routed multi-key read with optional per-key transforms. Read repair is
 * suppressed whenever transforms are supplied.
 *
 * @param keys the keys to fetch
 * @param transforms per-key transform bytes, or null/empty for none
 * @return map from key to versioned values
 * @throws VoldemortException rethrown if the pipeline recorded a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
                                                      Map<ByteArray, byte[]> transforms)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    // Repair only untransformed reads, mirroring get().
    boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
            new GetAllConfigureNodes(pipelineData, Event.CONFIGURED, failureDetector,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    routingStrategy, keys, transforms, clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelGetAllRequests(pipelineData, Event.INSUFFICIENT_SUCCESSES,
                    failureDetector, timeoutMs, nonblockingStores));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
            new PerformSerialGetAllRequests(pipelineData,
                    allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
                    keys, failureDetector, innerStores,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads()));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                new GetAllReadRepair(pipelineData, Event.COMPLETED,
                        storeDef.getPreferredReads(), nonblockingStores, readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Fetch the known versions of {@code key} from enough replicas to satisfy
 * the store's required-reads setting (parallel stage only; no fallback,
 * no read repair).
 *
 * @param key the key whose versions are requested; must be a valid key
 * @return the concatenation of the version lists returned by the replicas
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS,
                                           timeoutMs,
                                           TimeUnit.MILLISECONDS);
    // Non-blocking request submitted to each selected node.
    NonblockingStoreRequest storeRequest = new NonblockingStoreRequest() {
        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetVersionsRequest(key, callback);
        }
    };
    pipeline.addEventAction(Event.STARTED,
            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                    Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(),
                    routingStrategy, key, clientZone));
    // Trailing nulls: no insufficient-successes / insufficient-zones stages.
    pipeline.addEventAction(Event.CONFIGURED,
            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                    Event.COMPLETED, key, failureDetector,
                    storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                    timeoutMs, nonblockingStores, storeRequest, null, null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses()) {
        List<Version> value = response.getValue();
        // Guard against a null response value, mirroring get(); previously a
        // node returning null would trigger a NullPointerException here.
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Deletes the given version of the given key from the required number of
 * replicas using a DELETE pipeline: nodes are configured, parallel
 * nonblocking deletes are issued, and serial (and, for zone routing,
 * zone-serial) fallbacks run if too few nodes succeed.
 *
 * @param key the key to delete; must be a valid key
 * @param version the version to delete
 * @return true if any responding node reported a successful delete
 * @throws VoldemortException if the pipeline records a fatal error
 */
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
// Zone requirements only apply to zone-routed stores.
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
// Async delete issued per node by the parallel action below.
NonblockingStoreRequest nonblockingDelete = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitDeleteRequest(key, version, callback);
}
};
// Blocking delete used by the serial fallback actions.
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
nonblockingDelete,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
// Fallback: retry serially against the remaining nodes when the parallel
// phase did not reach the required write count.
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingDelete));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
// Success if any replica actually removed something.
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Writes the versioned value under the given key.  The pipeline first
 * performs a serial put (ending in MASTER_DETERMINED -- presumably the
 * first successful node becomes the master; confirm in
 * PerformSerialPutRequests), then writes the remaining replicas in
 * parallel, and finally increments the vector clock.
 *
 * @param key the key to store; must be a valid key
 * @param versioned the versioned value to store
 * @param transforms optional server-side transforms (may be null)
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
// Record the start time for downstream latency accounting.
pipelineData.setStartTimeNs(System.nanoTime());
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
// Serial put phase: fires MASTER_DETERMINED on success.
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores));
// Bump the vector clock once all responses are in.
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
/**
 * Closes every nonblocking store and then the parent routed store.
 *
 * Fix: the original returned (by throwing) before calling
 * {@code super.close()} whenever any store close failed, leaking the
 * superclass resources.  Now the superclass is always closed; the last
 * store-close failure (or, failing that, the superclass failure) is
 * rethrown.
 */
@Override
public void close() {
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            exception = e;
        }
    }
    try {
        // Always release superclass resources, even if a store failed to close.
        super.close();
    } catch(VoldemortException e) {
        if(exception == null)
            exception = e;
    }
    if(exception != null)
        throw exception;
}
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
 * Create a PipelineRoutedStore.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node id to blocking client store
 * @param nonblockingStores The mapping of node id to nonblocking client store
 * @param cluster The cluster topology
 * @param storeDef The store definition (replication factor, read/write counts)
 * @param repairReads Whether read repair is enabled
 * @param clientZoneId Id of the zone this client resides in
 * @param timeoutMs Routing timeout in milliseconds
 * @param failureDetector Failure detector used to track node availability
 */
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
// Zone-aware routing is active only for ZONE_STRATEGY stores.
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
// Defensive copy into a concurrent map; close() iterates this concurrently-safe view.
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
}
/**
 * Reads all versions of the value stored under {@code key} via a GET
 * pipeline: parallel nonblocking gets, then serial (and zone-serial)
 * fallbacks if too few nodes respond, followed by read repair when
 * enabled.
 *
 * @param key the key to read; must be a valid key
 * @param transforms optional server-side transforms; read repair is
 *        skipped when transforms are supplied
 * @return the concatenation of all non-null version lists returned by
 *         responding nodes
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
// Read repair only runs for plain (untransformed) reads.
boolean allowReadRepair = repairReads && transforms == null;
NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitGetRequest(key, transforms, callback);
}
};
// Blocking variant for the serial fallback paths.
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
return store.get(key, transforms);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
nonblockingStoreRequest,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
// Serial fallback when the parallel phase misses the required read count.
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
// Flatten all non-null responses into a single result list.
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
/**
 * Reads many keys in one GET_ALL pipeline: keys are partitioned over
 * nodes, fetched in parallel, missing keys are retried serially, and read
 * repair runs when enabled.
 *
 * @param keys the keys to read; all must be valid
 * @param transforms optional per-key server-side transforms; read repair
 *        is skipped when any transforms are supplied
 * @return a map from key to the versions found for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
// Read repair only runs for plain (untransformed) reads.
boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
transforms,
clientZone));
// Parallel phase always hands off to the serial phase, which fills in
// any keys that did not reach their required read count.
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
/**
 * Fetches only the versions (vector clocks) stored for {@code key}, without
 * the associated values, by running a GET_VERSIONS pipeline against the
 * preferred/required number of replicas in parallel.
 *
 * @param key the key whose versions are requested; must be a valid key
 * @return the concatenation of the version lists returned by every
 *         responding node (duplicates are not collapsed here)
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> data = new BasicPipelineData<List<Version>>();
    // A zone-count requirement only applies when the store uses zone routing.
    data.setZonesRequired(zoneRoutingEnabled ? storeDef.getZoneCountReads() : null);
    final Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS,
                                           timeoutMs,
                                           TimeUnit.MILLISECONDS);
    // Issued once per selected node by the parallel-request action below.
    NonblockingStoreRequest versionsRequest = new NonblockingStoreRequest() {
        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetVersionsRequest(key, callback);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(data,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    // The trailing nulls: no serial / zone fallback events are wired for GET_VERSIONS.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(data,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         versionsRequest,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(data.getFatalError() != null)
        throw data.getFatalError();
    List<Version> collected = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: data.getResponses())
        collected.addAll(response.getValue());
    return collected;
}
/**
 * Deletes the given version of the given key from the required number of
 * replicas using a DELETE pipeline: nodes are configured, parallel
 * nonblocking deletes are issued, and serial (and, for zone routing,
 * zone-serial) fallbacks run if too few nodes succeed.
 *
 * @param key the key to delete; must be a valid key
 * @param version the version to delete
 * @return true if any responding node reported a successful delete
 * @throws VoldemortException if the pipeline records a fatal error
 */
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
// Zone requirements only apply to zone-routed stores.
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
// Async delete issued per node by the parallel action below.
NonblockingStoreRequest nonblockingDelete = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitDeleteRequest(key, version, callback);
}
};
// Blocking delete used by the serial fallback actions.
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
nonblockingDelete,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
// Fallback: retry serially against the remaining nodes when the parallel
// phase did not reach the required write count.
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingDelete));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
// Success if any replica actually removed something.
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Writes the versioned value under the given key.  The pipeline first
 * performs a serial put (ending in MASTER_DETERMINED -- presumably the
 * first successful node becomes the master; confirm in
 * PerformSerialPutRequests), then writes the remaining replicas in
 * parallel, and finally increments the vector clock.
 *
 * @param key the key to store; must be a valid key
 * @param versioned the versioned value to store
 * @param transforms optional server-side transforms (may be null)
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
// Record the start time for downstream latency accounting.
pipelineData.setStartTimeNs(System.nanoTime());
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
// Serial put phase: fires MASTER_DETERMINED on success.
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores));
// Bump the vector clock once all responses are in.
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
/**
 * Closes every nonblocking store and then the parent routed store.
 *
 * Fix: the original returned (by throwing) before calling
 * {@code super.close()} whenever any store close failed, leaking the
 * superclass resources.  Now the superclass is always closed; the last
 * store-close failure (or, failing that, the superclass failure) is
 * rethrown.
 */
@Override
public void close() {
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            exception = e;
        }
    }
    try {
        // Always release superclass resources, even if a store failed to close.
        super.close();
    } catch(VoldemortException e) {
        if(exception == null)
            exception = e;
    }
    if(exception != null)
        throw exception;
}
}
// MergeMethods: marker separating concatenated versions of PipelineRoutedStore.java
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
private final HintedHandoffStrategy handoffStrategy;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
 * Create a PipelineRoutedStore
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to nonblocking client
 * @param slopStores The stores for hints; null disables hinted handoff
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
// Zone-aware routing is active only for ZONE_STRATEGY stores.
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
// Defensive copy into a concurrent map; close() iterates this view.
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
// Hinted handoff requires both slop stores AND the store definition flag.
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
/**
 * Reads all versions of the value stored under {@code key} via a GET
 * pipeline: parallel nonblocking gets, then serial (and zone-serial)
 * fallbacks if too few nodes respond, followed by read repair when
 * enabled.
 *
 * NOTE(review): in this variant the {@code transforms} argument is
 * accepted but never forwarded -- the blocking fallback calls
 * {@code store.get(key)} and no nonblocking request hook is passed.
 * Confirm this is intentional.
 *
 * @param key the key to read; must be a valid key
 * @param transforms currently unused in this variant
 * @return the concatenation of all non-null version lists returned by
 *         responding nodes
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
// Blocking get used by the serial fallback actions.
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
return store.get(key);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
// Serial fallback when the parallel phase misses the required read count.
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(repairReads)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
// Flatten all non-null responses into a single result list.
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
/**
 * Reads many keys in one GET_ALL pipeline: keys are partitioned over
 * nodes, fetched in parallel, missing keys are retried serially, and read
 * repair runs when enabled.
 *
 * NOTE(review): this variant does not forward {@code transforms} to
 * GetAllConfigureNodes -- confirm intentional.
 *
 * @param keys the keys to read; all must be valid
 * @param transforms currently unused in this variant
 * @return a map from key to the versions found for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
clientZone));
// Parallel phase always hands off to the serial phase, which fills in
// any keys that did not reach their required read count.
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(repairReads)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
/**
 * Create a PipelineRoutedStore without hinted-handoff support: no slop
 * stores are configured, so {@link #isHintedHandoffEnabled()} returns
 * false for instances built through this constructor.
 *
 * Fix: the final fields {@code slopStores} and {@code handoffStrategy}
 * were never assigned in this overload, which is a compile error for
 * final fields; they are now explicitly set to null (handoff disabled).
 * The javadoc previously described parameters that do not exist.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node id to blocking client store
 * @param nonblockingStores The mapping of node id to nonblocking client store
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone-aware routing is active only for ZONE_STRATEGY stores.
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    // No slop stores supplied through this overload: hinted handoff is off.
    this.slopStores = null;
    this.handoffStrategy = null;
}
/**
 * Fetches only the versions (vector clocks) stored for {@code key} via a
 * GET_VERSIONS pipeline.  In this variant no request/handoff/fallback
 * hooks are passed (the trailing nulls) -- presumably the parallel action
 * derives the request from Operation.GET_VERSIONS; confirm in
 * PerformParallelRequests.
 *
 * @param key the key whose versions are requested; must be a valid key
 * @return the concatenation of the version lists returned by every
 *         responding node
 */
public List<Version> getVersions(final ByteArray key) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
null,
null));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Version> results = new ArrayList<Version>();
for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
results.addAll(response.getValue());
return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * @return true if hinted handoff is enabled for this store, i.e. slop
 *         stores were supplied at construction time
 */
public boolean isHintedHandoffEnabled() {
return slopStores != null;
}
/**
 * Writes the versioned value under the given key, with optional hinted
 * handoff: serial put first (ending in MASTER_DETERMINED), then parallel
 * puts, then -- when handoff is enabled -- a put-handoff step before the
 * vector clock is incremented; on abort the handoff runs before the error
 * is reported.
 *
 * NOTE(review): {@code transforms} is not forwarded to the serial or
 * parallel put actions in this variant -- confirm intentional.
 *
 * @param key the key to store; must be a valid key
 * @param versioned the versioned value to store
 * @param transforms currently unused in this variant
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
// Start time for latency accounting; store name for slop records.
pipelineData.setStartTimeNs(System.nanoTime());
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
// Serial put phase: fires MASTER_DETERMINED on success.
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff));
if(isHintedHandoffEnabled()) {
// On abort, register the slops before surfacing the error.
pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
Event.ERROR,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
Event.HANDOFF_FINISHED,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
} else
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
/**
 * Closes every managed non-blocking store before delegating to the parent
 * store. Every store is attempted even if some fail; the most recent
 * failure is re-thrown afterwards (in which case {@code super.close()} is
 * not reached, matching the original behaviour).
 */
@Override
public void close() {
    VoldemortException lastFailure = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            // remember the latest failure but keep closing the rest
            lastFailure = e;
        }
    }
    if(lastFailure != null)
        throw lastFailure;
    super.close();
}
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
// Per-node non-blocking store handles used by the parallel pipeline stages.
private final Map<Integer, NonblockingStore> nonblockingStores;
// Per-node slop stores for hinted handoff; null means handoff is disabled
// (see isHintedHandoffEnabled()).
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
// Strategy used to pick handoff targets; assigned only when the store
// definition enables hinted handoff, otherwise null.
private final HintedHandoffStrategy handoffStrategy;
// Zone this client belongs to, resolved from the cluster by zone id.
private Zone clientZone;
// True when the store definition uses the zone routing strategy.
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Map<Integer, Store<ByteArray, Slop>> slopStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // compareTo(...) == 0 on the routing strategy type is the same
    // predicate as equals(...).
    this.zoneRoutingEnabled = storeDef.getRoutingStrategyType()
                                      .equals(RoutingStrategyType.ZONE_STRATEGY);
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    this.slopStores = slopStores;
    if(storeDef.isHintedHandoffEnabled()) {
        // Hinted handoff is on: derive the handoff strategy from the store
        // definition and the client's zone.
        this.handoffStrategy = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
                                                                clientZone.getId()).updateHintedHandoffStrategy(storeDef,
                                                                                                                cluster);
    } else {
        this.handoffStrategy = null;
    }
}
/**
 * Routed GET: runs the GET pipeline against the replicas of {@code key}
 * and concatenates every non-null value list returned by responding nodes.
 *
 * @param key key to read; validated before the pipeline starts
 * @param transforms NOTE(review): accepted but never used in this method —
 *        the blocking request below calls the two-argument
 *        {@code store.get(key)}; confirm whether transforms should be
 *        forwarded.
 * @return all versioned values gathered from the responding nodes
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Blocking request used by the serial fallback paths below.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

        public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
            return store.get(key);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    // When read repair is on, responses are routed through
    // RESPONSES_RECEIVED so the ReadRepair action can observe them.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                        : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             null,
                                                                                                                             null,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    // Fallback: retry serially if the parallel phase could not satisfy
    // the required read count.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                      : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           timeoutMs,
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    // Zone-serial fallback when not enough zones answered.
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                              : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all non-null per-node value lists into one result list.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Routed GET_ALL: fetches values for many keys in one pipeline run,
 * falling back to serial requests if the parallel phase is insufficient.
 *
 * @param keys keys to read; validated before the pipeline starts
 * @param transforms NOTE(review): accepted but never passed to any pipeline
 *        action in this variant — confirm against the version of
 *        GetAllConfigureNodes that takes transforms.
 * @return per-key lists of versioned values, as assembled by the pipeline
 * @throws VoldemortException fatal pipeline error, re-thrown as-is
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
                                                      Map<ByteArray, byte[]> transforms)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    // Serial fallback after the parallel phase; routes through
    // RESPONSES_RECEIVED only when read repair needs to see responses.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            repairReads ? Event.RESPONSES_RECEIVED
                                                                       : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     timeoutMs,
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Create a PipelineRoutedStore without hinted-handoff support. No slop
 * stores are supplied, so {@code slopStores} and {@code handoffStrategy}
 * are null and {@link #isHintedHandoffEnabled()} reports false.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to non-blocking client
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    // Fix: slopStores and handoffStrategy are blank final fields; leaving
    // them unassigned on this constructor path is a compile error (definite
    // assignment). Null means hinted handoff is disabled for this store.
    this.slopStores = null;
    this.handoffStrategy = null;
}
/**
 * Routed GET_VERSIONS: fetches the version metadata for {@code key} from
 * the preferred replicas via a parallel pipeline and concatenates the
 * per-node version lists.
 *
 * @param key key whose versions are requested; validated up front
 * @return all versions reported by the responding nodes
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses()) {
        List<Version> value = response.getValue();
        // Fix: guard against null payloads, mirroring the null handling in
        // get(); the original called addAll() unconditionally and would NPE.
        if(value != null)
            results.addAll(value);
    }
    return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Hinted handoff is considered enabled exactly when slop stores were
 * supplied at construction time.
 */
public boolean isHintedHandoffEnabled() {
    return null != slopStores;
}
/**
 * Routed PUT: writes {@code versioned} under {@code key} by first doing a
 * serial "master" put, then parallel puts to the remaining replicas, and
 * finally incrementing the vector clock. When hinted handoff is enabled,
 * failed writes are recorded as slops before completion.
 *
 * @param key key to write; validated up front
 * @param versioned value plus vector clock to store
 * @param transforms NOTE(review): accepted but never passed to any pipeline
 *        action here — confirm intended use.
 * @throws VoldemortException fatal pipeline error, re-thrown as-is
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
        throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    pipelineData.setStoreName(name);
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
    HintedHandoff hintedHandoff = null;
    if(isHintedHandoffEnabled())
        hintedHandoff = new HintedHandoff(failureDetector,
                                          slopStores,
                                          handoffStrategy,
                                          pipelineData.getFailedNodes());
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                      Event.CONFIGURED,
                                                                      failureDetector,
                                                                      storeDef.getRequiredWrites(),
                                                                      routingStrategy,
                                                                      key,
                                                                      clientZone));
    // Serial put to establish the master replica; with handoff enabled the
    // responses still flow through RESPONSES_RECEIVED for slop recording.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformSerialPutRequests(pipelineData,
                                                         isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED
                                                                                 : Event.COMPLETED,
                                                         key,
                                                         failureDetector,
                                                         innerStores,
                                                         storeDef.getRequiredWrites(),
                                                         versioned,
                                                         time,
                                                         Event.MASTER_DETERMINED));
    pipeline.addEventAction(Event.MASTER_DETERMINED,
                            new PerformParallelPutRequests(pipelineData,
                                                           Event.RESPONSES_RECEIVED,
                                                           key,
                                                           failureDetector,
                                                           storeDef.getPreferredWrites(),
                                                           storeDef.getRequiredWrites(),
                                                           timeoutMs,
                                                           nonblockingStores,
                                                           hintedHandoff));
    if(isHintedHandoffEnabled()) {
        // On abort, record slops then surface the error; on normal flow,
        // record slops then increment the clock before completing.
        pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
                                                                           Event.ERROR,
                                                                           key,
                                                                           versioned,
                                                                           hintedHandoff,
                                                                           time));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
                                                                                      Event.HANDOFF_FINISHED,
                                                                                      key,
                                                                                      versioned,
                                                                                      hintedHandoff,
                                                                                      time));
        pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
                                                                           Event.COMPLETED,
                                                                           versioned,
                                                                           time));
    } else
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                             Event.COMPLETED,
                                                                             versioned,
                                                                             time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Closes all non-blocking stores and then the parent store. Each store is
 * closed even if an earlier one failed; if any failure occurred, the last
 * one is re-thrown and {@code super.close()} is skipped (as in the
 * original).
 */
@Override
public void close() {
    VoldemortException deferred = null;
    for(NonblockingStore nonblockingStore: nonblockingStores.values()) {
        try {
            nonblockingStore.close();
        } catch(VoldemortException e) {
            // keep the most recent failure and continue the sweep
            deferred = e;
        }
    }
    if(deferred != null) {
        throw deferred;
    }
    super.close();
}
}
// NOTE(review): stray token "KeepBothMethods" — merge-tool residue, not valid Java; commented out.
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
// Node id -> non-blocking store handle, consumed by the parallel stages.
private final Map<Integer, NonblockingStore> nonblockingStores;
// Node id -> slop store for hinted handoff; null when handoff is off
// (isHintedHandoffEnabled() tests exactly this).
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
// Handoff target selection strategy; only set when the store definition
// enables hinted handoff.
private final HintedHandoffStrategy handoffStrategy;
// The zone of this client, looked up from the cluster by id.
private Zone clientZone;
// Whether the store definition selects the zone routing strategy.
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
/**
 * Routed GET: runs the GET pipeline against the replicas of {@code key}
 * and concatenates every non-null value list that came back.
 *
 * @param key key to read; validated before the pipeline starts
 * @return all versioned values gathered from the responding nodes
 */
public List<Versioned<byte[]>> get(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Blocking request used by the serial fallback paths below.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

        public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
            return store.get(key);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    // Responses route through RESPONSES_RECEIVED only when read repair
    // needs to observe them.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                        : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             null,
                                                                                                                             null,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    // Serial retry when the parallel phase missed the required read count.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                      : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           timeoutMs,
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    // Zone-serial fallback when not enough zones answered.
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                              : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all non-null per-node value lists into one result list.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Routed GET_ALL: fetches values for many keys in one pipeline run, with a
 * serial fallback and optional read repair.
 *
 * @param keys keys to read; validated before the pipeline starts
 * @return per-key lists of versioned values, as assembled by the pipeline
 * @throws VoldemortException fatal pipeline error, re-thrown as-is
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    // Serial fallback; routes through RESPONSES_RECEIVED only when read
    // repair needs to see responses.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            repairReads ? Event.RESPONSES_RECEIVED
                                                                       : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     timeoutMs,
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Create a PipelineRoutedStore without hinted-handoff support. No slop
 * stores are supplied, so {@code slopStores} and {@code handoffStrategy}
 * are null and {@link #isHintedHandoffEnabled()} reports false.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to non-blocking client
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    // Fix: slopStores and handoffStrategy are blank final fields; leaving
    // them unassigned on this constructor path is a compile error (definite
    // assignment). Null means hinted handoff is disabled for this store.
    this.slopStores = null;
    this.handoffStrategy = null;
}
/**
 * Routed GET with transforms: runs the GET pipeline, forwarding
 * {@code transforms} to both the non-blocking and blocking store calls,
 * and concatenates every non-null value list that came back.
 *
 * @param key key to read; validated before the pipeline starts
 * @param transforms optional per-value transforms; read repair is
 *        suppressed whenever transforms are supplied
 * @return all versioned values gathered from the responding nodes
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Repair is only attempted on untransformed reads.
    boolean allowReadRepair = repairReads && transforms == null;
    // Async request used by the parallel phase.
    NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {

        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetRequest(key, transforms, callback);
        }
    };
    // Blocking request used by the serial fallback paths.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

        public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
            return store.get(key, transforms);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                            : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             nonblockingStoreRequest,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    // Serial retry when the parallel phase missed the required read count.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                          : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    // Zone-serial fallback when not enough zones answered.
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                                  : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all non-null per-node value lists into one result list.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Routed GET_ALL with transforms: fetches values for many keys, forwarding
 * per-key transforms to node configuration. Read repair is suppressed when
 * any transforms are supplied.
 *
 * @param keys keys to read; validated before the pipeline starts
 * @param transforms optional per-key transforms, may be null or empty
 * @return per-key lists of versioned values, as assembled by the pipeline
 * @throws VoldemortException fatal pipeline error, re-thrown as-is
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
                                                      Map<ByteArray, byte[]> transforms)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    // Repair only on untransformed reads.
    boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     transforms,
                                                     clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    // Serial fallback after the parallel phase.
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                           : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Routed GET_VERSIONS: fetches the version metadata for {@code key} from
 * the preferred replicas via a parallel pipeline and concatenates the
 * per-node version lists.
 *
 * @param key key whose versions are requested; validated up front
 * @return all versions reported by the responding nodes
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    // Zone counts only matter under the zone routing strategy.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses()) {
        List<Version> value = response.getValue();
        // Fix: guard against null payloads, mirroring the null handling in
        // get(); the original called addAll() unconditionally and would NPE.
        if(value != null)
            results.addAll(value);
    }
    return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Reports whether hinted handoff is active for this store.
 *
 * @return true exactly when slop stores were supplied at construction time
 */
public boolean isHintedHandoffEnabled() {
    return null != slopStores;
}
/**
 * Performs a routed put of {@code versioned} under {@code key}.
 *
 * Pipeline wiring: STARTED -> ConfigureNodes -> CONFIGURED ->
 * PerformSerialPutRequests (writes to the master) -> MASTER_DETERMINED ->
 * PerformParallelPutRequests -> RESPONSES_RECEIVED. When hinted handoff is
 * enabled, failed writes are handed off to slop stores (HANDOFF_FINISHED)
 * before IncrementClock completes the pipeline; an ABORTED pipeline also
 * performs the handoff before surfacing ERROR.
 *
 * @param key the key to write; must be a valid non-empty key
 * @param versioned the versioned value to store
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    pipelineData.setStoreName(name);
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
    HintedHandoff hintedHandoff = null;
    if(isHintedHandoffEnabled())
        hintedHandoff = new HintedHandoff(failureDetector,
                                          slopStores,
                                          handoffStrategy,
                                          pipelineData.getFailedNodes());
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                      Event.CONFIGURED,
                                                                      failureDetector,
                                                                      storeDef.getRequiredWrites(),
                                                                      routingStrategy,
                                                                      key,
                                                                      clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformSerialPutRequests(pipelineData,
                                                         isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED
                                                                                 : Event.COMPLETED,
                                                         key,
                                                         failureDetector,
                                                         innerStores,
                                                         storeDef.getRequiredWrites(),
                                                         versioned,
                                                         time,
                                                         Event.MASTER_DETERMINED));
    pipeline.addEventAction(Event.MASTER_DETERMINED,
                            new PerformParallelPutRequests(pipelineData,
                                                           Event.RESPONSES_RECEIVED,
                                                           key,
                                                           failureDetector,
                                                           storeDef.getPreferredWrites(),
                                                           storeDef.getRequiredWrites(),
                                                           timeoutMs,
                                                           nonblockingStores,
                                                           hintedHandoff));
    if(isHintedHandoffEnabled()) {
        // ABORTED still attempts handoff so failed nodes receive slops before ERROR.
        pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
                                                                           Event.ERROR,
                                                                           key,
                                                                           versioned,
                                                                           hintedHandoff,
                                                                           time));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
                                                                                      Event.HANDOFF_FINISHED,
                                                                                      key,
                                                                                      versioned,
                                                                                      hintedHandoff,
                                                                                      time));
        pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
                                                                           Event.COMPLETED,
                                                                           versioned,
                                                                           time));
    } else
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                             Event.COMPLETED,
                                                                             versioned,
                                                                             time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Performs a routed put of {@code versioned} under {@code key}, applying the
 * opaque {@code transforms} payload on each node.
 *
 * NOTE(review): unlike {@link #put(ByteArray, Versioned)}, this variant does
 * not set the store name on the pipeline data and wires no hinted handoff,
 * and its serial-put action targets COMPLETED directly — presumably an older
 * code path kept for the transforms API; confirm this divergence is intended.
 *
 * @param key the key to write; must be a valid non-empty key
 * @param versioned the versioned value to store
 * @param transforms opaque per-store transform payload
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
        throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                      Event.CONFIGURED,
                                                                      failureDetector,
                                                                      storeDef.getRequiredWrites(),
                                                                      routingStrategy,
                                                                      key,
                                                                      clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformSerialPutRequests(pipelineData,
                                                         Event.COMPLETED,
                                                         key,
                                                         transforms,
                                                         failureDetector,
                                                         innerStores,
                                                         storeDef.getRequiredWrites(),
                                                         versioned,
                                                         time,
                                                         Event.MASTER_DETERMINED));
    pipeline.addEventAction(Event.MASTER_DETERMINED,
                            new PerformParallelPutRequests(pipelineData,
                                                           Event.RESPONSES_RECEIVED,
                                                           key,
                                                           transforms,
                                                           failureDetector,
                                                           storeDef.getPreferredWrites(),
                                                           storeDef.getRequiredWrites(),
                                                           timeoutMs,
                                                           nonblockingStores));
    pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                         Event.COMPLETED,
                                                                         versioned,
                                                                         time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Closes every nonblocking store and then the parent routed store.
 *
 * Fix: the original skipped {@code super.close()} whenever one of the
 * nonblocking stores failed to close, leaking the parent's resources. The
 * parent is now always closed; if both the inner loop and the parent fail,
 * the inner-store failure takes precedence (the throw in finally supersedes
 * the one from {@code super.close()}).
 *
 * @throws VoldemortException the last failure observed while closing
 */
@Override
public void close() {
    // Remember only the last failure so one bad store does not prevent
    // the remaining stores from being closed.
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            exception = e;
        }
    }
    try {
        super.close();
    } finally {
        if(exception != null)
            throw exception;
    }
}
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
 * A Store which multiplexes requests to different internal Stores, driving
 * each operation through an event-based Pipeline of routing actions.
 */
public class PipelineRoutedStore extends RoutedStore {

    // Per-node asynchronous store clients used by the parallel pipeline actions.
    private final Map<Integer, NonblockingStore> nonblockingStores;
    // Per-node slop stores for hinted handoff; null disables handoff entirely.
    private final Map<Integer, Store<ByteArray, Slop>> slopStores;
    // Chooses which nodes receive hints; null when handoff is disabled.
    private final HintedHandoffStrategy handoffStrategy;
    // Zone the local client lives in, used for zone-aware routing decisions.
    private Zone clientZone;
    // True when the store definition uses the zone routing strategy.
    private boolean zoneRoutingEnabled;
/**
 * Create a PipelineRoutedStore with optional hinted-handoff support.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to nonblocking client
 * @param slopStores The stores for hints; enables hinted handoff when non-null
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Map<Integer, Store<ByteArray, Slop>> slopStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone routing is enabled purely by the configured routing strategy type.
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    this.slopStores = slopStores;
    // The handoff strategy is only built when the store definition asks for
    // hinted handoff; otherwise handoff-related wiring stays disabled.
    if(storeDef.isHintedHandoffEnabled()) {
        HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
                                                                                clientZone.getId());
        this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
    } else {
        this.handoffStrategy = null;
    }
}
/**
 * Performs a routed get for {@code key}, merging the values from all
 * responding nodes.
 *
 * Pipeline wiring: STARTED -> ConfigureNodes -> CONFIGURED ->
 * PerformParallelRequests, with PerformSerialRequests as the
 * INSUFFICIENT_SUCCESSES fallback and PerformZoneSerialRequests as the
 * INSUFFICIENT_ZONES fallback when zone routing is enabled. When read repair
 * is on, responses funnel through RESPONSES_RECEIVED -> ReadRepair before
 * COMPLETED.
 *
 * @param key the key to read; must be a valid non-empty key
 * @return all versioned values returned by the responding nodes (possibly empty)
 */
public List<Versioned<byte[]>> get(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Blocking form of the get, used by the serial and zone-serial fallbacks.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

        public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
            return store.get(key);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                        : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             null,
                                                                                                                             null,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                      : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           timeoutMs,
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                              : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all node responses, skipping nodes that returned no value.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Performs a routed multi-get for {@code keys}.
 *
 * Pipeline wiring: STARTED -> GetAllConfigureNodes -> CONFIGURED ->
 * PerformParallelGetAllRequests, which always continues to
 * INSUFFICIENT_SUCCESSES where PerformSerialGetAllRequests fills in any keys
 * that still lack enough responses. With read repair on, the serial phase
 * continues to RESPONSES_RECEIVED -> GetAllReadRepair before COMPLETED.
 *
 * @param keys the keys to read; each must be a valid non-empty key
 * @return a map from key to the versioned values gathered for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            repairReads ? Event.RESPONSES_RECEIVED
                                                                       : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     timeoutMs,
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Create a PipelineRoutedStore without hinted-handoff support.
 *
 * Fixes: the previous javadoc documented parameters that do not exist on
 * this constructor (routingStrategy, requiredReads/Writes, threadPool), and
 * the blank-final fields {@code slopStores} and {@code handoffStrategy} were
 * never assigned on this path, violating Java's definite-assignment rules
 * for final fields. They are now explicitly set to null, which disables
 * hinted handoff (see {@link #isHintedHandoffEnabled()}).
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to nonblocking client
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone routing is enabled purely by the configured routing strategy type.
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    // No slop stores on this construction path: hinted handoff is disabled.
    this.slopStores = null;
    this.handoffStrategy = null;
}
/**
 * Performs a routed get for {@code key}, applying the opaque
 * {@code transforms} payload on each node.
 *
 * Read repair is only performed when no transforms are supplied, since a
 * transformed value is not the canonical stored value. Wiring otherwise
 * mirrors {@link #get(ByteArray)}, with explicit nonblocking/blocking
 * request objects that carry the transforms.
 *
 * @param key the key to read; must be a valid non-empty key
 * @param transforms opaque per-store transform payload, may be null
 * @return all versioned values returned by the responding nodes (possibly empty)
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Repairing a transformed read would write back derived data, so skip it.
    boolean allowReadRepair = repairReads && transforms == null;
    NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {

        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetRequest(key, transforms, callback);
        }
    };
    // Blocking form of the get, used by the serial and zone-serial fallbacks.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {

        public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
            return store.get(key, transforms);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                            : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             nonblockingStoreRequest,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                          : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                                  : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all node responses, skipping nodes that returned no value.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Performs a routed multi-get for {@code keys}, applying the per-key
 * {@code transforms} payloads on each node.
 *
 * Read repair runs only when no transforms are supplied, mirroring
 * {@link #get(ByteArray, byte[])}. Wiring otherwise matches
 * {@link #getAll(Iterable)}.
 *
 * @param keys the keys to read; each must be a valid non-empty key
 * @param transforms map of key to opaque transform payload, may be null/empty
 * @return a map from key to the versioned values gathered for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
                                                      Map<ByteArray, byte[]> transforms)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     transforms,
                                                     clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                           : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Fetches only the versions (vector clocks) stored for {@code key}, without
 * the associated values.
 *
 * Pipeline wiring: STARTED -> ConfigureNodes -> CONFIGURED ->
 * PerformParallelRequests -> COMPLETED. No read repair and no serial/zone
 * fallback actions are registered, so a parallel-phase failure surfaces
 * directly as the pipeline's fatal error.
 *
 * @param key the key whose versions are requested; must be a valid non-empty key
 * @return the concatenation of the version lists returned by all responding nodes
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    // Zone-aware routing requires responses from a minimum number of distinct zones.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    // The four trailing nulls disable the hinted-handoff hook, the version
    // argument, and the insufficient-successes/zones fallback events for
    // this read-only operation.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
        results.addAll(response.getValue());
    return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Reports whether hinted handoff is active for this store.
 *
 * @return true exactly when slop stores were supplied at construction time
 */
public boolean isHintedHandoffEnabled() {
    return null != slopStores;
}
/**
 * Performs a routed put of {@code versioned} under {@code key}.
 *
 * Pipeline wiring: STARTED -> ConfigureNodes -> CONFIGURED ->
 * PerformSerialPutRequests (writes to the master) -> MASTER_DETERMINED ->
 * PerformParallelPutRequests -> RESPONSES_RECEIVED. When hinted handoff is
 * enabled, failed writes are handed off to slop stores (HANDOFF_FINISHED)
 * before IncrementClock completes the pipeline; an ABORTED pipeline also
 * performs the handoff before surfacing ERROR.
 *
 * @param key the key to write; must be a valid non-empty key
 * @param versioned the versioned value to store
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    pipelineData.setStoreName(name);
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
    HintedHandoff hintedHandoff = null;
    if(isHintedHandoffEnabled())
        hintedHandoff = new HintedHandoff(failureDetector,
                                          slopStores,
                                          handoffStrategy,
                                          pipelineData.getFailedNodes());
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                      Event.CONFIGURED,
                                                                      failureDetector,
                                                                      storeDef.getRequiredWrites(),
                                                                      routingStrategy,
                                                                      key,
                                                                      clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformSerialPutRequests(pipelineData,
                                                         isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED
                                                                                 : Event.COMPLETED,
                                                         key,
                                                         failureDetector,
                                                         innerStores,
                                                         storeDef.getRequiredWrites(),
                                                         versioned,
                                                         time,
                                                         Event.MASTER_DETERMINED));
    pipeline.addEventAction(Event.MASTER_DETERMINED,
                            new PerformParallelPutRequests(pipelineData,
                                                           Event.RESPONSES_RECEIVED,
                                                           key,
                                                           failureDetector,
                                                           storeDef.getPreferredWrites(),
                                                           storeDef.getRequiredWrites(),
                                                           timeoutMs,
                                                           nonblockingStores,
                                                           hintedHandoff));
    if(isHintedHandoffEnabled()) {
        // ABORTED still attempts handoff so failed nodes receive slops before ERROR.
        pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
                                                                           Event.ERROR,
                                                                           key,
                                                                           versioned,
                                                                           hintedHandoff,
                                                                           time));
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
                                                                                      Event.HANDOFF_FINISHED,
                                                                                      key,
                                                                                      versioned,
                                                                                      hintedHandoff,
                                                                                      time));
        pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
                                                                           Event.COMPLETED,
                                                                           versioned,
                                                                           time));
    } else
        pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                             Event.COMPLETED,
                                                                             versioned,
                                                                             time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Performs a routed put of {@code versioned} under {@code key}, applying the
 * opaque {@code transforms} payload on each node.
 *
 * NOTE(review): unlike {@link #put(ByteArray, Versioned)}, this variant does
 * not set the store name on the pipeline data and wires no hinted handoff,
 * and its serial-put action targets COMPLETED directly — presumably an older
 * code path kept for the transforms API; confirm this divergence is intended.
 *
 * @param key the key to write; must be a valid non-empty key
 * @param versioned the versioned value to store
 * @param transforms opaque per-store transform payload
 * @throws VoldemortException if the pipeline records a fatal error
 */
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
        throws VoldemortException {
    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else
        pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<Void, PutPipelineData>(pipelineData,
                                                                      Event.CONFIGURED,
                                                                      failureDetector,
                                                                      storeDef.getRequiredWrites(),
                                                                      routingStrategy,
                                                                      key,
                                                                      clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformSerialPutRequests(pipelineData,
                                                         Event.COMPLETED,
                                                         key,
                                                         transforms,
                                                         failureDetector,
                                                         innerStores,
                                                         storeDef.getRequiredWrites(),
                                                         versioned,
                                                         time,
                                                         Event.MASTER_DETERMINED));
    pipeline.addEventAction(Event.MASTER_DETERMINED,
                            new PerformParallelPutRequests(pipelineData,
                                                           Event.RESPONSES_RECEIVED,
                                                           key,
                                                           transforms,
                                                           failureDetector,
                                                           storeDef.getPreferredWrites(),
                                                           storeDef.getRequiredWrites(),
                                                           timeoutMs,
                                                           nonblockingStores));
    pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
                                                                         Event.COMPLETED,
                                                                         versioned,
                                                                         time));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
}
/**
 * Closes every nonblocking store and then the parent routed store.
 *
 * Fix: the original skipped {@code super.close()} whenever one of the
 * nonblocking stores failed to close, leaking the parent's resources. The
 * parent is now always closed; if both the inner loop and the parent fail,
 * the inner-store failure takes precedence (the throw in finally supersedes
 * the one from {@code super.close()}).
 *
 * @throws VoldemortException the last failure observed while closing
 */
@Override
public void close() {
    // Remember only the last failure so one bad store does not prevent
    // the remaining stores from being closed.
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            exception = e;
        }
    }
    try {
        super.close();
    } finally {
        if(exception != null)
            throw exception;
    }
}
}
// ===== "Safe" variant of PipelineRoutedStore (second file copy) follows =====
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
 * A Store which multiplexes requests to different internal Stores, driving
 * each operation through an event-based Pipeline of routing actions.
 */
public class PipelineRoutedStore extends RoutedStore {

    // Per-node asynchronous store clients used by the parallel pipeline actions.
    private final Map<Integer, NonblockingStore> nonblockingStores;
    // Per-node slop stores for hinted handoff; null disables handoff entirely.
    private final Map<Integer, Store<ByteArray, Slop>> slopStores;
    // Chooses which nodes receive hints; null when handoff is disabled.
    private final HintedHandoffStrategy handoffStrategy;
    // Zone the local client lives in, used for zone-aware routing decisions.
    private Zone clientZone;
    // True when the store definition uses the zone routing strategy.
    private boolean zoneRoutingEnabled;
/**
 * Create a PipelineRoutedStore with optional hinted-handoff support.
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to nonblocking client
 * @param slopStores The stores for hints; enables hinted handoff when non-null
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Map<Integer, Store<ByteArray, Slop>> slopStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone routing is enabled purely by the configured routing strategy type.
    if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    this.slopStores = slopStores;
    // The handoff strategy is only built when the store definition asks for
    // hinted handoff; otherwise handoff-related wiring stays disabled.
    if(storeDef.isHintedHandoffEnabled()) {
        HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
                                                                                clientZone.getId());
        this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
    } else {
        this.handoffStrategy = null;
    }
}
/**
 * Fetches all versions of {@code key} through the GET pipeline: a parallel
 * fan-out to the preferred replicas, followed by serial (and, under zone
 * routing, zone-serial) fallbacks when too few replicas answer, and an
 * optional read-repair step.
 *
 * @param key the key to read; must be a valid key
 * @return the concatenation of all non-null replica values (may be empty)
 */
public List<Versioned<byte[]>> get(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    // Under zone routing a minimum number of zones must acknowledge the read.
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Blocking request used by the serial / zone-serial fallback actions.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
        public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
            return store.get(key);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    // Parallel fan-out; completes directly unless read repair needs the
    // responses, and can branch to the two "insufficient" recovery events.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                         : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             null,
                                                                                                                             null,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                       : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           timeoutMs,
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   repairReads ? Event.RESPONSES_RECEIVED
                                                                                                                                               : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all replica responses, dropping null value lists.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Fetches values for many keys via the GET_ALL pipeline: a parallel batch
 * fan-out, then a serial per-key pass for keys that did not reach their
 * required read count, then optional read repair.
 *
 * @param keys the keys to read; each must be a valid key
 * @return map from key to the versions found for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     clientZone));
    // Note: the parallel pass always transitions to INSUFFICIENT_SUCCESSES,
    // so the serial pass below runs unconditionally to top up any keys that
    // fell short.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            repairReads ? Event.RESPONSES_RECEIVED
                                                                        : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(repairReads)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     timeoutMs,
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Create a PipelineRoutedStore without slop stores; hinted handoff is
 * disabled on stores built through this constructor (see
 * {@link #isHintedHandoffEnabled()}).
 *
 * @param name The name of the store
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to non-blocking client
 * @param cluster Cluster definition
 * @param storeDef Store definition
 * @param repairReads Is read repair enabled?
 * @param clientZoneId Zone the client is in
 * @param timeoutMs Routing timeout
 * @param failureDetector Failure detector object
 */
public PipelineRoutedStore(String name,
                           Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           boolean repairReads,
                           int clientZoneId,
                           long timeoutMs,
                           FailureDetector failureDetector) {
    super(name,
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutMs,
          failureDetector,
          SystemTime.INSTANCE);
    this.clientZone = cluster.getZoneById(clientZoneId);
    // Zone routing is enabled exactly when the store definition selects the
    // zone routing strategy.
    this.zoneRoutingEnabled = storeDef.getRoutingStrategyType()
                                      .compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0;
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    // Fix: slopStores and handoffStrategy are final fields and must be
    // definitely assigned in every constructor (the original omitted them,
    // which does not compile).  Null here means hinted handoff is off.
    this.slopStores = null;
    this.handoffStrategy = null;
}
/**
 * Fetches all versions of {@code key}, applying {@code transforms} at each
 * replica, through the GET pipeline (parallel fan-out, serial and
 * zone-serial fallbacks, optional read repair).
 *
 * @param key the key to read; must be a valid key
 * @param transforms opaque transform bytes passed to each store, or null
 * @return the concatenation of all non-null replica values (may be empty)
 */
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
    // Read repair is disabled when a transform is supplied — presumably
    // because transformed results are not the raw replica values and cannot
    // be written back; TODO confirm against ReadRepair's contract.
    boolean allowReadRepair = repairReads && transforms == null;
    // Non-blocking request for the parallel phase.
    NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
        public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
            store.submitGetRequest(key, transforms, callback);
        }
    };
    // Blocking request for the serial / zone-serial fallback phases.
    StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
        public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
            return store.get(key, transforms);
        }
    };
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                    Event.CONFIGURED,
                                                                                                                    failureDetector,
                                                                                                                    storeDef.getRequiredReads(),
                                                                                                                    routingStrategy,
                                                                                                                    key,
                                                                                                                    clientZone));
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                             allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                             : Event.COMPLETED,
                                                                                                                             key,
                                                                                                                             failureDetector,
                                                                                                                             storeDef.getPreferredReads(),
                                                                                                                             storeDef.getRequiredReads(),
                                                                                                                             timeoutMs,
                                                                                                                             nonblockingStores,
                                                                                                                             nonblockingStoreRequest,
                                                                                                                             Event.INSUFFICIENT_SUCCESSES,
                                                                                                                             Event.INSUFFICIENT_ZONES));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                           allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                           : Event.COMPLETED,
                                                                                                                           key,
                                                                                                                           failureDetector,
                                                                                                                           innerStores,
                                                                                                                           storeDef.getPreferredReads(),
                                                                                                                           storeDef.getRequiredReads(),
                                                                                                                           blockingStoreRequest,
                                                                                                                           null));
    // NOTE(review): this ReadRepair construction omits the timeoutMs argument
    // that the non-transforms get() passes — the two overloads come from
    // different revisions; confirm which ReadRepair constructor is current.
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                           Event.COMPLETED,
                                                                                           storeDef.getPreferredReads(),
                                                                                           nonblockingStores,
                                                                                           readRepairer));
    if(zoneRoutingEnabled)
        pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
                                new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
                                                                                                                                   allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                                                                                                   : Event.COMPLETED,
                                                                                                                                   key,
                                                                                                                                   failureDetector,
                                                                                                                                   innerStores,
                                                                                                                                   blockingStoreRequest));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    // Flatten all replica responses, dropping null value lists.
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
        List<Versioned<byte[]>> value = response.getValue();
        if(value != null)
            results.addAll(value);
    }
    return results;
}
/**
 * Fetches values for many keys, applying per-key {@code transforms}, via
 * the GET_ALL pipeline (parallel batch pass, serial top-up pass, optional
 * read repair).
 *
 * @param keys the keys to read; each must be a valid key
 * @param transforms per-key transform bytes, or null/empty for none
 * @return map from key to the versions found for it
 * @throws VoldemortException if the pipeline records a fatal error
 */
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
                                                      Map<ByteArray, byte[]> transforms)
        throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    // Read repair only runs when no transforms are in play (see get()).
    boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
    GetAllPipelineData pipelineData = new GetAllPipelineData();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new GetAllConfigureNodes(pipelineData,
                                                     Event.CONFIGURED,
                                                     failureDetector,
                                                     storeDef.getPreferredReads(),
                                                     storeDef.getRequiredReads(),
                                                     routingStrategy,
                                                     keys,
                                                     transforms,
                                                     clientZone));
    // The parallel pass always hands off to the serial top-up pass.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelGetAllRequests(pipelineData,
                                                              Event.INSUFFICIENT_SUCCESSES,
                                                              failureDetector,
                                                              timeoutMs,
                                                              nonblockingStores));
    pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
                            new PerformSerialGetAllRequests(pipelineData,
                                                            allowReadRepair ? Event.RESPONSES_RECEIVED
                                                                            : Event.COMPLETED,
                                                            keys,
                                                            failureDetector,
                                                            innerStores,
                                                            storeDef.getPreferredReads(),
                                                            storeDef.getRequiredReads()));
    if(allowReadRepair)
        pipeline.addEventAction(Event.RESPONSES_RECEIVED,
                                new GetAllReadRepair(pipelineData,
                                                     Event.COMPLETED,
                                                     storeDef.getPreferredReads(),
                                                     nonblockingStores,
                                                     readRepairer));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    return pipelineData.getResult();
}
/**
 * Fetches only the version metadata for {@code key} via the GET_VERSIONS
 * pipeline. Unlike get(), this runs a single parallel phase with no serial
 * fallback, no read repair, and no recovery events.
 *
 * @param key the key whose versions to fetch; must be a valid key
 * @return all versions reported by the responding replicas
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
    if(zoneRoutingEnabled)
        pipelineData.setZonesRequired(storeDef.getZoneCountReads());
    else
        pipelineData.setZonesRequired(null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    // Trailing nulls: no custom request, no handoff payload, and no
    // insufficient-successes / insufficient-zones recovery transitions.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(pipelineData.getFatalError() != null)
        throw pipelineData.getFatalError();
    List<Version> results = new ArrayList<Version>();
    // NOTE(review): unlike get(), response values are not null-checked here
    // before addAll — confirm GET_VERSIONS responses can never carry null.
    for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
        results.addAll(response.getValue());
    return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Reports whether hinted handoff is active for this store.
 *
 * @return {@code true} exactly when slop stores were supplied at
 *         construction time
 */
public boolean isHintedHandoffEnabled() {
    return null != slopStores;
}
<<<<<<< MINE
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
=======
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
Event.ERROR,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
Event.HANDOFF_FINISHED,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
} else
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
>>>>>>> YOURS
/**
 * Closes every non-blocking node store, then the parent store. If one or
 * more node stores fail to close, the remaining stores are still closed
 * and the last failure is rethrown afterwards.
 */
@Override
public void close() {
    VoldemortException lastFailure = null;
    for(NonblockingStore nonblockingStore: nonblockingStores.values()) {
        try {
            nonblockingStore.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            lastFailure = e;
        }
    }
    if(lastFailure != null)
        throw lastFailure;
    // NOTE(review): super.close() is intentionally skipped when a node store
    // failed to close, matching the original control flow — confirm.
    super.close();
}
}
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
private final HintedHandoffStrategy handoffStrategy;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
public List<Versioned<byte[]>> get(final ByteArray key) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[]> store) {
return store.get(key);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(repairReads)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(repairReads)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param routingStrategy The strategy for choosing a node given a key
* @param requiredReads The minimum number of reads that must complete
* before the operation will return
* @param requiredWrites The minimum number of writes that must complete
* before the operation will return
* @param threadPool The threadpool to use
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
}
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
boolean allowReadRepair = repairReads && transforms == null;
NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitGetRequest(key, transforms, callback);
}
};
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
return store.get(key, transforms);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
nonblockingStoreRequest,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
transforms,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
public List<Version> getVersions(final ByteArray key) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
null,
null));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Version> results = new ArrayList<Version>();
for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
results.addAll(response.getValue());
return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
/**
 * Hinted handoff is enabled iff slop stores were supplied at construction
 * (see the constructor, which only builds a handoff strategy in that case).
 */
public boolean isHintedHandoffEnabled() {
    return slopStores != null;
}
<<<<<<< MINE
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
=======
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
Event.ERROR,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
Event.HANDOFF_FINISHED,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
} else
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
>>>>>>> YOURS
/**
 * Closes all nonblocking stores and then the parent store. Every store is
 * attempted even when one fails; the last VoldemortException observed is
 * rethrown after the loop (earlier failures are overwritten).
 */
@Override
public void close() {
    VoldemortException exception = null;
    for(NonblockingStore store: nonblockingStores.values()) {
        try {
            store.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            exception = e;
        }
    }
    if(exception != null)
        throw exception;
    super.close();
}
}
// NOTE(review): extraction artifact — the text "Unstructured" separated two concatenated copies of this file.
/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
private final HintedHandoffStrategy handoffStrategy;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
<<<<<<< MINE
=======
boolean allowReadRepair = repairReads && transforms == null;
NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitGetRequest(key, transforms, callback);
}
};
>>>>>>> YOURS
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
return store.get(key, transforms);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
transforms,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
public List<Version> getVersions(final ByteArray key) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(pipelineData,
Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
null,
null));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Version> results = new ArrayList<Version>();
for(Response<ByteArray, List<Version>> response: pipelineData.getResponses())
results.addAll(response.getValue());
return results;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
<<<<<<< MINE
public boolean isHintedHandoffEnabled() {
return slopStores != null;
}
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
=======
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
>>>>>>> YOURS
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
Event.ERROR,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
Event.HANDOFF_FINISHED,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
} else
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
@Override
public void close() {
VoldemortException exception = null;
for(NonblockingStore store: nonblockingStores.values()) {
try {
store.close();
} catch(VoldemortException e) {
exception = e;
}
}
if(exception != null)
throw exception;
super.close();
}
}/*
* Copyright 2008-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store.routed;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import voldemort.VoldemortException;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.cluster.Zone;
import voldemort.cluster.failuredetector.FailureDetector;
import voldemort.routing.RoutingStrategyType;
import voldemort.store.Store;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreRequest;
import voldemort.store.StoreUtils;
import voldemort.store.nonblockingstore.NonblockingStore;
import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.store.nonblockingstore.NonblockingStoreRequest;
import voldemort.store.routed.Pipeline.Event;
import voldemort.store.routed.Pipeline.Operation;
import voldemort.store.routed.action.ConfigureNodes;
import voldemort.store.routed.action.GetAllConfigureNodes;
import voldemort.store.routed.action.GetAllReadRepair;
import voldemort.store.routed.action.IncrementClock;
import voldemort.store.routed.action.PerformDeleteHintedHandoff;
import voldemort.store.routed.action.PerformPutHintedHandoff;
import voldemort.store.routed.action.PerformParallelGetAllRequests;
import voldemort.store.routed.action.PerformParallelPutRequests;
import voldemort.store.routed.action.PerformParallelRequests;
import voldemort.store.routed.action.PerformSerialGetAllRequests;
import voldemort.store.routed.action.PerformSerialPutRequests;
import voldemort.store.routed.action.PerformSerialRequests;
import voldemort.store.routed.action.PerformZoneSerialRequests;
import voldemort.store.routed.action.ReadRepair;
import voldemort.store.slop.HintedHandoff;
import voldemort.store.slop.HintedHandoffStrategy;
import voldemort.store.slop.HintedHandoffStrategyFactory;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.utils.SystemTime;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
/**
* A Store which multiplexes requests to different internal Stores
*
*
*/
public class PipelineRoutedStore extends RoutedStore {
private final Map<Integer, NonblockingStore> nonblockingStores;
private final Map<Integer, Store<ByteArray, Slop>> slopStores;
private final HintedHandoffStrategy handoffStrategy;
private Zone clientZone;
private boolean zoneRoutingEnabled;
/**
* Create a PipelineRoutedStore
*
* @param name The name of the store
* @param innerStores The mapping of node to client
* @param nonblockingStores
* @param slopStores The stores for hints
* @param cluster Cluster definition
* @param storeDef Store definition
* @param repairReads Is read repair enabled?
* @param clientZoneId Zone the client is in
* @param timeoutMs Routing timeout
* @param failureDetector Failure detector object
*/
public PipelineRoutedStore(String name,
Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
Map<Integer, NonblockingStore> nonblockingStores,
Map<Integer, Store<ByteArray, Slop>> slopStores,
Cluster cluster,
StoreDefinition storeDef,
boolean repairReads,
int clientZoneId,
long timeoutMs,
FailureDetector failureDetector) {
super(name,
innerStores,
cluster,
storeDef,
repairReads,
timeoutMs,
failureDetector,
SystemTime.INSTANCE);
this.clientZone = cluster.getZoneById(clientZoneId);
if(storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
zoneRoutingEnabled = true;
} else {
zoneRoutingEnabled = false;
}
this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
this.slopStores = slopStores;
if(storeDef.isHintedHandoffEnabled()) {
HintedHandoffStrategyFactory factory = new HintedHandoffStrategyFactory(zoneRoutingEnabled,
clientZone.getId());
this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
} else {
this.handoffStrategy = null;
}
}
public List<Versioned<byte[]>> get(final ByteArray key, final byte[] transforms) {
StoreUtils.assertValidKey(key);
BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
final Pipeline pipeline = new Pipeline(Operation.GET, timeoutMs, TimeUnit.MILLISECONDS);
<<<<<<< MINE
=======
boolean allowReadRepair = repairReads && transforms == null;
NonblockingStoreRequest nonblockingStoreRequest = new NonblockingStoreRequest() {
public void submit(Node node, NonblockingStore store, NonblockingStoreCallback callback) {
store.submitGetRequest(key, transforms, callback);
}
};
>>>>>>> YOURS
StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() {
public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) {
return store.get(key, transforms);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredReads(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
timeoutMs,
nonblockingStores,
null,
null,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
blockingStoreRequest,
null));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingStoreRequest));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
for(Response<ByteArray, List<Versioned<byte[]>>> response: pipelineData.getResponses()) {
List<Versioned<byte[]>> value = response.getValue();
if(value != null)
results.addAll(value);
}
return results;
}
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
Map<ByteArray, byte[]> transforms)
throws VoldemortException {
StoreUtils.assertValidKeys(keys);
boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);
GetAllPipelineData pipelineData = new GetAllPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountReads());
else
pipelineData.setZonesRequired(null);
Pipeline pipeline = new Pipeline(Operation.GET_ALL, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.addEventAction(Event.STARTED,
new GetAllConfigureNodes(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getPreferredReads(),
storeDef.getRequiredReads(),
routingStrategy,
keys,
transforms,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelGetAllRequests(pipelineData,
Event.INSUFFICIENT_SUCCESSES,
failureDetector,
timeoutMs,
nonblockingStores));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialGetAllRequests(pipelineData,
<<<<<<< MINE
repairReads ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
allowReadRepair ? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
>>>>>>> YOURS
keys,
failureDetector,
innerStores,
storeDef.getPreferredReads(),
storeDef.getRequiredReads()));
if(allowReadRepair)
pipeline.addEventAction(Event.RESPONSES_RECEIVED,
new GetAllReadRepair(pipelineData,
Event.COMPLETED,
storeDef.getPreferredReads(),
timeoutMs,
nonblockingStores,
readRepairer));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
return pipelineData.getResult();
}
/**
 * Fetches only the versions (no values) known for the given key, using a
 * single parallel round of requests against the preferred nodes.
 *
 * @param key the key whose versions are requested; must be a valid key
 * @return all versions reported by the responding nodes
 */
public List<Version> getVersions(final ByteArray key) {
    StoreUtils.assertValidKey(key);
    BasicPipelineData<List<Version>> data = new BasicPipelineData<List<Version>>();
    // Zone-count requirement only applies when zone routing is active.
    data.setZonesRequired(zoneRoutingEnabled ? storeDef.getZoneCountReads() : null);
    Pipeline pipeline = new Pipeline(Operation.GET_VERSIONS, timeoutMs, TimeUnit.MILLISECONDS);
    pipeline.addEventAction(Event.STARTED,
                            new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>(data,
                                                                                                Event.CONFIGURED,
                                                                                                failureDetector,
                                                                                                storeDef.getRequiredReads(),
                                                                                                routingStrategy,
                                                                                                key,
                                                                                                clientZone));
    // Single parallel round; no read repair, serial fallback, or zone
    // fallback events are wired up for version fetches.
    pipeline.addEventAction(Event.CONFIGURED,
                            new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>(data,
                                                                                                         Event.COMPLETED,
                                                                                                         key,
                                                                                                         failureDetector,
                                                                                                         storeDef.getPreferredReads(),
                                                                                                         storeDef.getRequiredReads(),
                                                                                                         timeoutMs,
                                                                                                         nonblockingStores,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null,
                                                                                                         null));
    pipeline.addEvent(Event.STARTED);
    pipeline.execute();
    if(data.getFatalError() != null)
        throw data.getFatalError();
    // Flatten every node's response into one result list.
    List<Version> versions = new ArrayList<Version>();
    for(Response<ByteArray, List<Version>> response: data.getResponses())
        versions.addAll(response.getValue());
    return versions;
}
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
StoreUtils.assertValidKey(key);
BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.DELETE, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
StoreRequest<Boolean> blockingDelete = new StoreRequest<Boolean>() {
public Boolean request(Store<ByteArray, byte[], byte[]> store) {
return store.delete(key, version);
}
};
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformParallelRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff,
version,
Event.INSUFFICIENT_SUCCESSES,
Event.INSUFFICIENT_ZONES));
pipeline.addEventAction(Event.INSUFFICIENT_SUCCESSES,
new PerformSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
failureDetector,
innerStores,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
blockingDelete,
null));
if(zoneRoutingEnabled)
pipeline.addEventAction(Event.INSUFFICIENT_ZONES,
new PerformZoneSerialRequests<Boolean, BasicPipelineData<Boolean>>(pipelineData,
<<<<<<< MINE
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
=======
Event.COMPLETED,
>>>>>>> YOURS
key,
failureDetector,
innerStores,
blockingDelete));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff(pipelineData,
Event.COMPLETED,
key,
version,
hintedHandoff));
pipeline.addEventAction(Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData,
Event.ERROR,
key,
version,
hintedHandoff));
}
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
for(Response<ByteArray, Boolean> response: pipelineData.getResponses()) {
if(response.getValue().booleanValue())
return true;
}
return false;
}
<<<<<<< MINE
public boolean isHintedHandoffEnabled() {
return slopStores != null;
}
public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
=======
public void put(ByteArray key, Versioned<byte[]> versioned, byte[] transforms)
throws VoldemortException {
>>>>>>> YOURS
StoreUtils.assertValidKey(key);
PutPipelineData pipelineData = new PutPipelineData();
if(zoneRoutingEnabled)
pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
else
pipelineData.setZonesRequired(null);
pipelineData.setStartTimeNs(System.nanoTime());
pipelineData.setStoreName(name);
Pipeline pipeline = new Pipeline(Operation.PUT, timeoutMs, TimeUnit.MILLISECONDS);
pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());
HintedHandoff hintedHandoff = null;
if(isHintedHandoffEnabled())
hintedHandoff = new HintedHandoff(failureDetector,
slopStores,
handoffStrategy,
pipelineData.getFailedNodes());
pipeline.addEventAction(Event.STARTED,
new ConfigureNodes<Void, PutPipelineData>(pipelineData,
Event.CONFIGURED,
failureDetector,
storeDef.getRequiredWrites(),
routingStrategy,
key,
clientZone));
pipeline.addEventAction(Event.CONFIGURED,
new PerformSerialPutRequests(pipelineData,
isHintedHandoffEnabled()
? Event.RESPONSES_RECEIVED
: Event.COMPLETED,
key,
transforms,
failureDetector,
innerStores,
storeDef.getRequiredWrites(),
versioned,
time,
Event.MASTER_DETERMINED));
pipeline.addEventAction(Event.MASTER_DETERMINED,
new PerformParallelPutRequests(pipelineData,
Event.RESPONSES_RECEIVED,
key,
transforms,
failureDetector,
storeDef.getPreferredWrites(),
storeDef.getRequiredWrites(),
timeoutMs,
nonblockingStores,
hintedHandoff));
if(isHintedHandoffEnabled()) {
pipeline.addEventAction(Event.ABORTED, new PerformPutHintedHandoff(pipelineData,
Event.ERROR,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff(pipelineData,
Event.HANDOFF_FINISHED,
key,
versioned,
hintedHandoff,
time));
pipeline.addEventAction(Event.HANDOFF_FINISHED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
} else
pipeline.addEventAction(Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData,
Event.COMPLETED,
versioned,
time));
pipeline.addEvent(Event.STARTED);
pipeline.execute();
if(pipelineData.getFatalError() != null)
throw pipelineData.getFatalError();
}
/**
 * Closes every nonblocking store and then the parent store. All stores are
 * attempted even if some fail; the most recent failure is rethrown.
 */
@Override
public void close() {
    VoldemortException lastError = null;
    for(NonblockingStore nonblockingStore: nonblockingStores.values()) {
        try {
            nonblockingStore.close();
        } catch(VoldemortException e) {
            // Remember the failure but keep closing the remaining stores.
            lastError = e;
        }
    }
    if(lastError != null)
        throw lastError;
    super.close();
}
}